package swarm

import (
	"context"
	"errors"
	"fmt"
	"io"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/metrics"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/transport"

	logging "github.com/ipfs/go-log/v2"
	ma "github.com/multiformats/go-multiaddr"
	madns "github.com/multiformats/go-multiaddr-dns"
)

const (
	// defaultDialTimeout is the maximum duration a Dial is allowed to take.
	// This includes the time between dialing the raw network connection,
	// protocol selection as well as the handshake, if applicable.
	defaultDialTimeout = 15 * time.Second

	// defaultDialTimeoutLocal is the maximum duration a Dial to a local network address
	// is allowed to take.
	// This includes the time between dialing the raw network connection,
	// protocol selection as well as the handshake, if applicable.
	defaultDialTimeoutLocal = 5 * time.Second

	defaultNewStreamTimeout = 15 * time.Second
)

var log = logging.Logger("swarm2")

// ErrSwarmClosed is returned when one attempts to operate on a closed swarm.
var ErrSwarmClosed = errors.New("swarm closed")

// ErrAddrFiltered is returned when trying to register a connection to a
// filtered address. You shouldn't see this error unless some underlying
// transport is misbehaving.
var ErrAddrFiltered = errors.New("address filtered")

// ErrDialTimeout is returned when a dial times out due to the global timeout.
var ErrDialTimeout = errors.New("dial timed out")

type Option func(*Swarm) error

// WithConnectionGater sets a connection gater
func WithConnectionGater(gater connmgr.ConnectionGater) Option {
	return func(s *Swarm) error {
		s.gater = gater
		return nil
	}
}

// WithMultiaddrResolver sets a custom multiaddress resolver
func WithMultiaddrResolver(resolver network.MultiaddrDNSResolver) Option {
	return func(s *Swarm) error {
		s.multiaddrResolver = resolver
		return nil
	}
}

// WithMetrics sets a metrics reporter
func WithMetrics(reporter metrics.Reporter) Option {
	return func(s *Swarm) error {
		s.bwc = reporter
		return nil
	}
}

// WithMetricsTracer sets a MetricsTracer for swarm metrics
func WithMetricsTracer(t MetricsTracer) Option {
	return func(s *Swarm) error {
		s.metricsTracer = t
		return nil
	}
}

// WithDialTimeout sets the maximum duration a dial is allowed to take
func WithDialTimeout(t time.Duration) Option {
	return func(s *Swarm) error {
		s.dialTimeout = t
		return nil
	}
}

// WithDialTimeoutLocal sets the maximum duration a dial to a local network address is allowed to take
func WithDialTimeoutLocal(t time.Duration) Option {
	return func(s *Swarm) error {
		s.dialTimeoutLocal = t
		return nil
	}
}

// WithResourceManager sets the ResourceManager used by the swarm
func WithResourceManager(m network.ResourceManager) Option {
	return func(s *Swarm) error {
		s.rcmgr = m
		return nil
	}
}

// WithDialRanker configures swarm to use d as the DialRanker
func WithDialRanker(d network.DialRanker) Option {
	return func(s *Swarm) error {
		if d == nil {
			return errors.New("swarm: dial ranker cannot be nil")
		}
		s.dialRanker = d
		return nil
	}
}

// WithUDPBlackHoleSuccessCounter configures the swarm to use the provided counter for UDP black hole detection.
// f.N is the size of the sliding window used to evaluate the black hole state, and f.MinSuccesses is the
// minimum number of successes out of those N dials required to not block requests.
func WithUDPBlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option {
	return func(s *Swarm) error {
		s.udpBHF = f
		return nil
	}
}

// WithIPv6BlackHoleSuccessCounter configures the swarm to use the provided counter for IPv6 black hole detection.
// f.N is the size of the sliding window used to evaluate the black hole state, and f.MinSuccesses is the
// minimum number of successes out of those N dials required to not block requests.
func WithIPv6BlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option {
	return func(s *Swarm) error {
		s.ipv6BHF = f
		return nil
	}
}
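
// A minimal override sketch (hypothetical values; NewSwarm defaults both
// counters to N: 100, MinSuccesses: 5):
//
//	opt := WithUDPBlackHoleSuccessCounter(&BlackHoleSuccessCounter{N: 50, MinSuccesses: 10, Name: "UDP"})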

// WithReadOnlyBlackHoleDetector configures the swarm to use the black hole detector in
// read-only mode. In read-only mode, dial requests are refused while the detector state is
// unknown and no updates are made to the detector state. This is useful for services like
// AutoNAT that care about accurately providing reachability info.
func WithReadOnlyBlackHoleDetector() Option {
	return func(s *Swarm) error {
		s.readOnlyBHD = true
		return nil
	}
}

// Swarm is a connection muxer: it opens and closes connections to other
// peers, multiplexes streams over those connections, and notifies
// registered Notifiees about connection state changes.
type Swarm struct {
	nextConnID   atomic.Uint64
	nextStreamID atomic.Uint64

	// Close refcount. This allows us to fully wait for the swarm to be torn
	// down before continuing.
	refs sync.WaitGroup

	emitter event.Emitter

	rcmgr network.ResourceManager

	local peer.ID
	peers peerstore.Peerstore

	dialTimeout      time.Duration
	dialTimeoutLocal time.Duration

	conns struct {
		sync.RWMutex
		m map[peer.ID][]*Conn
	}

	listeners struct {
		sync.RWMutex

		ifaceListenAddres []ma.Multiaddr
		cacheEOL          time.Time

		m map[transport.Listener]struct{}
	}

	notifs struct {
		sync.RWMutex
		m map[network.Notifiee]struct{}
	}

	directConnNotifs struct {
		sync.Mutex
		m map[peer.ID][]chan struct{}
	}

	transports struct {
		sync.RWMutex
		m map[int]transport.Transport
	}

	multiaddrResolver network.MultiaddrDNSResolver

	// stream handlers
	streamh atomic.Pointer[network.StreamHandler]

	// dialing helpers
	dsync   *dialSync
	backf   DialBackoff
	limiter *dialLimiter
	gater   connmgr.ConnectionGater

	closeOnce sync.Once
	ctx       context.Context // is canceled when Close is called
	ctxCancel context.CancelFunc

	bwc           metrics.Reporter
	metricsTracer MetricsTracer

	dialRanker network.DialRanker

	connectednessEventEmitter *connectednessEventEmitter
	udpBHF                    *BlackHoleSuccessCounter
	ipv6BHF                   *BlackHoleSuccessCounter
	bhd                       *blackHoleDetector
	readOnlyBHD               bool
}

// NewSwarm constructs a Swarm.
func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts ...Option) (*Swarm, error) {
	emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithCancel(context.Background())
	s := &Swarm{
		local:             local,
		peers:             peers,
		emitter:           emitter,
		ctx:               ctx,
		ctxCancel:         cancel,
		dialTimeout:       defaultDialTimeout,
		dialTimeoutLocal:  defaultDialTimeoutLocal,
		multiaddrResolver: ResolverFromMaDNS{madns.DefaultResolver},
		dialRanker:        DefaultDialRanker,

		// A black hole is a binary property. On a network, if UDP dials are blocked or there is
		// no IPv6 connectivity, all dials will fail. So a low success rate of 5 out of 100 dials
		// is good enough.
		udpBHF:  &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"},
		ipv6BHF: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "IPv6"},
	}

	s.conns.m = make(map[peer.ID][]*Conn)
	s.listeners.m = make(map[transport.Listener]struct{})
	s.transports.m = make(map[int]transport.Transport)
	s.notifs.m = make(map[network.Notifiee]struct{})
	s.directConnNotifs.m = make(map[peer.ID][]chan struct{})
	s.connectednessEventEmitter = newConnectednessEventEmitter(s.Connectedness, emitter)

	for _, opt := range opts {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	if s.rcmgr == nil {
		s.rcmgr = &network.NullResourceManager{}
	}

	s.dsync = newDialSync(s.dialWorkerLoop)

	s.limiter = newDialLimiter(s.dialAddr)
	s.backf.init(s.ctx)

	s.bhd = &blackHoleDetector{
		udp:      s.udpBHF,
		ipv6:     s.ipv6BHF,
		mt:       s.metricsTracer,
		readOnly: s.readOnlyBHD,
	}
	return s, nil
}
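
// A minimal construction sketch (hypothetical caller code; localID, ps, and eb
// are a peer.ID, a peerstore.Peerstore, and an event.Bus supplied by the caller):
//
//	s, err := NewSwarm(localID, ps, eb,
//		WithDialTimeout(30*time.Second),
//		WithDialRanker(DefaultDialRanker),
//	)
//	if err != nil {
//		return err
//	}
//	defer s.Close()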

func (s *Swarm) Close() error {
	s.closeOnce.Do(s.close)
	return nil
}

// Done returns a channel that is closed when the swarm is closed.
func (s *Swarm) Done() <-chan struct{} {
	return s.ctx.Done()
}

func (s *Swarm) close() {
	s.ctxCancel()

	// Prevents new connections and/or listeners from being added to the swarm.
	s.listeners.Lock()
	listeners := s.listeners.m
	s.listeners.m = nil
	s.listeners.Unlock()

	s.conns.Lock()
	conns := s.conns.m
	s.conns.m = nil
	s.conns.Unlock()

	// Lots of goroutines but we might as well do this in parallel. We want to shut down as fast as
	// possible.
	s.refs.Add(len(listeners))
	for l := range listeners {
		go func(l transport.Listener) {
			defer s.refs.Done()
			if err := l.Close(); err != nil && err != transport.ErrListenerClosed {
				log.Errorf("error when shutting down listener: %s", err)
			}
		}(l)
	}

	for _, cs := range conns {
		for _, c := range cs {
			go func(c *Conn) {
				if err := c.Close(); err != nil {
					log.Errorf("error when shutting down connection: %s", err)
				}
			}(c)
		}
	}

	// Wait for everything to finish.
	s.refs.Wait()
	s.connectednessEventEmitter.Close()
	s.emitter.Close()

	// Now close out any transports (if necessary). Do this after closing
	// all connections/listeners.
	s.transports.Lock()
	transports := s.transports.m
	s.transports.m = nil
	s.transports.Unlock()

	// Dedup transports that may be listening on multiple protocols
	transportsToClose := make(map[transport.Transport]struct{}, len(transports))
	for _, t := range transports {
		transportsToClose[t] = struct{}{}
	}

	var wg sync.WaitGroup
	for t := range transportsToClose {
		if closer, ok := t.(io.Closer); ok {
			wg.Add(1)
			go func(c io.Closer) {
				defer wg.Done()
				if err := c.Close(); err != nil {
					log.Errorf("error when closing down transport %T: %s", c, err)
				}
			}(closer)
		}
	}
	wg.Wait()
}

func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn, error) {
	var (
		p    = tc.RemotePeer()
		addr = tc.RemoteMultiaddr()
	)

	// create the Stat object, initializing with the underlying connection Stat if available
	var stat network.ConnStats
	if cs, ok := tc.(network.ConnStat); ok {
		stat = cs.Stat()
	}
	stat.Direction = dir
	stat.Opened = time.Now()
	isLimited := stat.Limited

	// Wrap and register the connection.
	c := &Conn{
		conn:  tc,
		swarm: s,
		stat:  stat,
		id:    s.nextConnID.Add(1),
	}

	// we ONLY check upgraded connections here so we can send them a Disconnect message.
	// If we do this in the Upgrader, we will not be able to do this.
	if s.gater != nil {
		if allow, _ := s.gater.InterceptUpgraded(c); !allow {
			err := tc.CloseWithError(network.ConnGated)
			if err != nil {
				log.Warnf("failed to close connection with peer %s and addr %s; err: %s", p, addr, err)
			}
			return nil, ErrGaterDisallowedConnection
		}
	}

	// Add the public key.
	if pk := tc.RemotePublicKey(); pk != nil {
		s.peers.AddPubKey(p, pk)
	}

	// Clear any backoffs
	s.backf.Clear(p)

	// Finally, add the peer.
	s.conns.Lock()
	// Check if we're still online
	if s.conns.m == nil {
		s.conns.Unlock()
		tc.Close()
		return nil, ErrSwarmClosed
	}

	c.streams.m = make(map[*Stream]struct{})
	s.conns.m[p] = append(s.conns.m[p], c)
	// Add two swarm refs:
	// * One will be decremented after the close notifications fire in Conn.doClose
	// * The other will be decremented when Conn.start exits.
	s.refs.Add(2)
	// Take the notification lock before releasing the conns lock to block
	// Disconnect notifications until after the Connect notifications are done.
	// This lock also ensures that swarm.refs.Wait() exits after we have
	// enqueued the peer connectedness changed notification.
	// TODO: Fix this fragility by taking a swarm ref for dial worker loop
	c.notifyLk.Lock()
	s.conns.Unlock()

	s.connectednessEventEmitter.AddConn(p)

	if !isLimited {
		// Notify goroutines waiting for a direct connection
		//
		// Goroutines interested in waiting for a direct connection first acquire this lock
		// and then acquire s.conns.RLock. Do not acquire this lock before conns.Unlock to
		// prevent deadlock.
		s.directConnNotifs.Lock()
		for _, ch := range s.directConnNotifs.m[p] {
			close(ch)
		}
		delete(s.directConnNotifs.m, p)
		s.directConnNotifs.Unlock()
	}
	s.notifyAll(func(f network.Notifiee) {
		f.Connected(s, c)
	})
	c.notifyLk.Unlock()

	c.start()
	return c, nil
}

// Peerstore returns this swarm's internal Peerstore.
func (s *Swarm) Peerstore() peerstore.Peerstore {
	return s.peers
}

// SetStreamHandler assigns the handler for new streams.
func (s *Swarm) SetStreamHandler(handler network.StreamHandler) {
	s.streamh.Store(&handler)
}

// StreamHandler gets the handler for new streams.
func (s *Swarm) StreamHandler() network.StreamHandler {
	handler := s.streamh.Load()
	if handler == nil {
		return nil
	}
	return *handler
}
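
// A minimal handler registration sketch (hypothetical caller code):
//
//	s.SetStreamHandler(func(str network.Stream) {
//		defer str.Close()
//		// read from / write to str
//	})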

// NewStream creates a new stream on any available connection to peer, dialing
// if necessary.
// Use network.WithAllowLimitedConn to open a stream over a limited (relayed)
// connection.
func (s *Swarm) NewStream(ctx context.Context, p peer.ID) (network.Stream, error) {
	log.Debugf("[%s] opening stream to peer [%s]", s.local, p)

	// Algorithm:
	// 1. Find the best connection, otherwise, dial.
	// 2. If the best connection is limited, wait for a direct conn via conn
	//    reversal or hole punching.
	// 3. Try opening a stream.
	// 4. If the underlying connection is, in fact, closed, close the outer
	//    connection and try again. We do this in case we have a closed
	//    connection but don't notice it until we actually try to open a
	//    stream.
	//
	// TODO: Try all connections even if we get an error opening a stream on
	// a non-closed connection.
	numDials := 0
	for {
		c := s.bestConnToPeer(p)
		if c == nil {
			if nodial, _ := network.GetNoDial(ctx); !nodial {
				numDials++
				if numDials > DialAttempts {
					return nil, errors.New("max dial attempts exceeded")
				}
				var err error
				c, err = s.dialPeer(ctx, p)
				if err != nil {
					return nil, err
				}
			} else {
				return nil, network.ErrNoConn
			}
		}

		limitedAllowed, _ := network.GetAllowLimitedConn(ctx)
		if !limitedAllowed && c.Stat().Limited {
			var err error
			c, err = s.waitForDirectConn(ctx, p)
			if err != nil {
				log.Debugf("failed to get direct connection to a limited peer %s: %s", p, err)
				return nil, err
			}
		}

		str, err := c.NewStream(ctx)
		if err != nil {
			if c.conn.IsClosed() {
				continue
			}
			return nil, err
		}
		return str, nil
	}
}
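
// A minimal caller-side sketch (hypothetical; s is a *Swarm and p a peer.ID the
// caller already knows). Opening a stream over a limited (relayed) connection
// must be explicitly allowed via the context:
//
//	ctx := network.WithAllowLimitedConn(context.Background(), "example")
//	str, err := s.NewStream(ctx, p)
//	if err != nil {
//		return err
//	}
//	defer str.Close()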

// waitForDirectConn waits for a direct connection established through hole punching or connection reversal.
func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error) {
	s.directConnNotifs.Lock()
	c := s.bestConnToPeer(p)
	if c == nil {
		s.directConnNotifs.Unlock()
		return nil, network.ErrNoConn
	} else if !c.Stat().Limited {
		s.directConnNotifs.Unlock()
		return c, nil
	}

	// Wait for limited connection to upgrade to a direct connection either by
	// connection reversal or hole punching.
	ch := make(chan struct{})
	s.directConnNotifs.m[p] = append(s.directConnNotifs.m[p], ch)
	s.directConnNotifs.Unlock()

	// apply the DialPeer timeout
	ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
	defer cancel()

	// Wait for notification.
	select {
	case <-ctx.Done():
		// Remove ourselves from the notification list
		s.directConnNotifs.Lock()
		defer s.directConnNotifs.Unlock()

		s.directConnNotifs.m[p] = slices.DeleteFunc(
			s.directConnNotifs.m[p],
			func(notif chan struct{}) bool { return notif == ch },
		)
		if len(s.directConnNotifs.m[p]) == 0 {
			delete(s.directConnNotifs.m, p)
		}
		return nil, ctx.Err()
	case <-ch:
		// We do not need to remove ourselves from the list here as the notifier
		// clears the map entry
		c := s.bestConnToPeer(p)
		if c == nil {
			return nil, network.ErrNoConn
		}
		if c.Stat().Limited {
			return nil, network.ErrLimitedConn
		}
		return c, nil
	}
}

// ConnsToPeer returns all the live connections to peer.
func (s *Swarm) ConnsToPeer(p peer.ID) []network.Conn {
	// TODO: Consider sorting the connection list best to worst. Currently,
	// it's sorted oldest to newest.
	s.conns.RLock()
	defer s.conns.RUnlock()
	conns := s.conns.m[p]
	output := make([]network.Conn, len(conns))
	for i, c := range conns {
		output[i] = c
	}
	return output
}

// isBetterConn reports whether a should be preferred over b.
func isBetterConn(a, b *Conn) bool {
	// If one is limited and not the other, prefer the unlimited connection.
	aLimited := a.Stat().Limited
	bLimited := b.Stat().Limited
	if aLimited != bLimited {
		return !aLimited
	}

	// If one is direct and not the other, prefer the direct connection.
	aDirect := isDirectConn(a)
	bDirect := isDirectConn(b)
	if aDirect != bDirect {
		return aDirect
	}

	// Otherwise, prefer the connection with more open streams.
	a.streams.Lock()
	aLen := len(a.streams.m)
	a.streams.Unlock()

	b.streams.Lock()
	bLen := len(b.streams.m)
	b.streams.Unlock()

	if aLen != bLen {
		return aLen > bLen
	}

	// finally, pick the last connection.
	return true
}

// bestConnToPeer returns the best connection to peer.
func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
	// TODO: Prefer some transports over others.
	// For now, prefers direct connections over relayed connections.
	// For tie-breaking, select the newest non-closed connection with the most streams.
	s.conns.RLock()
	defer s.conns.RUnlock()

	var best *Conn
	for _, c := range s.conns.m[p] {
		if c.conn.IsClosed() {
			// We *will* garbage collect this soon anyways.
			continue
		}
		if best == nil || isBetterConn(c, best) {
			best = c
		}
	}
	return best
}

// bestAcceptableConnToPeer returns the best acceptable connection, considering the passed in ctx.
// If network.WithForceDirectDial is used, it only returns a direct connection, ignoring
// any limited (relayed) connections to the peer.
func (s *Swarm) bestAcceptableConnToPeer(ctx context.Context, p peer.ID) *Conn {
	conn := s.bestConnToPeer(p)

	forceDirect, _ := network.GetForceDirectDial(ctx)
	if forceDirect && !isDirectConn(conn) {
		return nil
	}
	return conn
}

func isDirectConn(c *Conn) bool {
	return c != nil && !c.conn.Transport().Proxy()
}

// Connectedness returns our "connectedness" state with the given peer.
//
// To check if we have an open connection, use `s.Connectedness(p) ==
// network.Connected`.
func (s *Swarm) Connectedness(p peer.ID) network.Connectedness {
	s.conns.RLock()
	defer s.conns.RUnlock()

	return s.connectednessUnlocked(p)
}

// connectednessUnlocked returns the connectedness of a peer.
func (s *Swarm) connectednessUnlocked(p peer.ID) network.Connectedness {
	var haveLimited bool
	for _, c := range s.conns.m[p] {
		if c.IsClosed() {
			// These will be garbage collected soon
			continue
		}
		if c.Stat().Limited {
			haveLimited = true
		} else {
			return network.Connected
		}
	}
	if haveLimited {
		return network.Limited
	}
	return network.NotConnected
}
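
// A small sketch of interpreting the result (hypothetical caller code):
//
//	switch s.Connectedness(p) {
//	case network.Connected:
//		// at least one non-limited connection is open
//	case network.Limited:
//		// only limited (e.g. relayed) connections are open
//	case network.NotConnected:
//		// no usable connection
//	}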

// Conns returns a slice of all connections.
func (s *Swarm) Conns() []network.Conn {
	s.conns.RLock()
	defer s.conns.RUnlock()

	conns := make([]network.Conn, 0, len(s.conns.m))
	for _, cs := range s.conns.m {
		for _, c := range cs {
			conns = append(conns, c)
		}
	}
	return conns
}

// ClosePeer closes all connections to the given peer.
func (s *Swarm) ClosePeer(p peer.ID) error {
	conns := s.ConnsToPeer(p)
	switch len(conns) {
	case 0:
		return nil
	case 1:
		return conns[0].Close()
	default:
		errCh := make(chan error)
		for _, c := range conns {
			go func(c network.Conn) {
				errCh <- c.Close()
			}(c)
		}

		var errs []string
		for range conns {
			err := <-errCh
			if err != nil {
				errs = append(errs, err.Error())
			}
		}
		if len(errs) > 0 {
			return fmt.Errorf("when disconnecting from peer %s: %s", p, strings.Join(errs, ", "))
		}
		return nil
	}
}

// Peers returns a copy of the set of peers swarm is connected to.
func (s *Swarm) Peers() []peer.ID {
	s.conns.RLock()
	defer s.conns.RUnlock()
	peers := make([]peer.ID, 0, len(s.conns.m))
	for p := range s.conns.m {
		peers = append(peers, p)
	}

	return peers
}

// LocalPeer returns the local peer swarm is associated to.
func (s *Swarm) LocalPeer() peer.ID {
	return s.local
}

// Backoff returns the DialBackoff object for this swarm.
func (s *Swarm) Backoff() *DialBackoff {
	return &s.backf
}

// notifyAll sends a signal to all Notifiees
func (s *Swarm) notifyAll(notify func(network.Notifiee)) {
	s.notifs.RLock()
	for f := range s.notifs.m {
		notify(f)
	}
	s.notifs.RUnlock()
}

// Notify signs up Notifiee to receive signals when events happen
func (s *Swarm) Notify(f network.Notifiee) {
	s.notifs.Lock()
	s.notifs.m[f] = struct{}{}
	s.notifs.Unlock()
}

// StopNotify unregisters Notifiee from receiving signals
func (s *Swarm) StopNotify(f network.Notifiee) {
	s.notifs.Lock()
	delete(s.notifs.m, f)
	s.notifs.Unlock()
}

func (s *Swarm) removeConn(c *Conn) {
	p := c.RemotePeer()

	s.conns.Lock()
	cs := s.conns.m[p]
	for i, ci := range cs {
		if ci == c {
			// NOTE: We're intentionally preserving order.
			// This way, connections to a peer are always
			// sorted oldest to newest.
			copy(cs[i:], cs[i+1:])
			cs[len(cs)-1] = nil
			s.conns.m[p] = cs[:len(cs)-1]
			break
		}
	}
	if len(s.conns.m[p]) == 0 {
		delete(s.conns.m, p)
	}
	s.conns.Unlock()
}

// String returns a string representation of Network.
func (s *Swarm) String() string {
	return fmt.Sprintf("<Swarm %s>", s.LocalPeer())
}

// ResourceManager returns the ResourceManager associated with the swarm.
func (s *Swarm) ResourceManager() network.ResourceManager {
	return s.rcmgr
}

// Swarm is a Network.
var (
	_ network.Network            = (*Swarm)(nil)
	_ transport.TransportNetwork = (*Swarm)(nil)
)

type connWithMetrics struct {
	transport.CapableConn
	opened        time.Time
	dir           network.Direction
	metricsTracer MetricsTracer
	once          sync.Once
	closeErr      error
}

func wrapWithMetrics(capableConn transport.CapableConn, metricsTracer MetricsTracer, opened time.Time, dir network.Direction) *connWithMetrics {
	c := &connWithMetrics{CapableConn: capableConn, opened: opened, dir: dir, metricsTracer: metricsTracer}
	c.metricsTracer.OpenedConnection(c.dir, c.RemotePublicKey(), c.ConnState(), c.LocalMultiaddr())
	return c
}

func (c *connWithMetrics) completedHandshake() {
	c.metricsTracer.CompletedHandshake(time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
}

func (c *connWithMetrics) Close() error {
	c.once.Do(func() {
		c.metricsTracer.ClosedConnection(c.dir, time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
		c.closeErr = c.CapableConn.Close()
	})
	return c.closeErr
}

func (c *connWithMetrics) CloseWithError(errCode network.ConnErrorCode) error {
	c.once.Do(func() {
		c.metricsTracer.ClosedConnection(c.dir, time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
		c.closeErr = c.CapableConn.CloseWithError(errCode)
	})
	return c.closeErr
}

func (c *connWithMetrics) Stat() network.ConnStats {
	if cs, ok := c.CapableConn.(network.ConnStat); ok {
		return cs.Stat()
	}
	return network.ConnStats{}
}

var _ network.ConnStat = &connWithMetrics{}

// ResolverFromMaDNS adapts a madns.Resolver to the network.MultiaddrDNSResolver interface.
type ResolverFromMaDNS struct {
	*madns.Resolver
}

var _ network.MultiaddrDNSResolver = ResolverFromMaDNS{}

// startsWithDNSADDR reports whether the first component of the multiaddr is a /dnsaddr component.
func startsWithDNSADDR(m ma.Multiaddr) bool {
	if m == nil {
		return false
	}

	isDNSADDR := false
	// Using ForEach to avoid allocating
	ma.ForEach(m, func(c ma.Component) bool {
		isDNSADDR = c.Protocol().Code == ma.P_DNSADDR
		return false
	})
	return isDNSADDR
}

// ResolveDNSAddr implements MultiaddrDNSResolver
func (r ResolverFromMaDNS) ResolveDNSAddr(ctx context.Context, expectedPeerID peer.ID, maddr ma.Multiaddr, recursionLimit int, outputLimit int) ([]ma.Multiaddr, error) {
	if outputLimit <= 0 {
		return nil, nil
	}
	if recursionLimit <= 0 {
		return []ma.Multiaddr{maddr}, nil
	}
	var resolved, toResolve []ma.Multiaddr
	addrs, err := r.Resolve(ctx, maddr)
	if err != nil {
		return nil, err
	}
	if len(addrs) > outputLimit {
		addrs = addrs[:outputLimit]
	}

	for _, addr := range addrs {
		if startsWithDNSADDR(addr) {
			toResolve = append(toResolve, addr)
		} else {
			resolved = append(resolved, addr)
		}
	}

	for i, addr := range toResolve {
		// Set the nextOutputLimit to:
		//   outputLimit
		//   - len(resolved)          // What we already have resolved
		//   - (len(toResolve) - i)   // How many addresses we have left to resolve
		//   + 1                      // The current address we are resolving
		// This assumes that each DNSADDR address will resolve to at least one multiaddr.
		// This assumption lets us bound the space we reserve for resolving.
		nextOutputLimit := outputLimit - len(resolved) - (len(toResolve) - i) + 1
		resolvedAddrs, err := r.ResolveDNSAddr(ctx, expectedPeerID, addr, recursionLimit-1, nextOutputLimit)
		if err != nil {
			log.Warnf("failed to resolve dnsaddr %v: %s", addr, err)
			// Dropping this address
			continue
		}
		resolved = append(resolved, resolvedAddrs...)
	}

	if len(resolved) > outputLimit {
		resolved = resolved[:outputLimit]
	}

	// If the address contains a peer id, make sure it matches our expectedPeerID
	if expectedPeerID != "" {
		removeMismatchingPeerID := func(a ma.Multiaddr) bool {
			id, err := peer.IDFromP2PAddr(a)
			if err == peer.ErrInvalidAddr {
				// This multiaddr didn't contain a peer id, assume it's for this peer.
				// Handshake will fail later if it's not.
				return false
			} else if err != nil {
				// This multiaddr is invalid, drop it.
				return true
			}

			return id != expectedPeerID
		}
		resolved = slices.DeleteFunc(resolved, removeMismatchingPeerID)
	}

	return resolved, nil
}
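
// A minimal usage sketch (hypothetical caller code; the address, peer ID, and
// limits are examples only):
//
//	r := ResolverFromMaDNS{madns.DefaultResolver}
//	addr := ma.StringCast("/dnsaddr/bootstrap.libp2p.io")
//	addrs, err := r.ResolveDNSAddr(ctx, expectedPeerID, addr, 4, 16)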

// ResolveDNSComponent implements MultiaddrDNSResolver
func (r ResolverFromMaDNS) ResolveDNSComponent(ctx context.Context, maddr ma.Multiaddr, outputLimit int) ([]ma.Multiaddr, error) {
	addrs, err := r.Resolve(ctx, maddr)
	if err != nil {
		return nil, err
	}
	if len(addrs) > outputLimit {
		addrs = addrs[:outputLimit]
	}
	return addrs, nil
}