package basichost

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"slices"
	"sync"
	"sync/atomic"
	"time"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/record"
	"github.com/libp2p/go-libp2p/p2p/discovery/backoff"
	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
	"github.com/libp2p/go-netroute"
	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
	"github.com/prometheus/client_golang/prometheus"
)

// maxObservedAddrsPerListenAddr caps how many externally observed addresses
// we keep per listen address, to bound the size of the advertised addr set.
const maxObservedAddrsPerListenAddr = 3

// addrChangeTickrInterval is the interval to recompute host addrs.
var addrChangeTickrInterval = 5 * time.Second

// maxPeerRecordSize caps the size of the signed peer record we create.
const maxPeerRecordSize = 8 * 1024 // 8k to be compatible with identify's limit

// addrStore is a minimal interface for storing peer addresses
type addrStore interface {
	// SetAddrs stores addrs for a peer with the given TTL.
	SetAddrs(peer.ID, []ma.Multiaddr, time.Duration)
}

// ObservedAddrsManager maps our local listen addrs to externally observed addrs.
type ObservedAddrsManager interface {
	// Addrs returns observed addresses; minObservers is the minimum number of
	// observations required for an address to be included.
	Addrs(minObservers int) []ma.Multiaddr
	// AddrsFor returns the externally observed addrs for one local listen addr.
	AddrsFor(local ma.Multiaddr) []ma.Multiaddr
}

// hostAddrs is a snapshot of the host's addresses, grouped by category.
type hostAddrs struct {
	addrs            []ma.Multiaddr // final dialable addrs (after addrs factory)
	localAddrs       []ma.Multiaddr // listen/NAT/observed addrs, before filtering
	reachableAddrs   []ma.Multiaddr // addrs confirmed reachable by the reachability tracker
	unreachableAddrs []ma.Multiaddr // addrs confirmed unreachable
	unknownAddrs     []ma.Multiaddr // addrs with undetermined reachability
	relayAddrs       []ma.Multiaddr // relay (autorelay) addrs
}

// addrsManager computes and tracks the host's addresses: local listen addrs,
// NAT-mapped addrs, externally observed addrs, reachability-confirmed addrs,
// and relay addrs. It publishes updates on the event bus.
type addrsManager struct {
	bus                      event.Bus
	natManager               NATManager
	addrsFactory             AddrsFactory
	listenAddrs              func() []ma.Multiaddr
	addCertHashes            func([]ma.Multiaddr) []ma.Multiaddr
	observedAddrsManager     ObservedAddrsManager
	interfaceAddrs           *interfaceAddrsCache
	addrsReachabilityTracker *addrsReachabilityTracker

	// triggerAddrsUpdateChan is used to trigger an addresses update.
	triggerAddrsUpdateChan chan chan struct{}
	// started is used to check whether the addrsManager has started.
	started atomic.Bool
	// triggerReachabilityUpdate is notified when reachable addrs are updated.
	triggerReachabilityUpdate chan struct{}

	hostReachability atomic.Pointer[network.Reachability]

	addrsMx      sync.RWMutex
	currentAddrs hostAddrs

	signKey           crypto.PrivKey
	addrStore         addrStore
	signedRecordStore peerstore.CertifiedAddrBook
	hostID            peer.ID

	wg        sync.WaitGroup
	ctx       context.Context
	ctxCancel context.CancelFunc
}

func newAddrsManager(
	 event.Bus,
	 NATManager,
	 AddrsFactory,
	 func() []ma.Multiaddr,
	 func([]ma.Multiaddr) []ma.Multiaddr,
	 ObservedAddrsManager,
	 autonatv2Client,
	 bool,
	 prometheus.Registerer,
	 bool,
	 crypto.PrivKey,
	 addrStore,
	 peer.ID,
) (*addrsManager, error) {
	,  := context.WithCancel(context.Background())
	 := &addrsManager{
		bus:                       ,
		listenAddrs:               ,
		addCertHashes:             ,
		observedAddrsManager:      ,
		natManager:                ,
		addrsFactory:              ,
		triggerAddrsUpdateChan:    make(chan chan struct{}, 1),
		triggerReachabilityUpdate: make(chan struct{}, 1),
		interfaceAddrs:            &interfaceAddrsCache{},
		signKey:                   ,
		addrStore:                 ,
		hostID:                    ,
		ctx:                       ,
		ctxCancel:                 ,
	}
	 := network.ReachabilityUnknown
	.hostReachability.Store(&)

	if ! {
		var  bool
		.signedRecordStore,  = .addrStore.(peerstore.CertifiedAddrBook)
		if ! {
			return nil, errors.New("peerstore doesn't implement CertifiedAddrBook interface")
		}
	}

	if  != nil {
		var  MetricsTracker
		if  {
			 = newMetricsTracker(withRegisterer())
		}
		.addrsReachabilityTracker = newAddrsReachabilityTracker(, .triggerReachabilityUpdate, nil, )
	}
	return , nil
}

func ( *addrsManager) () error {
	if .addrsReachabilityTracker != nil {
		 := .addrsReachabilityTracker.Start()
		if  != nil {
			return fmt.Errorf("error starting addrs reachability tracker: %s", )
		}
	}
	if  := .startBackgroundWorker();  != nil {
		return fmt.Errorf("error starting background worker: %s", )
	}

	// this ensures that listens concurrent with Start are reflected correctly after Start exits.
	.started.Store(true)
	.updateAddrsSync()
	return nil
}

func ( *addrsManager) () {
	.ctxCancel()
	if .natManager != nil {
		 := .natManager.Close()
		if  != nil {
			log.Warn("error closing natmgr", "err", )
		}
	}
	if .addrsReachabilityTracker != nil {
		 := .addrsReachabilityTracker.Close()
		if  != nil {
			log.Warn("error closing addrs reachability tracker", "err", )
		}
	}
	.wg.Wait()
}

func ( *addrsManager) () network.Notifiee {
	return &network.NotifyBundle{
		ListenF:      func(network.Network, ma.Multiaddr) { .updateAddrsSync() },
		ListenCloseF: func(network.Network, ma.Multiaddr) { .updateAddrsSync() },
	}
}

func ( *addrsManager) () {
	// This prevents a deadlock where addrs updates before starting the manager are ignored
	if !.started.Load() {
		return
	}
	 := make(chan struct{})
	select {
	case .triggerAddrsUpdateChan <- :
		select {
		case <-:
		case <-.ctx.Done():
		}
	case <-.ctx.Done():
	}
}

func ( *addrsManager) () ( error) {
	,  := .bus.Subscribe(new(event.EvtAutoRelayAddrsUpdated), eventbus.Name("addrs-manager autorelay sub"))
	if  != nil {
		return fmt.Errorf("error subscribing to auto relay addrs: %s", )
	}
	 := multiCloser{}
	,  := .bus.Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("addrs-manager autonatv1 sub"))
	if  != nil {
		return errors.Join(
			fmt.Errorf("error subscribing to autonat reachability: %s", ),
			.Close(),
		)
	}
	 = append(, )

	,  := .bus.Emitter(new(event.EvtHostReachableAddrsChanged), eventbus.Stateful)
	if  != nil {
		return errors.Join(
			fmt.Errorf("error creating reachability subscriber: %s", ),
			.Close(),
		)
	}
	 = append(, )

	,  := .bus.Emitter(new(event.EvtLocalAddressesUpdated), eventbus.Stateful)
	if  != nil {
		return errors.Join(
			fmt.Errorf("error creating local addrs emitter: %s", ),
			.Close(),
		)
	}

	.wg.Add(1)
	go .background(, , , )
	return nil
}

func ( *addrsManager) (
	,
	 event.Subscription,
	 event.Emitter,
	 event.Emitter,
) {
	defer .wg.Done()
	defer func() {
		 := .Close()
		if  != nil {
			log.Warn("error closing auto relay addrs sub", "err", )
		}
		 = .Close()
		if  != nil {
			log.Warn("error closing autonat reachability sub", "err", )
		}
		 = .Close()
		if  != nil {
			log.Warn("error closing host reachability emitter", "err", )
		}
		 = .Close()
		if  != nil {
			log.Warn("error closing local addrs emitter", "err", )
		}
	}()

	var  []ma.Multiaddr
	// update relay addrs in case we're private
	select {
	case  := <-.Out():
		if ,  := .(event.EvtAutoRelayAddrsUpdated);  {
			 = slices.Clone(.RelayAddrs)
		}
	default:
	}

	select {
	case  := <-.Out():
		if ,  := .(event.EvtLocalReachabilityChanged);  {
			.hostReachability.Store(&.Reachability)
		}
	default:
	}

	 := time.NewTicker(addrChangeTickrInterval)
	defer .Stop()
	var  hostAddrs
	 := make(chan struct{})
	for {
		select {
		case <-.C:
		case  = <-.triggerAddrsUpdateChan:
		case <-.triggerReachabilityUpdate:
		case  := <-.Out():
			if ,  := .(event.EvtAutoRelayAddrsUpdated);  {
				 = slices.Clone(.RelayAddrs)
			}
		case  := <-.Out():
			if ,  := .(event.EvtLocalReachabilityChanged);  {
				.hostReachability.Store(&.Reachability)
			}
		case <-.ctx.Done():
			return
		}

		 := .updateAddrs(, )
		if  != nil {
			close()
			 = nil
		}
		.notifyAddrsUpdated(, , , )
		 = 
	}
}

// updateAddrs updates the addresses of the host and returns the new updated
// addrs. This must only be called from the background goroutine or from the Start method otherwise
// we may end up with stale addrs.
func ( *addrsManager) ( hostAddrs,  []ma.Multiaddr) hostAddrs {
	 := .getLocalAddrs()
	var , ,  []ma.Multiaddr
	if .addrsReachabilityTracker != nil {
		, ,  = .getConfirmedAddrs()
	}
	 = slices.Clone()
	 := .getDialableAddrs(, , , )
	 = .applyAddrsFactory()

	if areAddrsDifferent(.addrs, ) {
		, ,  := diffAddrs(.addrs, )
		.updatePeerStore(, )
	}
	.addrsMx.Lock()
	.currentAddrs = hostAddrs{
		addrs:            append(.currentAddrs.addrs[:0], ...),
		localAddrs:       append(.currentAddrs.localAddrs[:0], ...),
		reachableAddrs:   append(.currentAddrs.reachableAddrs[:0], ...),
		unreachableAddrs: append(.currentAddrs.unreachableAddrs[:0], ...),
		unknownAddrs:     append(.currentAddrs.unknownAddrs[:0], ...),
		relayAddrs:       append(.currentAddrs.relayAddrs[:0], ...),
	}
	.addrsMx.Unlock()

	return hostAddrs{
		localAddrs:       ,
		addrs:            ,
		reachableAddrs:   ,
		unreachableAddrs: ,
		unknownAddrs:     ,
		relayAddrs:       ,
	}
}

// updatePeerStore updates the peer store for the host
func ( *addrsManager) ( []ma.Multiaddr,  []ma.Multiaddr) {
	// update host addresses in the peer store
	.addrStore.SetAddrs(.hostID, , peerstore.PermanentAddrTTL)
	.addrStore.SetAddrs(.hostID, , 0)

	var  *record.Envelope
	// Our addresses have changed.
	// store the signed peer record in the peer store.
	if .signedRecordStore != nil {
		var  error
		// add signed peer record to the event
		// in case of an error drop this event.
		,  = .makeSignedPeerRecord()
		if  != nil {
			log.Error("error creating a signed peer record from the set of current addresses", "err", )
			return
		}
		if ,  := .signedRecordStore.ConsumePeerRecord(, peerstore.PermanentAddrTTL);  != nil {
			log.Error("failed to persist signed peer record in peer store", "err", )
			return
		}
	}
}

func ( *addrsManager) ( event.Emitter,  event.Emitter, ,  hostAddrs) {
	if areAddrsDifferent(.localAddrs, .localAddrs) {
		log.Debug("host local addresses updated", "addrs", .localAddrs)
		if .addrsReachabilityTracker != nil {
			.addrsReachabilityTracker.UpdateAddrs(.localAddrs)
		}
	}
	if areAddrsDifferent(.addrs, .addrs) {
		log.Debug("host addresses updated", "addrs", .localAddrs)
		.emitLocalAddrsUpdated(, .addrs, .addrs)
	}

	// We *must* send both reachability changed and addrs changed events from the
	// same goroutine to ensure correct ordering
	// Consider the events:
	// 	- addr x discovered
	// 	- addr x is reachable
	// 	- addr x removed
	// We must send these events in the same order. It'll be confusing for consumers
	// if the reachable event is received after the addr removed event.
	if areAddrsDifferent(.reachableAddrs, .reachableAddrs) ||
		areAddrsDifferent(.unreachableAddrs, .unreachableAddrs) ||
		areAddrsDifferent(.unknownAddrs, .unknownAddrs) {
		log.Debug("host reachable addrs updated",
			"reachable", .reachableAddrs,
			"unreachable", .unreachableAddrs,
			"unknown", .unknownAddrs)
		if  := .Emit(event.EvtHostReachableAddrsChanged{
			Reachable:   slices.Clone(.reachableAddrs),
			Unreachable: slices.Clone(.unreachableAddrs),
			Unknown:     slices.Clone(.unknownAddrs),
		});  != nil {
			log.Error("error sending host reachable addrs changed event", "err", )
		}
	}
}

// Addrs returns the node's dialable addresses both public and private.
// If autorelay is enabled and node reachability is private, it returns
// the node's relay addresses and private network addresses.
func ( *addrsManager) () []ma.Multiaddr {
	.addrsMx.RLock()
	 := .getDialableAddrs(.currentAddrs.localAddrs, .currentAddrs.reachableAddrs, .currentAddrs.unreachableAddrs, .currentAddrs.relayAddrs)
	.addrsMx.RUnlock()
	// don't hold the lock while applying addrs factory
	return .applyAddrsFactory()
}

// getDialableAddrs returns the node's dialable addrs. Doesn't mutate any argument.
func ( *addrsManager) (, , ,  []ma.Multiaddr) []ma.Multiaddr {
	// remove known unreachable addrs
	 := removeInSource(slices.Clone(), )
	// If we have no confirmed reachable addresses, add the relay addresses
	if .addrsReachabilityTracker != nil {
		if len() == 0 {
			 = append(, ...)
		}
	} else {
		 := .hostReachability.Load()
		// If we're only using autonatv1, remove public addrs and add relay addrs
		if len() > 0 &&  != nil && * == network.ReachabilityPrivate {
			 = slices.DeleteFunc(, manet.IsPublicAddr)
			 = append(, ...)
		}
	}
	return 
}

func ( *addrsManager) ( []ma.Multiaddr) []ma.Multiaddr {
	 := .addrsFactory()
	// Copy to our slice in case addrsFactory returns its own same slice always.
	 = append([:0], ...)
	// Add certhashes for the addresses provided by the user via address factory.
	 = .addCertHashes(ma.Unique())
	slices.SortFunc(, func(,  ma.Multiaddr) int { return .Compare() })
	return 
}

// HolePunchAddrs returns all the host's direct public addresses, reachable or unreachable,
// suitable for hole punching.
func ( *addrsManager) () []ma.Multiaddr {
	 := .DirectAddrs()
	 = slices.Clone(.addrsFactory())
	// AllAddrs may ignore observed addresses in favour of NAT mappings.
	// Use both for hole punching.
	if .observedAddrsManager != nil {
		// For holepunching, include all the best addresses we know even ones with only 1 observer.
		 = append(, .observedAddrsManager.Addrs(1)...)
	}
	 = ma.Unique()
	return slices.DeleteFunc(, func( ma.Multiaddr) bool { return !manet.IsPublicAddr() })
}

// DirectAddrs returns all the addresses the host is listening on except circuit addresses.
func ( *addrsManager) () []ma.Multiaddr {
	.addrsMx.RLock()
	defer .addrsMx.RUnlock()
	return slices.Clone(.currentAddrs.localAddrs)
}

// ConfirmedAddrs returns all addresses of the host that are reachable from the internet
func ( *addrsManager) () ( []ma.Multiaddr,  []ma.Multiaddr,  []ma.Multiaddr) {
	.addrsMx.RLock()
	defer .addrsMx.RUnlock()
	return slices.Clone(.currentAddrs.reachableAddrs), slices.Clone(.currentAddrs.unreachableAddrs), slices.Clone(.currentAddrs.unknownAddrs)
}

func ( *addrsManager) ( []ma.Multiaddr) (, ,  []ma.Multiaddr) {
	, ,  = .addrsReachabilityTracker.ConfirmedAddrs()
	return removeNotInSource(, ), removeNotInSource(, ), removeNotInSource(, )
}

// p2pCircuitAddr is the bare address the p2p-circuit listener reports; it is
// filtered out of local addrs because it is useless for dialing.
var p2pCircuitAddr = ma.StringCast("/p2p-circuit")

func ( *addrsManager) () []ma.Multiaddr {
	 := .listenAddrs()
	if len() == 0 {
		return nil
	}

	 := make([]ma.Multiaddr, 0, 8)
	 = .appendPrimaryInterfaceAddrs(, )
	if .natManager != nil {
		 = .appendNATAddrs(, )
	}
	if .observedAddrsManager != nil {
		 = .appendObservedAddrs(, , .interfaceAddrs.All())
	}

	// Remove "/p2p-circuit" addresses from the list.
	// The p2p-circuit listener reports its address as just /p2p-circuit. This is
	// useless for dialing. Users need to manage their circuit addresses themselves,
	// or use AutoRelay.
	 = slices.DeleteFunc(, func( ma.Multiaddr) bool {
		return .Equal(p2pCircuitAddr)
	})

	// Remove any unspecified address from the list
	 = slices.DeleteFunc(, func( ma.Multiaddr) bool {
		return manet.IsIPUnspecified()
	})

	// Add certhashes for /webrtc-direct, /webtransport, etc addresses discovered
	// using identify.
	 = .addCertHashes()
	 = ma.Unique()
	slices.SortFunc(, func(,  ma.Multiaddr) int { return .Compare() })
	return 
}

// appendPrimaryInterfaceAddrs appends the primary interface addresses to `dst`.
func ( *addrsManager) ( []ma.Multiaddr,  []ma.Multiaddr) []ma.Multiaddr {
	// resolving any unspecified listen addressees to use only the primary
	// interface to avoid advertising too many addresses.
	if ,  := manet.ResolveUnspecifiedAddresses(, .interfaceAddrs.Filtered());  != nil {
		log.Warn("failed to resolve listen addrs", "err", )
	} else {
		 = append(, ...)
	}
	return 
}

// appendNATAddrs appends the NAT-ed addrs for the listenAddrs. For unspecified listen addrs it appends the
// public address for all the interfaces.
// Inferring WebTransport from QUIC depends on the observed address manager.
func ( *addrsManager) ( []ma.Multiaddr,  []ma.Multiaddr) []ma.Multiaddr {
	for ,  := range  {
		 := .natManager.GetMapping()
		if  != nil {
			 = append(, )
		}
	}
	return 
}

func ( *addrsManager) ( []ma.Multiaddr, ,  []ma.Multiaddr) []ma.Multiaddr {
	// Add it for all the listenAddr first.
	// listenAddr maybe unspecified. That's okay as connections on UDP transports
	// will have the unspecified address as the local address.
	for ,  := range  {
		 := .observedAddrsManager.AddrsFor()
		if len() > maxObservedAddrsPerListenAddr {
			 = [:maxObservedAddrsPerListenAddr]
		}
		 = append(, ...)
	}

	// if it can be resolved into more addresses, add them too
	,  := manet.ResolveUnspecifiedAddresses(, )
	if  != nil {
		log.Warn("failed to resolve listen addr", "listen_addr", , "iface_addrs", , "err", )
		return 
	}
	for ,  := range  {
		 := .observedAddrsManager.AddrsFor()
		if len() > maxObservedAddrsPerListenAddr {
			 = [:maxObservedAddrsPerListenAddr]
		}
		 = append(, ...)
	}
	return 
}

// makeSignedPeerRecord creates a signed peer record for the given addresses
func ( *addrsManager) ( []ma.Multiaddr) (*record.Envelope, error) {
	if .signKey == nil {
		return nil, errors.New("signKey is nil")
	}
	// Limit the length of currentAddrs to ensure that our signed peer records aren't rejected
	 := 64 // HostID
	,  := .signKey.Raw()
	var  int
	if  == nil {
		 = len()
	} else {
		 = 1024 // In case of error, use a large enough value.
	}
	 += 2 *  // 1 for signature, 1 for public key
	// we want the final address list to be small for keeping the signed peer record in size
	 = trimHostAddrList(, maxPeerRecordSize--256) // 256 B of buffer
	 := peer.PeerRecordFromAddrInfo(peer.AddrInfo{
		ID:    .hostID,
		Addrs: ,
	})
	return record.Seal(, .signKey)
}

// emitLocalAddrsUpdated emits an EvtLocalAddressesUpdated event and updates the addresses in the peerstore.
func ( *addrsManager) ( event.Emitter,  []ma.Multiaddr,  []ma.Multiaddr) {
	, ,  := diffAddrs(, )
	if len() == 0 && len() == 0 {
		return
	}

	var  *record.Envelope
	if .signedRecordStore != nil {
		 = .signedRecordStore.GetPeerRecord(.hostID)
	}

	 := &event.EvtLocalAddressesUpdated{
		Diffs:            true,
		Current:          make([]event.UpdatedAddress, 0, len()),
		Removed:          make([]event.UpdatedAddress, 0, len()),
		SignedPeerRecord: ,
	}

	for ,  := range  {
		.Current = append(.Current, event.UpdatedAddress{
			Address: ,
			Action:  event.Maintained,
		})
	}

	for ,  := range  {
		.Current = append(.Current, event.UpdatedAddress{
			Address: ,
			Action:  event.Added,
		})
	}

	for ,  := range  {
		.Removed = append(.Removed, event.UpdatedAddress{
			Address: ,
			Action:  event.Removed,
		})
	}

	// emit addr change event
	if  := .Emit(*);  != nil {
		log.Warn("error emitting event for updated addrs", "err", )
	}
}

func areAddrsDifferent(,  []ma.Multiaddr) bool {
	// TODO: make the sorted nature of ma.Unique a guarantee in multiaddrs
	 = ma.Unique()
	 = ma.Unique()
	if len() != len() {
		return true
	}
	slices.SortFunc(, func(,  ma.Multiaddr) int { return .Compare() })
	slices.SortFunc(, func(,  ma.Multiaddr) int { return .Compare() })
	for  := range  {
		if ![].Equal([]) {
			return true
		}
	}
	return false
}

// diffAddrs diffs prev and current addrs and returns added, maintained, and removed addrs.
// Both prev and current are expected to be sorted using ma.Compare()
func diffAddrs(,  []ma.Multiaddr) (, ,  []ma.Multiaddr) {
	,  := 0, 0
	for  < len() &&  < len() {
		 := [].Compare([])
		switch {
		case  < 0:
			// prev < current
			 = append(, [])
			++
		case  > 0:
			// current < prev
			 = append(, [])
			++
		default:
			 = append(, [])
			++
			++
		}
	}
	// All remaining current addresses are added
	 = append(, [:]...)

	// All remaining previous addresses are removed
	 = append(, [:]...)
	return
}

// trimHostAddrList trims the address list to fit within the maximum size
func trimHostAddrList( []ma.Multiaddr,  int) []ma.Multiaddr {
	 := 0
	for ,  := range  {
		 += len(.Bytes())
	}
	if  <=  {
		return 
	}

	 := func( ma.Multiaddr) int {
		var  int
		if manet.IsPublicAddr() {
			 |= 1 << 12
		} else if !manet.IsIPLoopback() {
			 |= 1 << 11
		}
		var  int
		ma.ForEach(, func( ma.Component) bool {
			switch .Protocol().Code {
			case ma.P_QUIC_V1:
				 = 5
			case ma.P_TCP:
				 = 4
			case ma.P_WSS:
				 = 3
			case ma.P_WEBTRANSPORT:
				 = 2
			case ma.P_WEBRTC_DIRECT:
				 = 1
			case ma.P_P2P:
				return false
			}
			return true
		})
		 |= 1 << 
		return 
	}

	slices.SortStableFunc(, func(,  ma.Multiaddr) int {
		return () - () // b-a for reverse order
	})
	 = 0
	for ,  := range  {
		 += len(.Bytes())
		if  >  {
			 = [:]
			break
		}
	}
	return 
}

// interfaceAddrsCacheTTL is how long cached interface addresses are considered fresh.
const interfaceAddrsCacheTTL = time.Minute

// interfaceAddrsCache caches the machine's interface multiaddrs, refreshing
// them at most once per interfaceAddrsCacheTTL.
type interfaceAddrsCache struct {
	mx                     sync.RWMutex
	filtered               []ma.Multiaddr
	all                    []ma.Multiaddr
	updateLocalIPv4Backoff backoff.ExpBackoff
	updateLocalIPv6Backoff backoff.ExpBackoff
	lastUpdated            time.Time
}

func ( *interfaceAddrsCache) () []ma.Multiaddr {
	.mx.RLock()
	if time.Now().After(.lastUpdated.Add(interfaceAddrsCacheTTL)) {
		.mx.RUnlock()
		return .update(true)
	}
	defer .mx.RUnlock()
	return .filtered
}

func ( *interfaceAddrsCache) () []ma.Multiaddr {
	.mx.RLock()
	if time.Now().After(.lastUpdated.Add(interfaceAddrsCacheTTL)) {
		.mx.RUnlock()
		return .update(false)
	}
	defer .mx.RUnlock()
	return .all
}

func ( *interfaceAddrsCache) ( bool) []ma.Multiaddr {
	.mx.Lock()
	defer .mx.Unlock()
	if !time.Now().After(.lastUpdated.Add(interfaceAddrsCacheTTL)) {
		if  {
			return .filtered
		}
		return .all
	}
	.updateUnlocked()
	.lastUpdated = time.Now()
	if  {
		return .filtered
	}
	return .all
}

func ( *interfaceAddrsCache) () {
	.filtered = nil
	.all = nil

	// Try to use the default ipv4/6 addresses.
	// TODO: Remove this. We should advertise all interface addresses.
	if ,  := netroute.New();  != nil {
		log.Debug("failed to build Router for kernel's routing table", "err", )
	} else {

		var  net.IP
		var  bool
		,  = .updateLocalIPv4Backoff.Run(func() error {
			_, _, ,  = .Route(net.IPv4zero)
			return 
		})

		if  &&  != nil {
			log.Debug("failed to fetch local IPv4 address", "err", )
		} else if  && .IsGlobalUnicast() {
			,  := manet.FromIP()
			if  == nil {
				.filtered = append(.filtered, )
			}
		}

		var  net.IP
		,  = .updateLocalIPv6Backoff.Run(func() error {
			_, _, ,  = .Route(net.IPv6unspecified)
			return 
		})

		if  &&  != nil {
			log.Debug("failed to fetch local IPv6 address", "err", )
		} else if  && .IsGlobalUnicast() {
			,  := manet.FromIP()
			if  == nil {
				.filtered = append(.filtered, )
			}
		}
	}

	// Resolve the interface addresses
	,  := manet.InterfaceMultiaddrs()
	if  != nil {
		// This usually shouldn't happen, but we could be in some kind
		// of funky restricted environment.
		log.Error("failed to resolve local interface addresses", "err", )

		// Add the loopback addresses to the filtered addrs and use them as the non-filtered addrs.
		// Then bail. There's nothing else we can do here.
		.filtered = append(.filtered, manet.IP4Loopback, manet.IP6Loopback)
		.all = .filtered
		return
	}

	// remove link local ipv6 addresses
	.all = slices.DeleteFunc(, manet.IsIP6LinkLocal)

	// If netroute failed to get us any interface addresses, use all of
	// them.
	if len(.filtered) == 0 {
		// Add all addresses.
		.filtered = .all
	} else {
		// Only add loopback addresses. Filter these because we might
		// not _have_ an IPv6 loopback address.
		for ,  := range .all {
			if manet.IsIPLoopback() {
				.filtered = append(.filtered, )
			}
		}
	}
}

// removeNotInSource removes items from addrs that are not present in source.
// Modifies the addrs slice in place
// addrs and source must be sorted using multiaddr.Compare.
func removeNotInSource(,  []ma.Multiaddr) []ma.Multiaddr {
	 := 0
	// mark entries not in source as nil
	for ,  := range  {
		// move right as long as a > source[j]
		for  < len() && .Compare([]) > 0 {
			++
		}
		// a is not in source if we've reached the end, or a is lesser
		if  == len() || .Compare([]) < 0 {
			[] = nil
		}
		// a is in source, nothing to do
	}
	// Move all the nils to the end.
	// j is the current element, i is lowest index of a nil element.
	// At the end of every iteration all elements from i to j are nil.
	 := 0
	for  := range len() {
		if [] != nil {
			[], [] = [], []
			++
		}
	}
	return [:]
}

// removeInSource removes items from addrs that are present in source.
// Modifies the addrs slice in place
// addrs and source must be sorted using multiaddr.Compare.
func removeInSource(,  []ma.Multiaddr) []ma.Multiaddr {
	 := 0
	// mark entries in source as nil
	for ,  := range  {
		// move right in source as long as a > source[j]
		for  < len() && .Compare([]) > 0 {
			++
		}
		// a is in source,  mark nil
		if  < len() && .Compare([]) == 0 {
			[] = nil
		}
	}
	// Move all the nils to the end.
	// j is the current element, i is lowest index of a nil element.
	// At the end of every iteration all elements from i to j are nil.
	 := 0
	for  := range len() {
		if [] != nil {
			[], [] = [], []
			++
		}
	}
	return [:]
}

type multiCloser []io.Closer

func ( *multiCloser) () error {
	var  []error
	for ,  := range * {
		if  := .Close();  != nil {
			var  string
			if ,  := .(interface{ () string });  {
				 = .()
			} else {
				 = fmt.Sprintf("%T", )
			}
			 = append(, fmt.Errorf("error closing %s: %w", , ))
		}
	}
	return errors.Join(...)
}