package basichost

import (
	"context"
	"errors"
	"fmt"
	"io"
	"slices"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/core/record"
	"github.com/libp2p/go-libp2p/core/transport"
	"github.com/libp2p/go-libp2p/p2p/host/autonat"
	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
	"github.com/libp2p/go-libp2p/p2p/host/pstoremanager"
	"github.com/libp2p/go-libp2p/p2p/host/relaysvc"
	"github.com/libp2p/go-libp2p/p2p/protocol/autonatv2"
	relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
	"github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
	"github.com/libp2p/go-libp2p/p2p/protocol/identify"
	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
	libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
	libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
	"github.com/prometheus/client_golang/prometheus"

	logging "github.com/ipfs/go-log/v2"
	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
	msmux "github.com/multiformats/go-multistream"
)

// addrChangeTickrInterval is the interval between two address change ticks.
var addrChangeTickrInterval = 5 * time.Second

var log = logging.Logger("basichost")

var (
	// DefaultNegotiationTimeout is the default value for HostOpts.NegotiationTimeout.
	DefaultNegotiationTimeout = 10 * time.Second

	// DefaultAddrsFactory is the default value for HostOpts.AddrsFactory.
	DefaultAddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { return addrs }
)

const maxPeerRecordSize = 8 * 1024 // 8k to be compatible with identify's limit

// AddrsFactory functions can be passed to New in order to override
// addresses returned by Addrs.
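//
// For example, a factory that advertises only public addresses could look
// roughly like this (an illustrative sketch; the filtering policy is entirely
// up to the caller):
//
//	onlyPublic := func(addrs []ma.Multiaddr) []ma.Multiaddr {
//		var out []ma.Multiaddr
//		for _, a := range addrs {
//			if manet.IsPublicAddr(a) {
//				out = append(out, a)
//			}
//		}
//		return out
//	}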
type AddrsFactory func([]ma.Multiaddr) []ma.Multiaddr

// BasicHost is the basic implementation of the host.Host interface. This
// particular host implementation:
//   - uses a protocol muxer to mux per-protocol streams
//   - uses an identity service to send + receive node information
//   - uses a nat service to establish NAT port mappings
type BasicHost struct {
	ctx       context.Context
	ctxCancel context.CancelFunc
	// ensures we shutdown ONLY once
	closeSync sync.Once
	// keep track of resources we need to wait on before shutting down
	refCount sync.WaitGroup

	network      network.Network
	psManager    *pstoremanager.PeerstoreManager
	mux          *msmux.MultistreamMuxer[protocol.ID]
	ids          identify.IDService
	hps          *holepunch.Service
	pings        *ping.PingService
	cmgr         connmgr.ConnManager
	eventbus     event.Bus
	relayManager *relaysvc.RelayManager

	negtimeout time.Duration

	emitters struct {
		evtLocalProtocolsUpdated event.Emitter
		evtLocalAddrsUpdated     event.Emitter
	}

	disableSignedPeerRecord bool
	signKey                 crypto.PrivKey
	caBook                  peerstore.CertifiedAddrBook

	autoNATMx sync.RWMutex
	autoNat   autonat.AutoNAT

	autonatv2        *autonatv2.AutoNAT
	addressManager   *addrsManager
	addrsUpdatedChan chan struct{}
}

var _ host.Host = (*BasicHost)(nil)

// HostOpts holds options that can be passed to NewHost in order to
// customize construction of the *BasicHost.
type HostOpts struct {
	// EventBus sets the event bus. Will construct a new event bus if omitted.
	EventBus event.Bus

	// MultistreamMuxer is essential for the *BasicHost and will use a sensible default value if omitted.
	MultistreamMuxer *msmux.MultistreamMuxer[protocol.ID]

	// NegotiationTimeout determines the read and write timeouts when negotiating
	// protocols for streams. If 0 or omitted, it will use
	// DefaultNegotiationTimeout. If below 0, timeouts on streams will be
	// deactivated.
	NegotiationTimeout time.Duration

	// AddrsFactory holds a function which can be used to override or filter the result of Addrs.
	// If omitted, there's no override or filtering, and the results of Addrs and AllAddrs are the same.
	AddrsFactory AddrsFactory

	// NATManager takes care of setting NAT port mappings, and discovering external addresses.
	// If omitted, this will simply be disabled.
	NATManager func(network.Network) NATManager

	// ConnManager is a libp2p connection manager
	ConnManager connmgr.ConnManager

	// EnablePing indicates whether to instantiate the ping service
	EnablePing bool

	// EnableRelayService enables the circuit v2 relay (if we're publicly reachable).
	EnableRelayService bool
	// RelayServiceOpts are options for the circuit v2 relay.
	RelayServiceOpts []relayv2.Option

	// UserAgent sets the user-agent for the host.
	UserAgent string

	// ProtocolVersion sets the protocol version for the host.
	ProtocolVersion string

	// DisableSignedPeerRecord disables the generation of Signed Peer Records on this host.
	DisableSignedPeerRecord bool

	// EnableHolePunching enables the peer to initiate/respond to hole punching attempts for NAT traversal.
	EnableHolePunching bool
	// HolePunchingOptions are options for the hole punching service
	HolePunchingOptions []holepunch.Option

	// EnableMetrics enables the metrics subsystems
	EnableMetrics bool
	// PrometheusRegisterer is the PrometheusRegisterer used for metrics
	PrometheusRegisterer prometheus.Registerer
	// AutoNATv2MetricsTracker tracks AutoNATv2 address reachability metrics
	AutoNATv2MetricsTracker MetricsTracker

	// DisableIdentifyAddressDiscovery disables address discovery using peer provided observed addresses in identify
	DisableIdentifyAddressDiscovery bool

	AutoNATv2 *autonatv2.AutoNAT
}

// NewHost constructs a new *BasicHost and activates it by attaching its stream and connection handlers to the given inet.Network.
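//
// A minimal usage sketch (assuming an existing swarm `netw` implementing
// network.Network; the option values are illustrative only):
//
//	h, err := NewHost(netw, &HostOpts{EnablePing: true})
//	if err != nil {
//		// handle error
//	}
//	h.Start()
//	defer h.Close()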
func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
	if opts == nil {
		opts = &HostOpts{}
	}
	if opts.EventBus == nil {
		opts.EventBus = eventbus.NewBus()
	}

	psManager, err := pstoremanager.NewPeerstoreManager(n.Peerstore(), opts.EventBus, n)
	if err != nil {
		return nil, err
	}

	hostCtx, cancel := context.WithCancel(context.Background())
	h := &BasicHost{
		network:                 n,
		psManager:               psManager,
		mux:                     msmux.NewMultistreamMuxer[protocol.ID](),
		negtimeout:              DefaultNegotiationTimeout,
		eventbus:                opts.EventBus,
		ctx:                     hostCtx,
		ctxCancel:               cancel,
		disableSignedPeerRecord: opts.DisableSignedPeerRecord,
		addrsUpdatedChan:        make(chan struct{}, 1),
	}

	if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}, eventbus.Stateful); err != nil {
		return nil, err
	}
	if h.emitters.evtLocalAddrsUpdated, err = h.eventbus.Emitter(&event.EvtLocalAddressesUpdated{}, eventbus.Stateful); err != nil {
		return nil, err
	}

	if opts.MultistreamMuxer != nil {
		h.mux = opts.MultistreamMuxer
	}

	idOpts := []identify.Option{
		identify.UserAgent(opts.UserAgent),
		identify.ProtocolVersion(opts.ProtocolVersion),
	}

	// we can't set this as a default above because it depends on the *BasicHost.
	if h.disableSignedPeerRecord {
		idOpts = append(idOpts, identify.DisableSignedPeerRecord())
	}
	if opts.EnableMetrics {
		idOpts = append(idOpts,
			identify.WithMetricsTracer(
				identify.NewMetricsTracer(identify.WithRegisterer(opts.PrometheusRegisterer))))
	}
	if opts.DisableIdentifyAddressDiscovery {
		idOpts = append(idOpts, identify.DisableObservedAddrManager())
	}

	h.ids, err = identify.NewIDService(h, idOpts...)
	if err != nil {
		return nil, fmt.Errorf("failed to create Identify service: %s", err)
	}

	addrsFactory := DefaultAddrsFactory
	if opts.AddrsFactory != nil {
		addrsFactory = opts.AddrsFactory
	}

	var natmgr NATManager
	if opts.NATManager != nil {
		natmgr = opts.NATManager(h.Network())
	}
	var tfl func(ma.Multiaddr) transport.Transport
	if tr, ok := h.Network().(interface {
		TransportForListening(ma.Multiaddr) transport.Transport
	}); ok {
		tfl = tr.TransportForListening
	}

	if opts.AutoNATv2 != nil {
		h.autonatv2 = opts.AutoNATv2
	}

	var an2Client autonatv2Client // avoid typed nil errors
	if h.autonatv2 != nil {
		an2Client = h.autonatv2
	}
	h.addressManager, err = newAddrsManager(
		h.eventbus,
		natmgr,
		addrsFactory,
		h.Network().ListenAddresses,
		tfl,
		h.ids,
		h.addrsUpdatedChan,
		an2Client,
		opts.EnableMetrics,
		opts.PrometheusRegisterer,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create address service: %w", err)
	}
	// register to be notified when the network's listen addrs change,
	// so we can update our address set and push events if needed
	h.Network().Notify(h.addressManager.NetNotifee())

	if opts.EnableHolePunching {
		if opts.EnableMetrics {
			hpOpts := []holepunch.Option{
				holepunch.WithMetricsTracer(holepunch.NewMetricsTracer(holepunch.WithRegisterer(opts.PrometheusRegisterer)))}
			opts.HolePunchingOptions = append(hpOpts, opts.HolePunchingOptions...)
		}
		h.hps, err = holepunch.NewService(h, h.ids, h.addressManager.HolePunchAddrs, opts.HolePunchingOptions...)
		if err != nil {
			return nil, fmt.Errorf("failed to create hole punch service: %w", err)
		}
	}

	if uint64(opts.NegotiationTimeout) != 0 {
		h.negtimeout = opts.NegotiationTimeout
	}

	if opts.ConnManager == nil {
		h.cmgr = &connmgr.NullConnMgr{}
	} else {
		h.cmgr = opts.ConnManager
		n.Notify(h.cmgr.Notifee())
	}

	if opts.EnableRelayService {
		if opts.EnableMetrics {
			// Prefer explicitly provided metrics tracer
			metricsOpt := []relayv2.Option{
				relayv2.WithMetricsTracer(
					relayv2.NewMetricsTracer(relayv2.WithRegisterer(opts.PrometheusRegisterer)))}
			opts.RelayServiceOpts = append(metricsOpt, opts.RelayServiceOpts...)
		}
		h.relayManager = relaysvc.NewRelayManager(h, opts.RelayServiceOpts...)
	}

	if opts.EnablePing {
		h.pings = ping.NewPingService(h)
	}

	if !h.disableSignedPeerRecord {
		h.signKey = h.Peerstore().PrivKey(h.ID())
		cab, ok := peerstore.GetCertifiedAddrBook(n.Peerstore())
		if !ok {
			return nil, errors.New("peerstore should also be a certified address book")
		}
		h.caBook = cab

		rec, err := h.makeSignedPeerRecord(h.addressManager.Addrs())
		if err != nil {
			return nil, fmt.Errorf("failed to create signed record for self: %w", err)
		}
		if _, err := h.caBook.ConsumePeerRecord(rec, peerstore.PermanentAddrTTL); err != nil {
			return nil, fmt.Errorf("failed to persist signed record to peerstore: %w", err)
		}
	}
	n.SetStreamHandler(h.newStreamHandler)

	return h, nil
}

// Start starts background tasks in the host
// TODO: Return error and handle it in the caller?
func (h *BasicHost) Start() {
	h.psManager.Start()
	if h.autonatv2 != nil {
		err := h.autonatv2.Start()
		if err != nil {
			log.Errorf("autonat v2 failed to start: %s", err)
		}
	}
	if err := h.addressManager.Start(); err != nil {
		log.Errorf("address service failed to start: %s", err)
	}

	if !h.disableSignedPeerRecord {
		// Ensure we have the correct peer record after Start returns
		rec, err := h.makeSignedPeerRecord(h.addressManager.Addrs())
		if err != nil {
			log.Errorf("failed to create signed record: %s", err)
		} else if _, err := h.caBook.ConsumePeerRecord(rec, peerstore.PermanentAddrTTL); err != nil {
			log.Errorf("failed to persist signed record to peerstore: %s", err)
		}
	}

	h.ids.Start()

	h.refCount.Add(1)
	go h.background()
}

// newStreamHandler is the remote-opened stream handler for network.Network
// TODO: this feels a bit wonky
func (h *BasicHost) newStreamHandler(s network.Stream) {
	before := time.Now()

	if h.negtimeout > 0 {
		if err := s.SetDeadline(time.Now().Add(h.negtimeout)); err != nil {
			log.Debug("setting stream deadline: ", err)
			s.Reset()
			return
		}
	}

	protoID, handle, err := h.Mux().Negotiate(s)
	took := time.Since(before)
	if err != nil {
		if err == io.EOF {
			logf := log.Debugf
			if took > time.Second*10 {
				logf = log.Warnf
			}
			logf("protocol EOF: %s (took %s)", s.Conn().RemotePeer(), took)
		} else {
			log.Debugf("protocol mux failed: %s (took %s, id:%s, remote peer:%s, remote addr:%v)", err, took, s.ID(), s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr())
		}
		s.ResetWithError(network.StreamProtocolNegotiationFailed)
		return
	}

	if h.negtimeout > 0 {
		if err := s.SetDeadline(time.Time{}); err != nil {
			log.Debugf("resetting stream deadline: %s", err)
			s.Reset()
			return
		}
	}

	if err := s.SetProtocol(protoID); err != nil {
		log.Debugf("error setting stream protocol: %s", err)
		s.ResetWithError(network.StreamResourceLimitExceeded)
		return
	}

	log.Debugf("negotiated: %s (took %s)", protoID, took)

	handle(protoID, s)
}

func (h *BasicHost) makeUpdatedAddrEvent(prev, current []ma.Multiaddr) *event.EvtLocalAddressesUpdated {
	if prev == nil && current == nil {
		return nil
	}
	prevmap := make(map[string]ma.Multiaddr, len(prev))
	currmap := make(map[string]ma.Multiaddr, len(current))
	evt := &event.EvtLocalAddressesUpdated{Diffs: true}
	addrsAdded := false

	for _, addr := range prev {
		prevmap[string(addr.Bytes())] = addr
	}
	for _, addr := range current {
		currmap[string(addr.Bytes())] = addr
	}
	for _, addr := range currmap {
		_, ok := prevmap[string(addr.Bytes())]
		updated := event.UpdatedAddress{Address: addr}
		if ok {
			updated.Action = event.Maintained
		} else {
			updated.Action = event.Added
			addrsAdded = true
		}
		evt.Current = append(evt.Current, updated)
		delete(prevmap, string(addr.Bytes()))
	}
	for _, addr := range prevmap {
		updated := event.UpdatedAddress{Action: event.Removed, Address: addr}
		evt.Removed = append(evt.Removed, updated)
	}

	if !addrsAdded && len(evt.Removed) == 0 {
		return nil
	}

	// Our addresses have changed. Make a new signed peer record.
	if !h.disableSignedPeerRecord {
		// add signed peer record to the event
		sr, err := h.makeSignedPeerRecord(current)
		if err != nil {
			log.Errorf("error creating a signed peer record from the set of current addresses, err=%s", err)
			// drop this change
			return nil
		}
		evt.SignedPeerRecord = sr
	}

	return evt
}

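// makeSignedPeerRecord builds a peer.PeerRecord for the host's own ID from the
// given addresses, trims the address list so the sealed record stays under
// maxPeerRecordSize, and seals it with the host's private key.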
func (h *BasicHost) makeSignedPeerRecord(addrs []ma.Multiaddr) (*record.Envelope, error) {
	// Limit the length of currentAddrs to ensure that our signed peer records aren't rejected
	peerRecordSize := 64 // HostID
	k, err := h.signKey.Raw()
	if err == nil {
		peerRecordSize += 2 * len(k) // 1 for signature, 1 for public key
	}
	// we want the final address list to be small for keeping the signed peer record in size
	addrs = trimHostAddrList(addrs, maxPeerRecordSize-peerRecordSize-256) // 256 B of buffer
	rec := peer.PeerRecordFromAddrInfo(peer.AddrInfo{
		ID:    h.ID(),
		Addrs: addrs,
	})
	return record.Seal(rec, h.signKey)
}

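// background watches for address updates signalled on addrsUpdatedChan and, on
// every change, stores the refreshed signed peer record, updates the host's
// addresses in the peerstore, and emits an EvtLocalAddressesUpdated event.
// It runs until the host is closed.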
func (h *BasicHost) background() {
	defer h.refCount.Done()
	var lastAddrs []ma.Multiaddr

	emitAddrChange := func(currentAddrs []ma.Multiaddr, lastAddrs []ma.Multiaddr) {
		changeEvt := h.makeUpdatedAddrEvent(lastAddrs, currentAddrs)
		if changeEvt == nil {
			return
		}
		// Our addresses have changed.
		// store the signed peer record in the peer store.
		if !h.disableSignedPeerRecord {
			if _, err := h.caBook.ConsumePeerRecord(changeEvt.SignedPeerRecord, peerstore.PermanentAddrTTL); err != nil {
				log.Errorf("failed to persist signed peer record in peer store, err=%s", err)
				return
			}
		}
		// update host addresses in the peer store
		removedAddrs := make([]ma.Multiaddr, 0, len(changeEvt.Removed))
		for _, ua := range changeEvt.Removed {
			removedAddrs = append(removedAddrs, ua.Address)
		}
		h.Peerstore().SetAddrs(h.ID(), currentAddrs, peerstore.PermanentAddrTTL)
		h.Peerstore().SetAddrs(h.ID(), removedAddrs, 0)

		// emit addr change event
		if err := h.emitters.evtLocalAddrsUpdated.Emit(*changeEvt); err != nil {
			log.Warnf("error emitting event for updated addrs: %s", err)
		}
	}

	for {
		curr := h.Addrs()
		emitAddrChange(curr, lastAddrs)
		lastAddrs = curr

		select {
		case <-h.addrsUpdatedChan:
		case <-h.ctx.Done():
			return
		}
	}
}

// ID returns the (local) peer.ID associated with this Host
func (h *BasicHost) ID() peer.ID {
	return h.Network().LocalPeer()
}

// Peerstore returns the Host's repository of Peer Addresses and Keys.
func (h *BasicHost) Peerstore() peerstore.Peerstore {
	return h.Network().Peerstore()
}

// Network returns the Network interface of the Host
func (h *BasicHost) Network() network.Network {
	return h.network
}

// Mux returns the Mux multiplexing incoming streams to protocol handlers
func (h *BasicHost) Mux() protocol.Switch {
	return h.mux
}

// IDService returns the identify service hosted by this host.
func (h *BasicHost) IDService() identify.IDService {
	return h.ids
}

// EventBus returns the host's event bus.
func (h *BasicHost) EventBus() event.Bus {
	return h.eventbus
}

// SetStreamHandler sets the protocol handler on the Host's Mux.
// This is equivalent to:
//
//	host.Mux().SetHandler(proto, handler)
//
// (Thread-safe)
func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
	h.Mux().AddHandler(pid, func(_ protocol.ID, rwc io.ReadWriteCloser) error {
		is := rwc.(network.Stream)
		handler(is)
		return nil
	})
	h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
		Added: []protocol.ID{pid},
	})
}

// SetStreamHandlerMatch sets the protocol handler on the Host's Mux
// using a matching function to do protocol comparisons
func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
	h.Mux().AddHandlerWithFunc(pid, m, func(_ protocol.ID, rwc io.ReadWriteCloser) error {
		is := rwc.(network.Stream)
		handler(is)
		return nil
	})
	h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
		Added: []protocol.ID{pid},
	})
}

// RemoveStreamHandler removes a handler on the mux that was set by
// SetStreamHandler.
func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) {
	h.Mux().RemoveHandler(pid)
	h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
		Removed: []protocol.ID{pid},
	})
}

// NewStream opens a new stream to given peer p, and writes a p2p/protocol
// header with given protocol.ID. If there is no connection to p, attempts
// to create one. If ProtocolID is "", writes no header.
// (Thread-safe)
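//
// A typical call looks roughly like the following (illustrative only;
// "/my/proto/1.0.0" is a placeholder protocol ID):
//
//	s, err := h.NewStream(ctx, p, "/my/proto/1.0.0")
//	if err != nil {
//		// handle error
//	}
//	defer s.Close()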
func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (str network.Stream, strErr error) {
	if _, ok := ctx.Deadline(); !ok {
		if h.negtimeout > 0 {
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(ctx, h.negtimeout)
			defer cancel()
		}
	}

	// If the caller wants to prevent the host from dialing, it should use the NoDial option.
	if nodial, _ := network.GetNoDial(ctx); !nodial {
		err := h.Connect(ctx, peer.AddrInfo{ID: p})
		if err != nil {
			return nil, err
		}
	}

	s, err := h.Network().NewStream(network.WithNoDial(ctx, "already dialed"), p)
	if err != nil {
		// TODO: It would be nicer to get the actual error from the swarm,
		// but this will require some more work.
		if errors.Is(err, network.ErrNoConn) {
			return nil, errors.New("connection failed")
		}
		return nil, fmt.Errorf("failed to open stream: %w", err)
	}
	defer func() {
		if strErr != nil && s != nil {
			s.ResetWithError(network.StreamProtocolNegotiationFailed)
		}
	}()

	// Wait for any in-progress identifies on the connection to finish. This
	// is faster than negotiating.
	//
	// If the other side doesn't support identify, that's fine. This will
	// just be a no-op.
	select {
	case <-h.ids.IdentifyWait(s.Conn()):
	case <-ctx.Done():
		return nil, fmt.Errorf("identify failed to complete: %w", ctx.Err())
	}

	pref, err := h.preferredProtocol(p, pids)
	if err != nil {
		return nil, err
	}

	if pref != "" {
		if err := s.SetProtocol(pref); err != nil {
			return nil, err
		}
		lzcon := msmux.NewMSSelect(s, pref)
		return &streamWrapper{
			Stream: s,
			rw:     lzcon,
		}, nil
	}

	// Negotiate the protocol in the background, obeying the context.
	var selected protocol.ID
	errCh := make(chan error, 1)
	go func() {
		selected, err = msmux.SelectOneOf(pids, s)
		errCh <- err
	}()
	select {
	case err = <-errCh:
		if err != nil {
			return nil, fmt.Errorf("failed to negotiate protocol: %w", err)
		}
	case <-ctx.Done():
		s.ResetWithError(network.StreamProtocolNegotiationFailed)
		// wait for `SelectOneOf` to error out because of resetting the stream.
		<-errCh
		return nil, fmt.Errorf("failed to negotiate protocol: %w", ctx.Err())
	}

	if err := s.SetProtocol(selected); err != nil {
		s.ResetWithError(network.StreamResourceLimitExceeded)
		return nil, err
	}
	_ = h.Peerstore().AddProtocols(p, selected) // adding the protocol to the peerstore isn't critical
	return s, nil
}

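// preferredProtocol returns the first of the given protocol IDs that the
// peerstore reports as supported by peer p, or "" if support is not yet known.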
func (h *BasicHost) preferredProtocol(p peer.ID, pids []protocol.ID) (protocol.ID, error) {
	supported, err := h.Peerstore().SupportsProtocols(p, pids...)
	if err != nil {
		return "", err
	}

	var out protocol.ID
	if len(supported) > 0 {
		out = supported[0]
	}
	return out, nil
}

// Connect ensures there is a connection between this host and the peer with
// given peer.ID. If there is not an active connection, Connect will issue a
// h.Network.Dial, and block until a connection is open, or an error is returned.
// Connect will absorb the addresses in pi into its internal peerstore.
// It will also resolve any /dns4, /dns6, and /dnsaddr addresses.
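//
// For example (illustrative only; the peer ID and address would normally come
// from discovery or configuration):
//
//	pi := peer.AddrInfo{ID: p, Addrs: []ma.Multiaddr{addr}}
//	if err := h.Connect(ctx, pi); err != nil {
//		// handle error
//	}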
func (h *BasicHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
	// absorb addresses into peerstore
	h.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL)

	forceDirect, _ := network.GetForceDirectDial(ctx)
	canUseLimitedConn, _ := network.GetAllowLimitedConn(ctx)
	if !forceDirect {
		connectedness := h.Network().Connectedness(pi.ID)
		if connectedness == network.Connected || (canUseLimitedConn && connectedness == network.Limited) {
			return nil
		}
	}

	return h.dialPeer(ctx, pi.ID)
}

// dialPeer opens a connection to peer, and makes sure to identify
// the connection once it has been opened.
func (h *BasicHost) dialPeer(ctx context.Context, p peer.ID) error {
	log.Debugf("host %s dialing %s", h.ID(), p)
	c, err := h.Network().DialPeer(ctx, p)
	if err != nil {
		return fmt.Errorf("failed to dial: %w", err)
	}

	// TODO: Consider removing this? On one hand, it's nice because we can
	// assume that things like the agent version are usually set when this
	// returns. On the other hand, we don't _really_ need to wait for this.
	//
	// This is mostly here to preserve existing behavior.
	select {
	case <-h.ids.IdentifyWait(c):
	case <-ctx.Done():
		return fmt.Errorf("identify failed to complete: %w", ctx.Err())
	}

	log.Debugf("host %s finished dialing %s", h.ID(), p)
	return nil
}

func (h *BasicHost) ConnManager() connmgr.ConnManager {
	return h.cmgr
}

// Addrs returns listening addresses. The output is the same as AllAddrs, but
// processed by AddrsFactory.
// When used with AutoRelay, and if the host is not publicly reachable,
// this will include only the host's private and relay addresses, with no
// public addresses.
func (h *BasicHost) Addrs() []ma.Multiaddr {
	return h.addressManager.Addrs()
}

// NormalizeMultiaddr returns a multiaddr suitable for equality checks.
// If the multiaddr is a webtransport or webrtc-direct multiaddr, it removes the certhashes.
func (h *BasicHost) NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
	ok, n := libp2pwebtransport.IsWebtransportMultiaddr(addr)
	if !ok {
		ok, n = libp2pwebrtc.IsWebRTCDirectMultiaddr(addr)
	}
	if ok && n > 0 {
		out := addr
		for i := 0; i < n; i++ {
			out, _ = ma.SplitLast(out)
		}
		return out
	}
	return addr
}

// AllAddrs returns all the addresses the host is listening on except circuit addresses.
func (h *BasicHost) AllAddrs() []ma.Multiaddr {
	return h.addressManager.DirectAddrs()
}

// ConfirmedAddrs returns all addresses of the host grouped by their reachability
// as verified by autonatv2.
//
// Experimental: This API may change in the future without deprecation.
//
// Requires AutoNATv2 to be enabled.
func (h *BasicHost) ConfirmedAddrs() (reachable []ma.Multiaddr, unreachable []ma.Multiaddr, unknown []ma.Multiaddr) {
	return h.addressManager.ConfirmedAddrs()
}

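// trimHostAddrList returns addrs unchanged if their total encoded size fits in
// maxSize bytes. Otherwise it sorts the addresses by a heuristic score,
// preferring public over private over loopback addresses and ranking
// transports (QUIC-v1 over TCP over WSS over WebTransport over WebRTC-Direct),
// and drops the lowest-scoring addresses until the list fits.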
func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr {
	totalSize := 0
	for _, a := range addrs {
		totalSize += len(a.Bytes())
	}
	if totalSize <= maxSize {
		return addrs
	}

	score := func(addr ma.Multiaddr) int {
		var res int
		if manet.IsPublicAddr(addr) {
			res |= 1 << 12
		} else if !manet.IsIPLoopback(addr) {
			res |= 1 << 11
		}
		var protocolWeight int
		ma.ForEach(addr, func(c ma.Component) bool {
			switch c.Protocol().Code {
			case ma.P_QUIC_V1:
				protocolWeight = 5
			case ma.P_TCP:
				protocolWeight = 4
			case ma.P_WSS:
				protocolWeight = 3
			case ma.P_WEBTRANSPORT:
				protocolWeight = 2
			case ma.P_WEBRTC_DIRECT:
				protocolWeight = 1
			case ma.P_P2P:
				return false
			}
			return true
		})
		res |= 1 << protocolWeight
		return res
	}

	slices.SortStableFunc(addrs, func(a, b ma.Multiaddr) int {
		return score(b) - score(a) // b-a for reverse order
	})
	totalSize = 0
	for i, a := range addrs {
		totalSize += len(a.Bytes())
		if totalSize > maxSize {
			addrs = addrs[:i]
			break
		}
	}
	return addrs
}

// SetAutoNat sets the autonat service for the host.
func (h *BasicHost) SetAutoNat(a autonat.AutoNAT) {
	h.autoNATMx.Lock()
	defer h.autoNATMx.Unlock()
	if h.autoNat == nil {
		h.autoNat = a
	}
}

// GetAutoNat returns the host's AutoNAT service, if AutoNAT is enabled.
//
// Deprecated: Use `BasicHost.Reachability` to get the host's reachability.
func (h *BasicHost) GetAutoNat() autonat.AutoNAT {
	h.autoNATMx.Lock()
	defer h.autoNATMx.Unlock()
	return h.autoNat
}

// Reachability returns the host's reachability status.
func (h *BasicHost) Reachability() network.Reachability {
	return *h.addressManager.hostReachability.Load()
}

// Close shuts down the Host's services (network, etc).
func (h *BasicHost) Close() error {
	h.closeSync.Do(func() {
		h.ctxCancel()
		if h.cmgr != nil {
			h.cmgr.Close()
		}
		h.addressManager.Close()

		if h.ids != nil {
			h.ids.Close()
		}
		if h.autoNat != nil {
			h.autoNat.Close()
		}
		if h.relayManager != nil {
			h.relayManager.Close()
		}
		if h.hps != nil {
			h.hps.Close()
		}
		if h.autonatv2 != nil {
			h.autonatv2.Close()
		}

		_ = h.emitters.evtLocalProtocolsUpdated.Close()
		_ = h.emitters.evtLocalAddrsUpdated.Close()

		if err := h.network.Close(); err != nil {
			log.Errorf("swarm close failed: %v", err)
		}

		h.psManager.Close()
		if h.Peerstore() != nil {
			h.Peerstore().Close()
		}

		h.refCount.Wait()

		if h.Network().ResourceManager() != nil {
			h.Network().ResourceManager().Close()
		}
	})

	return nil
}

type streamWrapper struct {
	network.Stream
	rw io.ReadWriteCloser
}

func (s *streamWrapper) Read(b []byte) (int, error) {
	return s.rw.Read(b)
}

func (s *streamWrapper) Write(b []byte) (int, error) {
	return s.rw.Write(b)
}

func (s *streamWrapper) Close() error {
	return s.rw.Close()
}

func (s *streamWrapper) CloseWrite() error {
	// Flush the handshake before closing, but ignore the error. The other
	// end may have closed their side for reading.
	//
	// If something is wrong with the stream, the user will get an error on
	// read instead.
	if flusher, ok := s.rw.(interface{ Flush() error }); ok {
		_ = flusher.Flush()
	}
	return s.Stream.CloseWrite()
}