package quic

import (
	
	
	
	
	
	
	
	
	
	
	
	

	
	
	
	
	
	
	
	
	
	
	
)

// unpacker unpacks the header and decrypts the payload of received QUIC packets.
type unpacker interface {
	// UnpackLongHeader unpacks a long header packet (Initial, Handshake, 0-RTT).
	UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error)
	// UnpackShortHeader unpacks a 1-RTT (short header) packet, returning the
	// packet number, its encoded length, the key phase bit, and the decrypted payload.
	UnpackShortHeader(rcvTime monotime.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error)
}

// cryptoStreamHandler is the interface to the TLS stack driving the cryptographic handshake.
type cryptoStreamHandler interface {
	StartHandshake(context.Context) error
	ChangeConnectionID(protocol.ConnectionID)
	SetLargest1RTTAcked(protocol.PacketNumber) error
	SetHandshakeConfirmed()
	GetSessionTicket() ([]byte, error)
	// NextEvent returns the next handshake event (e.g. new keys available), if any.
	NextEvent() handshake.Event
	DiscardInitialKeys()
	// HandleMessage processes a handshake message received at the given encryption level.
	HandleMessage([]byte, protocol.EncryptionLevel) error
	io.Closer
	ConnectionState() handshake.ConnectionState
}

// receivedPacket is a UDP datagram (or a single coalesced packet within one)
// received from the peer, together with its receive metadata.
type receivedPacket struct {
	buffer *packetBuffer // reference-counted backing buffer for data

	remoteAddr net.Addr
	rcvTime    monotime.Time
	data       []byte

	ecn protocol.ECN // ECN bits reported by the kernel for this datagram

	info packetInfo // only valid if the contained IP address is valid
}

// receivedPacketWithDatagramID pairs a received packet with the qlog datagram ID
// of the UDP datagram it arrived in, so coalesced packets can be correlated in traces.
type receivedPacketWithDatagramID struct {
	receivedPacket
	datagramID qlog.DatagramID
}

func ( *receivedPacket) () protocol.ByteCount { return protocol.ByteCount(len(.data)) }

func ( *receivedPacket) () *receivedPacket {
	return &receivedPacket{
		remoteAddr: .remoteAddr,
		rcvTime:    .rcvTime,
		data:       .data,
		buffer:     .buffer,
		ecn:        .ecn,
		info:       .info,
	}
}

// connRunner is the interface a connection uses to (un)register itself and its
// connection IDs / stateless reset tokens with the packet-routing layer.
type connRunner interface {
	Add(protocol.ConnectionID, packetHandler) bool
	Remove(protocol.ConnectionID)
	// ReplaceWithClosed replaces the given connection IDs with a closed-connection
	// placeholder that replays the given CONNECTION_CLOSE packet for the given duration.
	ReplaceWithClosed([]protocol.ConnectionID, []byte, time.Duration)
	AddResetToken(protocol.StatelessResetToken, packetHandler)
	RemoveResetToken(protocol.StatelessResetToken)
}

// closeError describes why the connection is being closed.
type closeError struct {
	err error
	// immediate indicates whether to skip the graceful closing procedure
	// (e.g. when the connection is destroyed rather than closed).
	immediate bool
}

// errCloseForRecreating signals that the connection should be torn down and
// recreated (e.g. after receiving a Version Negotiation packet), carrying the
// state the new connection should start from.
type errCloseForRecreating struct {
	nextPacketNumber protocol.PacketNumber
	nextVersion      protocol.Version
}

func ( *errCloseForRecreating) () string {
	return "closing connection in order to recreate it"
}

// deadlineSendImmediately is a sentinel timer deadline meaning "send right away".
var deadlineSendImmediately = monotime.Time(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine

// blockMode describes whether (and how) the connection is currently blocked from sending.
type blockMode uint8

const (
	// blockModeNone means that the connection is not blocked.
	blockModeNone blockMode = iota
	// blockModeCongestionLimited means that the connection is congestion limited.
	// In that case, we can still send acknowledgments and PTO probe packets.
	blockModeCongestionLimited
	// blockModeHardBlocked means that no packet can be sent, under no circumstances. This can happen when:
	// * the send queue is full
	// * the SentPacketHandler returns SendNone, e.g. when we are tracking the maximum number of packets
	// In that case, the timer will be set to the idle timeout.
	blockModeHardBlocked
)

// A Conn is a QUIC connection between two peers.
// Calls to the connection (and to streams) can return the following types of errors:
//   - [ApplicationError]: for errors triggered by the application running on top of QUIC
//   - [TransportError]: for errors triggered by the QUIC transport (in many cases a misbehaving peer)
//   - [IdleTimeoutError]: when the peer goes away unexpectedly (this is a [net.Error] timeout error)
//   - [HandshakeTimeoutError]: when the cryptographic handshake takes too long (this is a [net.Error] timeout error)
//   - [StatelessResetError]: when we receive a stateless reset
//   - [VersionNegotiationError]: returned by the client, when there's no version overlap between the peers
type Conn struct {
	// Destination connection ID used during the handshake.
	// Used to check source connection ID on incoming packets.
	handshakeDestConnID protocol.ConnectionID
	// Set for the client. Destination connection ID used on the first Initial sent.
	origDestConnID protocol.ConnectionID
	retrySrcConnID *protocol.ConnectionID // only set for the client (and if a Retry was performed)

	srcConnIDLen int

	perspective protocol.Perspective
	version     protocol.Version
	config      *Config

	conn      sendConn
	sendQueue sender

	// lazily initialized: most connections never migrate
	pathManager         *pathManager
	largestRcvdAppData  protocol.PacketNumber
	pathManagerOutgoing atomic.Pointer[pathManagerOutgoing]

	streamsMap      *streamsMap
	connIDManager   *connIDManager
	connIDGenerator *connIDGenerator

	rttStats  *utils.RTTStats
	connStats utils.ConnectionStats

	cryptoStreamManager   *cryptoStreamManager
	sentPacketHandler     ackhandler.SentPacketHandler
	receivedPacketHandler ackhandler.ReceivedPacketHandler
	retransmissionQueue   *retransmissionQueue
	framer                *framer
	connFlowController    flowcontrol.ConnectionFlowController
	tokenStoreKey         string                    // only set for the client
	tokenGenerator        *handshake.TokenGenerator // only set for the server

	unpacker      unpacker
	frameParser   wire.FrameParser
	packer        packer
	mtuDiscoverer mtuDiscoverer // initialized when the transport parameters are received

	currentMTUEstimate atomic.Uint32

	initialStream       *initialCryptoStream
	handshakeStream     *cryptoStream
	oneRTTStream        *cryptoStream // only set for the server
	cryptoStreamHandler cryptoStreamHandler

	// notifyReceivedPacket signals the run loop that receivedPackets is non-empty.
	notifyReceivedPacket chan struct{}
	sendingScheduled     chan struct{}
	receivedPacketMx     sync.Mutex
	receivedPackets      ringbuffer.RingBuffer[receivedPacket]

	// closeChan is used to notify the run loop that it should terminate
	closeChan chan struct{}
	closeErr  atomic.Pointer[closeError]

	ctx                   context.Context
	ctxCancel             context.CancelCauseFunc
	handshakeCompleteChan chan struct{}

	undecryptablePackets          []receivedPacketWithDatagramID // undecryptable packets, waiting for a change in encryption level
	undecryptablePacketsToProcess []receivedPacketWithDatagramID

	earlyConnReadyChan chan struct{}
	sentFirstPacket    bool
	droppedInitialKeys bool
	handshakeComplete  bool
	handshakeConfirmed bool

	receivedRetry       bool
	versionNegotiated   bool
	receivedFirstPacket bool

	blocked blockMode

	// the minimum of the max_idle_timeout values advertised by both endpoints
	idleTimeout  time.Duration
	creationTime monotime.Time
	// The idle timeout is set based on the max of the time we received the last packet...
	lastPacketReceivedTime monotime.Time
	// ... and the time we sent a new ack-eliciting packet after receiving a packet.
	firstAckElicitingPacketAfterIdleSentTime monotime.Time
	// pacingDeadline is the time when the next packet should be sent
	pacingDeadline monotime.Time

	peerParams *wire.TransportParameters

	timer *time.Timer
	// keepAlivePingSent stores whether a keep alive PING is in flight.
	// It is reset as soon as we receive a packet from the peer.
	keepAlivePingSent bool
	keepAliveInterval time.Duration

	datagramQueue *datagramQueue

	connStateMutex sync.Mutex
	connState      ConnectionState

	logID     string
	qlogTrace qlogwriter.Trace
	qlogger   qlogwriter.Recorder
	logger    utils.Logger
}

// compile-time check that Conn implements the streamSender interface
var _ streamSender = &Conn{}

// connTestHooks allows tests to intercept the connection's entry points
// (see wrappedConn); each field, when set, replaces the corresponding method.
type connTestHooks struct {
	run                     func() error
	earlyConnReady          func() <-chan struct{}
	context                 func() context.Context
	handshakeComplete       func() <-chan struct{}
	closeWithTransportError func(TransportErrorCode)
	destroy                 func(error)
	handlePacket            func(receivedPacket)
}

// wrappedConn wraps a Conn with optional test hooks.
type wrappedConn struct {
	testHooks *connTestHooks
	*Conn
}

// newConnection creates a new server-side QUIC connection.
// It is declared as a variable so that it can be mocked in tests.
// NOTE(review): parameter and local variable names appear to have been stripped
// from this file; the body is preserved unchanged below and will not compile
// until the identifiers are restored from upstream.
var newConnection = func(
	 context.Context,
	 context.CancelCauseFunc,
	 sendConn,
	 connRunner,
	 protocol.ConnectionID,
	 *protocol.ConnectionID,
	 protocol.ConnectionID,
	 protocol.ConnectionID,
	 protocol.ConnectionID,
	 ConnectionIDGenerator,
	 *statelessResetter,
	 *Config,
	 *tls.Config,
	 *handshake.TokenGenerator,
	 bool,
	 time.Duration,
	 qlogwriter.Trace,
	 utils.Logger,
	 protocol.Version,
) *wrappedConn {
	 := &Conn{
		ctx:                 ,
		ctxCancel:           ,
		conn:                ,
		config:              ,
		handshakeDestConnID: ,
		srcConnIDLen:        .Len(),
		tokenGenerator:      ,
		oneRTTStream:        newCryptoStream(),
		perspective:         protocol.PerspectiveServer,
		qlogTrace:           ,
		logger:              ,
		version:             ,
	}
	if  != nil {
		.qlogger = .AddProducer()
	}
	if .Len() > 0 {
		.logID = .String()
	} else {
		.logID = .String()
	}
	.connIDManager = newConnIDManager(
		,
		func( protocol.StatelessResetToken) { .AddResetToken(, ) },
		.RemoveResetToken,
		.queueControlFrame,
	)
	.connIDGenerator = newConnIDGenerator(
		,
		,
		&,
		,
		connRunnerCallbacks{
			AddConnectionID:    func( protocol.ConnectionID) { .Add(, ) },
			RemoveConnectionID: .Remove,
			ReplaceWithClosed:  .ReplaceWithClosed,
		},
		.queueControlFrame,
		,
	)
	.preSetup()
	.rttStats.SetInitialRTT()
	.sentPacketHandler = ackhandler.NewSentPacketHandler(
		0,
		protocol.ByteCount(.config.InitialPacketSize),
		.rttStats,
		&.connStats,
		,
		.conn.capabilities().ECN,
		.receivedPacketHandler.IgnorePacketsBelow,
		.perspective,
		.qlogger,
		.logger,
	)
	.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(.config.InitialPacketSize))))
	 := .GetStatelessResetToken()
	 := &wire.TransportParameters{
		InitialMaxStreamDataBidiLocal:   protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataBidiRemote:  protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataUni:         protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxData:                  protocol.ByteCount(.config.InitialConnectionReceiveWindow),
		MaxIdleTimeout:                  .config.MaxIdleTimeout,
		MaxBidiStreamNum:                protocol.StreamNum(.config.MaxIncomingStreams),
		MaxUniStreamNum:                 protocol.StreamNum(.config.MaxIncomingUniStreams),
		MaxAckDelay:                     protocol.MaxAckDelayInclGranularity,
		AckDelayExponent:                protocol.AckDelayExponent,
		MaxUDPPayloadSize:               protocol.MaxPacketBufferSize,
		StatelessResetToken:             &,
		OriginalDestinationConnectionID: ,
		// For interoperability with quic-go versions before May 2023, this value must be set to a value
		// different from protocol.DefaultActiveConnectionIDLimit.
		// If set to the default value, it will be omitted from the transport parameters, which will make
		// old quic-go versions interpret it as 0, instead of the default value of 2.
		// See https://github.com/quic-go/quic-go/pull/3806.
		ActiveConnectionIDLimit:   protocol.MaxActiveConnectionIDs,
		InitialSourceConnectionID: ,
		RetrySourceConnectionID:   ,
		EnableResetStreamAt:       .EnableStreamResetPartialDelivery,
	}
	if .config.EnableDatagrams {
		.MaxDatagramFrameSize = wire.MaxDatagramSize
	} else {
		.MaxDatagramFrameSize = protocol.InvalidByteCount
	}
	if .qlogger != nil {
		.qlogTransportParameters(, protocol.PerspectiveServer, false)
	}
	 := handshake.NewCryptoSetupServer(
		,
		.LocalAddr(),
		.RemoteAddr(),
		,
		,
		.Allow0RTT,
		.rttStats,
		.qlogger,
		,
		.version,
	)
	.cryptoStreamHandler = 
	.packer = newPacketPacker(, .connIDManager.Get, .initialStream, .handshakeStream, .sentPacketHandler, .retransmissionQueue, , .framer, &.receivedPacketHandler, .datagramQueue, .perspective)
	.unpacker = newPacketUnpacker(, .srcConnIDLen)
	.cryptoStreamManager = newCryptoStreamManager(.initialStream, .handshakeStream, .oneRTTStream)
	return &wrappedConn{Conn: }
}

// declared as a variable, so that it can be mocked in tests
// newClientConnection creates a new client-side QUIC connection.
// NOTE(review): parameter and local variable names appear to have been stripped
// from this file; the body is preserved unchanged below and will not compile
// until the identifiers are restored from upstream.
var newClientConnection = func(
	 context.Context,
	 sendConn,
	 connRunner,
	 protocol.ConnectionID,
	 protocol.ConnectionID,
	 ConnectionIDGenerator,
	 *statelessResetter,
	 *Config,
	 *tls.Config,
	 protocol.PacketNumber,
	 bool,
	 bool,
	 qlogwriter.Trace,
	 utils.Logger,
	 protocol.Version,
) *wrappedConn {
	 := &Conn{
		conn:                ,
		config:              ,
		origDestConnID:      ,
		handshakeDestConnID: ,
		srcConnIDLen:        .Len(),
		perspective:         protocol.PerspectiveClient,
		logID:               .String(),
		logger:              ,
		qlogTrace:           ,
		versionNegotiated:   ,
		version:             ,
	}
	if  != nil {
		.qlogger = .AddProducer()
	}
	if .qlogger != nil {
		var ,  *net.UDPAddr
		if ,  := .LocalAddr().(*net.UDPAddr);  {
			 = 
		}
		if ,  := .RemoteAddr().(*net.UDPAddr);  {
			 = 
		}
		.qlogger.RecordEvent(startedConnectionEvent(, ))
	}
	.connIDManager = newConnIDManager(
		,
		func( protocol.StatelessResetToken) { .AddResetToken(, ) },
		.RemoveResetToken,
		.queueControlFrame,
	)
	.connIDGenerator = newConnIDGenerator(
		,
		,
		nil,
		,
		connRunnerCallbacks{
			AddConnectionID:    func( protocol.ConnectionID) { .Add(, ) },
			RemoveConnectionID: .Remove,
			ReplaceWithClosed:  .ReplaceWithClosed,
		},
		.queueControlFrame,
		,
	)
	.ctx, .ctxCancel = context.WithCancelCause()
	.preSetup()
	.sentPacketHandler = ackhandler.NewSentPacketHandler(
		,
		protocol.ByteCount(.config.InitialPacketSize),
		.rttStats,
		&.connStats,
		false, // has no effect
		.conn.capabilities().ECN,
		.receivedPacketHandler.IgnorePacketsBelow,
		.perspective,
		.qlogger,
		.logger,
	)
	.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(.config.InitialPacketSize))))
	 := newCryptoStream()
	 := &wire.TransportParameters{
		InitialMaxStreamDataBidiRemote: protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataBidiLocal:  protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataUni:        protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxData:                 protocol.ByteCount(.config.InitialConnectionReceiveWindow),
		MaxIdleTimeout:                 .config.MaxIdleTimeout,
		MaxBidiStreamNum:               protocol.StreamNum(.config.MaxIncomingStreams),
		MaxUniStreamNum:                protocol.StreamNum(.config.MaxIncomingUniStreams),
		MaxAckDelay:                    protocol.MaxAckDelayInclGranularity,
		MaxUDPPayloadSize:              protocol.MaxPacketBufferSize,
		AckDelayExponent:               protocol.AckDelayExponent,
		// For interoperability with quic-go versions before May 2023, this value must be set to a value
		// different from protocol.DefaultActiveConnectionIDLimit.
		// If set to the default value, it will be omitted from the transport parameters, which will make
		// old quic-go versions interpret it as 0, instead of the default value of 2.
		// See https://github.com/quic-go/quic-go/pull/3806.
		ActiveConnectionIDLimit:   protocol.MaxActiveConnectionIDs,
		InitialSourceConnectionID: ,
		EnableResetStreamAt:       .EnableStreamResetPartialDelivery,
	}
	if .config.EnableDatagrams {
		.MaxDatagramFrameSize = wire.MaxDatagramSize
	} else {
		.MaxDatagramFrameSize = protocol.InvalidByteCount
	}
	if .qlogger != nil {
		.qlogTransportParameters(, protocol.PerspectiveClient, false)
	}
	 := handshake.NewCryptoSetupClient(
		,
		,
		,
		,
		.rttStats,
		.qlogger,
		,
		.version,
	)
	.cryptoStreamHandler = 
	.cryptoStreamManager = newCryptoStreamManager(.initialStream, .handshakeStream, )
	.unpacker = newPacketUnpacker(, .srcConnIDLen)
	.packer = newPacketPacker(, .connIDManager.Get, .initialStream, .handshakeStream, .sentPacketHandler, .retransmissionQueue, , .framer, &.receivedPacketHandler, .datagramQueue, .perspective)
	if len(.ServerName) > 0 {
		.tokenStoreKey = .ServerName
	} else {
		.tokenStoreKey = .RemoteAddr().String()
	}
	if .config.TokenStore != nil {
		if  := .config.TokenStore.Pop(.tokenStoreKey);  != nil {
			.packer.SetToken(.data)
			.rttStats.SetInitialRTT(.rtt)
		}
	}
	return &wrappedConn{Conn: }
}

// preSetup initializes all state shared between client and server connections:
// crypto streams, send queue, frame parser, flow controllers, streams map,
// channels, and timestamps. Called from both constructors before the
// perspective-specific setup.
// NOTE(review): receiver and local names have been stripped; body preserved unchanged.
func ( *Conn) () {
	.largestRcvdAppData = protocol.InvalidPacketNumber
	.initialStream = newInitialCryptoStream(.perspective == protocol.PerspectiveClient)
	.handshakeStream = newCryptoStream()
	.sendQueue = newSendQueue(.conn)
	.retransmissionQueue = newRetransmissionQueue()
	.frameParser = *wire.NewFrameParser(
		.config.EnableDatagrams,
		.config.EnableStreamResetPartialDelivery,
		false, // ACK_FREQUENCY is not supported yet
	)
	.rttStats = utils.NewRTTStats()
	.connFlowController = flowcontrol.NewConnectionFlowController(
		protocol.ByteCount(.config.InitialConnectionReceiveWindow),
		protocol.ByteCount(.config.MaxConnectionReceiveWindow),
		func( protocol.ByteCount) bool {
			if .config.AllowConnectionWindowIncrease == nil {
				return true
			}
			return .config.AllowConnectionWindowIncrease(, uint64())
		},
		.rttStats,
		.logger,
	)
	.earlyConnReadyChan = make(chan struct{})
	.streamsMap = newStreamsMap(
		.ctx,
		,
		.queueControlFrame,
		.newFlowController,
		uint64(.config.MaxIncomingStreams),
		uint64(.config.MaxIncomingUniStreams),
		.perspective,
	)
	.framer = newFramer(.connFlowController)
	.receivedPackets.Init(8)
	.notifyReceivedPacket = make(chan struct{}, 1)
	.closeChan = make(chan struct{}, 1)
	.sendingScheduled = make(chan struct{}, 1)
	.handshakeCompleteChan = make(chan struct{})

	 := monotime.Now()
	.lastPacketReceivedTime = 
	.creationTime = 

	.receivedPacketHandler = *ackhandler.NewReceivedPacketHandler(.logger)

	.datagramQueue = newDatagramQueue(.scheduleSending, .logger)
	.connState.Version = .version
}

// run the connection main loop: it starts the handshake, then repeatedly
// processes undecryptable packets, received packets, timer events (loss
// detection, keep-alive, idle / handshake timeouts) and sending, until the
// connection is closed. On exit it drains queued packets and returns the
// close error.
// NOTE(review): receiver, local names and the loop label have been stripped;
// body preserved unchanged.
func ( *Conn) () ( error) {
	defer func() { .ctxCancel() }()

	defer func() {
		// drain queued packets that will never be processed
		.receivedPacketMx.Lock()
		defer .receivedPacketMx.Unlock()

		for !.receivedPackets.Empty() {
			 := .receivedPackets.PopFront()
			.buffer.Decrement()
			.buffer.MaybeRelease()
		}
	}()

	.timer = time.NewTimer(monotime.Until(.idleTimeoutStartTime().Add(.config.HandshakeIdleTimeout)))

	if  := .cryptoStreamHandler.StartHandshake(.ctx);  != nil {
		return 
	}
	if  := .handleHandshakeEvents(monotime.Now());  != nil {
		return 
	}
	go func() {
		if  := .sendQueue.Run();  != nil {
			.destroyImpl()
		}
	}()

	if .perspective == protocol.PerspectiveClient {
		.scheduleSending() // so the ClientHello actually gets sent
	}

	var  <-chan struct{}

:
	for {
		if .framer.QueuedTooManyControlFrames() {
			.setCloseError(&closeError{err: &qerr.TransportError{ErrorCode: InternalError}})
			break 
		}
		// Close immediately if requested
		select {
		case <-.closeChan:
			break 
		default:
		}

		// no need to set a timer if we can send packets immediately
		if .pacingDeadline != deadlineSendImmediately {
			.maybeResetTimer()
		}

		// 1st: handle undecryptable packets, if any.
		// This can only occur before completion of the handshake.
		if len(.undecryptablePacketsToProcess) > 0 {
			var  bool
			 := .undecryptablePacketsToProcess
			.undecryptablePacketsToProcess = nil
			for ,  := range  {
				,  := .handleOnePacket(.receivedPacket, .datagramID)
				if  != nil {
					.setCloseError(&closeError{err: })
					break 
				}
				if  {
					 = true
				}
			}
			if  {
				// if we processed any undecryptable packets, jump to the resetting of the timers directly
				continue
			}
		}

		// 2nd: receive packets.
		,  := .handlePackets() // don't check receivedPackets.Len() in the run loop to avoid locking the mutex
		if  != nil {
			.setCloseError(&closeError{err: })
			break 
		}

		// We don't need to wait for new events if:
		// * we processed packets: we probably need to send an ACK, and potentially more data
		// * the pacer allows us to send more packets immediately
		 :=  == nil && ( || .pacingDeadline.Equal(deadlineSendImmediately))
		if ! {
			// 3rd: wait for something to happen:
			// * closing of the connection
			// * timer firing
			// * sending scheduled
			// * send queue available
			// * received packets
			select {
			case <-.closeChan:
				break 
			case <-.timer.C:
			case <-.sendingScheduled:
			case <-:
			case <-.notifyReceivedPacket:
				,  := .handlePackets()
				if  != nil {
					.setCloseError(&closeError{err: })
					break 
				}
				// if we processed any undecryptable packets, jump to the resetting of the timers directly
				if ! {
					continue
				}
			}
		}

		// Check for loss detection timeout.
		// This could cause packets to be declared lost, and retransmissions to be enqueued.
		 := monotime.Now()
		if  := .sentPacketHandler.GetLossDetectionTimeout(); !.IsZero() && !.After() {
			if  := .sentPacketHandler.OnLossDetectionTimeout();  != nil {
				.setCloseError(&closeError{err: })
				break 
			}
		}

		if  := .nextKeepAliveTime(); !.IsZero() && !.Before() {
			// send a PING frame since there is no activity in the connection
			.logger.Debugf("Sending a keep-alive PING to keep the connection alive.")
			.framer.QueueControlFrame(&wire.PingFrame{})
			.keepAlivePingSent = true
		} else if !.handshakeComplete && .Sub(.creationTime) >= .config.handshakeTimeout() {
			.destroyImpl(qerr.ErrHandshakeTimeout)
			break 
		} else {
			 := .idleTimeoutStartTime()
			if (!.handshakeComplete && .Sub() >= .config.HandshakeIdleTimeout) ||
				(.handshakeComplete && !.Before(.nextIdleTimeoutTime())) {
				.destroyImpl(qerr.ErrIdleTimeout)
				break 
			}
		}

		.connIDGenerator.RemoveRetiredConnIDs()

		if .perspective == protocol.PerspectiveClient {
			 := .pathManagerOutgoing.Load()
			if  != nil {
				,  := .ShouldSwitchPath()
				if  {
					.switchToNewPath(, )
				}
			}
		}

		if .sendQueue.WouldBlock() {
			// The send queue is still busy sending out packets. Wait until there's space to enqueue new packets.
			 = .sendQueue.Available()
			// Cancel the pacing timer, as we can't send any more packets until the send queue is available again.
			.pacingDeadline = 0
			.blocked = blockModeHardBlocked
			continue
		}

		if .closeErr.Load() != nil {
			break 
		}

		.blocked = blockModeNone // sending might set it back to true if we're congestion limited
		if  := .triggerSending();  != nil {
			.setCloseError(&closeError{err: })
			break 
		}
		if .sendQueue.WouldBlock() {
			// The send queue is still busy sending out packets. Wait until there's space to enqueue new packets.
			 = .sendQueue.Available()
			// Cancel the pacing timer, as we can't send any more packets until the send queue is available again.
			.pacingDeadline = 0
			.blocked = blockModeHardBlocked
		} else {
			 = nil
		}
	}

	 := .closeErr.Load()
	.cryptoStreamHandler.Close()
	.sendQueue.Close() // close the send queue before sending the CONNECTION_CLOSE
	.handleCloseError()
	if .qlogger != nil {
		if  := (&errCloseForRecreating{}); !errors.As(.err, &) {
			.qlogger.Close()
		}
	}
	.logger.Infof("Connection %s closed.", .logID)
	.timer.Stop()
	return .err
}

// returns a channel that is closed when the early (0-RTT) connection can be used
func ( *Conn) () <-chan struct{} {
	return .earlyConnReadyChan
}

// Context returns a context that is cancelled when the connection is closed.
// The cancellation cause is set to the error that caused the connection to close.
func ( *Conn) () context.Context {
	return .ctx
}

// reports whether the peer advertised datagram support (max_datagram_frame_size > 0)
func ( *Conn) () bool {
	return .peerParams.MaxDatagramFrameSize > 0
}

// ConnectionState returns basic details about the QUIC connection.
// It refreshes the cached state from the TLS stack and the peer's transport
// parameters on every call.
func ( *Conn) () ConnectionState {
	.connStateMutex.Lock()
	defer .connStateMutex.Unlock()

	 := .cryptoStreamHandler.ConnectionState()
	.connState.TLS = .ConnectionState
	.connState.Used0RTT = .Used0RTT
	// peerParams is only set once the peer's transport parameters were received
	if .peerParams != nil {
		.connState.SupportsDatagrams.Remote = .supportsDatagrams()
		.connState.SupportsStreamResetPartialDelivery.Remote = .peerParams.EnableResetStreamAt
	}
	.connState.SupportsDatagrams.Local = .config.EnableDatagrams
	.connState.SupportsStreamResetPartialDelivery.Local = .config.EnableStreamResetPartialDelivery
	.connState.GSO = .conn.capabilities().GSO
	return .connState
}

// ConnectionStats contains statistics about the QUIC connection.
// All values are snapshots taken at the time of the call that produced them.
type ConnectionStats struct {
	// MinRTT is the estimate of the minimum RTT observed on the active network
	// path.
	MinRTT time.Duration
	// LatestRTT is the last RTT sample observed on the active network path.
	LatestRTT time.Duration
	// SmoothedRTT is an exponentially weighted moving average of an endpoint's
	// RTT samples. See https://www.rfc-editor.org/rfc/rfc9002#section-5.3
	SmoothedRTT time.Duration
	// MeanDeviation estimates the variation in the RTT samples using a mean
	// variation. See https://www.rfc-editor.org/rfc/rfc9002#section-5.3
	MeanDeviation time.Duration

	// BytesSent is the number of bytes sent on the underlying connection,
	// including retransmissions. Does not include UDP or any other outer
	// framing.
	BytesSent uint64
	// PacketsSent is the number of packets sent on the underlying connection,
	// including those that are determined to have been lost.
	PacketsSent uint64
	// BytesReceived is the number of total bytes received on the underlying
	// connection, including duplicate data for streams. Does not include UDP or
	// any other outer framing.
	BytesReceived uint64
	// PacketsReceived is the number of total packets received on the underlying
	// connection, including packets that were not processable.
	PacketsReceived uint64
	// BytesLost is the number of bytes lost on the underlying connection (does
	// not monotonically increase, because packets that are declared lost can
	// subsequently be received). Does not include UDP or any other outer
	// framing.
	BytesLost uint64
	// PacketsLost is the number of packets lost on the underlying connection
	// (does not monotonically increase, because packets that are declared lost
	// can subsequently be received).
	PacketsLost uint64
}

// returns a snapshot of the connection's RTT estimates and send/receive/loss counters
func ( *Conn) () ConnectionStats {
	return ConnectionStats{
		MinRTT:        .rttStats.MinRTT(),
		LatestRTT:     .rttStats.LatestRTT(),
		SmoothedRTT:   .rttStats.SmoothedRTT(),
		MeanDeviation: .rttStats.MeanDeviation(),

		BytesSent:       .connStats.BytesSent.Load(),
		PacketsSent:     .connStats.PacketsSent.Load(),
		BytesReceived:   .connStats.BytesReceived.Load(),
		PacketsReceived: .connStats.PacketsReceived.Load(),
		BytesLost:       .connStats.BytesLost.Load(),
		PacketsLost:     .connStats.PacketsLost.Load(),
	}
}

// Time when the connection should time out.
// The effective timeout is at least 3 PTOs, even if the negotiated idle timeout is shorter.
func ( *Conn) () monotime.Time {
	 := max(.idleTimeout, .rttStats.PTO(true)*3)
	return .idleTimeoutStartTime().Add()
}

// Time when the next keep-alive packet should be sent.
// It returns a zero time if no keep-alive should be sent
// (keep-alives disabled, or one is already in flight).
func ( *Conn) () monotime.Time {
	if .config.KeepAlivePeriod == 0 || .keepAlivePingSent {
		return 0
	}
	// don't send keep-alives more often than 1.5 PTOs
	 := max(.keepAliveInterval, .rttStats.PTO(true)*3/2)
	return .lastPacketReceivedTime.Add()
}

// resets the run-loop timer to the earliest relevant deadline:
// handshake / idle timeout, keep-alive, ACK alarm, loss detection, and pacing —
// progressively excluding deadlines that cannot fire in the current block mode.
// NOTE(review): receiver and local names have been stripped; body preserved unchanged.
func ( *Conn) () {
	var  monotime.Time
	if !.handshakeComplete {
		 = .creationTime.Add(.config.handshakeTimeout())
		if  := .idleTimeoutStartTime().Add(.config.HandshakeIdleTimeout); .Before() {
			 = 
		}
	} else {
		// A keep-alive packet is ack-eliciting, so it can only be sent if the connection is
		// neither congestion limited nor hard-blocked.
		if .blocked != blockModeNone {
			 = .nextIdleTimeoutTime()
		} else {
			if  := .nextKeepAliveTime(); !.IsZero() {
				 = 
			} else {
				 = .nextIdleTimeoutTime()
			}
		}
	}
	// If the connection is hard-blocked, we can't even send acknowledgments,
	// nor can we send PTO probe packets.
	if .blocked == blockModeHardBlocked {
		.timer.Reset(monotime.Until())
		return
	}

	if  := .receivedPacketHandler.GetAlarmTimeout(); !.IsZero() && .Before() {
		 = 
	}
	if  := .sentPacketHandler.GetLossDetectionTimeout(); !.IsZero() && .Before() {
		 = 
	}
	if .blocked == blockModeCongestionLimited {
		.timer.Reset(monotime.Until())
		return
	}

	if !.pacingDeadline.IsZero() && .pacingDeadline.Before() {
		 = .pacingDeadline
	}
	.timer.Reset(monotime.Until())
}

// returns the reference time for idle-timeout calculations: the later of the
// last packet received and the first ack-eliciting packet sent after going idle
func ( *Conn) () monotime.Time {
	 := .lastPacketReceivedTime
	if  := .firstAckElicitingPacketAfterIdleSentTime; !.IsZero() && .After() {
		 = 
	}
	return 
}

// switches the connection to a new network path: resets congestion / MTU state,
// swaps the send connection, and restarts the send queue on the new path.
// NOTE(review): receiver and local names have been stripped; body preserved unchanged.
func ( *Conn) ( *Transport,  monotime.Time) {
	 := protocol.ByteCount(.config.InitialPacketSize)
	.sentPacketHandler.MigratedPath(, )
	// cap MTU discovery by the peer's advertised max_udp_payload_size
	 := protocol.ByteCount(protocol.MaxPacketBufferSize)
	if .peerParams.MaxUDPPayloadSize > 0 && .peerParams.MaxUDPPayloadSize <  {
		 = .peerParams.MaxUDPPayloadSize
	}
	.mtuDiscoverer.Reset(, , )
	.conn = newSendConn(.conn, .conn.RemoteAddr(), packetInfo{}, utils.DefaultLogger) // TODO: find a better way
	.sendQueue.Close()
	.sendQueue = newSendQueue(.conn)
	go func() {
		if  := .sendQueue.Run();  != nil {
			.destroyImpl()
		}
	}()
}

// called when the cryptographic handshake completes: drops the undecryptable-packet
// queue, finalizes connection ID state, and (on the server) confirms the handshake,
// queues the session ticket, a NEW_TOKEN frame, and HANDSHAKE_DONE.
// NOTE(review): receiver and local names have been stripped; body preserved unchanged.
func ( *Conn) ( monotime.Time) error {
	defer close(.handshakeCompleteChan)
	// Once the handshake completes, we have derived 1-RTT keys.
	// There's no point in queueing undecryptable packets for later decryption anymore.
	.undecryptablePackets = nil

	.connIDManager.SetHandshakeComplete()
	.connIDGenerator.SetHandshakeComplete(.Add(3 * .rttStats.PTO(false)))

	if .qlogger != nil {
		.qlogger.RecordEvent(qlog.ALPNInformation{
			ChosenALPN: .cryptoStreamHandler.ConnectionState().NegotiatedProtocol,
		})
	}

	// The server applies transport parameters right away, but the client side has to wait for handshake completion.
	// During a 0-RTT connection, the client is only allowed to use the new transport parameters for 1-RTT packets.
	if .perspective == protocol.PerspectiveClient {
		.applyTransportParameters()
		return nil
	}

	// All these only apply to the server side.
	if  := .handleHandshakeConfirmed();  != nil {
		return 
	}

	,  := .cryptoStreamHandler.GetSessionTicket()
	if  != nil {
		return 
	}
	if  != nil { // may be nil if session tickets are disabled via tls.Config.SessionTicketsDisabled
		.oneRTTStream.Write()
		for .oneRTTStream.HasData() {
			if  := .oneRTTStream.PopCryptoFrame(protocol.MaxPostHandshakeCryptoFrameSize);  != nil {
				.queueControlFrame()
			}
		}
	}
	,  := .tokenGenerator.NewToken(.conn.RemoteAddr(), .rttStats.SmoothedRTT())
	if  != nil {
		return 
	}
	.queueControlFrame(&wire.NewTokenFrame{Token: })
	.queueControlFrame(&wire.HandshakeDoneFrame{})
	return nil
}

// called when the handshake is confirmed: drops Initial and Handshake keys,
// informs the TLS stack, and starts path MTU discovery if enabled.
// NOTE(review): receiver and local names have been stripped; body preserved unchanged.
func ( *Conn) ( monotime.Time) error {
	// Drop initial keys.
	// On the client side, this should have happened when sending the first Handshake packet,
	// but this is not guaranteed if the server misbehaves.
	// See CVE-2025-59530 for more details.
	if  := .dropEncryptionLevel(protocol.EncryptionInitial, );  != nil {
		return 
	}
	if  := .dropEncryptionLevel(protocol.EncryptionHandshake, );  != nil {
		return 
	}

	.handshakeConfirmed = true
	.cryptoStreamHandler.SetHandshakeConfirmed()

	// MTU discovery only makes sense if the don't-fragment bit can be set
	if !.config.DisablePathMTUDiscovery && .conn.capabilities().DF {
		.mtuDiscoverer.Start()
	}
	return nil
}

// maxPacketsToProcess limits how many received packets are handled per run-loop
// iteration, so the loop regularly gets a chance to send ACKs under high load.
const maxPacketsToProcess = 32

// drains up to maxPacketsToProcess packets from the receive queue, releasing the
// mutex while each packet is handled; re-notifies the run loop if packets remain.
// NOTE(review): receiver and local names have been stripped; body preserved unchanged.
func ( *Conn) () ( bool,  error) {
	// Process packets from the receivedPackets queue.
	// Limit the number of packets to process to maxPacketsToProcess,
	// so we eventually get a chance to send out an ACK when receiving a lot of packets.
	.receivedPacketMx.Lock()

	if .receivedPackets.Empty() {
		.receivedPacketMx.Unlock()
		return false, nil
	}

	var  bool
	for range maxPacketsToProcess {
		 := .receivedPackets.PopFront()
		.receivedPacketMx.Unlock()

		var  qlog.DatagramID
		if .qlogger != nil && wire.IsLongHeaderPacket(.data[0]) {
			 = qlog.CalculateDatagramID(.data)
		}
		,  := .handleOnePacket(, )
		if  != nil {
			return false, 
		}
		if  {
			 = true
		}
		.receivedPacketMx.Lock()
		 = !.receivedPackets.Empty()
		if ! {
			break
		}
		// Prioritize sending of new CRYPTO data.
		// This is especially relevant when processing 0-RTT packets.
		if !.handshakeComplete && (.initialStream.HasData() || .handshakeStream.HasData()) {
			break
		}
	}
	.receivedPacketMx.Unlock()

	if  {
		select {
		case .notifyReceivedPacket <- struct{}{}:
		default:
		}
	}
	return , nil
}

func ( *Conn) ( receivedPacket,  qlog.DatagramID) ( bool,  error) {
	.sentPacketHandler.ReceivedBytes(.Size(), .rcvTime)

	if wire.IsVersionNegotiationPacket(.data) {
		return false, .handleVersionNegotiationPacket()
	}

	var  uint8
	var  protocol.ConnectionID
	 := .data
	 := 
	for len() > 0 {
		if  > 0 {
			 = *(.Clone())
			.data = 

			,  := wire.ParseConnectionID(.data, .srcConnIDLen)
			if  != nil {
				if .qlogger != nil {
					.qlogger.RecordEvent(qlog.PacketDropped{
						Raw:        qlog.RawInfo{Length: len()},
						DatagramID: ,
						Trigger:    qlog.PacketDropHeaderParseError,
					})
				}
				.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", )
				break
			}
			if  !=  {
				if .qlogger != nil {
					.qlogger.RecordEvent(qlog.PacketDropped{
						Header:     qlog.PacketHeader{DestConnectionID: },
						Raw:        qlog.RawInfo{Length: len()},
						DatagramID: ,
						Trigger:    qlog.PacketDropUnknownConnectionID,
					})
				}
				.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", , )
				break
			}
		}

		if wire.IsLongHeaderPacket(.data[0]) {
			, , ,  := wire.ParsePacket(.data)
			if  != nil {
				if .qlogger != nil {
					if  == wire.ErrUnsupportedVersion {
						.qlogger.RecordEvent(qlog.PacketDropped{
							Header:     qlog.PacketHeader{Version: .Version},
							Raw:        qlog.RawInfo{Length: len()},
							DatagramID: ,
							Trigger:    qlog.PacketDropUnsupportedVersion,
						})
					} else {
						.qlogger.RecordEvent(qlog.PacketDropped{
							Raw:        qlog.RawInfo{Length: len()},
							DatagramID: ,
							Trigger:    qlog.PacketDropHeaderParseError,
						})
					}
				}
				.logger.Debugf("error parsing packet: %s", )
				break
			}
			 = .DestConnectionID

			if .Version != .version {
				if .qlogger != nil {
					.qlogger.RecordEvent(qlog.PacketDropped{
						Raw:        qlog.RawInfo{Length: len()},
						DatagramID: ,
						Trigger:    qlog.PacketDropUnexpectedVersion,
					})
				}
				.logger.Debugf("Dropping packet with version %x. Expected %x.", .Version, .version)
				break
			}

			if  > 0 {
				.buffer.Split()
			}
			++

			// only log if this actually a coalesced packet
			if .logger.Debug() && ( > 1 || len() > 0) {
				.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", , len(), len())
			}

			.data = 

			,  := .handleLongHeaderPacket(, , )
			if  != nil {
				return false, 
			}
			if  {
				 = true
			}
			 = 
		} else {
			if  > 0 {
				.buffer.Split()
			}
			,  := .handleShortHeaderPacket(,  > 0, )
			if  != nil {
				return false, 
			}
			if  {
				 = true
			}
			break
		}
	}

	.buffer.MaybeRelease()
	.blocked = blockModeNone
	return , nil
}

func ( *Conn) (
	 receivedPacket,
	 bool,
	 qlog.DatagramID, // only for logging
) ( bool,  error) {
	var  bool

	defer func() {
		// Put back the packet buffer if the packet wasn't queued for later decryption.
		if ! {
			.buffer.Decrement()
		}
	}()

	,  := wire.ParseConnectionID(.data, .srcConnIDLen)
	if  != nil {
		.qlogger.RecordEvent(qlog.PacketDropped{
			Header: qlog.PacketHeader{
				PacketType:   qlog.PacketType1RTT,
				PacketNumber: protocol.InvalidPacketNumber,
			},
			Raw:        qlog.RawInfo{Length: len(.data)},
			DatagramID: ,
			Trigger:    qlog.PacketDropHeaderParseError,
		})
		return false, nil
	}
	, , , ,  := .unpacker.UnpackShortHeader(.rcvTime, .data)
	if  != nil {
		// Stateless reset packets (see RFC 9000, section 10.3):
		// * fill the entire UDP datagram (i.e. they cannot be part of a coalesced packet)
		// * are short header packets (first bit is 0)
		// * have the QUIC bit set (second bit is 1)
		// * are at least 21 bytes long
		if ! && len(.data) >= protocol.MinReceivedStatelessResetSize && .data[0]&0b11000000 == 0b01000000 {
			 := protocol.StatelessResetToken(.data[len(.data)-16:])
			if .connIDManager.IsActiveStatelessResetToken() {
				return false, &StatelessResetError{}
			}
		}
		,  = .handleUnpackError(, , qlog.PacketType1RTT, )
		return false, 
	}
	.largestRcvdAppData = max(.largestRcvdAppData, )

	if .logger.Debug() {
		.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", , .Size(), )
		wire.LogShortHeader(.logger, , , , )
	}

	if .receivedPacketHandler.IsPotentiallyDuplicate(, protocol.Encryption1RTT) {
		.logger.Debugf("Dropping (potentially) duplicate packet.")
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:   qlog.PacketType1RTT,
					PacketNumber: ,
				},
				Raw:        qlog.RawInfo{Length: int(.Size())},
				DatagramID: ,
				Trigger:    qlog.PacketDropDuplicate,
			})
		}
		return false, nil
	}

	var  func([]qlog.Frame)
	if .qlogger != nil {
		 = func( []qlog.Frame) {
			.qlogger.RecordEvent(qlog.PacketReceived{
				Header: qlog.PacketHeader{
					PacketType:       qlog.PacketType1RTT,
					DestConnectionID: ,
					PacketNumber:     ,
					KeyPhaseBit:      ,
				},
				Raw: qlog.RawInfo{
					Length:        int(.Size()),
					PayloadLength: int(.Size() - wire.ShortHeaderLen(, )),
				},
				DatagramID: ,
				Frames:     ,
				ECN:        toQlogECN(.ecn),
			})
		}
	}
	, ,  := .handleUnpackedShortHeaderPacket(, , , .ecn, .rcvTime, )
	if  != nil {
		return false, 
	}

	// In RFC 9000, only the client can migrate between paths.
	if .perspective == protocol.PerspectiveClient {
		return true, nil
	}
	if addrsEqual(.remoteAddr, .RemoteAddr()) {
		return true, nil
	}

	var  bool
	if .pathManager == nil {
		.pathManager = newPathManager(
			.connIDManager.GetConnIDForPath,
			.connIDManager.RetireConnIDForPath,
			.logger,
		)
	}
	, ,  := .pathManager.HandlePacket(.remoteAddr, .rcvTime, , )
	if len() > 0 {
		, ,  := .packer.PackPathProbePacket(, , .version)
		if  != nil {
			return true, 
		}
		.logger.Debugf("sending path probe packet to %s", .remoteAddr)
		.logShortHeaderPacketWithDatagramID(, protocol.ECNNon, .Len(), false, )
		.registerPackedShortHeaderPacket(, protocol.ECNNon, .rcvTime)
		.sendQueue.SendProbe(, .remoteAddr)
	}
	// We only switch paths in response to the highest-numbered non-probing packet,
	// see section 9.3 of RFC 9000.
	if ! ||  != .largestRcvdAppData {
		return true, nil
	}
	.pathManager.SwitchToPath(.remoteAddr)
	.sentPacketHandler.MigratedPath(.rcvTime, protocol.ByteCount(.config.InitialPacketSize))
	 := protocol.ByteCount(protocol.MaxPacketBufferSize)
	if .peerParams.MaxUDPPayloadSize > 0 && .peerParams.MaxUDPPayloadSize <  {
		 = .peerParams.MaxUDPPayloadSize
	}
	.mtuDiscoverer.Reset(
		.rcvTime,
		protocol.ByteCount(.config.InitialPacketSize),
		,
	)
	.conn.ChangeRemoteAddr(.remoteAddr, .info)
	return true, nil
}

func ( *Conn) ( receivedPacket,  *wire.Header,  qlog.DatagramID) ( bool,  error) {
	var  bool

	defer func() {
		// Put back the packet buffer if the packet wasn't queued for later decryption.
		if ! {
			.buffer.Decrement()
		}
	}()

	if .Type == protocol.PacketTypeRetry {
		return .handleRetryPacket(, .data, .rcvTime), nil
	}

	// The server can change the source connection ID with the first Handshake packet.
	// After this, all packets with a different source connection have to be ignored.
	if .receivedFirstPacket && .Type == protocol.PacketTypeInitial && .SrcConnectionID != .handshakeDestConnID {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:   qlog.PacketTypeInitial,
					PacketNumber: protocol.InvalidPacketNumber,
				},
				Raw:        qlog.RawInfo{Length: int(.Size())},
				DatagramID: ,
				Trigger:    qlog.PacketDropUnknownConnectionID,
			})
		}
		.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", .Size(), .SrcConnectionID, .handshakeDestConnID)
		return false, nil
	}
	// drop 0-RTT packets, if we are a client
	if .perspective == protocol.PerspectiveClient && .Type == protocol.PacketType0RTT {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:   qlog.PacketType0RTT,
					PacketNumber: protocol.InvalidPacketNumber,
				},
				Raw:        qlog.RawInfo{Length: int(.Size())},
				DatagramID: ,
				Trigger:    qlog.PacketDropUnexpectedPacket,
			})
		}
		return false, nil
	}

	,  := .unpacker.UnpackLongHeader(, .data)
	if  != nil {
		,  = .handleUnpackError(, , toQlogPacketType(.Type), )
		return false, 
	}

	if .logger.Debug() {
		.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", .hdr.PacketNumber, .Size(), .DestConnectionID, .encryptionLevel)
		.hdr.Log(.logger)
	}

	if  := .hdr.PacketNumber; .receivedPacketHandler.IsPotentiallyDuplicate(, .encryptionLevel) {
		.logger.Debugf("Dropping (potentially) duplicate packet.")
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:       toQlogPacketType(.hdr.Type),
					DestConnectionID: .DestConnectionID,
					SrcConnectionID:  .SrcConnectionID,
					PacketNumber:     ,
					Version:          .hdr.Version,
				},
				Raw:        qlog.RawInfo{Length: int(.Size()), PayloadLength: int(.hdr.Length)},
				DatagramID: ,
				Trigger:    qlog.PacketDropDuplicate,
			})
		}
		return false, nil
	}

	if  := .handleUnpackedLongHeaderPacket(, .ecn, .rcvTime, , .Size());  != nil {
		return false, 
	}
	return true, nil
}

func ( *Conn) ( error,  receivedPacket,  qlog.PacketType,  qlog.DatagramID) ( bool,  error) {
	switch  {
	case handshake.ErrKeysDropped:
		if .qlogger != nil {
			,  := wire.ParseConnectionID(.data, .srcConnIDLen)
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:       ,
					DestConnectionID: ,
					PacketNumber:     protocol.InvalidPacketNumber,
				},
				Raw:        qlog.RawInfo{Length: int(.Size())},
				DatagramID: ,
				Trigger:    qlog.PacketDropKeyUnavailable,
			})
		}
		.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", , .Size())
		return false, nil
	case handshake.ErrKeysNotYetAvailable:
		// Sealer for this encryption level not yet available.
		// Try again later.
		.tryQueueingUndecryptablePacket(, , )
		return true, nil
	case wire.ErrInvalidReservedBits:
		return false, &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: .Error(),
		}
	case handshake.ErrDecryptionFailed:
		// This might be a packet injected by an attacker. Drop it.
		if .qlogger != nil {
			,  := wire.ParseConnectionID(.data, .srcConnIDLen)
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:       ,
					DestConnectionID: ,
					PacketNumber:     protocol.InvalidPacketNumber,
				},
				Raw:        qlog.RawInfo{Length: int(.Size())},
				DatagramID: ,
				Trigger:    qlog.PacketDropPayloadDecryptError,
			})
		}
		.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", , .Size(), )
		return false, nil
	default:
		var  *headerParseError
		if errors.As(, &) {
			// This might be a packet injected by an attacker. Drop it.
			if .qlogger != nil {
				,  := wire.ParseConnectionID(.data, .srcConnIDLen)
				.qlogger.RecordEvent(qlog.PacketDropped{
					Header: qlog.PacketHeader{
						PacketType:       ,
						DestConnectionID: ,
						PacketNumber:     protocol.InvalidPacketNumber,
					},
					Raw:        qlog.RawInfo{Length: int(.Size())},
					DatagramID: ,
					Trigger:    qlog.PacketDropHeaderParseError,
				})
			}
			.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", , .Size(), )
			return false, nil
		}
		// This is an error returned by the AEAD (other than ErrDecryptionFailed).
		// For example, a PROTOCOL_VIOLATION due to key updates.
		return false, 
	}
}

func ( *Conn) ( *wire.Header,  []byte,  monotime.Time) bool /* was this a valid Retry */ {
	if .perspective == protocol.PerspectiveServer {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:       qlog.PacketTypeRetry,
					SrcConnectionID:  .SrcConnectionID,
					DestConnectionID: .DestConnectionID,
					Version:          .Version,
				},
				Raw:     qlog.RawInfo{Length: len()},
				Trigger: qlog.PacketDropUnexpectedPacket,
			})
		}
		.logger.Debugf("Ignoring Retry.")
		return false
	}
	if .receivedFirstPacket {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:       qlog.PacketTypeRetry,
					SrcConnectionID:  .SrcConnectionID,
					DestConnectionID: .DestConnectionID,
					Version:          .Version,
				},
				Raw:     qlog.RawInfo{Length: len()},
				Trigger: qlog.PacketDropUnexpectedPacket,
			})
		}
		.logger.Debugf("Ignoring Retry, since we already received a packet.")
		return false
	}
	 := .connIDManager.Get()
	if .SrcConnectionID ==  {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:       qlog.PacketTypeRetry,
					SrcConnectionID:  .SrcConnectionID,
					DestConnectionID: .DestConnectionID,
					Version:          .Version,
				},
				Raw:     qlog.RawInfo{Length: len()},
				Trigger: qlog.PacketDropUnexpectedPacket,
			})
		}
		.logger.Debugf("Ignoring Retry, since the server didn't change the Source Connection ID.")
		return false
	}
	// If a token is already set, this means that we already received a Retry from the server.
	// Ignore this Retry packet.
	if .receivedRetry {
		.logger.Debugf("Ignoring Retry, since a Retry was already received.")
		return false
	}

	 := handshake.GetRetryIntegrityTag([:len()-16], , .Version)
	if !bytes.Equal([len()-16:], [:]) {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:       qlog.PacketTypeRetry,
					SrcConnectionID:  .SrcConnectionID,
					DestConnectionID: .DestConnectionID,
					Version:          .Version,
				},
				Raw:     qlog.RawInfo{Length: len()},
				Trigger: qlog.PacketDropPayloadDecryptError,
			})
		}
		.logger.Debugf("Ignoring spoofed Retry. Integrity Tag doesn't match.")
		return false
	}

	 := .SrcConnectionID
	.receivedRetry = true
	.sentPacketHandler.ResetForRetry()
	.handshakeDestConnID = 
	.retrySrcConnID = &
	.cryptoStreamHandler.ChangeConnectionID()
	.packer.SetToken(.Token)
	.connIDManager.ChangeInitialConnID()

	if .logger.Debug() {
		.logger.Debugf("<- Received Retry:")
		(&wire.ExtendedHeader{Header: *}).Log(.logger)
		.logger.Debugf("Switching destination connection ID to: %s", .SrcConnectionID)
	}
	if .qlogger != nil {
		.qlogger.RecordEvent(qlog.PacketReceived{
			Header: qlog.PacketHeader{
				PacketType:       qlog.PacketTypeRetry,
				DestConnectionID: ,
				SrcConnectionID:  ,
				Version:          .Version,
				Token:            &qlog.Token{Raw: .Token},
			},
			Raw: qlog.RawInfo{Length: len()},
		})
	}

	.scheduleSending()
	return true
}

func ( *Conn) ( receivedPacket) error {
	if .perspective == protocol.PerspectiveServer || // servers never receive version negotiation packets
		.receivedFirstPacket || .versionNegotiated { // ignore delayed / duplicated version negotiation packets
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header:  qlog.PacketHeader{PacketType: qlog.PacketTypeVersionNegotiation},
				Raw:     qlog.RawInfo{Length: int(.Size())},
				Trigger: qlog.PacketDropUnexpectedPacket,
			})
		}
		return nil
	}

	, , ,  := wire.ParseVersionNegotiationPacket(.data)
	if  != nil {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header:  qlog.PacketHeader{PacketType: qlog.PacketTypeVersionNegotiation},
				Raw:     qlog.RawInfo{Length: int(.Size())},
				Trigger: qlog.PacketDropHeaderParseError,
			})
		}
		.logger.Debugf("Error parsing Version Negotiation packet: %s", )
		return nil
	}

	if slices.Contains(, .version) {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header:  qlog.PacketHeader{PacketType: qlog.PacketTypeVersionNegotiation},
				Raw:     qlog.RawInfo{Length: int(.Size())},
				Trigger: qlog.PacketDropUnexpectedVersion,
			})
		}
		// The Version Negotiation packet contains the version that we offered.
		// This might be a packet sent by an attacker, or it was corrupted.
		return nil
	}

	.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", )
	if .qlogger != nil {
		.qlogger.RecordEvent(qlog.VersionNegotiationReceived{
			Header: qlog.PacketHeaderVersionNegotiation{
				DestConnectionID: ,
				SrcConnectionID:  ,
			},
			SupportedVersions: ,
		})
	}
	,  := protocol.ChooseSupportedVersion(.config.Versions, )
	if ! {
		.destroyImpl(&VersionNegotiationError{
			Ours:   .config.Versions,
			Theirs: ,
		})
		.logger.Infof("No compatible QUIC version found.")
		return nil
	}
	if .qlogger != nil {
		.qlogger.RecordEvent(qlog.VersionInformation{
			ChosenVersion:  ,
			ClientVersions: .config.Versions,
			ServerVersions: ,
		})
	}

	.logger.Infof("Switching to QUIC version %s.", )
	,  := .sentPacketHandler.PeekPacketNumber(protocol.EncryptionInitial)
	return &errCloseForRecreating{
		nextPacketNumber: ,
		nextVersion:      ,
	}
}

func ( *Conn) (
	 *unpackedPacket,
	 protocol.ECN,
	 monotime.Time,
	 qlog.DatagramID, // only for logging
	 protocol.ByteCount, // only for logging
) error {
	if !.receivedFirstPacket {
		.receivedFirstPacket = true
		if !.versionNegotiated && .qlogger != nil {
			var ,  []Version
			switch .perspective {
			case protocol.PerspectiveClient:
				 = .config.Versions
			case protocol.PerspectiveServer:
				 = .config.Versions
			}
			.qlogger.RecordEvent(qlog.VersionInformation{
				ChosenVersion:  .version,
				ClientVersions: ,
				ServerVersions: ,
			})
		}
		// The server can change the source connection ID with the first Handshake packet.
		if .perspective == protocol.PerspectiveClient && .hdr.SrcConnectionID != .handshakeDestConnID {
			 := .hdr.SrcConnectionID
			.logger.Debugf("Received first packet. Switching destination connection ID to: %s", )
			.handshakeDestConnID = 
			.connIDManager.ChangeInitialConnID()
		}
		// We create the connection as soon as we receive the first packet from the client.
		// We do that before authenticating the packet.
		// That means that if the source connection ID was corrupted,
		// we might have created a connection with an incorrect source connection ID.
		// Once we authenticate the first packet, we need to update it.
		if .perspective == protocol.PerspectiveServer {
			if .hdr.SrcConnectionID != .handshakeDestConnID {
				.handshakeDestConnID = .hdr.SrcConnectionID
				.connIDManager.ChangeInitialConnID(.hdr.SrcConnectionID)
			}
			if .qlogger != nil {
				var ,  *net.UDPAddr
				if ,  := .conn.LocalAddr().(*net.UDPAddr);  {
					 = 
				}
				if ,  := .conn.RemoteAddr().(*net.UDPAddr);  {
					 = 
				}
				.qlogger.RecordEvent(startedConnectionEvent(, ))
			}
		}
	}

	if .perspective == protocol.PerspectiveServer && .encryptionLevel == protocol.EncryptionHandshake &&
		!.droppedInitialKeys {
		// On the server side, Initial keys are dropped as soon as the first Handshake packet is received.
		// See Section 4.9.1 of RFC 9001.
		if  := .dropEncryptionLevel(protocol.EncryptionInitial, );  != nil {
			return 
		}
	}

	.lastPacketReceivedTime = 
	.firstAckElicitingPacketAfterIdleSentTime = 0
	.keepAlivePingSent = false

	if .hdr.Type == protocol.PacketType0RTT {
		.largestRcvdAppData = max(.largestRcvdAppData, .hdr.PacketNumber)
	}

	var  func([]qlog.Frame)
	if .qlogger != nil {
		 = func( []qlog.Frame) {
			var  *qlog.Token
			if len(.hdr.Token) > 0 {
				 = &qlog.Token{Raw: .hdr.Token}
			}
			.qlogger.RecordEvent(qlog.PacketReceived{
				Header: qlog.PacketHeader{
					PacketType:       toQlogPacketType(.hdr.Type),
					DestConnectionID: .hdr.DestConnectionID,
					SrcConnectionID:  .hdr.SrcConnectionID,
					PacketNumber:     .hdr.PacketNumber,
					Version:          .hdr.Version,
					Token:            ,
				},
				Raw: qlog.RawInfo{
					Length:        int(),
					PayloadLength: int(.hdr.Length),
				},
				DatagramID: ,
				Frames:     ,
				ECN:        toQlogECN(),
			})
		}
	}
	, , ,  := .handleFrames(.data, .hdr.DestConnectionID, .encryptionLevel, , )
	if  != nil {
		return 
	}
	.sentPacketHandler.ReceivedPacket(.encryptionLevel, )
	return .receivedPacketHandler.ReceivedPacket(.hdr.PacketNumber, , .encryptionLevel, , )
}

func ( *Conn) (
	 protocol.ConnectionID,
	 protocol.PacketNumber,
	 []byte,
	 protocol.ECN,
	 monotime.Time,
	 func([]qlog.Frame),
) ( bool,  *wire.PathChallengeFrame,  error) {
	.lastPacketReceivedTime = 
	.firstAckElicitingPacketAfterIdleSentTime = 0
	.keepAlivePingSent = false

	, , ,  := .handleFrames(, , protocol.Encryption1RTT, , )
	if  != nil {
		return false, nil, 
	}
	.sentPacketHandler.ReceivedPacket(protocol.Encryption1RTT, )
	if  := .receivedPacketHandler.ReceivedPacket(, , protocol.Encryption1RTT, , );  != nil {
		return false, nil, 
	}
	return , , nil
}

// handleFrames parses the frames, one after the other, and handles them.
// It returns the last PATH_CHALLENGE frame contained in the packet, if any.
func ( *Conn) (
	 []byte,
	 protocol.ConnectionID,
	 protocol.EncryptionLevel,
	 func([]qlog.Frame),
	 monotime.Time,
) (,  bool,  *wire.PathChallengeFrame,  error) {
	// Only used for tracing.
	// If we're not tracing, this slice will always remain empty.
	var  []qlog.Frame
	if  != nil {
		 = make([]qlog.Frame, 0, 4)
	}
	 := .handshakeComplete
	var  error
	var  bool

	for len() > 0 {
		, ,  := .frameParser.ParseType(, )
		if  != nil {
			// The frame parser skips over PADDING frames, and returns an io.EOF if the PADDING
			// frames were the last frames in this packet.
			if  == io.EOF {
				break
			}
			return false, false, nil, 
		}
		 = [:]

		if ackhandler.IsFrameTypeAckEliciting() {
			 = true
		}
		if !wire.IsProbingFrameType() {
			 = true
		}

		// We're inlining common cases, to avoid using interfaces
		// Fast path: STREAM, DATAGRAM and ACK
		if .IsStreamFrameType() {
			, ,  := .frameParser.ParseStreamFrame(, , .version)
			if  != nil {
				return false, false, nil, 
			}
			 = [:]

			if  != nil {
				 = append(, toQlogFrame())
			}
			// an error occurred handling a previous frame, don't handle the current frame
			if  {
				continue
			}
			wire.LogFrame(.logger, , false)
			 = .streamsMap.HandleStreamFrame(, )
		} else if .IsAckFrameType() {
			, ,  := .frameParser.ParseAckFrame(, , , .version)
			if  != nil {
				return false, false, nil, 
			}
			 = [:]
			if  != nil {
				 = append(, toQlogFrame())
			}
			// an error occurred handling a previous frame, don't handle the current frame
			if  {
				continue
			}
			wire.LogFrame(.logger, , false)
			 = .handleAckFrame(, , )
		} else if .IsDatagramFrameType() {
			, ,  := .frameParser.ParseDatagramFrame(, , .version)
			if  != nil {
				return false, false, nil, 
			}
			 = [:]

			if  != nil {
				 = append(, toQlogFrame())
			}
			// an error occurred handling a previous frame, don't handle the current frame
			if  {
				continue
			}
			wire.LogFrame(.logger, , false)
			 = .handleDatagramFrame()
		} else {
			, ,  := .frameParser.ParseLessCommonFrame(, , .version)
			if  != nil {
				return false, false, nil, 
			}
			 = [:]

			if  != nil {
				 = append(, toQlogFrame())
			}
			// an error occurred handling a previous frame, don't handle the current frame
			if  {
				continue
			}
			,  := .handleFrame(, , , )
			if  != nil {
				 = 
			}
			 = 
		}

		if  != nil {
			// if we're logging, we need to keep parsing (but not handling) all frames
			 = true
			if  == nil {
				return false, false, nil, 
			}
		}
	}

	if  != nil {
		()
		if  != nil {
			return false, false, nil, 
		}
	}

	// Handle completion of the handshake after processing all the frames.
	// This ensures that we correctly handle the following case on the server side:
	// We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake,
	// and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame.
	if ! && .handshakeComplete {
		if  := .handleHandshakeComplete();  != nil {
			return false, false, nil, 
		}
	}
	return
}

func ( *Conn) (
	 wire.Frame,
	 protocol.EncryptionLevel,
	 protocol.ConnectionID,
	 monotime.Time,
) ( *wire.PathChallengeFrame,  error) {
	var  error
	wire.LogFrame(.logger, , false)
	switch frame := .(type) {
	case *wire.CryptoFrame:
		 = .handleCryptoFrame(, , )
	case *wire.ConnectionCloseFrame:
		 = .handleConnectionCloseFrame()
	case *wire.ResetStreamFrame:
		 = .streamsMap.HandleResetStreamFrame(, )
	case *wire.MaxDataFrame:
		.connFlowController.UpdateSendWindow(.MaximumData)
	case *wire.MaxStreamDataFrame:
		 = .streamsMap.HandleMaxStreamDataFrame()
	case *wire.MaxStreamsFrame:
		.streamsMap.HandleMaxStreamsFrame()
	case *wire.DataBlockedFrame:
	case *wire.StreamDataBlockedFrame:
		 = .streamsMap.HandleStreamDataBlockedFrame()
	case *wire.StreamsBlockedFrame:
	case *wire.StopSendingFrame:
		 = .streamsMap.HandleStopSendingFrame()
	case *wire.PingFrame:
	case *wire.PathChallengeFrame:
		.handlePathChallengeFrame()
		 = 
	case *wire.PathResponseFrame:
		 = .handlePathResponseFrame()
	case *wire.NewTokenFrame:
		 = .handleNewTokenFrame()
	case *wire.NewConnectionIDFrame:
		 = .connIDManager.Add()
	case *wire.RetireConnectionIDFrame:
		 = .connIDGenerator.Retire(.SequenceNumber, , .Add(3*.rttStats.PTO(false)))
	case *wire.HandshakeDoneFrame:
		 = .handleHandshakeDoneFrame()
	default:
		 = fmt.Errorf("unexpected frame type: %s", reflect.ValueOf(&).Elem().Type().Name())
	}
	return , 
}

// handlePacket is called by the server with a new packet
func ( *Conn) ( receivedPacket) {
	.receivedPacketMx.Lock()
	// Discard packets once the amount of queued packets is larger than
	// the channel size, protocol.MaxConnUnprocessedPackets
	if .receivedPackets.Len() >= protocol.MaxConnUnprocessedPackets {
		if .qlogger != nil {
			var  qlog.DatagramID
			if wire.IsLongHeaderPacket(.data[0]) {
				 = qlog.CalculateDatagramID(.data)
			}
			.qlogger.RecordEvent(qlog.PacketDropped{
				Raw:        qlog.RawInfo{Length: int(.Size())},
				DatagramID: ,
				Trigger:    qlog.PacketDropDOSPrevention,
			})
		}
		.receivedPacketMx.Unlock()
		return
	}
	.receivedPackets.PushBack()
	.receivedPacketMx.Unlock()

	select {
	case .notifyReceivedPacket <- struct{}{}:
	default:
	}
}

func ( *Conn) ( *wire.ConnectionCloseFrame) error {
	if .IsApplicationError {
		return &qerr.ApplicationError{
			Remote:       true,
			ErrorCode:    qerr.ApplicationErrorCode(.ErrorCode),
			ErrorMessage: .ReasonPhrase,
		}
	}
	return &qerr.TransportError{
		Remote:       true,
		ErrorCode:    qerr.TransportErrorCode(.ErrorCode),
		FrameType:    .FrameType,
		ErrorMessage: .ReasonPhrase,
	}
}

func ( *Conn) ( *wire.CryptoFrame,  protocol.EncryptionLevel,  monotime.Time) error {
	if  := .cryptoStreamManager.HandleCryptoFrame(, );  != nil {
		return 
	}
	for {
		 := .cryptoStreamManager.GetCryptoData()
		if  == nil {
			break
		}
		if  := .cryptoStreamHandler.HandleMessage(, );  != nil {
			return 
		}
	}
	return .handleHandshakeEvents()
}

func ( *Conn) ( monotime.Time) error {
	for {
		 := .cryptoStreamHandler.NextEvent()
		var  error
		switch .Kind {
		case handshake.EventNoEvent:
			return nil
		case handshake.EventHandshakeComplete:
			// Don't call handleHandshakeComplete yet.
			// It's advantageous to process ACK frames that might be serialized after the CRYPTO frame first.
			.handshakeComplete = true
		case handshake.EventReceivedTransportParameters:
			 = .handleTransportParameters(.TransportParameters)
		case handshake.EventRestoredTransportParameters:
			.restoreTransportParameters(.TransportParameters)
			close(.earlyConnReadyChan)
		case handshake.EventReceivedReadKeys:
			// queue all previously undecryptable packets
			.undecryptablePacketsToProcess = append(.undecryptablePacketsToProcess, .undecryptablePackets...)
			.undecryptablePackets = nil
		case handshake.EventDiscard0RTTKeys:
			 = .dropEncryptionLevel(protocol.Encryption0RTT, )
		case handshake.EventWriteInitialData:
			_,  = .initialStream.Write(.Data)
		case handshake.EventWriteHandshakeData:
			_,  = .handshakeStream.Write(.Data)
		}
		if  != nil {
			return 
		}
	}
}

func ( *Conn) ( *wire.PathChallengeFrame) {
	if .perspective == protocol.PerspectiveClient {
		.queueControlFrame(&wire.PathResponseFrame{Data: .Data})
	}
}

func ( *Conn) ( *wire.PathResponseFrame) error {
	switch .perspective {
	case protocol.PerspectiveClient:
		return .handlePathResponseFrameClient()
	case protocol.PerspectiveServer:
		return .handlePathResponseFrameServer()
	default:
		panic("unreachable")
	}
}

func ( *Conn) ( *wire.PathResponseFrame) error {
	 := .pathManagerOutgoing.Load()
	if  == nil {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "unexpected PATH_RESPONSE frame",
		}
	}
	.HandlePathResponseFrame()
	return nil
}

func ( *Conn) ( *wire.PathResponseFrame) error {
	if .pathManager == nil {
		// since we didn't send PATH_CHALLENGEs yet, we don't expect PATH_RESPONSEs
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "unexpected PATH_RESPONSE frame",
		}
	}
	.pathManager.HandlePathResponseFrame()
	return nil
}

func ( *Conn) ( *wire.NewTokenFrame) error {
	if .perspective == protocol.PerspectiveServer {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "received NEW_TOKEN frame from the client",
		}
	}
	if .config.TokenStore != nil {
		.config.TokenStore.Put(.tokenStoreKey, &ClientToken{data: .Token, rtt: .rttStats.SmoothedRTT()})
	}
	return nil
}

func ( *Conn) ( monotime.Time) error {
	if .perspective == protocol.PerspectiveServer {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "received a HANDSHAKE_DONE frame",
		}
	}
	if !.handshakeConfirmed {
		return .handleHandshakeConfirmed()
	}
	return nil
}

func ( *Conn) ( *wire.AckFrame,  protocol.EncryptionLevel,  monotime.Time) error {
	,  := .sentPacketHandler.ReceivedAck(, , .lastPacketReceivedTime)
	if  != nil {
		return 
	}
	if ! {
		return nil
	}
	// On the client side: If the packet acknowledged a 1-RTT packet, this confirms the handshake.
	// This is only possible if the ACK was sent in a 1-RTT packet.
	// This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001.
	if .perspective == protocol.PerspectiveClient && !.handshakeConfirmed {
		if  := .handleHandshakeConfirmed();  != nil {
			return 
		}
	}
	// If one of the acknowledged packets was a Path MTU probe packet, this might have increased the Path MTU estimate.
	if .mtuDiscoverer != nil {
		if  := .mtuDiscoverer.CurrentSize();  > protocol.ByteCount(.currentMTUEstimate.Load()) {
			.currentMTUEstimate.Store(uint32())
			.sentPacketHandler.SetMaxDatagramSize()
		}
	}
	return .cryptoStreamHandler.SetLargest1RTTAcked(.LargestAcked())
}

func ( *Conn) ( *wire.DatagramFrame) error {
	if .Length(.version) > wire.MaxDatagramSize {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "DATAGRAM frame too large",
		}
	}
	.datagramQueue.HandleDatagramFrame()
	return nil
}

func ( *Conn) ( *closeError) {
	.closeErr.CompareAndSwap(nil, )
	select {
	case .closeChan <- struct{}{}:
	default:
	}
}

// closeLocal closes the connection and send a CONNECTION_CLOSE containing the error
func ( *Conn) ( error) {
	.setCloseError(&closeError{err: , immediate: false})
}

// destroy closes the connection without sending the error on the wire
func ( *Conn) ( error) {
	.destroyImpl()
	<-.ctx.Done()
}

func ( *Conn) ( error) {
	.setCloseError(&closeError{err: , immediate: true})
}

// CloseWithError closes the connection with an error.
// The error string will be sent to the peer.
// It blocks until the run loop has terminated. Always returns nil.
func ( *Conn) ( ApplicationErrorCode,  string) error {
	.closeLocal(&qerr.ApplicationError{
		ErrorCode:    ,
		ErrorMessage: ,
	})
	<-.ctx.Done()
	return nil
}

// Closes the connection with a transport-level error and waits for the
// run loop to terminate.
func ( *Conn) ( TransportErrorCode) {
	.closeLocal(&qerr.TransportError{ErrorCode: })
	<-.ctx.Done()
}

// Central close handling: logs the close, classifies the error for qlog,
// shuts down streams and the datagram queue, and decides whether to send a
// CONNECTION_CLOSE or to tear down silently.
// Called from the run loop with the closeError recorded by setCloseError.
func ( *Conn) ( *closeError) {
	if .immediate {
		// Timeouts are expected; log them without the "with error" phrasing.
		if ,  := .err.(net.Error);  && .Timeout() {
			.logger.Errorf("Destroying connection: %s", .err)
		} else {
			.logger.Errorf("Destroying connection with error: %s", .err)
		}
	} else {
		if .err == nil {
			.logger.Infof("Closing connection.")
		} else {
			.logger.Errorf("Closing connection with error: %s", .err)
		}
	}

	// A nil error is treated as a clean application close; the original nil
	// is restored on the closeError afterwards via the deferred assignment.
	 := .err
	if  == nil {
		 = &qerr.ApplicationError{}
	} else {
		defer func() { .err =  }()
	}

	// Classify the error to derive the qlog connection_closed event fields
	// (trigger, reason, transport / application error code, initiator).
	var (
		     *StatelessResetError
		 *VersionNegotiationError
		           *errCloseForRecreating
		        *ApplicationError
		          *TransportError
	)
	var  bool
	var  qlog.ConnectionCloseTrigger
	var  string
	var  *qlog.TransportErrorCode
	var  *qlog.ApplicationErrorCode
	switch {
	case errors.Is(, qerr.ErrIdleTimeout),
		errors.Is(, qerr.ErrHandshakeTimeout):
		 = qlog.ConnectionCloseTriggerIdleTimeout
	case errors.As(, &):
		 = qlog.ConnectionCloseTriggerStatelessReset
	case errors.As(, &):
		 = qlog.ConnectionCloseTriggerVersionMismatch
	case errors.As(, &):
		// errCloseForRecreating: no qlog event is recorded for this case (see below).
	case errors.As(, &):
		 = .Remote
		 = .ErrorMessage
		 = &.ErrorCode
	case errors.As(, &):
		 = .Remote
		 = .ErrorMessage
		 = &.ErrorCode
	case .immediate:
		 = .err
	default:
		// Unknown error type: wrap it in an INTERNAL_ERROR transport error.
		 := &qerr.TransportError{
			ErrorCode:    qerr.InternalError,
			ErrorMessage: .Error(),
		}
		 = 
		 = .ErrorMessage
		 := .ErrorCode
		 = &
	}

	.streamsMap.CloseWithError()
	if .datagramQueue != nil {
		.datagramQueue.CloseWithError()
	}

	// In rare instances, the connection ID manager might switch to a new connection ID
	// when sending the CONNECTION_CLOSE frame.
	// The connection ID manager removes the active stateless reset token from the packet
	// handler map when it is closed, so we need to make sure that this happens last.
	defer .connIDManager.Close()

	// Don't record a qlog event when the connection is recreated (version negotiation).
	if .qlogger != nil && !errors.As(, &) {
		 := qlog.InitiatorLocal
		if  {
			 = qlog.InitiatorRemote
		}
		.qlogger.RecordEvent(qlog.ConnectionClosed{
			Initiator:        ,
			ConnectionError:  ,
			ApplicationError: ,
			Trigger:          ,
			Reason:           ,
		})
	}

	// If this is a remote close we're done here
	if  {
		.connIDGenerator.ReplaceWithClosed(nil, 3*.rttStats.PTO(false))
		return
	}
	if .immediate {
		.connIDGenerator.RemoveAll()
		return
	}
	// Don't send out any CONNECTION_CLOSE if this is an error that occurred
	// before we even sent out the first packet.
	if .perspective == protocol.PerspectiveClient && !.sentFirstPacket {
		.connIDGenerator.RemoveAll()
		return
	}
	,  := .sendConnectionClose()
	if  != nil {
		.logger.Debugf("Error sending CONNECTION_CLOSE: %s", )
	}
	// Keep responding to reordered packets for 3 PTOs with the packed CONNECTION_CLOSE.
	.connIDGenerator.ReplaceWithClosed(, 3*.rttStats.PTO(false))
}

// Drops all state associated with an encryption level once it is no longer needed:
// unacknowledged packets, received-packet tracking, and the crypto stream.
// Initial: also discards the Initial keys. 0-RTT: resets streams, handles the
// 0-RTT rejection in the framer, and resets connection-level flow control.
func ( *Conn) ( protocol.EncryptionLevel,  monotime.Time) error {
	.sentPacketHandler.DropPackets(, )
	.receivedPacketHandler.DropPackets()
	//nolint:exhaustive // only Initial and 0-RTT need special treatment
	switch  {
	case protocol.EncryptionInitial:
		.droppedInitialKeys = true
		.cryptoStreamHandler.DiscardInitialKeys()
	case protocol.Encryption0RTT:
		.streamsMap.ResetFor0RTT()
		.framer.Handle0RTTRejection()
		return .connFlowController.Reset()
	}
	return .cryptoStreamManager.Drop()
}

// Called on the client when restoring transport parameters saved for 0-RTT.
// Records the restored parameters in qlog, stores them as the peer's parameters,
// and applies the limits (connection ID count, send window, stream limits)
// so 0-RTT data can be sent before the handshake delivers fresh parameters.
func ( *Conn) ( *wire.TransportParameters) {
	if .logger.Debug() {
		.logger.Debugf("Restoring Transport Parameters: %s", )
	}
	if .qlogger != nil {
		.qlogger.RecordEvent(qlog.ParametersSet{
			Restore:                         true,
			Initiator:                       qlog.InitiatorRemote,
			SentBy:                          .perspective,
			OriginalDestinationConnectionID: .OriginalDestinationConnectionID,
			InitialSourceConnectionID:       .InitialSourceConnectionID,
			RetrySourceConnectionID:         .RetrySourceConnectionID,
			StatelessResetToken:             .StatelessResetToken,
			DisableActiveMigration:          .DisableActiveMigration,
			MaxIdleTimeout:                  .MaxIdleTimeout,
			MaxUDPPayloadSize:               .MaxUDPPayloadSize,
			AckDelayExponent:                .AckDelayExponent,
			MaxAckDelay:                     .MaxAckDelay,
			ActiveConnectionIDLimit:         .ActiveConnectionIDLimit,
			InitialMaxData:                  .InitialMaxData,
			InitialMaxStreamDataBidiLocal:   .InitialMaxStreamDataBidiLocal,
			InitialMaxStreamDataBidiRemote:  .InitialMaxStreamDataBidiRemote,
			InitialMaxStreamDataUni:         .InitialMaxStreamDataUni,
			InitialMaxStreamsBidi:           int64(.MaxBidiStreamNum),
			InitialMaxStreamsUni:            int64(.MaxUniStreamNum),
			MaxDatagramFrameSize:            .MaxDatagramFrameSize,
			EnableResetStreamAt:             .EnableResetStreamAt,
		})
	}

	.peerParams = 
	.connIDGenerator.SetMaxActiveConnIDs(.ActiveConnectionIDLimit)
	.connFlowController.UpdateSendWindow(.InitialMaxData)
	.streamsMap.HandleTransportParameters()
}

// Processes the peer's transport parameters received during the handshake.
// Validates them (TRANSPORT_PARAMETER_ERROR on failure), and — when 0-RTT was
// used — checks that the server didn't reduce limits relative to the
// remembered parameters. Servers apply the parameters immediately; clients
// defer application until handshake completion (1-RTT).
func ( *Conn) ( *wire.TransportParameters) error {
	if .qlogger != nil {
		.qlogTransportParameters(, .perspective.Opposite(), false)
	}
	if  := .checkTransportParameters();  != nil {
		return &qerr.TransportError{
			ErrorCode:    qerr.TransportParameterError,
			ErrorMessage: .Error(),
		}
	}

	// For accepted 0-RTT, the new parameters must not reduce any limit below
	// the remembered values (RFC 9001); otherwise it's a PROTOCOL_VIOLATION.
	if .perspective == protocol.PerspectiveClient && .peerParams != nil && .ConnectionState().Used0RTT && !.ValidForUpdate(.peerParams) {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "server sent reduced limits after accepting 0-RTT data",
		}
	}

	.peerParams = 
	// On the client side we have to wait for handshake completion.
	// During a 0-RTT connection, we are only allowed to use the new transport parameters for 1-RTT packets.
	if .perspective == protocol.PerspectiveServer {
		.applyTransportParameters()
		// On the server side, the early connection is ready as soon as we processed
		// the client's transport parameters.
		close(.earlyConnReadyChan)
	}
	return nil
}

// Validates the authenticated connection IDs carried in the peer's transport
// parameters (RFC 9000, Section 7.3): initial_source_connection_id for both
// sides, plus original_destination_connection_id and retry_source_connection_id
// on the client side.
func ( *Conn) ( *wire.TransportParameters) error {
	if .logger.Debug() {
		.logger.Debugf("Processed Transport Parameters: %s", )
	}

	// check the initial_source_connection_id
	if .InitialSourceConnectionID != .handshakeDestConnID {
		return fmt.Errorf("expected initial_source_connection_id to equal %s, is %s", .handshakeDestConnID, .InitialSourceConnectionID)
	}

	// The remaining checks only apply to the client side.
	if .perspective == protocol.PerspectiveServer {
		return nil
	}
	// check the original_destination_connection_id
	if .OriginalDestinationConnectionID != .origDestConnID {
		return fmt.Errorf("expected original_destination_connection_id to equal %s, is %s", .origDestConnID, .OriginalDestinationConnectionID)
	}
	if .retrySrcConnID != nil { // a Retry was performed
		if .RetrySourceConnectionID == nil {
			return errors.New("missing retry_source_connection_id")
		}
		if *.RetrySourceConnectionID != *.retrySrcConnID {
			return fmt.Errorf("expected retry_source_connection_id to equal %s, is %s", .retrySrcConnID, *.RetrySourceConnectionID)
		}
	} else if .RetrySourceConnectionID != nil {
		return errors.New("received retry_source_connection_id, although no Retry was performed")
	}
	return nil
}

// Applies the previously stored peer transport parameters to all connection
// components: idle timeout / keep-alive, streams, ACK delay, flow control,
// RTT stats, connection ID limits, stateless reset token, preferred address,
// and finally initializes the MTU discoverer with the peer's payload size cap.
func ( *Conn) () {
	 := .peerParams
	// Our local idle timeout will always be > 0.
	.idleTimeout = .config.MaxIdleTimeout
	// If the peer advertised an idle timeout, take the minimum of the values.
	if .MaxIdleTimeout > 0 {
		.idleTimeout = min(.idleTimeout, .MaxIdleTimeout)
	}
	// Keep-alives must fire well before the idle timeout expires.
	.keepAliveInterval = min(.config.KeepAlivePeriod, .idleTimeout/2)
	.streamsMap.HandleTransportParameters()
	.frameParser.SetAckDelayExponent(.AckDelayExponent)
	.connFlowController.UpdateSendWindow(.InitialMaxData)
	.rttStats.SetMaxAckDelay(.MaxAckDelay)
	.connIDGenerator.SetMaxActiveConnIDs(.ActiveConnectionIDLimit)
	if .StatelessResetToken != nil {
		.connIDManager.SetStatelessResetToken(*.StatelessResetToken)
	}
	// We don't support connection migration yet, so we don't have any use for the preferred_address.
	// Register the connection ID (and token) so it can be retired.
	if .PreferredAddress != nil {
		// Retire the connection ID.
		.connIDManager.AddFromPreferredAddress(.PreferredAddress.ConnectionID, .PreferredAddress.StatelessResetToken)
	}
	// Cap the packet size at the peer's max_udp_payload_size (if advertised).
	 := protocol.ByteCount(protocol.MaxPacketBufferSize)
	if .MaxUDPPayloadSize > 0 && .MaxUDPPayloadSize <  {
		 = .MaxUDPPayloadSize
	}
	.mtuDiscoverer = newMTUDiscoverer(
		.rttStats,
		protocol.ByteCount(.config.InitialPacketSize),
		,
		.qlogger,
	)
}

// Entry point of the send path: consults the sent-packet handler's send mode
// and dispatches accordingly (send freely, congestion-blocked, pacing-limited,
// ACK-only, or PTO probe packets). Resets the pacing deadline first; the
// branches below re-arm it as needed.
func ( *Conn) ( monotime.Time) error {
	.pacingDeadline = 0

	 := .sentPacketHandler.SendMode()
	switch  {
	case ackhandler.SendAny:
		return .sendPackets()
	case ackhandler.SendNone:
		.blocked = blockModeHardBlocked
		return nil
	case ackhandler.SendPacingLimited:
		 := .sentPacketHandler.TimeUntilSend()
		if .IsZero() {
			 = deadlineSendImmediately
		}
		.pacingDeadline = 
		// Allow sending of an ACK if we're pacing-limited.
		// This makes sure that a peer that is mostly receiving data (and thus has an inaccurate cwnd estimate)
		// sends enough ACKs to allow its peer to utilize the bandwidth.
		return .maybeSendAckOnlyPacket()
	case ackhandler.SendAck:
		// We can at most send a single ACK only packet.
		// There will only be a new ACK after receiving new packets.
		// SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer.
		.blocked = blockModeCongestionLimited
		return .maybeSendAckOnlyPacket()
	case ackhandler.SendPTOInitial, ackhandler.SendPTOHandshake, ackhandler.SendPTOAppData:
		if  := .sendProbePacket(, );  != nil {
			return 
		}
		if .sendQueue.WouldBlock() {
			.scheduleSending()
			return nil
		}
		// Recurse: after the probe there may be more sending budget available.
		return .()
	default:
		return fmt.Errorf("BUG: invalid send mode %d", )
	}
}

// Sends packets while in SendAny mode. Ordered by priority:
//  1. path probe packets for path migration (client, handshake confirmed),
//  2. a path MTU probe packet (cannot be coalesced / GSO'd, as it must exceed
//     the current maximum packet size),
//  3. pending flow-control and post-handshake crypto frames,
//  4. coalesced packets while the handshake is unconfirmed, otherwise the
//     GSO or non-GSO bulk send loop.
func ( *Conn) ( monotime.Time) error {
	if .perspective == protocol.PerspectiveClient && .handshakeConfirmed {
		if  := .pathManagerOutgoing.Load();  != nil {
			, , ,  := .NextPathToProbe()
			if  {
				, ,  := .packer.PackPathProbePacket(, []ackhandler.Frame{}, .version)
				if  != nil {
					return 
				}
				.logger.Debugf("sending path probe packet from %s", .LocalAddr())
				.logShortHeaderPacket(, protocol.ECNNon, .Len())
				.registerPackedShortHeaderPacket(, protocol.ECNNon, )
				.WriteTo(.Data, .conn.RemoteAddr())
				// There's (likely) more data to send. Loop around again.
				.scheduleSending()
				return nil
			}
		}
	}

	// Path MTU Discovery
	// Can't use GSO, since we need to send a single packet that's larger than our current maximum size.
	// Performance-wise, this doesn't matter, since we only send a very small (<10) number of
	// MTU probe packets per connection.
	if .handshakeConfirmed && .mtuDiscoverer != nil && .mtuDiscoverer.ShouldSendProbe() {
		,  := .mtuDiscoverer.GetPing()
		, ,  := .packer.PackMTUProbePacket(, , .version)
		if  != nil {
			return 
		}
		 := .sentPacketHandler.ECNMode(true)
		.logShortHeaderPacket(, , .Len())
		.registerPackedShortHeaderPacket(, , )
		.sendQueue.Send(, 0, )
		// There's (likely) more data to send. Loop around again.
		.scheduleSending()
		return nil
	}

	// Queue a connection-level flow control update, if one is due.
	if  := .connFlowController.GetWindowUpdate();  > 0 {
		.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: })
	}
	if  := .cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize);  != nil {
		.queueControlFrame()
	}

	if !.handshakeConfirmed {
		,  := .packer.PackCoalescedPacket(false, .maxPacketSize(), , .version)
		if  != nil ||  == nil {
			return 
		}
		.sentFirstPacket = true
		if  := .sendPackedCoalescedPacket(, .sentPacketHandler.ECNMode(.IsOnlyShortHeaderPacket()), );  != nil {
			return 
		}
		//nolint:exhaustive // only need to handle pacing-related events here
		switch .sentPacketHandler.SendMode() {
		case ackhandler.SendPacingLimited:
			.resetPacingDeadline()
		case ackhandler.SendAny:
			.pacingDeadline = deadlineSendImmediately
		}
		return nil
	}

	if .conn.capabilities().GSO {
		return .sendPacketsWithGSO()
	}
	return .sendPacketsWithoutGSO()
}

// Bulk send loop for platforms without GSO support: packs and sends one
// short-header packet per UDP datagram until there's nothing left to pack,
// the send queue would block, the pacer/congestion controller stops us,
// or received packets are waiting to be processed.
func ( *Conn) ( monotime.Time) error {
	for {
		 := getPacketBuffer()
		 := .sentPacketHandler.ECNMode(true)
		if ,  := .appendOneShortHeaderPacket(, .maxPacketSize(), , );  != nil {
			if  == errNothingToPack {
				.Release()
				return nil
			}
			return 
		}

		.sendQueue.Send(, 0, )

		if .sendQueue.WouldBlock() {
			return nil
		}
		 := .sentPacketHandler.SendMode()
		if  == ackhandler.SendPacingLimited {
			.resetPacingDeadline()
			return nil
		}
		if  != ackhandler.SendAny {
			return nil
		}
		// Prioritize receiving of packets over sending out more packets.
		.receivedPacketMx.Lock()
		 := !.receivedPackets.Empty()
		.receivedPacketMx.Unlock()
		if  {
			.pacingDeadline = deadlineSendImmediately
			return nil
		}
	}
}

// Bulk send loop using Generic Segmentation Offload: appends multiple
// equally-sized short-header packets into one large buffer and hands it to the
// kernel as a single send. A batch is flushed when nothing more can be packed,
// the send budget is exhausted, the ECN marking would change mid-batch, the
// last packet wasn't full-size, or the buffer lacks room for another packet.
func ( *Conn) ( monotime.Time) error {
	 := getLargePacketBuffer()
	 := .maxPacketSize()

	 := .sentPacketHandler.ECNMode(true)
	for {
		var  bool
		,  := .appendOneShortHeaderPacket(, , , )
		if  != nil {
			if  != errNothingToPack {
				return 
			}
			// Nothing left to pack: flush what's in the buffer (if anything).
			if .Len() == 0 {
				.Release()
				return nil
			}
			 = true
		}

		if ! {
			 := .sentPacketHandler.SendMode()
			if  == ackhandler.SendPacingLimited {
				.resetPacingDeadline()
			}
			if  != ackhandler.SendAny {
				 = true
			}
		}

		// Don't send more packets in this batch if they require a different ECN marking than the previous ones.
		 := .sentPacketHandler.ECNMode(true)

		// Append another packet if
		// 1. The congestion controller and pacer allow sending more
		// 2. The last packet appended was a full-size packet
		// 3. The next packet will have the same ECN marking
		// 4. We still have enough space for another full-size packet in the buffer
		if ! &&  ==  &&  ==  && .Len()+ <= .Cap() {
			continue
		}

		// The segment size passed to the send queue is the (uniform) packet size.
		.sendQueue.Send(, uint16(), )

		if  {
			return nil
		}
		if .sendQueue.WouldBlock() {
			return nil
		}

		// Prioritize receiving of packets over sending out more packets.
		.receivedPacketMx.Lock()
		 := !.receivedPackets.Empty()
		.receivedPacketMx.Unlock()
		if  {
			.pacingDeadline = deadlineSendImmediately
			return nil
		}

		// Start the next batch with the (possibly changed) ECN marking.
		 = 
		 = getLargePacketBuffer()
	}
}

// Re-arms the pacing deadline from the sent-packet handler's pacer.
// A zero deadline is replaced by the deadlineSendImmediately sentinel.
func ( *Conn) () {
	 := .sentPacketHandler.TimeUntilSend()
	if .IsZero() {
		 = deadlineSendImmediately
	}
	.pacingDeadline = 
}

// Sends a packet containing only an ACK (plus padding), used when pacing- or
// congestion-limited. Before handshake confirmation this may be a coalesced
// long-header packet; afterwards a single short-header packet.
// errNothingToPack (no ACK pending) is not an error.
func ( *Conn) ( monotime.Time) error {
	if !.handshakeConfirmed {
		 := .sentPacketHandler.ECNMode(false)
		,  := .packer.PackCoalescedPacket(true, .maxPacketSize(), , .version)
		if  != nil {
			return 
		}
		if  == nil {
			return nil
		}
		return .sendPackedCoalescedPacket(, , )
	}

	 := .sentPacketHandler.ECNMode(true)
	, ,  := .packer.PackAckOnlyPacket(.maxPacketSize(), , .version)
	if  != nil {
		if  == errNothingToPack {
			return nil
		}
		return 
	}
	.logShortHeaderPacket(, , .Len())
	.registerPackedShortHeaderPacket(, , )
	.sendQueue.Send(, 0, )
	return nil
}

// Sends a PTO probe packet at the encryption level corresponding to the given
// PTO send mode. It first tries to retransmit queued probe data; if nothing
// could be packed, it forces the packer to produce a probe (e.g. a PING).
// Failing to produce any probe packet at all is a bug.
func ( *Conn) ( ackhandler.SendMode,  monotime.Time) error {
	var  protocol.EncryptionLevel
	//nolint:exhaustive // We only need to handle the PTO send modes here.
	switch  {
	case ackhandler.SendPTOInitial:
		 = protocol.EncryptionInitial
	case ackhandler.SendPTOHandshake:
		 = protocol.EncryptionHandshake
	case ackhandler.SendPTOAppData:
		 = protocol.Encryption1RTT
	default:
		return fmt.Errorf("connection BUG: unexpected send mode: %d", )
	}
	// Queue probe packets until we actually send out a packet,
	// or until there are no more packets to queue.
	var  *coalescedPacket
	for  == nil {
		if  := .sentPacketHandler.QueueProbePacket(); ! {
			break
		}
		var  error
		,  = .packer.PackPTOProbePacket(, .maxPacketSize(), false, , .version)
		if  != nil {
			return 
		}
	}
	if  == nil {
		// Nothing was queued for retransmission: force a probe packet.
		var  error
		,  = .packer.PackPTOProbePacket(, .maxPacketSize(), true, , .version)
		if  != nil {
			return 
		}
	}
	if  == nil || (len(.longHdrPackets) == 0 && .shortHdrPacket == nil) {
		return fmt.Errorf("connection BUG: couldn't pack %s probe packet: %v", , )
	}
	return .sendPackedCoalescedPacket(, .sentPacketHandler.ECNMode(.IsOnlyShortHeaderPacket()), )
}

// appendOneShortHeaderPacket appends a new packet to the given packetBuffer.
// If there was nothing to pack, the returned size is 0.
// The packed packet is logged and registered with the sent-packet handler.
func ( *Conn) ( *packetBuffer,  protocol.ByteCount,  protocol.ECN,  monotime.Time) (protocol.ByteCount, error) {
	 := .Len()
	,  := .packer.AppendPacket(, , , .version)
	if  != nil {
		return 0, 
	}
	// The packet size is the number of bytes appended to the buffer.
	 := .Len() - 
	.logShortHeaderPacket(, , )
	.registerPackedShortHeaderPacket(, , )
	return , nil
}

// Registers a packed short-header packet with the sent-packet handler.
// Path probe packets are registered with an invalid largest-acked and flagged
// as path probes; regular packets additionally update the idle-timeout
// bookkeeping and notify the connection ID manager.
func ( *Conn) ( shortHeaderPacket,  protocol.ECN,  monotime.Time) {
	if .IsPathProbePacket {
		.sentPacketHandler.SentPacket(
			,
			.PacketNumber,
			protocol.InvalidPacketNumber,
			.StreamFrames,
			.Frames,
			protocol.Encryption1RTT,
			,
			.Length,
			.IsPathMTUProbePacket,
			true,
		)
		return
	}
	// Remember when the first ack-eliciting packet after an idle period was sent
	// (used for idle timeout accounting).
	if .firstAckElicitingPacketAfterIdleSentTime.IsZero() && (len(.StreamFrames) > 0 || ackhandler.HasAckElicitingFrames(.Frames)) {
		.firstAckElicitingPacketAfterIdleSentTime = 
	}

	 := protocol.InvalidPacketNumber
	if .Ack != nil {
		 = .Ack.LargestAcked()
	}
	.sentPacketHandler.SentPacket(
		,
		.PacketNumber,
		,
		.StreamFrames,
		.Frames,
		protocol.Encryption1RTT,
		,
		.Length,
		.IsPathMTUProbePacket,
		false,
	)
	.connIDManager.SentPacket()
}

// Sends a packed coalesced packet: registers each contained long-header packet
// (and the optional trailing short-header packet) with the sent-packet handler,
// drops Initial keys on the client once the first Handshake packet goes out,
// and finally hands the buffer to the send queue.
func ( *Conn) ( *coalescedPacket,  protocol.ECN,  monotime.Time) error {
	.logCoalescedPacket(, )
	for ,  := range .longHdrPackets {
		if .firstAckElicitingPacketAfterIdleSentTime.IsZero() && .IsAckEliciting() {
			.firstAckElicitingPacketAfterIdleSentTime = 
		}
		 := protocol.InvalidPacketNumber
		if .ack != nil {
			 = .ack.LargestAcked()
		}
		.sentPacketHandler.SentPacket(
			,
			.header.PacketNumber,
			,
			.streamFrames,
			.frames,
			.EncryptionLevel(),
			,
			.length,
			false,
			false,
		)
		if .perspective == protocol.PerspectiveClient && .EncryptionLevel() == protocol.EncryptionHandshake &&
			!.droppedInitialKeys {
			// On the client side, Initial keys are dropped as soon as the first Handshake packet is sent.
			// See Section 4.9.1 of RFC 9001.
			if  := .dropEncryptionLevel(protocol.EncryptionInitial, );  != nil {
				return 
			}
		}
	}
	if  := .shortHdrPacket;  != nil {
		if .firstAckElicitingPacketAfterIdleSentTime.IsZero() && .IsAckEliciting() {
			.firstAckElicitingPacketAfterIdleSentTime = 
		}
		 := protocol.InvalidPacketNumber
		if .Ack != nil {
			 = .Ack.LargestAcked()
		}
		.sentPacketHandler.SentPacket(
			,
			.PacketNumber,
			,
			.StreamFrames,
			.Frames,
			protocol.Encryption1RTT,
			,
			.Length,
			.IsPathMTUProbePacket,
			false,
		)
	}
	.connIDManager.SentPacket()
	.sendQueue.Send(.buffer, 0, )
	return nil
}

// Packs and writes a CONNECTION_CLOSE packet for the given error.
// Transport errors and application errors get their respective frame type;
// any other error type is a bug and is reported as an INTERNAL_ERROR.
// Returns the raw packet bytes so they can be replayed for late packets.
func ( *Conn) ( error) ([]byte, error) {
	var  *coalescedPacket
	var  error
	var  *qerr.TransportError
	var  *qerr.ApplicationError
	if errors.As(, &) {
		,  = .packer.PackConnectionClose(, .maxPacketSize(), .version)
	} else if errors.As(, &) {
		,  = .packer.PackApplicationClose(, .maxPacketSize(), .version)
	} else {
		,  = .packer.PackConnectionClose(&qerr.TransportError{
			ErrorCode:    qerr.InternalError,
			ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", .Error()),
		}, .maxPacketSize(), .version)
	}
	if  != nil {
		return nil, 
	}
	 := .sentPacketHandler.ECNMode(.IsOnlyShortHeaderPacket())
	.logCoalescedPacket(, )
	// Write directly (bypassing the send queue), since the connection is closing.
	return .buffer.Data, .conn.Write(.buffer.Data, 0, )
}

// Returns the current maximum packet size: the MTU discoverer's estimate once
// it is initialized, otherwise a perspective-dependent default.
func ( *Conn) () protocol.ByteCount {
	if .mtuDiscoverer == nil {
		// Use the configured packet size on the client side.
		// If the server sends a max_udp_payload_size that's smaller than this size, we can ignore this:
		// Apparently the server still processed the (fully padded) Initial packet anyway.
		if .perspective == protocol.PerspectiveClient {
			return protocol.ByteCount(.config.InitialPacketSize)
		}
		// On the server side, there's no downside to using 1200 bytes until we received the client's transport
		// parameters:
		// * If the first packet didn't contain the entire ClientHello, all we can do is ACK that packet. We don't
		//   need a lot of bytes for that.
		// * If it did, we will have processed the transport parameters and initialized the MTU discoverer.
		return protocol.MinInitialPacketSize
	}
	return .mtuDiscoverer.CurrentSize()
}

// AcceptStream returns the next stream opened by the peer, blocking until one is available.
// The context can be used to cancel the wait.
func ( *Conn) ( context.Context) (*Stream, error) {
	return .streamsMap.AcceptStream()
}

// AcceptUniStream returns the next unidirectional stream opened by the peer, blocking until one is available.
// The context can be used to cancel the wait.
func ( *Conn) ( context.Context) (*ReceiveStream, error) {
	return .streamsMap.AcceptUniStream()
}

// OpenStream opens a new bidirectional QUIC stream.
// There is no signaling to the peer about new streams:
// The peer can only accept the stream after data has been sent on the stream,
// or the stream has been reset or closed.
// When reaching the peer's stream limit, it is not possible to open a new stream until the
// peer raises the stream limit. In that case, a [StreamLimitReachedError] is returned.
// This call never blocks; use OpenStreamSync to wait for stream credit.
func ( *Conn) () (*Stream, error) {
	return .streamsMap.OpenStream()
}

// OpenStreamSync opens a new bidirectional QUIC stream.
// It blocks until a new stream can be opened.
// There is no signaling to the peer about new streams:
// The peer can only accept the stream after data has been sent on the stream,
// or the stream has been reset or closed.
// The context can be used to cancel the wait.
func ( *Conn) ( context.Context) (*Stream, error) {
	return .streamsMap.OpenStreamSync()
}

// OpenUniStream opens a new outgoing unidirectional QUIC stream.
// There is no signaling to the peer about new streams:
// The peer can only accept the stream after data has been sent on the stream,
// or the stream has been reset or closed.
// When reaching the peer's stream limit, it is not possible to open a new stream until the
// peer raises the stream limit. In that case, a [StreamLimitReachedError] is returned.
// This call never blocks; use OpenUniStreamSync to wait for stream credit.
func ( *Conn) () (*SendStream, error) {
	return .streamsMap.OpenUniStream()
}

// OpenUniStreamSync opens a new outgoing unidirectional QUIC stream.
// It blocks until a new stream can be opened.
// There is no signaling to the peer about new streams:
// The peer can only accept the stream after data has been sent on the stream,
// or the stream has been reset or closed.
// The context can be used to cancel the wait.
func ( *Conn) ( context.Context) (*SendStream, error) {
	return .streamsMap.OpenUniStreamSync()
}

// Creates a stream flow controller for the given stream ID.
// The initial send window comes from the peer's transport parameters and
// depends on stream type and which side initiated the stream; the receive
// window limits come from the local config.
func ( *Conn) ( protocol.StreamID) flowcontrol.StreamFlowController {
	// Default: unidirectional stream (outgoing from the peer's perspective).
	 := .peerParams.InitialMaxStreamDataUni
	if .Type() == protocol.StreamTypeBidi {
		if .InitiatedBy() == .perspective {
			// We opened this stream: the peer's "remote" limit applies to our sending.
			 = .peerParams.InitialMaxStreamDataBidiRemote
		} else {
			 = .peerParams.InitialMaxStreamDataBidiLocal
		}
	}
	return flowcontrol.NewStreamFlowController(
		,
		.connFlowController,
		protocol.ByteCount(.config.InitialStreamReceiveWindow),
		protocol.ByteCount(.config.MaxStreamReceiveWindow),
		,
		.rttStats,
		.logger,
	)
}

// scheduleSending signals that we have data for sending.
// The non-blocking send makes the call safe from any goroutine: if a signal
// is already pending, another one isn't needed.
func ( *Conn) () {
	select {
	case .sendingScheduled <- struct{}{}:
	default:
	}
}

// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys.
// The qlog packet type is only used for logging purposes.
// The queue is bounded (protocol.MaxUndecryptablePackets) as a DoS prevention;
// overflowing packets are dropped and recorded in qlog.
// Must not be called after handshake completion (all keys are then available).
func ( *Conn) ( receivedPacket,  qlog.PacketType,  qlog.DatagramID) {
	if .handshakeComplete {
		panic("shouldn't queue undecryptable packets after handshake completion")
	}
	if len(.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
		if .qlogger != nil {
			.qlogger.RecordEvent(qlog.PacketDropped{
				Header: qlog.PacketHeader{
					PacketType:   ,
					PacketNumber: protocol.InvalidPacketNumber,
				},
				Raw:        qlog.RawInfo{Length: int(.Size())},
				DatagramID: ,
				Trigger:    qlog.PacketDropDOSPrevention,
			})
		}
		.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", .Size())
		return
	}
	.logger.Infof("Queueing packet (%d bytes) for later decryption", .Size())
	if .qlogger != nil {
		.qlogger.RecordEvent(qlog.PacketBuffered{
			Header: qlog.PacketHeader{
				PacketType:   ,
				PacketNumber: protocol.InvalidPacketNumber,
			},
			Raw:        qlog.RawInfo{Length: int(.Size())},
			DatagramID: ,
		})
	}
	.undecryptablePackets = append(.undecryptablePackets, receivedPacketWithDatagramID{receivedPacket: , datagramID: })
}

// Queues a control frame with the framer and wakes up the send loop.
func ( *Conn) ( wire.Frame) {
	.framer.QueueControlFrame()
	.scheduleSending()
}

// Callback fired when connection-level data becomes available; wakes up the send loop.
func ( *Conn) () { .scheduleSending() }

// Callback fired when a stream has data to send: marks the stream active in
// the framer and wakes up the send loop.
func ( *Conn) ( protocol.StreamID,  *SendStream) {
	.framer.AddActiveStream(, )
	.scheduleSending()
}

// Callback fired when a stream has control frames (e.g. RESET_STREAM,
// STOP_SENDING) to send: registers the stream's frame getter with the framer
// and wakes up the send loop.
func ( *Conn) ( protocol.StreamID,  streamControlFrameGetter) {
	.framer.AddStreamWithControlFrames(, )
	.scheduleSending()
}

// Callback fired when a stream has completed: removes it from the streams map
// (closing the connection on failure) and from the framer's active set.
func ( *Conn) ( protocol.StreamID) {
	if  := .streamsMap.DeleteStream();  != nil {
		.closeLocal()
	}
	.framer.RemoveActiveStream()
}

// SendDatagram sends a message using a QUIC datagram, as specified in RFC 9221,
// if the peer enabled datagram support.
// There is no delivery guarantee for DATAGRAM frames, they are not retransmitted if lost.
// The payload of the datagram needs to fit into a single QUIC packet.
// In addition, a datagram may be dropped before being sent out if the available packet size suddenly decreases.
// If the payload is too large to be sent at the current time, a DatagramTooLargeError is returned.
func ( *Conn) ( []byte) error {
	if !.supportsDatagrams() {
		return errors.New("datagram support disabled")
	}

	 := &wire.DatagramFrame{DataLenPresent: true}
	// The payload size estimate is conservative.
	// Under many circumstances we could send a few more bytes.
	 := min(
		.MaxDataLen(.peerParams.MaxDatagramFrameSize, .version),
		protocol.ByteCount(.currentMTUEstimate.Load()),
	)
	if protocol.ByteCount(len()) >  {
		return &DatagramTooLargeError{MaxDatagramPayloadSize: int64()}
	}
	// Copy the payload: the caller may reuse its buffer after this call returns.
	.Data = make([]byte, len())
	copy(.Data, )
	return .datagramQueue.Add()
}

// ReceiveDatagram gets a message received in a QUIC datagram, as specified in RFC 9221.
// It blocks until a datagram is available; the context can be used to cancel the wait.
func ( *Conn) ( context.Context) ([]byte, error) {
	if !.config.EnableDatagrams {
		return nil, errors.New("datagram support disabled")
	}
	return .datagramQueue.Receive()
}

// LocalAddr returns the local address of the QUIC connection (delegates to the underlying send connection).
func ( *Conn) () net.Addr { return .conn.LocalAddr() }

// RemoteAddr returns the remote address of the QUIC connection (delegates to the underlying send connection).
func ( *Conn) () net.Addr { return .conn.RemoteAddr() }

// getPathManager lazily initializes the Conn's pathManagerOutgoing.
// May create multiple pathManagerOutgoing objects if called concurrently;
// CompareAndSwap guarantees that exactly one instance wins and is returned
// to all callers.
func ( *Conn) () *pathManagerOutgoing {
	 := .pathManagerOutgoing.Load()
	if  != nil {
		// Path manager is already initialized
		return 
	}

	// Initialize the path manager
	 := newPathManagerOutgoing(
		.connIDManager.GetConnIDForPath,
		.connIDManager.RetireConnIDForPath,
		.scheduleSending,
	)
	if .pathManagerOutgoing.CompareAndSwap(, ) {
		return 
	}

	// Swap failed. A concurrent writer wrote first, use their value.
	return .pathManagerOutgoing.Load()
}

// Creates a new network path over the given Transport (connection migration).
// Only clients may initiate migration, and only if the server didn't disable
// active migration in its transport parameters. The closure wires the new
// path's connection IDs into the transport's packet handler map once the path
// is used.
func ( *Conn) ( *Transport) (*Path, error) {
	if .perspective == protocol.PerspectiveServer {
		return nil, errors.New("server cannot initiate connection migration")
	}
	if .peerParams.DisableActiveMigration {
		return nil, errors.New("server disabled connection migration")
	}
	if  := .init(false);  != nil {
		return nil, 
	}
	return .getPathManager().NewPath(
		,
		200*time.Millisecond, // initial RTT estimate
		func() {
			 := (*packetHandlerMap)()
			.connIDGenerator.AddConnRunner(
				,
				connRunnerCallbacks{
					AddConnectionID:    func( protocol.ConnectionID) { .Add(, ) },
					RemoveConnectionID: .Remove,
					ReplaceWithClosed:  .ReplaceWithClosed,
				},
			)
		},
	), nil
}

// HandshakeComplete blocks until the handshake completes (or fails).
// For the client, data sent before completion of the handshake is encrypted with 0-RTT keys.
// For the server, data sent before completion of the handshake is encrypted with 1-RTT keys,
// however the client's identity is only verified once the handshake completes.
// The returned channel is closed on completion.
func ( *Conn) () <-chan struct{} {
	return .handshakeCompleteChan
}

// QlogTrace returns the qlog trace of the QUIC connection.
// It is nil if qlog is not enabled.
func ( *Conn) () qlogwriter.Trace {
	return .qlogTrace
}

// NextConnection transitions a connection to be usable after a 0-RTT rejection.
// It waits for the handshake to complete and then enables the connection for normal use.
// This should be called when the server rejects 0-RTT and the application receives
// [Err0RTTRejected] errors.
//
// Note that 0-RTT rejection invalidates all data sent in 0-RTT packets. It is the
// application's responsibility to handle this (for example by resending the data).
func ( *Conn) ( context.Context) (*Conn, error) {
	// The handshake might fail after the server rejected 0-RTT.
	// This could happen if the Finished message is malformed or never received.
	select {
	case <-.Done():
		return nil, context.Cause()
	case <-.Context().Done():
		// The connection was closed; return the connection so the caller sees the close error.
	case <-.HandshakeComplete():
		// Switch the stream maps back from the 0-RTT state to the post-handshake state.
		.streamsMap.UseResetMaps()
	}
	return , nil
}

// estimateMaxPayloadSize estimates the maximum payload size for short header packets.
// It is not very sophisticated: it just subtracts the size of the header (assuming the
// maximum connection ID length) and the size of the encryption tag.
func estimateMaxPayloadSize(maxPacketSize protocol.ByteCount) protocol.ByteCount {
	// 1 byte type byte + up to 20 bytes connection ID + 16 bytes AEAD tag.
	const shortHeaderOverhead = 1 + 20 + 16
	return maxPacketSize - shortHeaderOverhead
}