package quic

import (
	
	
	
	
	
	
	
	
	
	
	
	

	
	
	
	
	
	
	
	
	
)

// An unpacker decrypts and parses packets received from the peer.
type unpacker interface {
	// UnpackLongHeader unpacks a long header packet (Initial, 0-RTT, Handshake).
	UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error)
	// UnpackShortHeader unpacks a 1-RTT (short header) packet. It returns the
	// decoded packet number, the encoded packet number length, the key phase
	// bit, and the decrypted payload.
	UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error)
}

// A streamManager keeps track of all send and receive streams of a connection,
// enforces stream limits, and hands out new streams.
type streamManager interface {
	// GetOrOpenSendStream / GetOrOpenReceiveStream return the stream with the
	// given ID, opening it if it was implicitly opened by the peer.
	GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
	GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error)
	OpenStream() (Stream, error)
	OpenUniStream() (SendStream, error)
	OpenStreamSync(context.Context) (Stream, error)
	OpenUniStreamSync(context.Context) (SendStream, error)
	AcceptStream(context.Context) (Stream, error)
	AcceptUniStream(context.Context) (ReceiveStream, error)
	DeleteStream(protocol.StreamID) error
	// UpdateLimits applies the peer's transport parameters to existing streams.
	UpdateLimits(*wire.TransportParameters)
	HandleMaxStreamsFrame(*wire.MaxStreamsFrame)
	CloseWithError(error)
	// ResetFor0RTT / UseResetMaps are used when 0-RTT is rejected and the
	// stream state needs to be rolled back.
	ResetFor0RTT()
	UseResetMaps()
}

// A cryptoStreamHandler drives the TLS handshake and manages the packet
// protection keys derived from it.
type cryptoStreamHandler interface {
	// StartHandshake begins the handshake. Progress is reported via NextEvent.
	StartHandshake(context.Context) error
	ChangeConnectionID(protocol.ConnectionID)
	SetLargest1RTTAcked(protocol.PacketNumber) error
	SetHandshakeConfirmed()
	GetSessionTicket() ([]byte, error)
	// NextEvent returns the next handshake event (e.g. new keys available).
	NextEvent() handshake.Event
	DiscardInitialKeys()
	// HandleMessage processes a TLS handshake message received at the given
	// encryption level.
	HandleMessage([]byte, protocol.EncryptionLevel) error
	io.Closer
	ConnectionState() handshake.ConnectionState
}

// A receivedPacket is a UDP datagram (or, after splitting, a single coalesced
// QUIC packet) received from the network, together with receive metadata.
type receivedPacket struct {
	buffer *packetBuffer // reference-counted backing buffer for data

	remoteAddr net.Addr
	rcvTime    time.Time
	data       []byte

	ecn protocol.ECN // ECN bits read from the IP header

	info packetInfo // only valid if the contained IP address is valid
}

func ( *receivedPacket) () protocol.ByteCount { return protocol.ByteCount(len(.data)) }

func ( *receivedPacket) () *receivedPacket {
	return &receivedPacket{
		remoteAddr: .remoteAddr,
		rcvTime:    .rcvTime,
		data:       .data,
		buffer:     .buffer,
		ecn:        .ecn,
		info:       .info,
	}
}

// A connRunner is the interface a connection uses to register and deregister
// itself (and its connection IDs / stateless reset tokens) with the packet
// handler map of the Transport it belongs to.
type connRunner interface {
	Add(protocol.ConnectionID, packetHandler) bool
	Remove(protocol.ConnectionID)
	// ReplaceWithClosed replaces the given connection IDs with a handler that
	// responds to incoming packets with the (already serialized)
	// CONNECTION_CLOSE packet for the given duration.
	ReplaceWithClosed([]protocol.ConnectionID, []byte, time.Duration)
	AddResetToken(protocol.StatelessResetToken, packetHandler)
	RemoveResetToken(protocol.StatelessResetToken)
}

// closeError carries the reason for closing the connection.
type closeError struct {
	err       error
	immediate bool // if set, skip the draining period and tear down immediately
}

// errCloseForRecreating is used internally (e.g. after receiving a Version
// Negotiation packet) to signal that the connection should be closed and
// recreated with the given version and starting packet number.
type errCloseForRecreating struct {
	nextPacketNumber protocol.PacketNumber
	nextVersion      protocol.Version
}

func ( *errCloseForRecreating) () string {
	return "closing connection in order to recreate it"
}

// connTracingID is a monotonically increasing counter used to hand out a
// unique tracing ID per connection. The first ID handed out is 1.
// (The type atomic.Uint64 already guarantees atomic access; the old
// "to be accessed atomically" comment was stale.)
var connTracingID atomic.Uint64

// nextConnTracingID returns the next unique connection tracing ID.
func nextConnTracingID() ConnectionTracingID { return ConnectionTracingID(connTracingID.Add(1)) }

// A Connection is a QUIC connection
type connection struct {
	// Destination connection ID used during the handshake.
	// Used to check source connection ID on incoming packets.
	handshakeDestConnID protocol.ConnectionID
	// Set for the client. Destination connection ID used on the first Initial sent.
	origDestConnID protocol.ConnectionID
	retrySrcConnID *protocol.ConnectionID // only set for the client (and if a Retry was performed)

	srcConnIDLen int

	perspective protocol.Perspective
	version     protocol.Version
	config      *Config

	conn      sendConn
	sendQueue sender

	// lazily initialized: most connections never migrate
	pathManager         *pathManager
	largestRcvdAppData  protocol.PacketNumber
	pathManagerOutgoing atomic.Pointer[pathManagerOutgoing]

	streamsMap      streamManager
	connIDManager   *connIDManager
	connIDGenerator *connIDGenerator

	rttStats *utils.RTTStats

	cryptoStreamManager   *cryptoStreamManager
	sentPacketHandler     ackhandler.SentPacketHandler
	receivedPacketHandler ackhandler.ReceivedPacketHandler
	retransmissionQueue   *retransmissionQueue
	framer                *framer
	connFlowController    flowcontrol.ConnectionFlowController
	tokenStoreKey         string                    // only set for the client
	tokenGenerator        *handshake.TokenGenerator // only set for the server

	unpacker      unpacker
	frameParser   wire.FrameParser
	packer        packer
	mtuDiscoverer mtuDiscoverer // initialized when the transport parameters are received

	currentMTUEstimate atomic.Uint32

	initialStream       *initialCryptoStream
	handshakeStream     *cryptoStream
	oneRTTStream        *cryptoStream // only set for the server
	cryptoStreamHandler cryptoStreamHandler

	// notifyReceivedPacket signals the run loop that new packets were queued
	// in receivedPackets (buffered with capacity 1, non-blocking send).
	notifyReceivedPacket chan struct{}
	sendingScheduled     chan struct{}
	receivedPacketMx     sync.Mutex
	receivedPackets      ringbuffer.RingBuffer[receivedPacket]

	// closeChan is used to notify the run loop that it should terminate
	closeChan chan struct{}
	closeErr  atomic.Pointer[closeError]

	ctx                   context.Context
	ctxCancel             context.CancelCauseFunc
	handshakeCompleteChan chan struct{}

	undecryptablePackets          []receivedPacket // undecryptable packets, waiting for a change in encryption level
	undecryptablePacketsToProcess []receivedPacket

	earlyConnReadyChan chan struct{}
	sentFirstPacket    bool
	droppedInitialKeys bool
	handshakeComplete  bool
	handshakeConfirmed bool

	receivedRetry       bool
	versionNegotiated   bool
	receivedFirstPacket bool

	// the minimum of the max_idle_timeout values advertised by both endpoints
	idleTimeout  time.Duration
	creationTime time.Time
	// The idle timeout is set based on the max of the time we received the last packet...
	lastPacketReceivedTime time.Time
	// ... and the time we sent a new ack-eliciting packet after receiving a packet.
	firstAckElicitingPacketAfterIdleSentTime time.Time
	// pacingDeadline is the time when the next packet should be sent
	pacingDeadline time.Time

	peerParams *wire.TransportParameters

	timer connectionTimer
	// keepAlivePingSent stores whether a keep alive PING is in flight.
	// It is reset as soon as we receive a packet from the peer.
	keepAlivePingSent bool
	keepAliveInterval time.Duration

	datagramQueue *datagramQueue

	connStateMutex sync.Mutex
	connState      ConnectionState

	logID  string
	tracer *logging.ConnectionTracer
	logger utils.Logger
}

// Compile-time interface assertions: connection implements the public
// Connection and EarlyConnection interfaces, and the internal streamSender.
var (
	_ Connection      = &connection{}
	_ EarlyConnection = &connection{}
	_ streamSender    = &connection{}
)

// newConnection creates a new QUIC connection for the server side.
// It is declared as a variable so that it can be mocked in tests.
// NOTE(review): identifiers (parameters, receivers, locals) in this file
// appear to have been stripped, so this code does not compile as-is; the
// comments below annotate the structure on a best-effort basis.
var newConnection = func(
	 context.Context,
	 context.CancelCauseFunc,
	 sendConn,
	 connRunner,
	 protocol.ConnectionID,
	 *protocol.ConnectionID,
	 protocol.ConnectionID,
	 protocol.ConnectionID,
	 protocol.ConnectionID,
	 ConnectionIDGenerator,
	 *statelessResetter,
	 *Config,
	 *tls.Config,
	 *handshake.TokenGenerator,
	 bool,
	 time.Duration,
	 *logging.ConnectionTracer,
	 utils.Logger,
	 protocol.Version,
) quicConn {
	// Construct the connection with the server perspective; the 1-RTT crypto
	// stream is only needed on the server (for post-handshake session tickets).
	 := &connection{
		ctx:                 ,
		ctxCancel:           ,
		conn:                ,
		config:              ,
		handshakeDestConnID: ,
		srcConnIDLen:        .Len(),
		tokenGenerator:      ,
		oneRTTStream:        newCryptoStream(),
		perspective:         protocol.PerspectiveServer,
		tracer:              ,
		logger:              ,
		version:             ,
	}
	// Use a connection ID for the log ID if one is available.
	if .Len() > 0 {
		.logID = .String()
	} else {
		.logID = .String()
	}
	// The connection ID manager tracks connection IDs issued by the peer and
	// their stateless reset tokens.
	.connIDManager = newConnIDManager(
		,
		func( protocol.StatelessResetToken) { .AddResetToken(, ) },
		.RemoveResetToken,
		.queueControlFrame,
	)
	// The connection ID generator issues our own connection IDs to the peer
	// and registers them with the packet handler map.
	.connIDGenerator = newConnIDGenerator(
		,
		,
		&,
		,
		connRunnerCallbacks{
			AddConnectionID:    func( protocol.ConnectionID) { .Add(, ) },
			RemoveConnectionID: .Remove,
			ReplaceWithClosed:  .ReplaceWithClosed,
		},
		.queueControlFrame,
		,
	)
	.preSetup()
	.rttStats.SetInitialRTT()
	// Set up loss detection / ACK handling; the server starts at packet number 0.
	.sentPacketHandler, .receivedPacketHandler = ackhandler.NewAckHandler(
		0,
		protocol.ByteCount(.config.InitialPacketSize),
		.rttStats,
		,
		.conn.capabilities().ECN,
		.perspective,
		.tracer,
		.logger,
	)
	.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(.config.InitialPacketSize))))
	 := .GetStatelessResetToken()
	// Build the transport parameters advertised to the client.
	 := &wire.TransportParameters{
		InitialMaxStreamDataBidiLocal:   protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataBidiRemote:  protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataUni:         protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxData:                  protocol.ByteCount(.config.InitialConnectionReceiveWindow),
		MaxIdleTimeout:                  .config.MaxIdleTimeout,
		MaxBidiStreamNum:                protocol.StreamNum(.config.MaxIncomingStreams),
		MaxUniStreamNum:                 protocol.StreamNum(.config.MaxIncomingUniStreams),
		MaxAckDelay:                     protocol.MaxAckDelayInclGranularity,
		AckDelayExponent:                protocol.AckDelayExponent,
		MaxUDPPayloadSize:               protocol.MaxPacketBufferSize,
		StatelessResetToken:             &,
		OriginalDestinationConnectionID: ,
		// For interoperability with quic-go versions before May 2023, this value must be set to a value
		// different from protocol.DefaultActiveConnectionIDLimit.
		// If set to the default value, it will be omitted from the transport parameters, which will make
		// old quic-go versions interpret it as 0, instead of the default value of 2.
		// See https://github.com/quic-go/quic-go/pull/3806.
		ActiveConnectionIDLimit:   protocol.MaxActiveConnectionIDs,
		InitialSourceConnectionID: ,
		RetrySourceConnectionID:   ,
	}
	// Advertise DATAGRAM support (RFC 9221) only if enabled in the config.
	if .config.EnableDatagrams {
		.MaxDatagramFrameSize = wire.MaxDatagramSize
	} else {
		.MaxDatagramFrameSize = protocol.InvalidByteCount
	}
	if .tracer != nil && .tracer.SentTransportParameters != nil {
		.tracer.SentTransportParameters()
	}
	// Set up the TLS handshake machinery, then wire up packer / unpacker /
	// crypto stream manager.
	 := handshake.NewCryptoSetupServer(
		,
		.LocalAddr(),
		.RemoteAddr(),
		,
		,
		.Allow0RTT,
		.rttStats,
		,
		,
		.version,
	)
	.cryptoStreamHandler = 
	.packer = newPacketPacker(, .connIDManager.Get, .initialStream, .handshakeStream, .sentPacketHandler, .retransmissionQueue, , .framer, .receivedPacketHandler, .datagramQueue, .perspective)
	.unpacker = newPacketUnpacker(, .srcConnIDLen)
	.cryptoStreamManager = newCryptoStreamManager(.initialStream, .handshakeStream, .oneRTTStream)
	return 
}

// newClientConnection creates a new QUIC connection for the client side.
// Declared as a variable, such that we can mock it in the tests.
// NOTE(review): identifiers in this file appear to have been stripped, so this
// code does not compile as-is; comments annotate the structure best-effort.
var newClientConnection = func(
	 context.Context,
	 sendConn,
	 connRunner,
	 protocol.ConnectionID,
	 protocol.ConnectionID,
	 ConnectionIDGenerator,
	 *statelessResetter,
	 *Config,
	 *tls.Config,
	 protocol.PacketNumber,
	 bool,
	 bool,
	 *logging.ConnectionTracer,
	 utils.Logger,
	 protocol.Version,
) quicConn {
	 := &connection{
		conn:                ,
		config:              ,
		origDestConnID:      ,
		handshakeDestConnID: ,
		srcConnIDLen:        .Len(),
		perspective:         protocol.PerspectiveClient,
		logID:               .String(),
		logger:              ,
		tracer:              ,
		versionNegotiated:   ,
		version:             ,
	}
	// Track peer-issued connection IDs and their stateless reset tokens.
	.connIDManager = newConnIDManager(
		,
		func( protocol.StatelessResetToken) { .AddResetToken(, ) },
		.RemoveResetToken,
		.queueControlFrame,
	)
	// Issue our own connection IDs to the peer. The client has no retry source
	// connection ID, hence the nil argument.
	.connIDGenerator = newConnIDGenerator(
		,
		,
		nil,
		,
		connRunnerCallbacks{
			AddConnectionID:    func( protocol.ConnectionID) { .Add(, ) },
			RemoveConnectionID: .Remove,
			ReplaceWithClosed:  .ReplaceWithClosed,
		},
		.queueControlFrame,
		,
	)
	.ctx, .ctxCancel = context.WithCancelCause()
	.preSetup()
	// The client may resume at a non-zero initial packet number (e.g. after a
	// Version Negotiation / Retry recreation).
	.sentPacketHandler, .receivedPacketHandler = ackhandler.NewAckHandler(
		,
		protocol.ByteCount(.config.InitialPacketSize),
		.rttStats,
		false, // has no effect
		.conn.capabilities().ECN,
		.perspective,
		.tracer,
		.logger,
	)
	.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(.config.InitialPacketSize))))
	 := newCryptoStream()
	// Build the transport parameters advertised to the server.
	 := &wire.TransportParameters{
		InitialMaxStreamDataBidiRemote: protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataBidiLocal:  protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxStreamDataUni:        protocol.ByteCount(.config.InitialStreamReceiveWindow),
		InitialMaxData:                 protocol.ByteCount(.config.InitialConnectionReceiveWindow),
		MaxIdleTimeout:                 .config.MaxIdleTimeout,
		MaxBidiStreamNum:               protocol.StreamNum(.config.MaxIncomingStreams),
		MaxUniStreamNum:                protocol.StreamNum(.config.MaxIncomingUniStreams),
		MaxAckDelay:                    protocol.MaxAckDelayInclGranularity,
		MaxUDPPayloadSize:              protocol.MaxPacketBufferSize,
		AckDelayExponent:               protocol.AckDelayExponent,
		// For interoperability with quic-go versions before May 2023, this value must be set to a value
		// different from protocol.DefaultActiveConnectionIDLimit.
		// If set to the default value, it will be omitted from the transport parameters, which will make
		// old quic-go versions interpret it as 0, instead of the default value of 2.
		// See https://github.com/quic-go/quic-go/pull/3806.
		ActiveConnectionIDLimit:   protocol.MaxActiveConnectionIDs,
		InitialSourceConnectionID: ,
	}
	// Advertise DATAGRAM support (RFC 9221) only if enabled in the config.
	if .config.EnableDatagrams {
		.MaxDatagramFrameSize = wire.MaxDatagramSize
	} else {
		.MaxDatagramFrameSize = protocol.InvalidByteCount
	}
	if .tracer != nil && .tracer.SentTransportParameters != nil {
		.tracer.SentTransportParameters()
	}
	 := handshake.NewCryptoSetupClient(
		,
		,
		,
		,
		.rttStats,
		,
		,
		.version,
	)
	.cryptoStreamHandler = 
	.cryptoStreamManager = newCryptoStreamManager(.initialStream, .handshakeStream, )
	.unpacker = newPacketUnpacker(, .srcConnIDLen)
	.packer = newPacketPacker(, .connIDManager.Get, .initialStream, .handshakeStream, .sentPacketHandler, .retransmissionQueue, , .framer, .receivedPacketHandler, .datagramQueue, .perspective)
	// The token store key is the SNI if set, the remote address otherwise.
	if len(.ServerName) > 0 {
		.tokenStoreKey = .ServerName
	} else {
		.tokenStoreKey = .RemoteAddr().String()
	}
	// If we have a cached address validation token for this server, use it on
	// the first Initial, and seed the RTT estimate from the cached value.
	if .config.TokenStore != nil {
		if  := .config.TokenStore.Pop(.tokenStoreKey);  != nil {
			.packer.SetToken(.data)
			.rttStats.SetInitialRTT(.rtt)
		}
	}
	return 
}

// preSetup initializes the state shared by client and server connections:
// crypto streams, send queue, flow control, stream map, framer, packet
// queues, channels, and timestamps. It must run before the first packet is
// sent or received.
func ( *connection) () {
	.largestRcvdAppData = protocol.InvalidPacketNumber
	.initialStream = newInitialCryptoStream(.perspective == protocol.PerspectiveClient)
	.handshakeStream = newCryptoStream()
	.sendQueue = newSendQueue(.conn)
	.retransmissionQueue = newRetransmissionQueue()
	.frameParser = *wire.NewFrameParser(.config.EnableDatagrams)
	.rttStats = &utils.RTTStats{}
	// Connection-level flow control; the AllowConnectionWindowIncrease
	// callback lets the application veto window auto-tuning increases.
	.connFlowController = flowcontrol.NewConnectionFlowController(
		protocol.ByteCount(.config.InitialConnectionReceiveWindow),
		protocol.ByteCount(.config.MaxConnectionReceiveWindow),
		func( protocol.ByteCount) bool {
			if .config.AllowConnectionWindowIncrease == nil {
				return true
			}
			return .config.AllowConnectionWindowIncrease(, uint64())
		},
		.rttStats,
		.logger,
	)
	.earlyConnReadyChan = make(chan struct{})
	.streamsMap = newStreamsMap(
		.ctx,
		,
		.queueControlFrame,
		.newFlowController,
		uint64(.config.MaxIncomingStreams),
		uint64(.config.MaxIncomingUniStreams),
		.perspective,
	)
	.framer = newFramer(.connFlowController)
	// Buffered channels of capacity 1: signals coalesce instead of blocking.
	.receivedPackets.Init(8)
	.notifyReceivedPacket = make(chan struct{}, 1)
	.closeChan = make(chan struct{}, 1)
	.sendingScheduled = make(chan struct{}, 1)
	.handshakeCompleteChan = make(chan struct{})

	 := time.Now()
	.lastPacketReceivedTime = 
	.creationTime = 

	.datagramQueue = newDatagramQueue(.scheduleSending, .logger)
	.connState.Version = .version
}

// run is the connection's main loop. It processes received packets, drives
// timers (loss detection, idle timeout, keep-alives, pacing), and triggers
// packet sending, until the connection is closed. It returns the error the
// connection was closed with.
func ( *connection) () ( error) {
	defer func() { .ctxCancel() }()

	defer func() {
		// drain queued packets that will never be processed
		.receivedPacketMx.Lock()
		defer .receivedPacketMx.Unlock()

		for !.receivedPackets.Empty() {
			 := .receivedPackets.PopFront()
			.buffer.Decrement()
			.buffer.MaybeRelease()
		}
	}()

	.timer = *newTimer()

	if  := .cryptoStreamHandler.StartHandshake(.ctx);  != nil {
		return 
	}
	if  := .handleHandshakeEvents(time.Now());  != nil {
		return 
	}
	// Run the send queue in its own goroutine; a send error destroys the
	// connection.
	go func() {
		if  := .sendQueue.Run();  != nil {
			.destroyImpl()
		}
	}()

	if .perspective == protocol.PerspectiveClient {
		.scheduleSending() // so the ClientHello actually gets sent
	}

	// non-nil while the send queue is busy; receiving on it means the send
	// queue can accept packets again.
	var  <-chan struct{}

:
	for {
		// DoS protection: too many queued control frames closes the connection.
		if .framer.QueuedTooManyControlFrames() {
			.setCloseError(&closeError{err: &qerr.TransportError{ErrorCode: InternalError}})
			break 
		}
		// Close immediately if requested
		select {
		case <-.closeChan:
			break 
		default:
		}

		// no need to set a timer if we can send packets immediately
		if .pacingDeadline != deadlineSendImmediately {
			.maybeResetTimer()
		}

		// 1st: handle undecryptable packets, if any.
		// This can only occur before completion of the handshake.
		if len(.undecryptablePacketsToProcess) > 0 {
			var  bool
			 := .undecryptablePacketsToProcess
			.undecryptablePacketsToProcess = nil
			for ,  := range  {
				,  := .handleOnePacket()
				if  != nil {
					.setCloseError(&closeError{err: })
					break 
				}
				if  {
					 = true
				}
			}
			if  {
				// if we processed any undecryptable packets, jump to the resetting of the timers directly
				continue
			}
		}

		// 2nd: receive packets.
		,  := .handlePackets() // don't check receivedPackets.Len() in the run loop to avoid locking the mutex
		if  != nil {
			.setCloseError(&closeError{err: })
			break 
		}

		// We don't need to wait for new events if:
		// * we processed packets: we probably need to send an ACK, and potentially more data
		// * the pacer allows us to send more packets immediately
		 :=  == nil && ( || .pacingDeadline.Equal(deadlineSendImmediately))
		if ! {
			// 3rd: wait for something to happen:
			// * closing of the connection
			// * timer firing
			// * sending scheduled
			// * send queue available
			// * received packets
			select {
			case <-.closeChan:
				break 
			case <-.timer.Chan():
				.timer.SetRead()
			case <-.sendingScheduled:
			case <-:
			case <-.notifyReceivedPacket:
				,  := .handlePackets()
				if  != nil {
					.setCloseError(&closeError{err: })
					break 
				}
				// if we processed any undecryptable packets, jump to the resetting of the timers directly
				if ! {
					continue
				}
			}
		}

		// Check for loss detection timeout.
		// This could cause packets to be declared lost, and retransmissions to be enqueued.
		 := time.Now()
		if  := .sentPacketHandler.GetLossDetectionTimeout(); !.IsZero() && .Before() {
			if  := .sentPacketHandler.OnLossDetectionTimeout();  != nil {
				.setCloseError(&closeError{err: })
				break 
			}
		}

		if  := .nextKeepAliveTime(); !.IsZero() && !.Before() {
			// send a PING frame since there is no activity in the connection
			.logger.Debugf("Sending a keep-alive PING to keep the connection alive.")
			.framer.QueueControlFrame(&wire.PingFrame{})
			.keepAlivePingSent = true
		} else if !.handshakeComplete && .Sub(.creationTime) >= .config.handshakeTimeout() {
			.destroyImpl(qerr.ErrHandshakeTimeout)
			break 
		} else {
			// Check handshake idle timeout (pre-handshake) and idle timeout
			// (post-handshake).
			 := .idleTimeoutStartTime()
			if (!.handshakeComplete && .Sub() >= .config.HandshakeIdleTimeout) ||
				(.handshakeComplete && .After(.nextIdleTimeoutTime())) {
				.destroyImpl(qerr.ErrIdleTimeout)
				break 
			}
		}

		.connIDGenerator.RemoveRetiredConnIDs()

		// In RFC 9000, only the client initiates path migration.
		if .perspective == protocol.PerspectiveClient {
			 := .pathManagerOutgoing.Load()
			if  != nil {
				,  := .ShouldSwitchPath()
				if  {
					.switchToNewPath(, )
				}
			}
		}

		if .sendQueue.WouldBlock() {
			// The send queue is still busy sending out packets. Wait until there's space to enqueue new packets.
			 = .sendQueue.Available()
			// Cancel the pacing timer, as we can't send any more packets until the send queue is available again.
			.pacingDeadline = time.Time{}
			continue
		}

		if .closeErr.Load() != nil {
			break 
		}

		if  := .triggerSending();  != nil {
			.setCloseError(&closeError{err: })
			break 
		}
		if .sendQueue.WouldBlock() {
			// The send queue is still busy sending out packets. Wait until there's space to enqueue new packets.
			 = .sendQueue.Available()
			// Cancel the pacing timer, as we can't send any more packets until the send queue is available again.
			.pacingDeadline = time.Time{}
		} else {
			 = nil
		}
	}

	// Teardown: close crypto and send queue, send the CONNECTION_CLOSE, and
	// notify the tracer (unless we're closing just to recreate the connection).
	 := .closeErr.Load()
	.cryptoStreamHandler.Close()
	.sendQueue.Close() // close the send queue before sending the CONNECTION_CLOSE
	.handleCloseError()
	if .tracer != nil && .tracer.Close != nil {
		if  := (&errCloseForRecreating{}); !errors.As(.err, &) {
			.tracer.Close()
		}
	}
	.logger.Infof("Connection %s closed.", .logID)
	.timer.Stop()
	return .err
}

// blocks until the early connection can be used
func ( *connection) () <-chan struct{} {
	return .earlyConnReadyChan
}

func ( *connection) () <-chan struct{} {
	return .handshakeCompleteChan
}

func ( *connection) () context.Context {
	return .ctx
}

func ( *connection) () bool {
	return .peerParams.MaxDatagramFrameSize > 0
}

func ( *connection) () ConnectionState {
	.connStateMutex.Lock()
	defer .connStateMutex.Unlock()
	 := .cryptoStreamHandler.ConnectionState()
	.connState.TLS = .ConnectionState
	.connState.Used0RTT = .Used0RTT
	.connState.GSO = .conn.capabilities().GSO
	return .connState
}

// Time when the connection should time out
func ( *connection) () time.Time {
	 := max(.idleTimeout, .rttStats.PTO(true)*3)
	return .idleTimeoutStartTime().Add()
}

// Time when the next keep-alive packet should be sent.
// It returns a zero time if no keep-alive should be sent.
func ( *connection) () time.Time {
	if .config.KeepAlivePeriod == 0 || .keepAlivePingSent {
		return time.Time{}
	}
	 := max(.keepAliveInterval, .rttStats.PTO(true)*3/2)
	return .lastPacketReceivedTime.Add()
}

func ( *connection) () {
	var  time.Time
	if !.handshakeComplete {
		 = .creationTime.Add(.config.handshakeTimeout())
		if  := .idleTimeoutStartTime().Add(.config.HandshakeIdleTimeout); .Before() {
			 = 
		}
	} else {
		if  := .nextKeepAliveTime(); !.IsZero() {
			 = 
		} else {
			 = .nextIdleTimeoutTime()
		}
	}

	.timer.SetTimer(
		,
		.connIDGenerator.NextRetireTime(),
		.receivedPacketHandler.GetAlarmTimeout(),
		.sentPacketHandler.GetLossDetectionTimeout(),
		.pacingDeadline,
	)
}

func ( *connection) () time.Time {
	 := .lastPacketReceivedTime
	if  := .firstAckElicitingPacketAfterIdleSentTime; .After() {
		 = 
	}
	return 
}

func ( *connection) ( *Transport,  time.Time) {
	 := protocol.ByteCount(.config.InitialPacketSize)
	.sentPacketHandler.MigratedPath(, )
	 := protocol.ByteCount(protocol.MaxPacketBufferSize)
	if .peerParams.MaxUDPPayloadSize > 0 && .peerParams.MaxUDPPayloadSize <  {
		 = .peerParams.MaxUDPPayloadSize
	}
	.mtuDiscoverer.Reset(, , )
	.conn = newSendConn(.conn, .conn.RemoteAddr(), packetInfo{}, utils.DefaultLogger) // TODO: find a better way
	.sendQueue.Close()
	.sendQueue = newSendQueue(.conn)
	go func() {
		if  := .sendQueue.Run();  != nil {
			.destroyImpl()
		}
	}()
}

// handleHandshakeComplete is called once the handshake completes. It closes
// the handshakeCompleteChan, drops queued undecryptable packets, and — on the
// server — confirms the handshake, queues the session ticket, and sends
// NEW_TOKEN and HANDSHAKE_DONE frames.
func ( *connection) ( time.Time) error {
	defer close(.handshakeCompleteChan)
	// Once the handshake completes, we have derived 1-RTT keys.
	// There's no point in queueing undecryptable packets for later decryption anymore.
	.undecryptablePackets = nil

	.connIDManager.SetHandshakeComplete()
	.connIDGenerator.SetHandshakeComplete(.Add(3 * .rttStats.PTO(false)))

	if .tracer != nil && .tracer.ChoseALPN != nil {
		.tracer.ChoseALPN(.cryptoStreamHandler.ConnectionState().NegotiatedProtocol)
	}

	// The server applies transport parameters right away, but the client side has to wait for handshake completion.
	// During a 0-RTT connection, the client is only allowed to use the new transport parameters for 1-RTT packets.
	if .perspective == protocol.PerspectiveClient {
		.applyTransportParameters()
		return nil
	}

	// All these only apply to the server side.
	if  := .handleHandshakeConfirmed();  != nil {
		return 
	}

	,  := .cryptoStreamHandler.GetSessionTicket()
	if  != nil {
		return 
	}
	if  != nil { // may be nil if session tickets are disabled via tls.Config.SessionTicketsDisabled
		// Queue the session ticket as post-handshake CRYPTO frames, split into
		// chunks of at most MaxPostHandshakeCryptoFrameSize.
		.oneRTTStream.Write()
		for .oneRTTStream.HasData() {
			if  := .oneRTTStream.PopCryptoFrame(protocol.MaxPostHandshakeCryptoFrameSize);  != nil {
				.queueControlFrame()
			}
		}
	}
	// Hand the client an address validation token for future connections.
	,  := .tokenGenerator.NewToken(.conn.RemoteAddr(), .rttStats.SmoothedRTT())
	if  != nil {
		return 
	}
	.queueControlFrame(&wire.NewTokenFrame{Token: })
	.queueControlFrame(&wire.HandshakeDoneFrame{})
	return nil
}

func ( *connection) ( time.Time) error {
	if  := .dropEncryptionLevel(protocol.EncryptionHandshake, );  != nil {
		return 
	}

	.handshakeConfirmed = true
	.cryptoStreamHandler.SetHandshakeConfirmed()

	if !.config.DisablePathMTUDiscovery && .conn.capabilities().DF {
		.mtuDiscoverer.Start()
	}
	return nil
}

// handlePackets drains the queue of received packets and processes them one
// by one. It processes at most as many packets as were queued when it was
// called, so the run loop eventually gets a chance to send ACKs; if packets
// remain afterwards, it re-signals notifyReceivedPacket. It reports whether
// any packet was successfully processed.
func ( *connection) () ( bool,  error) {
	// Now process all packets in the receivedPackets channel.
	// Limit the number of packets to the length of the receivedPackets channel,
	// so we eventually get a chance to send out an ACK when receiving a lot of packets.
	.receivedPacketMx.Lock()
	 := .receivedPackets.Len()
	if  == 0 {
		.receivedPacketMx.Unlock()
		return false, nil
	}

	var  bool
	for  := 0;  < ; ++ {
		// Re-acquire the lock on each iteration (it is released below so that
		// packet processing runs without holding the mutex).
		if  > 0 {
			.receivedPacketMx.Lock()
		}
		 := .receivedPackets.PopFront()
		 = !.receivedPackets.Empty()
		.receivedPacketMx.Unlock()

		,  := .handleOnePacket()
		if  != nil {
			return false, 
		}
		if  {
			 = true
		}
		if ! {
			break
		}
		// only process a single packet at a time before handshake completion
		if !.handshakeComplete {
			break
		}
	}
	// If packets remain queued, re-arm the notification so the run loop comes
	// back here (non-blocking send on a buffered channel).
	if  {
		select {
		case .notifyReceivedPacket <- struct{}{}:
		default:
		}
	}
	return , nil
}

// handleOnePacket processes a single received UDP datagram, which may contain
// multiple coalesced QUIC packets (RFC 9000, section 12.2). It splits the
// datagram into its packets, validates that all coalesced packets carry the
// same destination connection ID, and dispatches each to the long / short
// header handler. It reports whether any contained packet was processed.
func ( *connection) ( receivedPacket) ( bool,  error) {
	.sentPacketHandler.ReceivedBytes(.Size(), .rcvTime)

	if wire.IsVersionNegotiationPacket(.data) {
		.handleVersionNegotiationPacket()
		return false, nil
	}

	// Iterate over the coalesced packets in the datagram. The counter tracks
	// how many packets have been handled so far; the connection ID of the
	// first packet is remembered for the same-connection-ID check.
	var  uint8
	var  protocol.ConnectionID
	 := .data
	 := 
	for len() > 0 {
		if  > 0 {
			// For the second and subsequent coalesced packets, work on a clone
			// of the packet struct pointing at the remaining data.
			 = *(.Clone())
			.data = 

			,  := wire.ParseConnectionID(.data, .srcConnIDLen)
			if  != nil {
				if .tracer != nil && .tracer.DroppedPacket != nil {
					.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len()), logging.PacketDropHeaderParseError)
				}
				.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", )
				break
			}
			// All coalesced packets must carry the same destination connection
			// ID (RFC 9000, section 12.2); drop the rest otherwise.
			if  !=  {
				if .tracer != nil && .tracer.DroppedPacket != nil {
					.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len()), logging.PacketDropUnknownConnectionID)
				}
				.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", , )
				break
			}
		}

		if wire.IsLongHeaderPacket(.data[0]) {
			, , ,  := wire.ParsePacket(.data)
			if  != nil {
				if .tracer != nil && .tracer.DroppedPacket != nil {
					 := logging.PacketDropHeaderParseError
					if  == wire.ErrUnsupportedVersion {
						 = logging.PacketDropUnsupportedVersion
					}
					.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len()), )
				}
				.logger.Debugf("error parsing packet: %s", )
				break
			}
			 = .DestConnectionID

			if .Version != .version {
				if .tracer != nil && .tracer.DroppedPacket != nil {
					.tracer.DroppedPacket(logging.PacketTypeFromHeader(), protocol.InvalidPacketNumber, protocol.ByteCount(len()), logging.PacketDropUnexpectedVersion)
				}
				.logger.Debugf("Dropping packet with version %x. Expected %x.", .Version, .version)
				break
			}

			// Split the buffer's reference count so each coalesced packet can
			// release its share independently.
			if  > 0 {
				.buffer.Split()
			}
			++

			// only log if this actually a coalesced packet
			if .logger.Debug() && ( > 1 || len() > 0) {
				.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", , len(), len())
			}

			.data = 

			,  := .handleLongHeaderPacket(, )
			if  != nil {
				return false, 
			}
			if  {
				 = true
			}
			 = 
		} else {
			// A short header packet is always the last packet in a datagram.
			if  > 0 {
				.buffer.Split()
			}
			,  := .handleShortHeaderPacket(,  > 0)
			if  != nil {
				return false, 
			}
			if  {
				 = true
			}
			break
		}
	}

	.buffer.MaybeRelease()
	return , nil
}

// handleShortHeaderPacket processes a single 1-RTT (short header) packet:
// it parses the destination connection ID, unpacks (decrypts) the packet,
// detects stateless resets and duplicates, hands the payload to the frame
// handler, and — on the server — performs path validation / migration per
// RFC 9000, section 9. The bool parameter indicates whether this packet was
// coalesced with other packets in the same datagram.
func ( *connection) ( receivedPacket,  bool) ( bool,  error) {
	var  bool

	defer func() {
		// Put back the packet buffer if the packet wasn't queued for later decryption.
		if ! {
			.buffer.Decrement()
		}
	}()

	,  := wire.ParseConnectionID(.data, .srcConnIDLen)
	if  != nil {
		.tracer.DroppedPacket(logging.PacketType1RTT, protocol.InvalidPacketNumber, protocol.ByteCount(len(.data)), logging.PacketDropHeaderParseError)
		return false, nil
	}
	, , , ,  := .unpacker.UnpackShortHeader(.rcvTime, .data)
	if  != nil {
		// Stateless reset packets (see RFC 9000, section 10.3):
		// * fill the entire UDP datagram (i.e. they cannot be part of a coalesced packet)
		// * are short header packets (first bit is 0)
		// * have the QUIC bit set (second bit is 1)
		// * are at least 21 bytes long
		if ! && len(.data) >= protocol.MinReceivedStatelessResetSize && .data[0]&0b11000000 == 0b01000000 {
			// The last 16 bytes of a stateless reset are the reset token.
			 := protocol.StatelessResetToken(.data[len(.data)-16:])
			if .connIDManager.IsActiveStatelessResetToken() {
				return false, &StatelessResetError{}
			}
		}
		,  = .handleUnpackError(, , logging.PacketType1RTT)
		return false, 
	}
	// Track the highest application-data packet number; needed below for the
	// "highest-numbered non-probing packet" migration rule.
	.largestRcvdAppData = max(.largestRcvdAppData, )

	if .logger.Debug() {
		.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", , .Size(), )
		wire.LogShortHeader(.logger, , , , )
	}

	if .receivedPacketHandler.IsPotentiallyDuplicate(, protocol.Encryption1RTT) {
		.logger.Debugf("Dropping (potentially) duplicate packet.")
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketType1RTT, , .Size(), logging.PacketDropDuplicate)
		}
		return false, nil
	}

	// Build the frame-logging callback only if a tracer is attached.
	var  func([]logging.Frame)
	if .tracer != nil && .tracer.ReceivedShortHeaderPacket != nil {
		 = func( []logging.Frame) {
			.tracer.ReceivedShortHeaderPacket(
				&logging.ShortHeader{
					DestConnectionID: ,
					PacketNumber:     ,
					PacketNumberLen:  ,
					KeyPhase:         ,
				},
				.Size(),
				.ecn,
				,
			)
		}
	}
	, ,  := .handleUnpackedShortHeaderPacket(, , , .ecn, .rcvTime, )
	if  != nil {
		return false, 
	}

	// In RFC 9000, only the client can migrate between paths.
	if .perspective == protocol.PerspectiveClient {
		return true, nil
	}
	if addrsEqual(.remoteAddr, .RemoteAddr()) {
		return true, nil
	}

	// Server side: the packet arrived from a new remote address. Lazily create
	// the path manager and run path validation.
	var  bool
	if .pathManager == nil {
		.pathManager = newPathManager(
			.connIDManager.GetConnIDForPath,
			.connIDManager.RetireConnIDForPath,
			.logger,
		)
	}
	, ,  := .pathManager.HandlePacket(.remoteAddr, .rcvTime, , )
	if len() > 0 {
		// Send a PATH_CHALLENGE / PATH_RESPONSE probe on the new path.
		, ,  := .packer.PackPathProbePacket(, , .version)
		if  != nil {
			return true, 
		}
		.logger.Debugf("sending path probe packet to %s", .remoteAddr)
		.logShortHeaderPacket(.DestConnID, .Ack, .Frames, .StreamFrames, .PacketNumber, .PacketNumberLen, .KeyPhase, protocol.ECNNon, .Len(), false)
		.registerPackedShortHeaderPacket(, protocol.ECNNon, .rcvTime)
		.sendQueue.SendProbe(, .remoteAddr)
	}
	// We only switch paths in response to the highest-numbered non-probing packet,
	// see section 9.3 of RFC 9000.
	if ! ||  != .largestRcvdAppData {
		return true, nil
	}
	// Migrate: switch to the new path and reset congestion / MTU discovery
	// state, capping the MTU at the peer's max_udp_payload_size.
	.pathManager.SwitchToPath(.remoteAddr)
	.sentPacketHandler.MigratedPath(.rcvTime, protocol.ByteCount(.config.InitialPacketSize))
	 := protocol.ByteCount(protocol.MaxPacketBufferSize)
	if .peerParams.MaxUDPPayloadSize > 0 && .peerParams.MaxUDPPayloadSize <  {
		 = .peerParams.MaxUDPPayloadSize
	}
	.mtuDiscoverer.Reset(
		.rcvTime,
		protocol.ByteCount(.config.InitialPacketSize),
		,
	)
	.conn.ChangeRemoteAddr(.remoteAddr, .info)
	return true, nil
}

// Handles a single long-header packet (Initial, Handshake, 0-RTT, or Retry).
// Returns whether the packet was processed. The packet buffer is released on
// return unless the packet was queued for later decryption.
// NOTE(review): identifier names are elided in this listing; behavior is
// described from the visible calls.
func ( *connection) ( receivedPacket,  *wire.Header) ( bool,  error) {
	var  bool

	defer func() {
		// Put back the packet buffer if the packet wasn't queued for later decryption.
		if ! {
			.buffer.Decrement()
		}
	}()

	// Retry packets are handled separately (they are not protected like other packets).
	if .Type == protocol.PacketTypeRetry {
		return .handleRetryPacket(, .data, .rcvTime), nil
	}

	// The server can change the source connection ID with the first Handshake packet.
	// After this, all packets with a different source connection have to be ignored.
	if .receivedFirstPacket && .Type == protocol.PacketTypeInitial && .SrcConnectionID != .handshakeDestConnID {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeInitial, protocol.InvalidPacketNumber, .Size(), logging.PacketDropUnknownConnectionID)
		}
		.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", .Size(), .SrcConnectionID, .handshakeDestConnID)
		return false, nil
	}
	// drop 0-RTT packets, if we are a client
	if .perspective == protocol.PerspectiveClient && .Type == protocol.PacketType0RTT {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, .Size(), logging.PacketDropUnexpectedPacket)
		}
		return false, nil
	}

	// Decrypt the packet. Unpack errors may queue the packet for later decryption
	// (keys not yet available) — see the error handler below.
	,  := .unpacker.UnpackLongHeader(, .data)
	if  != nil {
		,  = .handleUnpackError(, , logging.PacketTypeFromHeader())
		return false, 
	}

	if .logger.Debug() {
		.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", .hdr.PacketNumber, .Size(), .DestConnectionID, .encryptionLevel)
		.hdr.Log(.logger)
	}

	// Drop duplicates (same packet number at the same encryption level).
	if  := .hdr.PacketNumber; .receivedPacketHandler.IsPotentiallyDuplicate(, .encryptionLevel) {
		.logger.Debugf("Dropping (potentially) duplicate packet.")
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeFromHeader(), , .Size(), logging.PacketDropDuplicate)
		}
		return false, nil
	}

	if  := .handleUnpackedLongHeaderPacket(, .ecn, .rcvTime, .Size());  != nil {
		return false, 
	}
	return true, nil
}

// Classifies an error returned while unpacking a packet.
// Returns (wasQueued, error): wasQueued is true only when the packet was queued
// for later decryption (keys not yet available). A non-nil error terminates the
// connection (e.g. invalid reserved bits, AEAD-level protocol violations).
func ( *connection) ( error,  receivedPacket,  logging.PacketType) ( bool,  error) {
	switch  {
	case handshake.ErrKeysDropped:
		// Keys for this level were already dropped; the packet can never be decrypted.
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(, protocol.InvalidPacketNumber, .Size(), logging.PacketDropKeyUnavailable)
		}
		.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", , .Size())
		return false, nil
	case handshake.ErrKeysNotYetAvailable:
		// Sealer for this encryption level not yet available.
		// Try again later.
		.tryQueueingUndecryptablePacket(, )
		return true, nil
	case wire.ErrInvalidReservedBits:
		// Reserved bits must be zero after header protection removal (RFC 9000, Section 17.2).
		return false, &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: .Error(),
		}
	case handshake.ErrDecryptionFailed:
		// This might be a packet injected by an attacker. Drop it.
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(, protocol.InvalidPacketNumber, .Size(), logging.PacketDropPayloadDecryptError)
		}
		.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", , .Size(), )
		return false, nil
	default:
		var  *headerParseError
		if errors.As(, &) {
			// This might be a packet injected by an attacker. Drop it.
			if .tracer != nil && .tracer.DroppedPacket != nil {
				.tracer.DroppedPacket(, protocol.InvalidPacketNumber, .Size(), logging.PacketDropHeaderParseError)
			}
			.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", , .Size(), )
			return false, nil
		}
		// This is an error returned by the AEAD (other than ErrDecryptionFailed).
		// For example, a PROTOCOL_VIOLATION due to key updates.
		return false, 
	}
}

// Validates and applies a Retry packet (client-side only).
// A Retry is only accepted before any other packet was received, at most once,
// only if the server actually changed its source connection ID, and only if the
// Retry integrity tag (last 16 bytes of the datagram) verifies.
// On success, the destination connection ID, token and loss-recovery state are reset.
func ( *connection) ( *wire.Header,  []byte,  time.Time) bool /* was this a valid Retry */ {
	// Servers must never receive Retry packets.
	if .perspective == protocol.PerspectiveServer {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len()), logging.PacketDropUnexpectedPacket)
		}
		.logger.Debugf("Ignoring Retry.")
		return false
	}
	if .receivedFirstPacket {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len()), logging.PacketDropUnexpectedPacket)
		}
		.logger.Debugf("Ignoring Retry, since we already received a packet.")
		return false
	}
	// A Retry must carry a new source connection ID (RFC 9000, Section 17.2.5.2).
	 := .connIDManager.Get()
	if .SrcConnectionID ==  {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len()), logging.PacketDropUnexpectedPacket)
		}
		.logger.Debugf("Ignoring Retry, since the server didn't change the Source Connection ID.")
		return false
	}
	// If a token is already set, this means that we already received a Retry from the server.
	// Ignore this Retry packet.
	if .receivedRetry {
		.logger.Debugf("Ignoring Retry, since a Retry was already received.")
		return false
	}

	// Verify the Retry integrity tag over the pseudo-packet (RFC 9001, Section 5.8).
	 := handshake.GetRetryIntegrityTag([:len()-16], , .Version)
	if !bytes.Equal([len()-16:], [:]) {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len()), logging.PacketDropPayloadDecryptError)
		}
		.logger.Debugf("Ignoring spoofed Retry. Integrity Tag doesn't match.")
		return false
	}

	// Accept the Retry: switch to the new connection ID, store the token,
	// and reset loss recovery so the Initial is resent.
	 := .SrcConnectionID
	.receivedRetry = true
	.sentPacketHandler.ResetForRetry()
	.handshakeDestConnID = 
	.retrySrcConnID = &
	.cryptoStreamHandler.ChangeConnectionID()
	.packer.SetToken(.Token)
	.connIDManager.ChangeInitialConnID()

	if .logger.Debug() {
		.logger.Debugf("<- Received Retry:")
		(&wire.ExtendedHeader{Header: *}).Log(.logger)
		.logger.Debugf("Switching destination connection ID to: %s", .SrcConnectionID)
	}
	if .tracer != nil && .tracer.ReceivedRetry != nil {
		.tracer.ReceivedRetry()
	}

	.scheduleSending()
	return true
}

// Handles a Version Negotiation packet (client-side only).
// A VN packet is ignored if we're a server, if any packet was already received,
// or if it lists the version we offered (which marks it as spoofed/corrupted).
// Otherwise the connection is destroyed, either with a VersionNegotiationError
// (no common version) or with errCloseForRecreating to restart with a new version.
func ( *connection) ( receivedPacket) {
	if .perspective == protocol.PerspectiveServer || // servers never receive version negotiation packets
		.receivedFirstPacket || .versionNegotiated { // ignore delayed / duplicated version negotiation packets
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, .Size(), logging.PacketDropUnexpectedPacket)
		}
		return
	}

	, , ,  := wire.ParseVersionNegotiationPacket(.data)
	if  != nil {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, .Size(), logging.PacketDropHeaderParseError)
		}
		.logger.Debugf("Error parsing Version Negotiation packet: %s", )
		return
	}

	if slices.Contains(, .version) {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, .Size(), logging.PacketDropUnexpectedVersion)
		}
		// The Version Negotiation packet contains the version that we offered.
		// This might be a packet sent by an attacker, or it was corrupted.
		return
	}

	.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", )
	if .tracer != nil && .tracer.ReceivedVersionNegotiationPacket != nil {
		.tracer.ReceivedVersionNegotiationPacket(, , )
	}
	,  := protocol.ChooseSupportedVersion(.config.Versions, )
	if ! {
		// No overlap between our versions and the server's: fail the handshake.
		.destroyImpl(&VersionNegotiationError{
			Ours:   .config.Versions,
			Theirs: ,
		})
		.logger.Infof("No compatible QUIC version found.")
		return
	}
	if .tracer != nil && .tracer.NegotiatedVersion != nil {
		.tracer.NegotiatedVersion(, .config.Versions, )
	}

	// Tear down this connection and recreate it with the chosen version,
	// continuing the Initial packet number sequence.
	.logger.Infof("Switching to QUIC version %s.", )
	,  := .sentPacketHandler.PeekPacketNumber(protocol.EncryptionInitial)
	.destroyImpl(&errCloseForRecreating{
		nextPacketNumber: ,
		nextVersion:      ,
	})
}

// Processes a successfully decrypted long-header packet: performs first-packet
// bookkeeping (version logging, connection ID updates), drops Initial keys when
// a server receives its first Handshake packet, resets idle-timeout state,
// handles the contained frames, and records receipt with the ACK machinery.
func ( *connection) (
	 *unpackedPacket,
	 protocol.ECN,
	 time.Time,
	 protocol.ByteCount, // only for logging
) error {
	if !.receivedFirstPacket {
		.receivedFirstPacket = true
		// Log the negotiated version, unless a Version Negotiation already did.
		if !.versionNegotiated && .tracer != nil && .tracer.NegotiatedVersion != nil {
			var ,  []protocol.Version
			switch .perspective {
			case protocol.PerspectiveClient:
				 = .config.Versions
			case protocol.PerspectiveServer:
				 = .config.Versions
			}
			.tracer.NegotiatedVersion(.version, , )
		}
		// The server can change the source connection ID with the first Handshake packet.
		if .perspective == protocol.PerspectiveClient && .hdr.SrcConnectionID != .handshakeDestConnID {
			 := .hdr.SrcConnectionID
			.logger.Debugf("Received first packet. Switching destination connection ID to: %s", )
			.handshakeDestConnID = 
			.connIDManager.ChangeInitialConnID()
		}
		// We create the connection as soon as we receive the first packet from the client.
		// We do that before authenticating the packet.
		// That means that if the source connection ID was corrupted,
		// we might have created a connection with an incorrect source connection ID.
		// Once we authenticate the first packet, we need to update it.
		if .perspective == protocol.PerspectiveServer {
			if .hdr.SrcConnectionID != .handshakeDestConnID {
				.handshakeDestConnID = .hdr.SrcConnectionID
				.connIDManager.ChangeInitialConnID(.hdr.SrcConnectionID)
			}
			if .tracer != nil && .tracer.StartedConnection != nil {
				.tracer.StartedConnection(
					.conn.LocalAddr(),
					.conn.RemoteAddr(),
					.hdr.SrcConnectionID,
					.hdr.DestConnectionID,
				)
			}
		}
	}

	if .perspective == protocol.PerspectiveServer && .encryptionLevel == protocol.EncryptionHandshake &&
		!.droppedInitialKeys {
		// On the server side, Initial keys are dropped as soon as the first Handshake packet is received.
		// See Section 4.9.1 of RFC 9001.
		if  := .dropEncryptionLevel(protocol.EncryptionInitial, );  != nil {
			return 
		}
	}

	// Receiving a packet resets the idle timeout and keep-alive state.
	.lastPacketReceivedTime = 
	.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
	.keepAlivePingSent = false

	// 0-RTT packets share the application-data packet number space with 1-RTT.
	if .hdr.Type == protocol.PacketType0RTT {
		.largestRcvdAppData = max(.largestRcvdAppData, .hdr.PacketNumber)
	}

	// Only build the frame-logging callback if a tracer is attached.
	var  func([]logging.Frame)
	if .tracer != nil && .tracer.ReceivedLongHeaderPacket != nil {
		 = func( []logging.Frame) {
			.tracer.ReceivedLongHeaderPacket(.hdr, , , )
		}
	}
	, , ,  := .handleFrames(.data, .hdr.DestConnectionID, .encryptionLevel, , )
	if  != nil {
		return 
	}
	return .receivedPacketHandler.ReceivedPacket(.hdr.PacketNumber, , .encryptionLevel, , )
}

// Processes a successfully decrypted 1-RTT (short-header) packet: resets
// idle-timeout state, handles the contained frames, and records receipt with
// the ACK machinery. Returns whether the packet contained a non-probing frame
// (relevant for path migration) and the last PATH_CHALLENGE frame, if any.
func ( *connection) (
	 protocol.ConnectionID,
	 protocol.PacketNumber,
	 []byte,
	 protocol.ECN,
	 time.Time,
	 func([]logging.Frame),
) ( bool,  *wire.PathChallengeFrame,  error) {
	// Receiving a packet resets the idle timeout and keep-alive state.
	.lastPacketReceivedTime = 
	.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
	.keepAlivePingSent = false

	, , ,  := .handleFrames(, , protocol.Encryption1RTT, , )
	if  != nil {
		return false, nil, 
	}
	if  := .receivedPacketHandler.ReceivedPacket(, , protocol.Encryption1RTT, , );  != nil {
		return false, nil, 
	}
	return , , nil
}

// handleFrames parses the frames, one after the other, and handles them.
// It returns the last PATH_CHALLENGE frame contained in the packet, if any.
// It also reports whether any frame was ack-eliciting and whether any frame
// was non-probing (both feed into ACK scheduling and path migration).
// If a tracer is attached, parsing continues past the first handling error so
// that all frames in the packet can be logged; the first error is returned.
func ( *connection) (
	 []byte,
	 protocol.ConnectionID,
	 protocol.EncryptionLevel,
	 func([]logging.Frame),
	 time.Time,
) (,  bool,  *wire.PathChallengeFrame,  error) {
	// Only used for tracing.
	// If we're not tracing, this slice will always remain empty.
	var  []logging.Frame
	if  != nil {
		 = make([]logging.Frame, 0, 4)
	}
	// Snapshot the handshake state so we can detect completion caused by this packet.
	 := .handshakeComplete
	var  error
	for len() > 0 {
		, ,  := .frameParser.ParseNext(, , .version)
		if  != nil {
			return false, false, nil, 
		}
		 = [:]
		if  == nil {
			// PADDING (or end of payload): nothing more to parse.
			break
		}
		if ackhandler.IsFrameAckEliciting() {
			 = true
		}
		if !wire.IsProbingFrame() {
			 = true
		}
		if  != nil {
			 = append(, toLoggingFrame())
		}
		// An error occurred handling a previous frame.
		// Don't handle the current frame.
		if  != nil {
			continue
		}
		,  := .handleFrame(, , , )
		if  != nil {
			if  == nil {
				return false, false, nil, 
			}
			// If we're logging, we need to keep parsing (but not handling) all frames.
			 = 
		}
		if  != nil {
			 = 
		}
	}

	if  != nil {
		// Emit the collected frames to the tracer, then surface any deferred error.
		()
		if  != nil {
			return false, false, nil, 
		}
	}

	// Handle completion of the handshake after processing all the frames.
	// This ensures that we correctly handle the following case on the server side:
	// We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake,
	// and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame.
	if ! && .handshakeComplete {
		if  := .handleHandshakeComplete();  != nil {
			return false, false, nil, 
		}
	}
	return
}

// Dispatches a single parsed frame to its type-specific handler.
// Returns the frame itself if it is a PATH_CHALLENGE (so the caller can track
// the last one in the packet). DATA_BLOCKED, STREAMS_BLOCKED and PING need no
// action and fall through with empty cases.
func ( *connection) (
	 wire.Frame,
	 protocol.EncryptionLevel,
	 protocol.ConnectionID,
	 time.Time,
) ( *wire.PathChallengeFrame,  error) {
	var  error
	wire.LogFrame(.logger, , false)
	switch frame := .(type) {
	case *wire.CryptoFrame:
		 = .handleCryptoFrame(, , )
	case *wire.StreamFrame:
		 = .handleStreamFrame(, )
	case *wire.AckFrame:
		 = .handleAckFrame(, , )
	case *wire.ConnectionCloseFrame:
		 = .handleConnectionCloseFrame()
	case *wire.ResetStreamFrame:
		 = .handleResetStreamFrame(, )
	case *wire.MaxDataFrame:
		.handleMaxDataFrame()
	case *wire.MaxStreamDataFrame:
		 = .handleMaxStreamDataFrame()
	case *wire.MaxStreamsFrame:
		.handleMaxStreamsFrame()
	case *wire.DataBlockedFrame:
	case *wire.StreamDataBlockedFrame:
		 = .handleStreamDataBlockedFrame()
	case *wire.StreamsBlockedFrame:
	case *wire.StopSendingFrame:
		 = .handleStopSendingFrame()
	case *wire.PingFrame:
	case *wire.PathChallengeFrame:
		.handlePathChallengeFrame()
		 = 
	case *wire.PathResponseFrame:
		 = .handlePathResponseFrame()
	case *wire.NewTokenFrame:
		 = .handleNewTokenFrame()
	case *wire.NewConnectionIDFrame:
		 = .handleNewConnectionIDFrame()
	case *wire.RetireConnectionIDFrame:
		 = .handleRetireConnectionIDFrame(, , )
	case *wire.HandshakeDoneFrame:
		 = .handleHandshakeDoneFrame()
	case *wire.DatagramFrame:
		 = .handleDatagramFrame()
	default:
		// Unknown frame types should have been rejected by the parser already.
		 = fmt.Errorf("unexpected frame type: %s", reflect.ValueOf(&).Elem().Type().Name())
	}
	return , 
}

// handlePacket is called by the server with a new packet.
// It enqueues the packet for the connection's run loop and signals it via a
// non-blocking channel send; packets beyond the queue limit are dropped
// (DoS prevention).
func ( *connection) ( receivedPacket) {
	.receivedPacketMx.Lock()
	// Discard packets once the amount of queued packets is larger than
	// the channel size, protocol.MaxConnUnprocessedPackets
	if .receivedPackets.Len() >= protocol.MaxConnUnprocessedPackets {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, .Size(), logging.PacketDropDOSPrevention)
		}
		.receivedPacketMx.Unlock()
		return
	}
	.receivedPackets.PushBack()
	.receivedPacketMx.Unlock()

	// Wake the run loop; the default case makes this non-blocking when a
	// notification is already pending.
	select {
	case .notifyReceivedPacket <- struct{}{}:
	default:
	}
}

// Converts a received CONNECTION_CLOSE frame into the corresponding error type
// (application-level vs. transport-level), marked as remote. The returned error
// propagates up and terminates the connection.
func ( *connection) ( *wire.ConnectionCloseFrame) error {
	if .IsApplicationError {
		return &qerr.ApplicationError{
			Remote:       true,
			ErrorCode:    qerr.ApplicationErrorCode(.ErrorCode),
			ErrorMessage: .ReasonPhrase,
		}
	}
	return &qerr.TransportError{
		Remote:       true,
		ErrorCode:    qerr.TransportErrorCode(.ErrorCode),
		FrameType:    .FrameType,
		ErrorMessage: .ReasonPhrase,
	}
}

// Handles a CRYPTO frame: buffers/reassembles the data in the crypto stream
// manager, feeds any contiguous data to the TLS stack, then processes the
// handshake events that resulted.
func ( *connection) ( *wire.CryptoFrame,  protocol.EncryptionLevel,  time.Time) error {
	if  := .cryptoStreamManager.HandleCryptoFrame(, );  != nil {
		return 
	}
	// Drain all contiguous crypto data that is now available at this level.
	for {
		 := .cryptoStreamManager.GetCryptoData()
		if  == nil {
			break
		}
		if  := .cryptoStreamHandler.HandleMessage(, );  != nil {
			return 
		}
	}
	return .handleHandshakeEvents()
}

// Drains and processes events from the crypto stream handler (TLS stack) until
// EventNoEvent: handshake completion, transport parameters, key availability,
// 0-RTT key discard, and outgoing Initial/Handshake crypto data.
func ( *connection) ( time.Time) error {
	for {
		 := .cryptoStreamHandler.NextEvent()
		var  error
		switch .Kind {
		case handshake.EventNoEvent:
			return nil
		case handshake.EventHandshakeComplete:
			// Don't call handleHandshakeComplete yet.
			// It's advantageous to process ACK frames that might be serialized after the CRYPTO frame first.
			.handshakeComplete = true
		case handshake.EventReceivedTransportParameters:
			 = .handleTransportParameters(.TransportParameters)
		case handshake.EventRestoredTransportParameters:
			// 0-RTT resumption: restore the parameters saved from the previous connection.
			.restoreTransportParameters(.TransportParameters)
			close(.earlyConnReadyChan)
		case handshake.EventReceivedReadKeys:
			// queue all previously undecryptable packets
			.undecryptablePacketsToProcess = append(.undecryptablePacketsToProcess, .undecryptablePackets...)
			.undecryptablePackets = nil
		case handshake.EventDiscard0RTTKeys:
			 = .dropEncryptionLevel(protocol.Encryption0RTT, )
		case handshake.EventWriteInitialData:
			_,  = .initialStream.Write(.Data)
		case handshake.EventWriteHandshakeData:
			_,  = .handshakeStream.Write(.Data)
		}
		if  != nil {
			return 
		}
	}
}

// Handles a STREAM frame by delegating to the receive stream it belongs to.
// A nil stream (already closed and garbage collected) is silently ignored.
func ( *connection) ( *wire.StreamFrame,  time.Time) error {
	,  := .streamsMap.GetOrOpenReceiveStream(.StreamID)
	if  != nil {
		return 
	}
	if  == nil { // stream was already closed and garbage collected
		return nil
	}
	return .handleStreamFrame(, )
}

// Handles a MAX_DATA frame by raising the connection-level send window.
func ( *connection) ( *wire.MaxDataFrame) {
	.connFlowController.UpdateSendWindow(.MaximumData)
}

// Handles a MAX_STREAM_DATA frame by raising the send window of the affected
// send stream. A nil stream (already closed and garbage collected) is ignored.
func ( *connection) ( *wire.MaxStreamDataFrame) error {
	,  := .streamsMap.GetOrOpenSendStream(.StreamID)
	if  != nil {
		return 
	}
	if  == nil {
		// stream is closed and already garbage collected
		return nil
	}
	.updateSendWindow(.MaximumStreamData)
	return nil
}

// Handles a STREAM_DATA_BLOCKED frame.
func ( *connection) ( *wire.StreamDataBlockedFrame) error {
	// We don't need to do anything in response to a STREAM_DATA_BLOCKED frame,
	// but we need to make sure that the stream ID is valid.
	,  := .streamsMap.GetOrOpenReceiveStream(.StreamID)
	return 
}

// Handles a MAX_STREAMS frame by delegating to the streams map, which raises
// the number of streams we're allowed to open.
func ( *connection) ( *wire.MaxStreamsFrame) {
	.streamsMap.HandleMaxStreamsFrame()
}

// Handles a RESET_STREAM frame by delegating to the affected receive stream.
// A nil stream (already closed and garbage collected) is silently ignored.
func ( *connection) ( *wire.ResetStreamFrame,  time.Time) error {
	,  := .streamsMap.GetOrOpenReceiveStream(.StreamID)
	if  != nil {
		return 
	}
	if  == nil {
		// stream is closed and already garbage collected
		return nil
	}
	return .handleResetStreamFrame(, )
}

// Handles a STOP_SENDING frame by delegating to the affected send stream.
// A nil stream (already closed and garbage collected) is silently ignored.
func ( *connection) ( *wire.StopSendingFrame) error {
	,  := .streamsMap.GetOrOpenSendStream(.StreamID)
	if  != nil {
		return 
	}
	if  == nil {
		// stream is closed and already garbage collected
		return nil
	}
	.handleStopSendingFrame()
	return nil
}

// Handles a PATH_CHALLENGE frame by queueing a PATH_RESPONSE echoing the same
// 8-byte payload. Only done on the client side here; the server path-validation
// response is handled elsewhere (via the path manager).
func ( *connection) ( *wire.PathChallengeFrame) {
	if .perspective == protocol.PerspectiveClient {
		.queueControlFrame(&wire.PathResponseFrame{Data: .Data})
	}
}

// Dispatches a PATH_RESPONSE frame to the perspective-specific handler,
// since clients and servers use different path managers.
func ( *connection) ( *wire.PathResponseFrame) error {
	switch .perspective {
	case protocol.PerspectiveClient:
		return .handlePathResponseFrameClient()
	case protocol.PerspectiveServer:
		return .handlePathResponseFrameServer()
	default:
		panic("unreachable")
	}
}

// Handles a PATH_RESPONSE when we probe paths via the outgoing path manager
// (presumably the client side, given the pathManagerOutgoing field — confirm
// against the dispatcher above). Receiving one without any outstanding
// PATH_CHALLENGE is a protocol violation.
func ( *connection) ( *wire.PathResponseFrame) error {
	 := .pathManagerOutgoing.Load()
	if  == nil {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "unexpected PATH_RESPONSE frame",
		}
	}
	.HandlePathResponseFrame()
	return nil
}

// Handles a PATH_RESPONSE using the (lazily created) path manager that probes
// new remote paths. If no path manager exists, we never sent a PATH_CHALLENGE,
// so receiving a response is a protocol violation.
func ( *connection) ( *wire.PathResponseFrame) error {
	if .pathManager == nil {
		// since we didn't send PATH_CHALLENGEs yet, we don't expect PATH_RESPONSEs
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "unexpected PATH_RESPONSE frame",
		}
	}
	.pathManager.HandlePathResponseFrame()
	return nil
}

// Handles a NEW_TOKEN frame. Only servers send NEW_TOKEN (RFC 9000, Section
// 19.7); receiving one as a server is a protocol violation. The token is
// stored for future connections if a TokenStore is configured.
func ( *connection) ( *wire.NewTokenFrame) error {
	if .perspective == protocol.PerspectiveServer {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "received NEW_TOKEN frame from the client",
		}
	}
	if .config.TokenStore != nil {
		.config.TokenStore.Put(.tokenStoreKey, &ClientToken{data: .Token, rtt: .rttStats.SmoothedRTT()})
	}
	return nil
}

// Handles a NEW_CONNECTION_ID frame by adding the new connection ID to the
// connection ID manager.
func ( *connection) ( *wire.NewConnectionIDFrame) error {
	return .connIDManager.Add()
}

// Handles a RETIRE_CONNECTION_ID frame by retiring the connection ID with the
// given sequence number; the ID is kept routable for 3 PTOs to absorb
// reordered packets still using it.
func ( *connection) ( time.Time,  *wire.RetireConnectionIDFrame,  protocol.ConnectionID) error {
	return .connIDGenerator.Retire(.SequenceNumber, , .Add(3*.rttStats.PTO(false)))
}

// Handles a HANDSHAKE_DONE frame. Only servers send HANDSHAKE_DONE (RFC 9000,
// Section 19.20); receiving one as a server is a protocol violation. For a
// client this confirms the handshake, if not already confirmed.
func ( *connection) ( time.Time) error {
	if .perspective == protocol.PerspectiveServer {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "received a HANDSHAKE_DONE frame",
		}
	}
	if !.handshakeConfirmed {
		return .handleHandshakeConfirmed()
	}
	return nil
}

// Handles an ACK frame: feeds it to loss recovery, possibly confirms the
// handshake (client side), updates the MTU estimate if an MTU probe was
// acknowledged, and informs the TLS stack of the largest acked 1-RTT packet
// (needed for key update bookkeeping).
func ( *connection) ( *wire.AckFrame,  protocol.EncryptionLevel,  time.Time) error {
	// The second result presumably reports whether the ACK newly acknowledged
	// 1-RTT packets — TODO confirm against the ackhandler API.
	,  := .sentPacketHandler.ReceivedAck(, , .lastPacketReceivedTime)
	if  != nil {
		return 
	}
	if ! {
		return nil
	}
	// On the client side: If the packet acknowledged a 1-RTT packet, this confirms the handshake.
	// This is only possible if the ACK was sent in a 1-RTT packet.
	// This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001.
	if .perspective == protocol.PerspectiveClient && !.handshakeConfirmed {
		if  := .handleHandshakeConfirmed();  != nil {
			return 
		}
	}
	// If one of the acknowledged packets was a Path MTU probe packet, this might have increased the Path MTU estimate.
	if .mtuDiscoverer != nil {
		if  := .mtuDiscoverer.CurrentSize();  > protocol.ByteCount(.currentMTUEstimate.Load()) {
			.currentMTUEstimate.Store(uint32())
			.sentPacketHandler.SetMaxDatagramSize()
		}
	}
	return .cryptoStreamHandler.SetLargest1RTTAcked(.LargestAcked())
}

// Handles a DATAGRAM frame: rejects frames exceeding the maximum datagram
// size as a protocol violation, otherwise queues the datagram for the
// application to receive.
func ( *connection) ( *wire.DatagramFrame) error {
	if .Length(.version) > wire.MaxDatagramSize {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "DATAGRAM frame too large",
		}
	}
	.datagramQueue.HandleDatagramFrame()
	return nil
}

// Records the close error (only the first one wins, via CompareAndSwap) and
// wakes the run loop with a non-blocking send on the close channel.
func ( *connection) ( *closeError) {
	.closeErr.CompareAndSwap(nil, )
	select {
	case .closeChan <- struct{}{}:
	default:
	}
}

// closeLocal closes the connection and sends a CONNECTION_CLOSE containing the error.
func ( *connection) ( error) {
	.setCloseError(&closeError{err: , immediate: false})
}

// destroy closes the connection without sending the error on the wire,
// and blocks until the connection's context is done (run loop has exited).
func ( *connection) ( error) {
	.destroyImpl()
	<-.ctx.Done()
}

// Non-blocking variant: records an immediate close (no CONNECTION_CLOSE will
// be sent for it) and wakes the run loop.
func ( *connection) ( error) {
	.setCloseError(&closeError{err: , immediate: true})
}

// Closes the connection with an application-level error code and reason
// phrase, blocking until the connection's context is done.
func ( *connection) ( ApplicationErrorCode,  string) error {
	.closeLocal(&qerr.ApplicationError{
		ErrorCode:    ,
		ErrorMessage: ,
	})
	<-.ctx.Done()
	return nil
}

// Closes the connection with a transport-level error code, blocking until the
// connection's context is done.
func ( *connection) ( TransportErrorCode) {
	.closeLocal(&qerr.TransportError{ErrorCode: })
	<-.ctx.Done()
}

// Performs connection teardown for a close error: logs it, classifies it
// (remote vs. local, timeout, stateless reset, version negotiation,
// recreation), closes streams and the datagram queue, notifies the tracer,
// and — for a local, non-immediate close after the first packet was sent —
// sends a CONNECTION_CLOSE and installs a closed-connection placeholder that
// retransmits it for 3 PTOs.
func ( *connection) ( *closeError) {
	if .immediate {
		if ,  := .err.(net.Error);  && .Timeout() {
			.logger.Errorf("Destroying connection: %s", .err)
		} else {
			.logger.Errorf("Destroying connection with error: %s", .err)
		}
	} else {
		if .err == nil {
			.logger.Infof("Closing connection.")
		} else {
			.logger.Errorf("Closing connection with error: %s", .err)
		}
	}

	// A nil error is treated as a graceful application close; the deferred
	// restore keeps the original closeError untouched for the caller.
	 := .err
	if  == nil {
		 = &qerr.ApplicationError{}
	} else {
		defer func() { .err =  }()
	}

	var (
		     *StatelessResetError
		 *VersionNegotiationError
		           *errCloseForRecreating
		        *ApplicationError
		          *TransportError
	)
	// Classify the error to decide whether the peer initiated the close and
	// what error value is surfaced to the application.
	var  bool
	switch {
	case errors.Is(, qerr.ErrIdleTimeout),
		errors.Is(, qerr.ErrHandshakeTimeout),
		errors.As(, &),
		errors.As(, &),
		errors.As(, &):
	case errors.As(, &):
		 = .Remote
	case errors.As(, &):
		 = .Remote
	case .immediate:
		 = .err
	default:
		// Unrecognized local errors are surfaced as INTERNAL_ERROR.
		 = &qerr.TransportError{
			ErrorCode:    qerr.InternalError,
			ErrorMessage: .Error(),
		}
	}

	.streamsMap.CloseWithError()
	if .datagramQueue != nil {
		.datagramQueue.CloseWithError()
	}

	// In rare instances, the connection ID manager might switch to a new connection ID
	// when sending the CONNECTION_CLOSE frame.
	// The connection ID manager removes the active stateless reset token from the packet
	// handler map when it is closed, so we need to make sure that this happens last.
	defer .connIDManager.Close()

	// Don't report a ClosedConnection to the tracer when the close is for recreating
	// the connection (version negotiation).
	if .tracer != nil && .tracer.ClosedConnection != nil && !errors.As(, &) {
		.tracer.ClosedConnection()
	}

	// If this is a remote close we're done here
	if  {
		.connIDGenerator.ReplaceWithClosed(nil, 3*.rttStats.PTO(false))
		return
	}
	if .immediate {
		.connIDGenerator.RemoveAll()
		return
	}
	// Don't send out any CONNECTION_CLOSE if this is an error that occurred
	// before we even sent out the first packet.
	if .perspective == protocol.PerspectiveClient && !.sentFirstPacket {
		.connIDGenerator.RemoveAll()
		return
	}
	,  := .sendConnectionClose()
	if  != nil {
		.logger.Debugf("Error sending CONNECTION_CLOSE: %s", )
	}
	.connIDGenerator.ReplaceWithClosed(, 3*.rttStats.PTO(false))
}

// Drops the keys and packet state for an encryption level.
// Initial additionally discards the Initial AEAD; 0-RTT rejection resets
// streams, the framer, and connection-level flow control.
func ( *connection) ( protocol.EncryptionLevel,  time.Time) error {
	if .tracer != nil && .tracer.DroppedEncryptionLevel != nil {
		.tracer.DroppedEncryptionLevel()
	}
	.sentPacketHandler.DropPackets(, )
	.receivedPacketHandler.DropPackets()
	//nolint:exhaustive // only Initial and 0-RTT need special treatment
	switch  {
	case protocol.EncryptionInitial:
		.droppedInitialKeys = true
		.cryptoStreamHandler.DiscardInitialKeys()
	case protocol.Encryption0RTT:
		.streamsMap.ResetFor0RTT()
		.framer.Handle0RTTRejection()
		return .connFlowController.Reset()
	}
	return .cryptoStreamManager.Drop()
}

// Is called for the client, when restoring transport parameters saved for 0-RTT.
// Applies the remembered limits (connection IDs, flow control, stream limits)
// and updates the datagram-support state.
func ( *connection) ( *wire.TransportParameters) {
	if .logger.Debug() {
		.logger.Debugf("Restoring Transport Parameters: %s", )
	}

	.peerParams = 
	.connIDGenerator.SetMaxActiveConnIDs(.ActiveConnectionIDLimit)
	.connFlowController.UpdateSendWindow(.InitialMaxData)
	.streamsMap.UpdateLimits()
	.connStateMutex.Lock()
	.connState.SupportsDatagrams = .supportsDatagrams()
	.connStateMutex.Unlock()
}

// Processes the peer's transport parameters received during the handshake:
// validates them, rejects reduced limits after accepted 0-RTT data, stores
// them, and — on the server — applies them immediately and marks the early
// connection ready. Clients apply them only after handshake completion.
func ( *connection) ( *wire.TransportParameters) error {
	if .tracer != nil && .tracer.ReceivedTransportParameters != nil {
		.tracer.ReceivedTransportParameters()
	}
	if  := .checkTransportParameters();  != nil {
		return &qerr.TransportError{
			ErrorCode:    qerr.TransportParameterError,
			ErrorMessage: .Error(),
		}
	}

	// If 0-RTT data was accepted, the server must not reduce any limits
	// compared to the parameters remembered from the original connection.
	if .perspective == protocol.PerspectiveClient && .peerParams != nil && .ConnectionState().Used0RTT && !.ValidForUpdate(.peerParams) {
		return &qerr.TransportError{
			ErrorCode:    qerr.ProtocolViolation,
			ErrorMessage: "server sent reduced limits after accepting 0-RTT data",
		}
	}

	.peerParams = 
	// On the client side we have to wait for handshake completion.
	// During a 0-RTT connection, we are only allowed to use the new transport parameters for 1-RTT packets.
	if .perspective == protocol.PerspectiveServer {
		.applyTransportParameters()
		// On the server side, the early connection is ready as soon as we processed
		// the client's transport parameters.
		close(.earlyConnReadyChan)
	}

	.connStateMutex.Lock()
	.connState.SupportsDatagrams = .supportsDatagrams()
	.connStateMutex.Unlock()
	return nil
}

// Validates the authenticated connection IDs carried in the peer's transport
// parameters (RFC 9000, Section 7.3): initial_source_connection_id for both
// sides, plus original_destination_connection_id and
// retry_source_connection_id on the client side.
func ( *connection) ( *wire.TransportParameters) error {
	if .logger.Debug() {
		.logger.Debugf("Processed Transport Parameters: %s", )
	}

	// check the initial_source_connection_id
	if .InitialSourceConnectionID != .handshakeDestConnID {
		return fmt.Errorf("expected initial_source_connection_id to equal %s, is %s", .handshakeDestConnID, .InitialSourceConnectionID)
	}

	if .perspective == protocol.PerspectiveServer {
		return nil
	}
	// check the original_destination_connection_id
	if .OriginalDestinationConnectionID != .origDestConnID {
		return fmt.Errorf("expected original_destination_connection_id to equal %s, is %s", .origDestConnID, .OriginalDestinationConnectionID)
	}
	if .retrySrcConnID != nil { // a Retry was performed
		if .RetrySourceConnectionID == nil {
			return errors.New("missing retry_source_connection_id")
		}
		if *.RetrySourceConnectionID != *.retrySrcConnID {
			return fmt.Errorf("expected retry_source_connection_id to equal %s, is %s", .retrySrcConnID, *.RetrySourceConnectionID)
		}
	} else if .RetrySourceConnectionID != nil {
		return errors.New("received retry_source_connection_id, although no Retry was performed")
	}
	return nil
}

// Applies the peer's transport parameters to the connection: idle timeout,
// keep-alive interval, stream/flow-control limits, ACK delay settings,
// connection ID limits, stateless reset token, preferred address, and starts
// MTU discovery capped by the peer's max_udp_payload_size.
func ( *connection) () {
	 := .peerParams
	// Our local idle timeout will always be > 0.
	.idleTimeout = .config.MaxIdleTimeout
	// If the peer advertised an idle timeout, take the minimum of the values.
	if .MaxIdleTimeout > 0 {
		.idleTimeout = min(.idleTimeout, .MaxIdleTimeout)
	}
	// Keep-alives must fire well before the idle timeout expires.
	.keepAliveInterval = min(.config.KeepAlivePeriod, .idleTimeout/2)
	.streamsMap.UpdateLimits()
	.frameParser.SetAckDelayExponent(.AckDelayExponent)
	.connFlowController.UpdateSendWindow(.InitialMaxData)
	.rttStats.SetMaxAckDelay(.MaxAckDelay)
	.connIDGenerator.SetMaxActiveConnIDs(.ActiveConnectionIDLimit)
	if .StatelessResetToken != nil {
		.connIDManager.SetStatelessResetToken(*.StatelessResetToken)
	}
	// We don't support connection migration yet, so we don't have any use for the preferred_address.
	if .PreferredAddress != nil {
		// Retire the connection ID.
		.connIDManager.AddFromPreferredAddress(.PreferredAddress.ConnectionID, .PreferredAddress.StatelessResetToken)
	}
	// The MTU discovery ceiling is the peer's max_udp_payload_size,
	// capped by our maximum packet buffer size.
	 := protocol.ByteCount(protocol.MaxPacketBufferSize)
	if .MaxUDPPayloadSize > 0 && .MaxUDPPayloadSize <  {
		 = .MaxUDPPayloadSize
	}
	.mtuDiscoverer = newMTUDiscoverer(
		.rttStats,
		protocol.ByteCount(.config.InitialPacketSize),
		,
		.tracer,
	)
}

// Dispatches on the congestion controller's current send mode: send freely,
// send nothing, send only an ACK (also when pacing-limited, via fallthrough),
// or send a PTO probe packet for the indicated packet number space.
func ( *connection) ( time.Time) error {
	.pacingDeadline = time.Time{}

	 := .sentPacketHandler.SendMode()
	//nolint:exhaustive // No need to handle pacing limited here.
	switch  {
	case ackhandler.SendAny:
		return .sendPackets()
	case ackhandler.SendNone:
		return nil
	case ackhandler.SendPacingLimited:
		 := .sentPacketHandler.TimeUntilSend()
		if .IsZero() {
			 = deadlineSendImmediately
		}
		.pacingDeadline = 
		// Allow sending of an ACK if we're pacing limit.
		// This makes sure that a peer that is mostly receiving data (and thus has an inaccurate cwnd estimate)
		// sends enough ACKs to allow its peer to utilize the bandwidth.
		fallthrough
	case ackhandler.SendAck:
		// We can at most send a single ACK only packet.
		// There will only be a new ACK after receiving new packets.
		// SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer.
		return .maybeSendAckOnlyPacket()
	case ackhandler.SendPTOInitial, ackhandler.SendPTOHandshake, ackhandler.SendPTOAppData:
		if  := .sendProbePacket(, );  != nil {
			return 
		}
		if .sendQueue.WouldBlock() {
			.scheduleSending()
			return nil
		}
		// After the probe, continue sending regular packets (recursive call
		// into this send dispatcher — method name elided in this listing).
		return .()
	default:
		return fmt.Errorf("BUG: invalid send mode %d", )
	}
}

// Sends packets, in priority order: (1) a path probe if the client is probing
// a new path, (2) a Path MTU probe, then (3) regular packets — coalesced
// before handshake confirmation, afterwards batched with or without GSO
// depending on the socket's capabilities.
func ( *connection) ( time.Time) error {
	// Client-side path probing: send a PATH_CHALLENGE on the next path to probe.
	if .perspective == protocol.PerspectiveClient && .handshakeConfirmed {
		if  := .pathManagerOutgoing.Load();  != nil {
			, , ,  := .NextPathToProbe()
			if  {
				, ,  := .packer.PackPathProbePacket(, []ackhandler.Frame{}, .version)
				if  != nil {
					return 
				}
				.logger.Debugf("sending path probe packet from %s", .LocalAddr())
				.logShortHeaderPacket(.DestConnID, .Ack, .Frames, .StreamFrames, .PacketNumber, .PacketNumberLen, .KeyPhase, protocol.ECNNon, .Len(), false)
				.registerPackedShortHeaderPacket(, protocol.ECNNon, )
				.WriteTo(.Data, .conn.RemoteAddr())
				// There's (likely) more data to send. Loop around again.
				.scheduleSending()
				return nil
			}
		}
	}

	// Path MTU Discovery
	// Can't use GSO, since we need to send a single packet that's larger than our current maximum size.
	// Performance-wise, this doesn't matter, since we only send a very small (<10) number of
	// MTU probe packets per connection.
	if .handshakeConfirmed && .mtuDiscoverer != nil && .mtuDiscoverer.ShouldSendProbe() {
		,  := .mtuDiscoverer.GetPing()
		, ,  := .packer.PackMTUProbePacket(, , .version)
		if  != nil {
			return 
		}
		 := .sentPacketHandler.ECNMode(true)
		.logShortHeaderPacket(.DestConnID, .Ack, .Frames, .StreamFrames, .PacketNumber, .PacketNumberLen, .KeyPhase, , .Len(), false)
		.registerPackedShortHeaderPacket(, , )
		.sendQueue.Send(, 0, )
		// There's (likely) more data to send. Loop around again.
		.scheduleSending()
		return nil
	}

	// Queue pending connection-level flow control updates and post-handshake
	// crypto data (e.g. session tickets) before packing.
	if  := .connFlowController.GetWindowUpdate();  > 0 {
		.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: })
	}
	if  := .cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize);  != nil {
		.queueControlFrame()
	}

	// Before handshake confirmation, packets of multiple encryption levels
	// are coalesced into a single datagram.
	if !.handshakeConfirmed {
		,  := .packer.PackCoalescedPacket(false, .maxPacketSize(), , .version)
		if  != nil ||  == nil {
			return 
		}
		.sentFirstPacket = true
		if  := .sendPackedCoalescedPacket(, .sentPacketHandler.ECNMode(.IsOnlyShortHeaderPacket()), );  != nil {
			return 
		}
		//nolint:exhaustive // only need to handle pacing-related events here
		switch .sentPacketHandler.SendMode() {
		case ackhandler.SendPacingLimited:
			.resetPacingDeadline()
		case ackhandler.SendAny:
			.pacingDeadline = deadlineSendImmediately
		}
		return nil
	}

	if .conn.capabilities().GSO {
		return .sendPacketsWithGSO()
	}
	return .sendPacketsWithoutGSO()
}

// Sends 1-RTT short-header packets one buffer at a time in a loop
// (presumably the non-GSO send path — identifiers are stripped from this
// file, so names are inferred; verify against upstream).
// The loop stops when there is nothing left to pack, the send queue would
// block, the pacer/congestion controller disallows sending, or received
// packets are waiting to be processed.
func ( *connection) ( time.Time) error {
	for {
		// One fresh buffer per packet; released if nothing was packed.
		 := getPacketBuffer()
		 := .sentPacketHandler.ECNMode(true)
		if ,  := .appendOneShortHeaderPacket(, .maxPacketSize(), , );  != nil {
			if  == errNothingToPack {
				.Release()
				return nil
			}
			return 
		}

		// Size argument 0: a single packet, no segmentation.
		.sendQueue.Send(, 0, )

		if .sendQueue.WouldBlock() {
			return nil
		}
		 := .sentPacketHandler.SendMode()
		if  == ackhandler.SendPacingLimited {
			.resetPacingDeadline()
			return nil
		}
		if  != ackhandler.SendAny {
			return nil
		}
		// Prioritize receiving of packets over sending out more packets.
		.receivedPacketMx.Lock()
		 := !.receivedPackets.Empty()
		.receivedPacketMx.Unlock()
		if  {
			.pacingDeadline = deadlineSendImmediately
			return nil
		}
	}
}

// Appends multiple full-size short-header packets into one large buffer and
// hands the batch to the send queue with a non-zero segment size — presumably
// the GSO send path (identifiers are stripped from this file; verify naming
// against upstream). Batching stops when a packet can't be packed, pacing
// or congestion limits apply, the ECN marking would change mid-batch, or
// the buffer can't fit another full-size packet.
func ( *connection) ( time.Time) error {
	 := getLargePacketBuffer()
	 := .maxPacketSize()

	 := .sentPacketHandler.ECNMode(true)
	for {
		var  bool
		,  := .appendOneShortHeaderPacket(, , , )
		if  != nil {
			if  != errNothingToPack {
				return 
			}
			if .Len() == 0 {
				.Release()
				return nil
			}
			// Nothing more to pack, but the buffer already holds packets:
			// flush what we have, then stop.
			 = true
		}

		if ! {
			 := .sentPacketHandler.SendMode()
			if  == ackhandler.SendPacingLimited {
				.resetPacingDeadline()
			}
			if  != ackhandler.SendAny {
				 = true
			}
		}

		// Don't send more packets in this batch if they require a different ECN marking than the previous ones.
		 := .sentPacketHandler.ECNMode(true)

		// Append another packet if
		// 1. The congestion controller and pacer allow sending more
		// 2. The last packet appended was a full-size packet
		// 3. The next packet will have the same ECN marking
		// 4. We still have enough space for another full-size packet in the buffer
		if ! &&  ==  &&  ==  && .Len()+ <= .Cap() {
			continue
		}

		// Flush the batch; the uint16 segment size enables segmentation offload.
		.sendQueue.Send(, uint16(), )

		if  {
			return nil
		}
		if .sendQueue.WouldBlock() {
			return nil
		}

		// Prioritize receiving of packets over sending out more packets.
		.receivedPacketMx.Lock()
		 := !.receivedPackets.Empty()
		.receivedPacketMx.Unlock()
		if  {
			.pacingDeadline = deadlineSendImmediately
			return nil
		}

		// Start the next batch with a fresh large buffer and the new ECN mode.
		 = 
		 = getLargePacketBuffer()
	}
}

// Updates the pacing deadline from the sent-packet handler's next send time.
// A zero time from TimeUntilSend is mapped to deadlineSendImmediately.
func ( *connection) () {
	 := .sentPacketHandler.TimeUntilSend()
	if .IsZero() {
		 = deadlineSendImmediately
	}
	.pacingDeadline = 
}

// Packs and sends a packet carrying (only) acknowledgements.
// Before handshake confirmation a coalesced packet is used (ACKs may be
// needed at multiple encryption levels); afterwards a single 1-RTT
// short-header packet is packed, logged, registered with the ACK handler,
// and queued for sending. errNothingToPack is not an error: there was
// simply nothing to acknowledge.
func ( *connection) ( time.Time) error {
	if !.handshakeConfirmed {
		 := .sentPacketHandler.ECNMode(false)
		,  := .packer.PackCoalescedPacket(true, .maxPacketSize(), , .version)
		if  != nil {
			return 
		}
		if  == nil {
			return nil
		}
		return .sendPackedCoalescedPacket(, , )
	}

	 := .sentPacketHandler.ECNMode(true)
	, ,  := .packer.PackAckOnlyPacket(.maxPacketSize(), , .version)
	if  != nil {
		if  == errNothingToPack {
			return nil
		}
		return 
	}
	.logShortHeaderPacket(.DestConnID, .Ack, .Frames, .StreamFrames, .PacketNumber, .PacketNumberLen, .KeyPhase, , .Len(), false)
	.registerPackedShortHeaderPacket(, , )
	.sendQueue.Send(, 0, )
	return nil
}

// Sends a PTO probe packet at the encryption level implied by the send mode
// (see RFC 9002, section 6.2). It first tries to queue retransmittable data
// as the probe; if nothing can be queued, it falls back to packing a probe
// with forced content (the `true` argument to the second PackPTOProbePacket
// call — presumably a PING/padding fallback; identifiers are stripped, so
// confirm against upstream).
func ( *connection) ( ackhandler.SendMode,  time.Time) error {
	var  protocol.EncryptionLevel
	//nolint:exhaustive // We only need to handle the PTO send modes here.
	switch  {
	case ackhandler.SendPTOInitial:
		 = protocol.EncryptionInitial
	case ackhandler.SendPTOHandshake:
		 = protocol.EncryptionHandshake
	case ackhandler.SendPTOAppData:
		 = protocol.Encryption1RTT
	default:
		return fmt.Errorf("connection BUG: unexpected send mode: %d", )
	}
	// Queue probe packets until we actually send out a packet,
	// or until there are no more packets to queue.
	var  *coalescedPacket
	for  == nil {
		if  := .sentPacketHandler.QueueProbePacket(); ! {
			break
		}
		var  error
		,  = .packer.PackPTOProbePacket(, .maxPacketSize(), false, , .version)
		if  != nil {
			return 
		}
	}
	if  == nil {
		var  error
		,  = .packer.PackPTOProbePacket(, .maxPacketSize(), true, , .version)
		if  != nil {
			return 
		}
	}
	// A probe MUST have been packed at this point; an empty result is a bug.
	if  == nil || (len(.longHdrPackets) == 0 && .shortHdrPacket == nil) {
		return fmt.Errorf("connection BUG: couldn't pack %s probe packet: %v", , )
	}
	return .sendPackedCoalescedPacket(, .sentPacketHandler.ECNMode(.IsOnlyShortHeaderPacket()), )
}

// appendOneShortHeaderPacket appends a new packet to the given packetBuffer.
// If there was nothing to pack, the returned size is 0.
// The appended packet is logged and registered with the sent-packet handler;
// the size is derived from the buffer-length delta around AppendPacket.
func ( *connection) ( *packetBuffer,  protocol.ByteCount,  protocol.ECN,  time.Time) (protocol.ByteCount, error) {
	 := .Len()
	,  := .packer.AppendPacket(, , , .version)
	if  != nil {
		return 0, 
	}
	 := .Len() - 
	.logShortHeaderPacket(.DestConnID, .Ack, .Frames, .StreamFrames, .PacketNumber, .PacketNumberLen, .KeyPhase, , , false)
	.registerPackedShortHeaderPacket(, , )
	return , nil
}

// Registers a packed 1-RTT packet with the sent-packet handler.
// Path probe packets are registered with an invalid largest-acked and a
// true final flag, and skip idle-timeout bookkeeping and connection-ID
// accounting. For regular packets, the first ack-eliciting packet after an
// idle period is recorded (it restarts the idle-timeout reference point),
// and the connection-ID manager is notified of the send.
func ( *connection) ( shortHeaderPacket,  protocol.ECN,  time.Time) {
	if .IsPathProbePacket {
		.sentPacketHandler.SentPacket(
			,
			.PacketNumber,
			protocol.InvalidPacketNumber,
			.StreamFrames,
			.Frames,
			protocol.Encryption1RTT,
			,
			.Length,
			.IsPathMTUProbePacket,
			true,
		)
		return
	}
	if .firstAckElicitingPacketAfterIdleSentTime.IsZero() && (len(.StreamFrames) > 0 || ackhandler.HasAckElicitingFrames(.Frames)) {
		.firstAckElicitingPacketAfterIdleSentTime = 
	}

	// Largest acked carried by this packet's ACK frame, if any.
	 := protocol.InvalidPacketNumber
	if .Ack != nil {
		 = .Ack.LargestAcked()
	}
	.sentPacketHandler.SentPacket(
		,
		.PacketNumber,
		,
		.StreamFrames,
		.Frames,
		protocol.Encryption1RTT,
		,
		.Length,
		.IsPathMTUProbePacket,
		false,
	)
	.connIDManager.SentPacket()
}

// Logs a packed coalesced packet, registers each contained packet (long-header
// packets first, then an optional appended short-header packet) with the
// sent-packet handler, and queues the shared buffer for sending.
func ( *connection) ( *coalescedPacket,  protocol.ECN,  time.Time) error {
	.logCoalescedPacket(, )
	for ,  := range .longHdrPackets {
		if .firstAckElicitingPacketAfterIdleSentTime.IsZero() && .IsAckEliciting() {
			.firstAckElicitingPacketAfterIdleSentTime = 
		}
		 := protocol.InvalidPacketNumber
		if .ack != nil {
			 = .ack.LargestAcked()
		}
		.sentPacketHandler.SentPacket(
			,
			.header.PacketNumber,
			,
			.streamFrames,
			.frames,
			.EncryptionLevel(),
			,
			.length,
			false,
			false,
		)
		if .perspective == protocol.PerspectiveClient && .EncryptionLevel() == protocol.EncryptionHandshake &&
			!.droppedInitialKeys {
			// On the client side, Initial keys are dropped as soon as the first Handshake packet is sent.
			// See Section 4.9.1 of RFC 9001.
			if  := .dropEncryptionLevel(protocol.EncryptionInitial, );  != nil {
				return 
			}
		}
	}
	if  := .shortHdrPacket;  != nil {
		if .firstAckElicitingPacketAfterIdleSentTime.IsZero() && .IsAckEliciting() {
			.firstAckElicitingPacketAfterIdleSentTime = 
		}
		 := protocol.InvalidPacketNumber
		if .Ack != nil {
			 = .Ack.LargestAcked()
		}
		.sentPacketHandler.SentPacket(
			,
			.PacketNumber,
			,
			.StreamFrames,
			.Frames,
			protocol.Encryption1RTT,
			,
			.Length,
			.IsPathMTUProbePacket,
			false,
		)
	}
	.connIDManager.SentPacket()
	.sendQueue.Send(.buffer, 0, )
	return nil
}

// Packs and writes a CONNECTION_CLOSE packet for the given error.
// Transport errors and application errors are packed with their respective
// frame types (errors.As distinguishes them); any other error type is a bug
// and is reported as an INTERNAL_ERROR transport close.
// Returns the raw packet bytes so the caller can retransmit the same close
// packet later without repacking.
func ( *connection) ( error) ([]byte, error) {
	var  *coalescedPacket
	var  error
	var  *qerr.TransportError
	var  *qerr.ApplicationError
	if errors.As(, &) {
		,  = .packer.PackConnectionClose(, .maxPacketSize(), .version)
	} else if errors.As(, &) {
		,  = .packer.PackApplicationClose(, .maxPacketSize(), .version)
	} else {
		,  = .packer.PackConnectionClose(&qerr.TransportError{
			ErrorCode:    qerr.InternalError,
			ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", .Error()),
		}, .maxPacketSize(), .version)
	}
	if  != nil {
		return nil, 
	}
	 := .sentPacketHandler.ECNMode(.IsOnlyShortHeaderPacket())
	.logCoalescedPacket(, )
	// Written directly on the connection (not via the send queue): the
	// connection is closing, so there's no further send loop to rely on.
	return .buffer.Data, .conn.Write(.buffer.Data, 0, )
}

// Returns the maximum packet size to use for packing.
// Once MTU discovery is running, its current estimate wins; before that,
// clients use the configured initial size and servers use the minimum
// initial size (rationale in the inline comments below).
func ( *connection) () protocol.ByteCount {
	if .mtuDiscoverer == nil {
		// Use the configured packet size on the client side.
		// If the server sends a max_udp_payload_size that's smaller than this size, we can ignore this:
		// Apparently the server still processed the (fully padded) Initial packet anyway.
		if .perspective == protocol.PerspectiveClient {
			return protocol.ByteCount(.config.InitialPacketSize)
		}
		// On the server side, there's no downside to using 1200 bytes until we received the client's transport
		// parameters:
		// * If the first packet didn't contain the entire ClientHello, all we can do is ACK that packet. We don't
		//   need a lot of bytes for that.
		// * If it did, we will have processed the transport parameters and initialized the MTU discoverer.
		return protocol.MinInitialPacketSize
	}
	return .mtuDiscoverer.CurrentSize()
}

// AcceptStream returns the next bidirectional stream opened by the peer,
// delegating to the stream manager.
func ( *connection) ( context.Context) (Stream, error) {
	return .streamsMap.AcceptStream()
}

// Returns the next unidirectional stream opened by the peer,
// delegating to the stream manager.
func ( *connection) ( context.Context) (ReceiveStream, error) {
	return .streamsMap.AcceptUniStream()
}

// OpenStream opens a new bidirectional stream (non-blocking),
// delegating to the stream manager.
func ( *connection) () (Stream, error) {
	return .streamsMap.OpenStream()
}

// Opens a new bidirectional stream, blocking until stream-count flow control
// allows it (or the context is done), delegating to the stream manager.
func ( *connection) ( context.Context) (Stream, error) {
	return .streamsMap.OpenStreamSync()
}

// Opens a new unidirectional (send-only) stream (non-blocking),
// delegating to the stream manager.
func ( *connection) () (SendStream, error) {
	return .streamsMap.OpenUniStream()
}

// Opens a new unidirectional stream, blocking until stream-count flow control
// allows it (or the context is done), delegating to the stream manager.
func ( *connection) ( context.Context) (SendStream, error) {
	return .streamsMap.OpenUniStreamSync()
}

// Creates a stream-level flow controller for the given stream.
// The initial send window comes from the peer's transport parameters:
// the uni-stream limit by default; for bidirectional streams, the
// "remote" limit when we initiated the stream and the "local" limit when
// the peer did.
func ( *connection) ( protocol.StreamID) flowcontrol.StreamFlowController {
	 := .peerParams.InitialMaxStreamDataUni
	if .Type() == protocol.StreamTypeBidi {
		if .InitiatedBy() == .perspective {
			 = .peerParams.InitialMaxStreamDataBidiRemote
		} else {
			 = .peerParams.InitialMaxStreamDataBidiLocal
		}
	}
	return flowcontrol.NewStreamFlowController(
		,
		.connFlowController,
		protocol.ByteCount(.config.InitialStreamReceiveWindow),
		protocol.ByteCount(.config.MaxStreamReceiveWindow),
		,
		.rttStats,
		.logger,
	)
}

// scheduleSending signals that we have data for sending.
// The non-blocking send collapses repeated signals: if a signal is already
// pending on the channel, this is a no-op.
func ( *connection) () {
	select {
	case .sendingScheduled <- struct{}{}:
	default:
	}
}

// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys.
// The logging.PacketType is only used for logging purposes.
// The queue is capped at protocol.MaxUndecryptablePackets as a DoS guard:
// beyond that, packets are dropped (and traced as DOS-prevention drops).
// Calling this after handshake completion is a programming error and panics.
func ( *connection) ( receivedPacket,  logging.PacketType) {
	if .handshakeComplete {
		panic("shouldn't queue undecryptable packets after handshake completion")
	}
	if len(.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
		if .tracer != nil && .tracer.DroppedPacket != nil {
			.tracer.DroppedPacket(, protocol.InvalidPacketNumber, .Size(), logging.PacketDropDOSPrevention)
		}
		.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", .Size())
		return
	}
	.logger.Infof("Queueing packet (%d bytes) for later decryption", .Size())
	if .tracer != nil && .tracer.BufferedPacket != nil {
		.tracer.BufferedPacket(, .Size())
	}
	.undecryptablePackets = append(.undecryptablePackets, )
}

// Queues a control frame with the framer and wakes up the send loop.
func ( *connection) ( wire.Frame) {
	.framer.QueueControlFrame()
	.scheduleSending()
}

// Callback that simply wakes up the send loop (presumably invoked when
// connection-level data becomes available — identifiers are stripped, so
// confirm the trigger against upstream).
func ( *connection) () { .scheduleSending() }

// Registers the stream as active with the framer and wakes up the send loop.
func ( *connection) ( protocol.StreamID,  sendStreamI) {
	.framer.AddActiveStream(, )
	.scheduleSending()
}

// Registers a stream that has pending stream-level control frames with the
// framer and wakes up the send loop.
func ( *connection) ( protocol.StreamID,  streamControlFrameGetter) {
	.framer.AddStreamWithControlFrames(, )
	.scheduleSending()
}

// Removes a completed stream from the stream map and the framer.
// A deletion error closes the connection locally (it indicates an
// inconsistent stream state).
func ( *connection) ( protocol.StreamID) {
	if  := .streamsMap.DeleteStream();  != nil {
		.closeLocal()
	}
	.framer.RemoveActiveStream()
}

// Queues a DATAGRAM frame carrying a copy of the given payload.
// Fails if datagram support is disabled, or if the payload exceeds the
// conservative size estimate (the smaller of the peer's advertised
// max_datagram_frame_size capacity and the current MTU estimate).
func ( *connection) ( []byte) error {
	if !.supportsDatagrams() {
		return errors.New("datagram support disabled")
	}

	 := &wire.DatagramFrame{DataLenPresent: true}
	// The payload size estimate is conservative.
	// Under many circumstances we could send a few more bytes.
	 := min(
		.MaxDataLen(.peerParams.MaxDatagramFrameSize, .version),
		protocol.ByteCount(.currentMTUEstimate.Load()),
	)
	if protocol.ByteCount(len()) >  {
		return &DatagramTooLargeError{MaxDatagramPayloadSize: int64()}
	}
	// Copy the payload: the caller may reuse its slice after this returns.
	.Data = make([]byte, len())
	copy(.Data, )
	return .datagramQueue.Add()
}

// Receives the next DATAGRAM payload from the datagram queue.
// Fails immediately if datagram support was not enabled in the config.
func ( *connection) ( context.Context) ([]byte, error) {
	if !.config.EnableDatagrams {
		return nil, errors.New("datagram support disabled")
	}
	return .datagramQueue.Receive()
}

// Returns the local address of the underlying connection.
func ( *connection) () net.Addr  { return .conn.LocalAddr() }
// Returns the remote address of the underlying connection.
func ( *connection) () net.Addr { return .conn.RemoteAddr() }

// Lazily initializes and returns the outgoing path manager, using an atomic
// CompareAndSwap so only one instance ever becomes visible.
// NOTE(review): the function literal below is invoked immediately (`}()`),
// so its argument — including newPathManagerOutgoing — is evaluated BEFORE
// CompareAndSwap runs, i.e. even when the swap does not happen and the
// result is discarded. The original comment claimed otherwise; confirm the
// intended semantics (a losing racer's instance is simply thrown away).
func ( *connection) () *pathManagerOutgoing {
	.pathManagerOutgoing.CompareAndSwap(nil,
		func() *pathManagerOutgoing { // evaluated unconditionally; only stored if the swap succeeds
			return newPathManagerOutgoing(
				.connIDManager.GetConnIDForPath,
				.connIDManager.RetireConnIDForPath,
				.scheduleSending,
			)
		}(),
	)
	return .pathManagerOutgoing.Load()
}

// Creates a new path on the given transport for connection migration.
// Only clients may initiate migration, and only if the server did not set
// disable_active_migration. The transport is initialized if needed, and the
// path manager is given a callback that wires the transport's packet-handler
// map into the connection-ID generator (so packets on new connection IDs
// arriving via this transport are routed to this connection).
func ( *connection) ( *Transport) (*Path, error) {
	if .perspective == protocol.PerspectiveServer {
		return nil, errors.New("server cannot initiate connection migration")
	}
	if .peerParams.DisableActiveMigration {
		return nil, errors.New("server disabled connection migration")
	}
	if  := .init(false);  != nil {
		return nil, 
	}
	return .getPathManager().NewPath(
		,
		200*time.Millisecond, // initial RTT estimate
		func() {
			 := (*packetHandlerMap)()
			.connIDGenerator.AddConnRunner(
				,
				connRunnerCallbacks{
					AddConnectionID:    func( protocol.ConnectionID) { .Add(, ) },
					RemoveConnectionID: .Remove,
					ReplaceWithClosed:  .ReplaceWithClosed,
				},
			)
		},
	), nil
}

// Blocks until the handshake completes (or the connection / the passed
// context ends), then returns the connection for post-0-RTT use.
// On handshake completion the stream maps are switched to the reset maps
// (relevant after a 0-RTT rejection).
func ( *connection) ( context.Context) (Connection, error) {
	// The handshake might fail after the server rejected 0-RTT.
	// This could happen if the Finished message is malformed or never received.
	select {
	case <-.Done():
		return nil, context.Cause()
	case <-.Context().Done():
	case <-.HandshakeComplete():
		.streamsMap.UseResetMaps()
	}
	return , nil
}

// estimateMaxPayloadSize estimates the maximum payload size for short header packets.
// It is not very sophisticated: it just subtracts the size of header (assuming the maximum
// connection ID length), and the size of the encryption tag.
// The estimate is deliberately conservative: actual headers are usually smaller.
func estimateMaxPayloadSize( protocol.ByteCount) protocol.ByteCount {
	return  - 1 /* type byte */ - 20 /* maximum connection ID length */ - 16 /* tag size */
}