// SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
// SPDX-License-Identifier: MIT

// Package ice implements the Interactive Connectivity Establishment (ICE)
// protocol defined in rfc5245.
package ice

// NOTE: import paths below assume the v4 layout of this module and its pion dependencies.
import (
	"context"
	"fmt"
	"math"
	"net"
	"net/netip"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pion/logging"
	"github.com/pion/mdns/v2"
	"github.com/pion/stun/v3"
	"github.com/pion/transport/v3"
	"github.com/pion/transport/v3/packetio"
	"github.com/pion/transport/v3/stdnet"
	"github.com/pion/transport/v3/vnet"
	"golang.org/x/net/proxy"

	stunx "github.com/pion/ice/v4/internal/stun"
	"github.com/pion/ice/v4/internal/taskloop"
)

type bindingRequest struct {
	timestamp      time.Time
	transactionID  [stun.TransactionIDSize]byte
	destination    net.Addr
	isUseCandidate bool
}

// Agent represents the ICE agent.
type Agent struct {
	loop *taskloop.Loop

	onConnectionStateChangeHdlr       atomic.Value // func(ConnectionState)
	onSelectedCandidatePairChangeHdlr atomic.Value // func(Candidate, Candidate)
	onCandidateHdlr                   atomic.Value // func(Candidate)

	onConnected     chan struct{}
	onConnectedOnce sync.Once

	// Force candidate to be contacted immediately (instead of waiting for task ticker)
	forceCandidateContact chan bool

	tieBreaker uint64
	lite       bool

	connectionState ConnectionState
	gatheringState  GatheringState

	mDNSMode MulticastDNSMode
	mDNSName string
	mDNSConn *mdns.Conn

	muHaveStarted sync.Mutex
	startedCh     <-chan struct{}
	startedFn     func()
	isControlling bool

	maxBindingRequests uint16

	hostAcceptanceMinWait  time.Duration
	srflxAcceptanceMinWait time.Duration
	prflxAcceptanceMinWait time.Duration
	relayAcceptanceMinWait time.Duration
	stunGatherTimeout      time.Duration

	tcpPriorityOffset uint16
	disableActiveTCP  bool

	portMin uint16
	portMax uint16

	candidateTypes []CandidateType

	// How long connectivity checks can fail before the ICE Agent
	// goes to disconnected
	disconnectedTimeout time.Duration

	// How long connectivity checks can fail before the ICE Agent
	// goes to failed
	failedTimeout time.Duration

	// How often should we send keepalive packets?
	// 0 means never
	keepaliveInterval time.Duration

	// How often should we run our internal taskLoop to check for state changes when connecting
	checkInterval time.Duration

	localUfrag      string
	localPwd        string
	localCandidates map[NetworkType][]Candidate

	remoteUfrag      string
	remotePwd        string
	remoteCandidates map[NetworkType][]Candidate

	checklist []*CandidatePair
	selector  pairCandidateSelector

	selectedPair atomic.Value // *CandidatePair

	urls         []*stun.URI
	networkTypes []NetworkType

	buf *packetio.Buffer

	// LRU of outbound Binding request Transaction IDs
	pendingBindingRequests []bindingRequest

	// 1:1 D-NAT IP address mapping
	extIPMapper *externalIPMapper

	// Callback that allows user to implement custom behavior
	// for STUN Binding Requests
	userBindingRequestHandler func(m *stun.Message, local, remote Candidate, pair *CandidatePair) bool

	gatherCandidateCancel func()
	gatherCandidateDone   chan struct{}

	connectionStateNotifier       *handlerNotifier
	candidateNotifier             *handlerNotifier
	selectedCandidatePairNotifier *handlerNotifier

	loggerFactory logging.LoggerFactory
	log           logging.LeveledLogger

	net    transport.Net
	tcpMux TCPMux
	udpMux UDPMux

	udpMuxSrflx UniversalUDPMux

	interfaceFilter func(string) (keep bool)
	ipFilter        func(net.IP) (keep bool)
	includeLoopback bool

	insecureSkipVerify bool

	proxyDialer proxy.Dialer

	enableUseCandidateCheckPriority bool
}
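// The sketch below is a minimal, hypothetical usage example and not part of the
// package API. It only assumes exported Agent methods defined elsewhere in this
// package (NewAgent, OnCandidate, OnConnectionStateChange, GatherCandidates,
// Close) and the Candidate Marshal method.
func exampleAgentUsage() { //nolint:unused // illustrative sketch only
	agent, err := NewAgent(&AgentConfig{
		NetworkTypes: []NetworkType{NetworkTypeUDP4, NetworkTypeUDP6},
	})
	if err != nil {
		panic(err)
	}
	defer func() { _ = agent.Close() }()

	// Local candidates are delivered asynchronously through the candidate notifier;
	// a nil candidate signals the end of gathering.
	if err := agent.OnCandidate(func(c Candidate) {
		if c != nil {
			fmt.Println("local candidate:", c.Marshal())
		}
	}); err != nil {
		panic(err)
	}

	// Connection state changes are delivered through the connection state notifier.
	if err := agent.OnConnectionStateChange(func(s ConnectionState) {
		fmt.Println("connection state:", s)
	}); err != nil {
		panic(err)
	}

	if err := agent.GatherCandidates(); err != nil {
		panic(err)
	}
}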
// NewAgent creates a new Agent.
func NewAgent(config *AgentConfig) (*Agent, error) { //nolint:gocognit,cyclop
	var err error
	if config.PortMax < config.PortMin {
		return nil, ErrPort
	}

	mDNSName := config.MulticastDNSHostName
	if mDNSName == "" {
		if mDNSName, err = generateMulticastDNSName(); err != nil {
			return nil, err
		}
	}

	if !strings.HasSuffix(mDNSName, ".local") || len(strings.Split(mDNSName, ".")) != 2 {
		return nil, ErrInvalidMulticastDNSHostName
	}

	mDNSMode := config.MulticastDNSMode
	if mDNSMode == 0 {
		mDNSMode = MulticastDNSModeQueryOnly
	}

	loggerFactory := config.LoggerFactory
	if loggerFactory == nil {
		loggerFactory = logging.NewDefaultLoggerFactory()
	}
	log := loggerFactory.NewLogger("ice")

	startedCtx, startedFn := context.WithCancel(context.Background())

	agent := &Agent{
		tieBreaker:                      globalMathRandomGenerator.Uint64(),
		lite:                            config.Lite,
		gatheringState:                  GatheringStateNew,
		connectionState:                 ConnectionStateNew,
		localCandidates:                 make(map[NetworkType][]Candidate),
		remoteCandidates:                make(map[NetworkType][]Candidate),
		urls:                            config.Urls,
		networkTypes:                    config.NetworkTypes,
		onConnected:                     make(chan struct{}),
		buf:                             packetio.NewBuffer(),
		startedCh:                       startedCtx.Done(),
		startedFn:                       startedFn,
		portMin:                         config.PortMin,
		portMax:                         config.PortMax,
		loggerFactory:                   loggerFactory,
		log:                             log,
		net:                             config.Net,
		proxyDialer:                     config.ProxyDialer,
		tcpMux:                          config.TCPMux,
		udpMux:                          config.UDPMux,
		udpMuxSrflx:                     config.UDPMuxSrflx,
		mDNSMode:                        mDNSMode,
		mDNSName:                        mDNSName,
		gatherCandidateCancel:           func() {},
		forceCandidateContact:           make(chan bool, 1),
		interfaceFilter:                 config.InterfaceFilter,
		ipFilter:                        config.IPFilter,
		insecureSkipVerify:              config.InsecureSkipVerify,
		includeLoopback:                 config.IncludeLoopback,
		disableActiveTCP:                config.DisableActiveTCP,
		userBindingRequestHandler:       config.BindingRequestHandler,
		enableUseCandidateCheckPriority: config.EnableUseCandidateCheckPriority,
	}

	agent.connectionStateNotifier = &handlerNotifier{
		connectionStateFunc: agent.onConnectionStateChange,
		done:                make(chan struct{}),
	}
	agent.candidateNotifier = &handlerNotifier{candidateFunc: agent.onCandidate, done: make(chan struct{})}
	agent.selectedCandidatePairNotifier = &handlerNotifier{
		candidatePairFunc: agent.onSelectedCandidatePairChange,
		done:              make(chan struct{}),
	}

	if agent.net == nil {
		agent.net, err = stdnet.NewNet()
		if err != nil {
			return nil, fmt.Errorf("failed to create network: %w", err)
		}
	} else if _, isVirtual := agent.net.(*vnet.Net); isVirtual {
		agent.log.Warn("Virtual network is enabled")
		if agent.mDNSMode != MulticastDNSModeDisabled {
			agent.log.Warn("Virtual network does not support mDNS yet")
		}
	}

	ifcs, localAddrs, err := localInterfaces(
		agent.net, agent.interfaceFilter, agent.ipFilter, agent.networkTypes, agent.includeLoopback,
	)
	if err != nil {
		return nil, fmt.Errorf("error getting local interfaces: %w", err)
	}

	// Opportunistic mDNS: If we can't open the connection, that's ok: we
	// can continue without it.
	if agent.mDNSConn, agent.mDNSMode, err = createMulticastDNS(
		agent.net, agent.networkTypes, ifcs, agent.includeLoopback, localAddrs, mDNSMode, mDNSName, log, loggerFactory,
	); err != nil {
		log.Warnf("Failed to initialize mDNS %s: %v", mDNSName, err)
	}

	config.initWithDefaults(agent)

	// Make sure the buffer doesn't grow indefinitely.
	// NOTE: We actually won't get anywhere close to this limit.
	// SRTP will constantly read from the endpoint and drop packets if it's full.
	agent.buf.SetLimitSize(maxBufferSize)

	if agent.lite && (len(agent.candidateTypes) != 1 || agent.candidateTypes[0] != CandidateTypeHost) {
		agent.closeMulticastConn()

		return nil, ErrLiteUsingNonHostCandidates
	}

	if len(config.Urls) > 0 &&
		!containsCandidateType(CandidateTypeServerReflexive, agent.candidateTypes) &&
		!containsCandidateType(CandidateTypeRelay, agent.candidateTypes) {
		agent.closeMulticastConn()

		return nil, ErrUselessUrlsProvided
	}

	if err = agent.initExtIPMapping(config); err != nil {
		agent.closeMulticastConn()

		return nil, err
	}

	agent.loop = taskloop.New(func() {
		agent.removeUfragFromMux()
		agent.deleteAllCandidates()
		agent.startedFn()

		if err := agent.buf.Close(); err != nil {
			agent.log.Warnf("Failed to close buffer: %v", err)
		}

		agent.closeMulticastConn()
		agent.updateConnectionState(ConnectionStateClosed)

		agent.gatherCandidateCancel()
		if agent.gatherCandidateDone != nil {
			<-agent.gatherCandidateDone
		}
	})

	// Restart is also used to initialize the agent for the first time
	if err := agent.Restart(config.LocalUfrag, config.LocalPwd); err != nil {
		agent.closeMulticastConn()
		_ = agent.Close()

		return nil, err
	}

	return agent, nil
}

func (a *Agent) startConnectivityChecks(isControlling bool, remoteUfrag, remotePwd string) error {
	a.muHaveStarted.Lock()
	defer a.muHaveStarted.Unlock()
	select {
	case <-a.startedCh:
		return ErrMultipleStart
	default:
	}
	if err := a.SetRemoteCredentials(remoteUfrag, remotePwd); err != nil { //nolint:contextcheck
		return err
	}

	a.log.Debugf("Started agent: isControlling? %t, remoteUfrag: %q, remotePwd: %q", isControlling, remoteUfrag, remotePwd)

	return a.loop.Run(a.loop, func(_ context.Context) {
		a.isControlling = isControlling
		a.remoteUfrag = remoteUfrag
		a.remotePwd = remotePwd

		if isControlling {
			a.selector = &controllingSelector{agent: a, log: a.log}
		} else {
			a.selector = &controlledSelector{agent: a, log: a.log}
		}

		if a.lite {
			a.selector = &liteSelector{pairCandidateSelector: a.selector}
		}

		a.selector.Start()
		a.startedFn()

		a.updateConnectionState(ConnectionStateChecking)

		a.requestConnectivityCheck()
		go a.connectivityChecks() //nolint:contextcheck
	})
}

func (a *Agent) connectivityChecks() { //nolint:cyclop
	lastConnectionState := ConnectionState(0)
	checkingDuration := time.Time{}

	contact := func() {
		if err := a.loop.Run(a.loop, func(_ context.Context) {
			defer func() {
				lastConnectionState = a.connectionState
			}()

			switch a.connectionState {
			case ConnectionStateFailed:
				// The connection is currently failed so don't send any checks
				// In the future it may be restarted though
				return
			case ConnectionStateChecking:
				// We have just entered checking for the first time so update our checking timer
				if lastConnectionState != a.connectionState {
					checkingDuration = time.Now()
				}

				// We have been in checking longer than Disconnect+Failed timeout, set the connection to Failed
				if time.Since(checkingDuration) > a.disconnectedTimeout+a.failedTimeout {
					a.updateConnectionState(ConnectionStateFailed)

					return
				}
			default:
			}

			a.selector.ContactCandidates()
		}); err != nil {
			a.log.Warnf("Failed to start connectivity checks: %v", err)
		}
	}

	timer := time.NewTimer(math.MaxInt64)
	timer.Stop()

	for {
		interval := defaultKeepaliveInterval

		updateInterval := func(x time.Duration) {
			if x != 0 && (interval == 0 || interval > x) {
				interval = x
			}
		}

		switch lastConnectionState {
		case ConnectionStateNew, ConnectionStateChecking: // While connecting, check candidates more frequently
			updateInterval(a.checkInterval)
		case ConnectionStateConnected, ConnectionStateDisconnected:
			updateInterval(a.keepaliveInterval)
		default:
		}
		// Ensure we run our task loop as quickly as the minimum of our various configured timeouts
		updateInterval(a.disconnectedTimeout)
		updateInterval(a.failedTimeout)

		timer.Reset(interval)

		select {
		case <-a.forceCandidateContact:
			if !timer.Stop() {
				<-timer.C
			}
			contact()
		case <-timer.C:
			contact()
		case <-a.loop.Done():
			timer.Stop()

			return
		}
	}
}

func (a *Agent) updateConnectionState(newState ConnectionState) {
	if a.connectionState != newState {
		// Connection has gone to failed, release all gathered candidates
		if newState == ConnectionStateFailed {
			a.removeUfragFromMux()
			a.checklist = make([]*CandidatePair, 0)
			a.pendingBindingRequests = make([]bindingRequest, 0)
			a.setSelectedPair(nil)
			a.deleteAllCandidates()
		}

		a.log.Infof("Setting new connection state: %s", newState)
		a.connectionState = newState
		a.connectionStateNotifier.EnqueueConnectionState(newState)
	}
}
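// A small, self-contained sketch (an assumed helper, not used by the Agent) of
// how the connectivity-check loop above derives its next tick: start from the
// default keepalive interval and shrink it to the smallest non-zero duration
// that applies in the current connection state.
func exampleNextCheckInterval(
	state ConnectionState,
	checkInterval, keepaliveInterval, disconnectedTimeout, failedTimeout time.Duration,
) time.Duration { //nolint:unused // illustrative sketch only
	interval := defaultKeepaliveInterval
	updateInterval := func(x time.Duration) {
		if x != 0 && (interval == 0 || interval > x) {
			interval = x
		}
	}
	switch state {
	case ConnectionStateNew, ConnectionStateChecking:
		// While connecting, check candidates more frequently.
		updateInterval(checkInterval)
	case ConnectionStateConnected, ConnectionStateDisconnected:
		updateInterval(keepaliveInterval)
	default:
	}
	// The loop must also run at least as often as the configured timeouts.
	updateInterval(disconnectedTimeout)
	updateInterval(failedTimeout)

	return interval
}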
func (a *Agent) setSelectedPair(pair *CandidatePair) {
	if pair == nil {
		var nilPair *CandidatePair
		a.selectedPair.Store(nilPair)
		a.log.Tracef("Unset selected candidate pair")

		return
	}

	pair.nominated = true
	a.selectedPair.Store(pair)

	a.log.Tracef("Set selected candidate pair: %s", pair)

	a.updateConnectionState(ConnectionStateConnected)

	// Notify when the selected pair changes
	a.selectedCandidatePairNotifier.EnqueueSelectedCandidatePair(pair)

	// Signal connected
	a.onConnectedOnce.Do(func() { close(a.onConnected) })
}

func (a *Agent) pingAllCandidates() {
	a.log.Trace("Pinging all candidates")

	if len(a.checklist) == 0 {
		a.log.Warn("Failed to ping without candidate pairs. Connection is not possible yet.")
	}

	for _, pair := range a.checklist {
		if pair.state == CandidatePairStateWaiting {
			pair.state = CandidatePairStateInProgress
		} else if pair.state != CandidatePairStateInProgress {
			continue
		}

		if pair.bindingRequestCount > a.maxBindingRequests {
			a.log.Tracef("Maximum requests reached for pair %s, marking it as failed", pair)
			pair.state = CandidatePairStateFailed
		} else {
			a.selector.PingCandidate(pair.Local, pair.Remote)
			pair.bindingRequestCount++
		}
	}
}

func (a *Agent) getBestAvailableCandidatePair() *CandidatePair {
	var best *CandidatePair

	for _, pair := range a.checklist {
		if pair.state == CandidatePairStateFailed {
			continue
		}

		if best == nil {
			best = pair
		} else if best.priority() < pair.priority() {
			best = pair
		}
	}

	return best
}

func (a *Agent) getBestValidCandidatePair() *CandidatePair {
	var best *CandidatePair

	for _, pair := range a.checklist {
		if pair.state != CandidatePairStateSucceeded {
			continue
		}

		if best == nil {
			best = pair
		} else if best.priority() < pair.priority() {
			best = pair
		}
	}

	return best
}

func (a *Agent) addPair(local, remote Candidate) *CandidatePair {
	pair := newCandidatePair(local, remote, a.isControlling)
	a.checklist = append(a.checklist, pair)

	return pair
}

func (a *Agent) findPair(local, remote Candidate) *CandidatePair {
	for _, pair := range a.checklist {
		if pair.Local.Equal(local) && pair.Remote.Equal(remote) {
			return pair
		}
	}

	return nil
}

// validateSelectedPair checks if the selected pair is (still) valid
// Note: the caller should hold the agent lock.
func (a *Agent) validateSelectedPair() bool {
	selectedPair := a.getSelectedPair()
	if selectedPair == nil {
		return false
	}

	disconnectedTime := time.Since(selectedPair.Remote.LastReceived())

	// Only allow transitions to failed if a.failedTimeout is non-zero
	totalTimeToFailure := a.failedTimeout
	if totalTimeToFailure != 0 {
		totalTimeToFailure += a.disconnectedTimeout
	}

	switch {
	case totalTimeToFailure != 0 && disconnectedTime > totalTimeToFailure:
		a.updateConnectionState(ConnectionStateFailed)
	case a.disconnectedTimeout != 0 && disconnectedTime > a.disconnectedTimeout:
		a.updateConnectionState(ConnectionStateDisconnected)
	default:
		a.updateConnectionState(ConnectionStateConnected)
	}

	return true
}

// checkKeepalive sends STUN Binding Indications to the selected pair
// if no packet has been sent on that pair in the last keepaliveInterval
// Note: the caller should hold the agent lock.
func (a *Agent) checkKeepalive() {
	selectedPair := a.getSelectedPair()
	if selectedPair == nil {
		return
	}

	if a.keepaliveInterval != 0 {
		// We use binding request instead of indication to support refresh consent schemas
		// see https://tools.ietf.org/html/rfc7675
		a.selector.PingCandidate(selectedPair.Local, selectedPair.Remote)
	}
}

// AddRemoteCandidate adds a new remote candidate.
func (a *Agent) AddRemoteCandidate(c Candidate) error {
	if c == nil {
		return nil
	}

	// TCP Candidates with TCP type active will probe server passive ones, so
	// no need to do anything with them.
	if c.TCPType() == TCPTypeActive {
		a.log.Infof("Ignoring remote candidate with tcpType active: %s", c)

		return nil
	}

	// If we have a mDNS Candidate lets fully resolve it before adding it locally
	if c.Type() == CandidateTypeHost && strings.HasSuffix(c.Address(), ".local") {
		if a.mDNSMode == MulticastDNSModeDisabled {
			a.log.Warnf("Remote mDNS candidate added, but mDNS is disabled: (%s)", c.Address())

			return nil
		}

		hostCandidate, ok := c.(*CandidateHost)
		if !ok {
			return ErrAddressParseFailed
		}

		go a.resolveAndAddMulticastCandidate(hostCandidate)

		return nil
	}

	go func() {
		if err := a.loop.Run(a.loop, func(_ context.Context) { // nolint: contextcheck
			a.addRemoteCandidate(c)
		}); err != nil {
			a.log.Warnf("Failed to add remote candidate %s: %v", c.Address(), err)

			return
		}
	}()

	return nil
}

func (a *Agent) resolveAndAddMulticastCandidate(c *CandidateHost) {
	if a.mDNSConn == nil {
		return
	}

	_, ipAddr, err := a.mDNSConn.QueryAddr(c.context(), c.Address())
	if err != nil {
		a.log.Warnf("Failed to discover mDNS candidate %s: %v", c.Address(), err)

		return
	}

	if err = c.setIPAddr(ipAddr); err != nil {
		a.log.Warnf("Failed to discover mDNS candidate %s: %v", c.Address(), err)

		return
	}

	if err = a.loop.Run(a.loop, func(_ context.Context) { // nolint: contextcheck
		a.addRemoteCandidate(c)
	}); err != nil {
		a.log.Warnf("Failed to add mDNS candidate %s: %v", c.Address(), err)

		return
	}
}

func (a *Agent) requestConnectivityCheck() {
	select {
	case a.forceCandidateContact <- true:
	default:
	}
}

func (a *Agent) addRemotePassiveTCPCandidate(remoteCandidate Candidate) {
	_, localAddrs, err := localInterfaces(
		a.net, a.interfaceFilter, a.ipFilter, []NetworkType{remoteCandidate.NetworkType()}, a.includeLoopback,
	)
	if err != nil {
		a.log.Warnf("Failed to iterate local interfaces, host candidates will not be gathered %s", err)

		return
	}

	for i := range localAddrs {
		remoteAddr, _, _, err := parseAddr(remoteCandidate.addr())
		if err != nil {
			a.log.Warnf("Failed to parse address: %s; error: %s", remoteCandidate.addr(), err)

			continue
		}

		tcpConn := newActiveTCPConn(
			a.loop,
			net.JoinHostPort(localAddrs[i].String(), "0"),
			netip.AddrPortFrom(remoteAddr, uint16(remoteCandidate.Port())), //nolint:gosec // G115, no overflow, a port
			a.log,
		)

		tcpAddr, ok := tcpConn.LocalAddr().(*net.TCPAddr)
		if !ok {
			closeConnAndLog(tcpConn, a.log, "Failed to create Active ICE-TCP Candidate: %v", errInvalidAddress)

			continue
		}

		localCandidate, err := NewCandidateHost(&CandidateHostConfig{
			Network:   remoteCandidate.NetworkType().String(),
			Address:   localAddrs[i].String(),
			Port:      tcpAddr.Port,
			Component: ComponentRTP,
			TCPType:   TCPTypeActive,
		})
		if err != nil {
			closeConnAndLog(tcpConn, a.log, "Failed to create Active ICE-TCP Candidate: %v", err)

			continue
		}

		localCandidate.start(a, tcpConn, a.startedCh)
		a.localCandidates[localCandidate.NetworkType()] = append(
			a.localCandidates[localCandidate.NetworkType()],
			localCandidate,
		)
		a.candidateNotifier.EnqueueCandidate(localCandidate)

		a.addPair(localCandidate, remoteCandidate)
	}
}

// addRemoteCandidate assumes you are holding the lock (must be executed using a.run).
func (a *Agent) addRemoteCandidate(c Candidate) { //nolint:cyclop
	set := a.remoteCandidates[c.NetworkType()]

	for _, candidate := range set {
		if candidate.Equal(c) {
			return
		}
	}

	acceptRemotePassiveTCP := false
	// Assert that TCP4 or TCP6 is an enabled NetworkType locally
	if !a.disableActiveTCP && c.TCPType() == TCPTypePassive {
		for _, networkType := range a.networkTypes {
			if c.NetworkType() == networkType {
				acceptRemotePassiveTCP = true
			}
		}
	}
	if acceptRemotePassiveTCP {
		a.addRemotePassiveTCPCandidate(c)
	}

	set = append(set, c)
	a.remoteCandidates[c.NetworkType()] = set

	if c.TCPType() != TCPTypePassive {
		if localCandidates, ok := a.localCandidates[c.NetworkType()]; ok {
			for _, localCandidate := range localCandidates {
				a.addPair(localCandidate, c)
			}
		}
	}

	a.requestConnectivityCheck()
}

func (a *Agent) addCandidate(ctx context.Context, c Candidate, candidateConn net.PacketConn) error {
	return a.loop.Run(ctx, func(context.Context) {
		set := a.localCandidates[c.NetworkType()]
		for _, candidate := range set {
			if candidate.Equal(c) {
				a.log.Debugf("Ignore duplicate candidate: %s", c)
				if err := c.close(); err != nil {
					a.log.Warnf("Failed to close duplicate candidate: %v", err)
				}
				if err := candidateConn.Close(); err != nil {
					a.log.Warnf("Failed to close duplicate candidate connection: %v", err)
				}

				return
			}
		}

		a.setCandidateExtensions(c)

		c.start(a, candidateConn, a.startedCh)

		set = append(set, c)
		a.localCandidates[c.NetworkType()] = set

		if remoteCandidates, ok := a.remoteCandidates[c.NetworkType()]; ok {
			for _, remoteCandidate := range remoteCandidates {
				a.addPair(c, remoteCandidate)
			}
		}

		a.requestConnectivityCheck()

		if !c.filterForLocationTracking() {
			a.candidateNotifier.EnqueueCandidate(c)
		}
	})
}

func (a *Agent) setCandidateExtensions(c Candidate) {
	err := c.AddExtension(CandidateExtension{
		Key:   "ufrag",
		Value: a.localUfrag,
	})
	if err != nil {
		a.log.Errorf("Failed to add ufrag extension to candidate: %v", err)
	}
}
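// A hypothetical sketch (not part of the API) of feeding a remote candidate
// into the agent. It assumes the package's exported UnmarshalCandidate helper;
// mDNS (".local") host candidates are resolved asynchronously by
// AddRemoteCandidate as shown above.
func exampleAddRemoteCandidate(agent *Agent, raw string) error { //nolint:unused // illustrative sketch only
	remote, err := UnmarshalCandidate(raw)
	if err != nil {
		return err
	}

	// AddRemoteCandidate is non-blocking: pairing and connectivity checks are
	// scheduled on the agent's task loop.
	return agent.AddRemoteCandidate(remote)
}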
// GetRemoteCandidates returns the remote candidates.
func (a *Agent) GetRemoteCandidates() ([]Candidate, error) {
	var res []Candidate

	err := a.loop.Run(a.loop, func(_ context.Context) {
		var candidates []Candidate
		for _, set := range a.remoteCandidates {
			candidates = append(candidates, set...)
		}
		res = candidates
	})
	if err != nil {
		return nil, err
	}

	return res, nil
}

// GetLocalCandidates returns the local candidates.
func (a *Agent) GetLocalCandidates() ([]Candidate, error) {
	var res []Candidate

	err := a.loop.Run(a.loop, func(_ context.Context) {
		var candidates []Candidate
		for _, set := range a.localCandidates {
			for _, candidate := range set {
				if candidate.filterForLocationTracking() {
					continue
				}
				candidates = append(candidates, candidate)
			}
		}
		res = candidates
	})
	if err != nil {
		return nil, err
	}

	return res, nil
}

// GetLocalUserCredentials returns the local user credentials.
func (a *Agent) GetLocalUserCredentials() (frag string, pwd string, err error) {
	valSet := make(chan struct{})
	err = a.loop.Run(a.loop, func(_ context.Context) {
		frag = a.localUfrag
		pwd = a.localPwd
		close(valSet)
	})

	if err == nil {
		<-valSet
	}

	return
}

// GetRemoteUserCredentials returns the remote user credentials.
func (a *Agent) GetRemoteUserCredentials() (frag string, pwd string, err error) {
	valSet := make(chan struct{})
	err = a.loop.Run(a.loop, func(_ context.Context) {
		frag = a.remoteUfrag
		pwd = a.remotePwd
		close(valSet)
	})

	if err == nil {
		<-valSet
	}

	return
}

func (a *Agent) removeUfragFromMux() {
	if a.tcpMux != nil {
		a.tcpMux.RemoveConnByUfrag(a.localUfrag)
	}
	if a.udpMux != nil {
		a.udpMux.RemoveConnByUfrag(a.localUfrag)
	}
	if a.udpMuxSrflx != nil {
		a.udpMuxSrflx.RemoveConnByUfrag(a.localUfrag)
	}
}

// Close cleans up the Agent.
func (a *Agent) Close() error {
	return a.close(false)
}

// GracefulClose cleans up the Agent and waits for any goroutines it started
// to complete. This is only safe to call outside of Agent callbacks or if in a callback,
// in its own goroutine.
func (a *Agent) GracefulClose() error {
	return a.close(true)
}

func (a *Agent) close(graceful bool) error {
	// the loop is safe to wait on no matter what
	a.loop.Close()

	// but we are in less control of the notifiers, so we will
	// pass through `graceful`.
	a.connectionStateNotifier.Close(graceful)
	a.candidateNotifier.Close(graceful)
	a.selectedCandidatePairNotifier.Close(graceful)

	return nil
}
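// A hypothetical sketch of exchanging local parameters with a remote peer.
// The credentials and marshalled candidates read here are what the remote
// agent needs for its own SetRemoteCredentials and AddRemoteCandidate calls;
// the Marshal method on Candidate is assumed from elsewhere in this package.
func exampleReadLocalParameters(agent *Agent) (ufrag, pwd string, candidates []string, err error) { //nolint:unused // illustrative sketch only
	ufrag, pwd, err = agent.GetLocalUserCredentials()
	if err != nil {
		return "", "", nil, err
	}

	locals, err := agent.GetLocalCandidates()
	if err != nil {
		return "", "", nil, err
	}
	for _, c := range locals {
		candidates = append(candidates, c.Marshal())
	}

	return ufrag, pwd, candidates, nil
}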
// Remove all candidates. This closes any listening sockets
// and removes both the local and remote candidate lists.
//
// This is used for restarts, failures and on close.
func (a *Agent) deleteAllCandidates() {
	for networkType, candidates := range a.localCandidates {
		for _, candidate := range candidates {
			if err := candidate.close(); err != nil {
				a.log.Warnf("Failed to close candidate %s: %v", candidate, err)
			}
		}
		delete(a.localCandidates, networkType)
	}
	for networkType, candidates := range a.remoteCandidates {
		for _, candidate := range candidates {
			if err := candidate.close(); err != nil {
				a.log.Warnf("Failed to close candidate %s: %v", candidate, err)
			}
		}
		delete(a.remoteCandidates, networkType)
	}
}

func (a *Agent) findRemoteCandidate(networkType NetworkType, addr net.Addr) Candidate {
	ipAddr, port, _, err := parseAddr(addr)
	if err != nil {
		a.log.Warnf("Failed to parse address: %s; error: %s", addr, err)

		return nil
	}

	set := a.remoteCandidates[networkType]
	for _, candidate := range set {
		if candidate.Address() == ipAddr.String() && candidate.Port() == port {
			return candidate
		}
	}

	return nil
}

func (a *Agent) sendBindingRequest(msg *stun.Message, local, remote Candidate) {
	a.log.Tracef("Ping STUN from %s to %s", local, remote)

	a.invalidatePendingBindingRequests(time.Now())
	a.pendingBindingRequests = append(a.pendingBindingRequests, bindingRequest{
		timestamp:      time.Now(),
		transactionID:  msg.TransactionID,
		destination:    remote.addr(),
		isUseCandidate: msg.Contains(stun.AttrUseCandidate),
	})

	if pair := a.findPair(local, remote); pair != nil {
		pair.UpdateRequestSent()
	} else {
		a.log.Warnf("Failed to find pair for add binding request from %s to %s", local, remote)
	}

	a.sendSTUN(msg, local, remote)
}

func (a *Agent) sendBindingSuccess(msg *stun.Message, local, remote Candidate) {
	base := remote

	ipAddr, port, _, err := parseAddr(base.addr())
	if err != nil {
		a.log.Warnf("Failed to parse address: %s; error: %s", base.addr(), err)

		return
	}

	if out, err := stun.Build(msg, stun.BindingSuccess,
		&stun.XORMappedAddress{
			IP:   ipAddr.AsSlice(),
			Port: port,
		},
		stun.NewShortTermIntegrity(a.localPwd),
		stun.Fingerprint,
	); err != nil {
		a.log.Warnf("Failed to handle inbound ICE from: %s to: %s error: %s", local, remote, err)
	} else {
		if pair := a.findPair(local, remote); pair != nil {
			pair.UpdateResponseSent()
		} else {
			a.log.Warnf("Failed to find pair for add binding response from %s to %s", local, remote)
		}
		a.sendSTUN(out, local, remote)
	}
}

// Removes pending binding requests that are over maxBindingRequestTimeout old
//
// Let HTO be the transaction timeout, which SHOULD be 2*RTT if
// RTT is known or 500 ms otherwise.
// https://tools.ietf.org/html/rfc8445#appendix-B.1
func (a *Agent) invalidatePendingBindingRequests(filterTime time.Time) {
	initialSize := len(a.pendingBindingRequests)

	temp := a.pendingBindingRequests[:0]
	for _, bindingRequest := range a.pendingBindingRequests {
		if filterTime.Sub(bindingRequest.timestamp) < maxBindingRequestTimeout {
			temp = append(temp, bindingRequest)
		}
	}

	a.pendingBindingRequests = temp
	if discardedRequests := initialSize - len(a.pendingBindingRequests); discardedRequests > 0 {
		a.log.Tracef("Discarded %d binding requests because they expired", discardedRequests)
	}
}

// Assert that the passed TransactionID is in our pendingBindingRequests and returns the destination
// If the bindingRequest was valid remove it from our pending cache.
func (a *Agent) handleInboundBindingSuccess(id [stun.TransactionIDSize]byte) (bool, *bindingRequest, time.Duration) {
	a.invalidatePendingBindingRequests(time.Now())
	for i := range a.pendingBindingRequests {
		if a.pendingBindingRequests[i].transactionID == id {
			validBindingRequest := a.pendingBindingRequests[i]
			a.pendingBindingRequests = append(a.pendingBindingRequests[:i], a.pendingBindingRequests[i+1:]...)

			return true, &validBindingRequest, time.Since(validBindingRequest.timestamp)
		}
	}

	return false, nil, 0
}
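// A self-contained sketch (an assumed helper, mirroring the pruning above) of
// the RFC 8445 B.1 rule the agent applies: outstanding Binding Requests older
// than maxBindingRequestTimeout are treated as expired and dropped.
func examplePruneExpiredBindingRequests(pending []bindingRequest, now time.Time) []bindingRequest { //nolint:unused // illustrative sketch only
	kept := pending[:0]
	for _, req := range pending {
		if now.Sub(req.timestamp) < maxBindingRequestTimeout {
			kept = append(kept, req)
		}
	}

	return kept
}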
// handleInbound processes STUN traffic from a remote candidate.
func (a *Agent) handleInbound(msg *stun.Message, local Candidate, remote net.Addr) { //nolint:gocognit,cyclop
	var err error
	if msg == nil || local == nil {
		return
	}

	if msg.Type.Method != stun.MethodBinding ||
		!(msg.Type.Class == stun.ClassSuccessResponse ||
			msg.Type.Class == stun.ClassRequest ||
			msg.Type.Class == stun.ClassIndication) {
		a.log.Tracef("Unhandled STUN from %s to %s class(%s) method(%s)", remote, local, msg.Type.Class, msg.Type.Method)

		return
	}

	if a.isControlling {
		if msg.Contains(stun.AttrICEControlling) {
			a.log.Debug("Inbound STUN message: isControlling && a.isControlling == true")

			return
		} else if msg.Contains(stun.AttrUseCandidate) {
			a.log.Debug("Inbound STUN message: useCandidate && a.isControlling == true")

			return
		}
	} else {
		if msg.Contains(stun.AttrICEControlled) {
			a.log.Debug("Inbound STUN message: isControlled && a.isControlling == false")

			return
		}
	}

	remoteCandidate := a.findRemoteCandidate(local.NetworkType(), remote)
	if msg.Type.Class == stun.ClassSuccessResponse { //nolint:nestif
		if err = stun.MessageIntegrity([]byte(a.remotePwd)).Check(msg); err != nil {
			a.log.Warnf("Discard message from (%s), %v", remote, err)

			return
		}

		if remoteCandidate == nil {
			a.log.Warnf("Discard success message from (%s), no such remote", remote)

			return
		}

		a.selector.HandleSuccessResponse(msg, local, remoteCandidate, remote)
	} else if msg.Type.Class == stun.ClassRequest {
		a.log.Tracef(
			"Inbound STUN (Request) from %s to %s, useCandidate: %v",
			remote,
			local,
			msg.Contains(stun.AttrUseCandidate),
		)

		if err = stunx.AssertUsername(msg, a.localUfrag+":"+a.remoteUfrag); err != nil {
			a.log.Warnf("Discard message from (%s), %v", remote, err)

			return
		} else if err = stun.MessageIntegrity([]byte(a.localPwd)).Check(msg); err != nil {
			a.log.Warnf("Discard message from (%s), %v", remote, err)

			return
		}

		if remoteCandidate == nil {
			ipAddr, port, networkType, err := parseAddr(remote)
			if err != nil {
				a.log.Errorf("Failed to parse remote net.Addr when creating remote prflx candidate: %s", err)

				return
			}

			prflxCandidateConfig := CandidatePeerReflexiveConfig{
				Network:   networkType.String(),
				Address:   ipAddr.String(),
				Port:      port,
				Component: local.Component(),
				RelAddr:   "",
				RelPort:   0,
			}

			prflxCandidate, err := NewCandidatePeerReflexive(&prflxCandidateConfig)
			if err != nil {
				a.log.Errorf("Failed to create new remote prflx candidate (%s)", err)

				return
			}
			remoteCandidate = prflxCandidate

			a.log.Debugf("Adding a new peer-reflexive candidate: %s ", remote)
			a.addRemoteCandidate(remoteCandidate)
		}

		a.selector.HandleBindingRequest(msg, local, remoteCandidate)
	}

	if remoteCandidate != nil {
		remoteCandidate.seen(false)
	}
}

// validateNonSTUNTraffic processes non STUN traffic from a remote candidate,
// and returns true if it is an actual remote candidate.
func (a *Agent) validateNonSTUNTraffic(local Candidate, remote net.Addr) (Candidate, bool) {
	var remoteCandidate Candidate
	if err := a.loop.Run(local.context(), func(context.Context) {
		remoteCandidate = a.findRemoteCandidate(local.NetworkType(), remote)
		if remoteCandidate != nil {
			remoteCandidate.seen(false)
		}
	}); err != nil {
		a.log.Warnf("Failed to validate remote candidate: %v", err)
	}

	return remoteCandidate, remoteCandidate != nil
}

// GetSelectedCandidatePair returns the selected pair or nil if there is none.
func (a *Agent) GetSelectedCandidatePair() (*CandidatePair, error) {
	selectedPair := a.getSelectedPair()
	if selectedPair == nil {
		return nil, nil //nolint:nilnil
	}

	local, err := selectedPair.Local.copy()
	if err != nil {
		return nil, err
	}

	remote, err := selectedPair.Remote.copy()
	if err != nil {
		return nil, err
	}

	return &CandidatePair{Local: local, Remote: remote}, nil
}

func (a *Agent) getSelectedPair() *CandidatePair {
	if selectedPair, ok := a.selectedPair.Load().(*CandidatePair); ok {
		return selectedPair
	}

	return nil
}

func (a *Agent) closeMulticastConn() {
	if a.mDNSConn != nil {
		if err := a.mDNSConn.Close(); err != nil {
			a.log.Warnf("Failed to close mDNS Conn: %v", err)
		}
	}
}
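// A minimal sketch (assuming the pion/stun v3 builder API) of the
// authentication-relevant attributes a remote agent would place in a Binding
// Request so that handleInbound above accepts it: USERNAME is
// "<our ufrag>:<their ufrag>" and MESSAGE-INTEGRITY uses our local password.
// A real check also carries PRIORITY and ICE-CONTROLLED/ICE-CONTROLLING,
// which are omitted here.
func exampleBuildInboundBindingRequest(localUfrag, remoteUfrag, localPwd string) (*stun.Message, error) { //nolint:unused // illustrative sketch only
	return stun.Build(
		stun.BindingRequest,
		stun.TransactionID,
		stun.NewUsername(localUfrag+":"+remoteUfrag),
		stun.NewShortTermIntegrity(localPwd),
		stun.Fingerprint,
	)
}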
// SetRemoteCredentials sets the credentials of the remote agent.
func (a *Agent) SetRemoteCredentials(remoteUfrag, remotePwd string) error {
	switch {
	case remoteUfrag == "":
		return ErrRemoteUfragEmpty
	case remotePwd == "":
		return ErrRemotePwdEmpty
	}

	return a.loop.Run(a.loop, func(_ context.Context) {
		a.remoteUfrag = remoteUfrag
		a.remotePwd = remotePwd
	})
}

// Restart restarts the ICE Agent with the provided ufrag/pwd
// If no ufrag/pwd is provided the Agent will generate one itself
//
// If there is a gatherer routine currently running, Restart will
// cancel it.
// After a Restart, the user must then call GatherCandidates explicitly
// to start generating new ones.
func (a *Agent) Restart(ufrag, pwd string) error { //nolint:cyclop
	if ufrag == "" {
		var err error
		ufrag, err = generateUFrag()
		if err != nil {
			return err
		}
	}
	if pwd == "" {
		var err error
		pwd, err = generatePwd()
		if err != nil {
			return err
		}
	}

	if len([]rune(ufrag))*8 < 24 {
		return ErrLocalUfragInsufficientBits
	}
	if len([]rune(pwd))*8 < 128 {
		return ErrLocalPwdInsufficientBits
	}

	var err error
	if runErr := a.loop.Run(a.loop, func(_ context.Context) {
		if a.gatheringState == GatheringStateGathering {
			a.gatherCandidateCancel()
		}

		// Clear all agent needed to take back to fresh state
		a.removeUfragFromMux()
		a.localUfrag = ufrag
		a.localPwd = pwd
		a.remoteUfrag = ""
		a.remotePwd = ""
		a.gatheringState = GatheringStateNew
		a.checklist = make([]*CandidatePair, 0)
		a.pendingBindingRequests = make([]bindingRequest, 0)
		a.setSelectedPair(nil)
		a.deleteAllCandidates()

		if a.selector != nil {
			a.selector.Start()
		}

		// Restart is used by NewAgent. Accept/Connect should be used to move to checking
		// for new Agents
		if a.connectionState != ConnectionStateNew {
			a.updateConnectionState(ConnectionStateChecking)
		}
	}); runErr != nil {
		return runErr
	}

	return err
}

func (a *Agent) setGatheringState(newState GatheringState) error {
	done := make(chan struct{})
	if err := a.loop.Run(a.loop, func(context.Context) {
		if a.gatheringState != newState && newState == GatheringStateComplete {
			a.candidateNotifier.EnqueueCandidate(nil)
		}

		a.gatheringState = newState
		close(done)
	}); err != nil {
		return err
	}

	<-done

	return nil
}

func (a *Agent) needsToCheckPriorityOnNominated() bool {
	return !a.lite || a.enableUseCandidateCheckPriority
}
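// A hypothetical sketch of an ICE restart, following the contract documented
// on Restart above: after restarting, candidate gathering must be re-triggered
// explicitly by the caller.
func exampleRestart(agent *Agent) error { //nolint:unused // illustrative sketch only
	// Empty ufrag/pwd lets the agent generate fresh credentials.
	if err := agent.Restart("", ""); err != nil {
		return err
	}

	// Gathering does not restart automatically.
	return agent.GatherCandidates()
}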