package pubsub

import (
	"context"
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"

	manet "github.com/multiformats/go-multiaddr/net"
)

type peerStats struct {
	// true if the peer is currently connected
	connected bool

	// expiration time of the score stats for disconnected peers
	expire time.Time

	// per topic stats
	topics map[string]*topicStats

	// IP tracking; store as string for easy processing
	ips []string

	// IP whitelisting cache
	ipWhitelist map[string]bool

	// behavioural pattern penalties (applied by the router)
	behaviourPenalty float64
}

type topicStats struct {
	// true if the peer is in the mesh
	inMesh bool

	// time when the peer was (last) GRAFTed; valid only when in mesh
	graftTime time.Time

	// time in mesh (updated during refresh/decay to avoid calling gettimeofday on
	// every score invocation)
	meshTime time.Duration

	// first message deliveries
	firstMessageDeliveries float64

	// mesh message deliveries
	meshMessageDeliveries float64

	// true if the peer has been in the mesh long enough to activate mesh message deliveries
	meshMessageDeliveriesActive bool

	// sticky mesh rate failure penalty counter
	meshFailurePenalty float64

	// invalid message counter
	invalidMessageDeliveries float64
}

type peerScore struct {
	sync.Mutex

	// the score parameters
	params *PeerScoreParams

	// per peer stats for score calculation
	peerStats map[peer.ID]*peerStats

	// IP colocation tracking; maps IP => set of peers.
	peerIPs map[string]map[peer.ID]struct{}

	// message delivery tracking
	deliveries *messageDeliveries

	idGen *msgIDGenerator
	host  host.Host

	// debugging inspection
	inspect       PeerScoreInspectFn
	inspectEx     ExtendedPeerScoreInspectFn
	inspectPeriod time.Duration
}

var _ RawTracer = (*peerScore)(nil)

type messageDeliveries struct {
	seenMsgTTL time.Duration

	records map[string]*deliveryRecord

	// queue for cleaning up old delivery records
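	// entries are appended in firstSeen order, so gc only needs to scan from
	// the head until it reaches the first record that has not yet expired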
	head *deliveryEntry
	tail *deliveryEntry
}

type deliveryRecord struct {
	status    int
	firstSeen time.Time
	validated time.Time
	peers     map[peer.ID]struct{}
}

type deliveryEntry struct {
	id     string
	expire time.Time
	next   *deliveryEntry
}

// delivery record status
const (
	deliveryUnknown   = iota // we don't know (yet) if the message is valid
	deliveryValid            // we know the message is valid
	deliveryInvalid          // we know the message is invalid
	deliveryIgnored          // we were instructed by the validator to ignore the message
	deliveryThrottled        // we can't tell if it is valid because validation throttled
)
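
// A delivery record starts in deliveryUnknown when validation begins and is
// moved at most once to one of the other statuses by the Deliver/Reject
// traces; peers that forward a duplicate while the status is still unknown
// are remembered in deliveryRecord.peers and credited or penalized once the
// verdict arrives.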

type (
	PeerScoreInspectFn         = func(map[peer.ID]float64)
	ExtendedPeerScoreInspectFn = func(map[peer.ID]*PeerScoreSnapshot)
)

type PeerScoreSnapshot struct {
	Score              float64
	Topics             map[string]*TopicScoreSnapshot
	AppSpecificScore   float64
	IPColocationFactor float64
	BehaviourPenalty   float64
}

type TopicScoreSnapshot struct {
	TimeInMesh               time.Duration
	FirstMessageDeliveries   float64
	MeshMessageDeliveries    float64
	InvalidMessageDeliveries float64
}

// WithPeerScoreInspect is a gossipsub router option that enables peer score debugging.
// When this option is enabled, the supplied function will be invoked periodically to allow
// the application to inspect or dump the scores for connected peers.
// The supplied function can have one of two signatures:
//   - PeerScoreInspectFn, which takes a map of peer IDs to scores.
//   - ExtendedPeerScoreInspectFn, which takes a map of peer IDs to
//     PeerScoreSnapshots and allows inspection of individual score
//     components for debugging peer scoring.
//
// This option must be passed _after_ the WithPeerScore option.
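//
// A minimal usage sketch (the inspect handler and parameter values are
// illustrative, not part of the API):
//
//	inspect := func(scores map[peer.ID]float64) {
//		for p, score := range scores {
//			fmt.Printf("peer %s: score %f\n", p, score)
//		}
//	}
//	ps, err := NewGossipSub(ctx, h,
//		WithPeerScore(params, thresholds),
//		WithPeerScoreInspect(inspect, time.Minute))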
func WithPeerScoreInspect(inspect interface{}, period time.Duration) Option {
	return func(ps *PubSub) error {
		gs, ok := ps.rt.(*GossipSubRouter)
		if !ok {
			return fmt.Errorf("pubsub router is not gossipsub")
		}

		if gs.score == nil {
			return fmt.Errorf("peer scoring is not enabled")
		}

		if gs.score.inspect != nil || gs.score.inspectEx != nil {
			return fmt.Errorf("duplicate peer score inspector")
		}

		switch i := inspect.(type) {
		case PeerScoreInspectFn:
			gs.score.inspect = i
		case ExtendedPeerScoreInspectFn:
			gs.score.inspectEx = i
		default:
			return fmt.Errorf("unknown peer score inspector type: %v", i)
		}

		gs.score.inspectPeriod = period

		return nil
	}
}

// implementation
func newPeerScore(params *PeerScoreParams) *peerScore {
	seenMsgTTL := params.SeenMsgTTL
	if seenMsgTTL == 0 {
		seenMsgTTL = TimeCacheDuration
	}
	return &peerScore{
		params:     params,
		peerStats:  make(map[peer.ID]*peerStats),
		peerIPs:    make(map[string]map[peer.ID]struct{}),
		deliveries: &messageDeliveries{seenMsgTTL: seenMsgTTL, records: make(map[string]*deliveryRecord)},
		idGen:      newMsgIdGenerator(),
	}
}

// SetTopicScoreParams sets new score parameters for a topic.
// If the topic previously had parameters and the parameters are lowering delivery caps,
// then the score counters are recapped appropriately.
// Note: assumes that the topic score parameters have already been validated
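// For example, if FirstMessageDeliveriesCap is lowered from 100 to 50, any
// peer whose counter is currently above 50 has it clamped to 50 below.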
func (ps *peerScore) SetTopicScoreParams(topic string, p *TopicScoreParams) error {
	ps.Lock()
	defer ps.Unlock()

	old, exist := ps.params.Topics[topic]
	ps.params.Topics[topic] = p

	if !exist {
		return nil
	}

	// check to see if the counter Caps are being lowered; if that's the case we need to recap them
	recap := false
	if p.FirstMessageDeliveriesCap < old.FirstMessageDeliveriesCap {
		recap = true
	}
	if p.MeshMessageDeliveriesCap < old.MeshMessageDeliveriesCap {
		recap = true
	}
	if !recap {
		return nil
	}

	// recap counters for topic
	for _, pstats := range ps.peerStats {
		tstats, ok := pstats.topics[topic]
		if !ok {
			continue
		}

		if tstats.firstMessageDeliveries > p.FirstMessageDeliveriesCap {
			tstats.firstMessageDeliveries = p.FirstMessageDeliveriesCap
		}

		if tstats.meshMessageDeliveries > p.MeshMessageDeliveriesCap {
			tstats.meshMessageDeliveries = p.MeshMessageDeliveriesCap
		}
	}

	return nil
}

// router interface
func (ps *peerScore) Start(gs *GossipSubRouter) {
	if ps == nil {
		return
	}

	ps.idGen = gs.p.idGen
	ps.host = gs.p.host
	go ps.background(gs.p.ctx)
}

func (ps *peerScore) Score(p peer.ID) float64 {
	if ps == nil {
		return 0
	}

	ps.Lock()
	defer ps.Unlock()

	return ps.score(p)
}

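// score computes the raw score for a peer; callers must hold the lock (Score
// is the locked variant). Informally, following the gossipsub v1.1 score
// function, it evaluates
//
//	score = cap(Σ_t w_t·(P1 + P2 + P3 + P3b + P4)) + w5·P5 + w6·P6 + w7·P7
//
// where P1..P4 are the per-topic counters below, the topic sum is capped by
// TopicScoreCap when positive, and P5 (app-specific), P6 (IP colocation) and
// P7 (behaviour penalty) are peer-global components.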
func (ps *peerScore) score(p peer.ID) float64 {
	pstats, ok := ps.peerStats[p]
	if !ok {
		return 0
	}

	var score float64

	// topic scores
	for topic, tstats := range pstats.topics {
		// the topic parameters
		topicParams, ok := ps.params.Topics[topic]
		if !ok {
			// we are not scoring this topic
			continue
		}

		// the topic score
		var topicScore float64

		// P1: time in mesh
		if tstats.inMesh {
			p1 := float64(tstats.meshTime / topicParams.TimeInMeshQuantum)
			if p1 > topicParams.TimeInMeshCap {
				p1 = topicParams.TimeInMeshCap
			}
			topicScore += p1 * topicParams.TimeInMeshWeight
		}

		// P2: first message deliveries
		p2 := tstats.firstMessageDeliveries
		topicScore += p2 * topicParams.FirstMessageDeliveriesWeight

		// P3: mesh message deliveries
		if tstats.meshMessageDeliveriesActive {
			if tstats.meshMessageDeliveries < topicParams.MeshMessageDeliveriesThreshold {
				deficit := topicParams.MeshMessageDeliveriesThreshold - tstats.meshMessageDeliveries
				p3 := deficit * deficit
				topicScore += p3 * topicParams.MeshMessageDeliveriesWeight
			}
		}

		// P3b: sticky mesh failure penalty
		// NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts.
		p3b := tstats.meshFailurePenalty
		topicScore += p3b * topicParams.MeshFailurePenaltyWeight

		// P4: invalid messages
		// NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts.
		p4 := tstats.invalidMessageDeliveries * tstats.invalidMessageDeliveries
		topicScore += p4 * topicParams.InvalidMessageDeliveriesWeight

		// update score, mixing with topic weight
		score += topicScore * topicParams.TopicWeight
	}

	// apply the topic score cap, if any
	if ps.params.TopicScoreCap > 0 && score > ps.params.TopicScoreCap {
		score = ps.params.TopicScoreCap
	}

	// P5: application-specific score
	p5 := ps.params.AppSpecificScore(p)
	score += p5 * ps.params.AppSpecificWeight

	// P6: IP colocation factor
	p6 := ps.ipColocationFactor(p)
	score += p6 * ps.params.IPColocationFactorWeight

	// P7: behavioural pattern penalty
	if pstats.behaviourPenalty > ps.params.BehaviourPenaltyThreshold {
		excess := pstats.behaviourPenalty - ps.params.BehaviourPenaltyThreshold
		p7 := excess * excess
		score += p7 * ps.params.BehaviourPenaltyWeight
	}

	return score
}

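// ipColocationFactor computes the raw P6 component for a peer. As a worked
// example with illustrative numbers: with IPColocationFactorThreshold = 1 and
// four non-whitelisted peers sharing one IP, each of those peers accrues a
// factor of (4-1)^2 = 9, which score() then multiplies by the negative
// IPColocationFactorWeight.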
func (ps *peerScore) ipColocationFactor(p peer.ID) float64 {
	pstats, ok := ps.peerStats[p]
	if !ok {
		return 0
	}

	var result float64
loop:
	for _, ip := range pstats.ips {
		if len(ps.params.IPColocationFactorWhitelist) > 0 {
			if pstats.ipWhitelist == nil {
				pstats.ipWhitelist = make(map[string]bool)
			}

			whitelisted, ok := pstats.ipWhitelist[ip]
			if !ok {
				ipObj := net.ParseIP(ip)
				for _, ipNet := range ps.params.IPColocationFactorWhitelist {
					if ipNet.Contains(ipObj) {
						pstats.ipWhitelist[ip] = true
						continue loop
					}
				}

				pstats.ipWhitelist[ip] = false
			}

			if whitelisted {
				continue loop
			}
		}

		// P6 has a cliff (IPColocationFactorThreshold); it's only applied when
		// more than that many peers are connected to us from the same source
		// IP address. It is quadratic, and the weight is negative (validated
		// by PeerScoreParams.validate).
		peersInIP := len(ps.peerIPs[ip])
		if peersInIP > ps.params.IPColocationFactorThreshold {
			surplus := float64(peersInIP - ps.params.IPColocationFactorThreshold)
			result += surplus * surplus
		}
	}

	return result
}

// behavioural pattern penalties
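// AddPenalty counts a behavioural pattern offence reported by the router (for
// example, a GRAFT received while we are backing off, or a broken gossip
// promise); the counter decays in refreshScores and is squared above
// BehaviourPenaltyThreshold when applied as P7 in score().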
func (ps *peerScore) AddPenalty(p peer.ID, count int) {
	if ps == nil {
		return
	}

	ps.Lock()
	defer ps.Unlock()

	pstats, ok := ps.peerStats[p]
	if !ok {
		return
	}

	pstats.behaviourPenalty += float64(count)
}

// periodic maintenance
func (ps *peerScore) background(ctx context.Context) {
	refreshScores := time.NewTicker(ps.params.DecayInterval)
	defer refreshScores.Stop()

	refreshIPs := time.NewTicker(time.Minute)
	defer refreshIPs.Stop()

	gcDeliveryRecords := time.NewTicker(time.Minute)
	defer gcDeliveryRecords.Stop()

	var inspectScores <-chan time.Time
	if ps.inspect != nil || ps.inspectEx != nil {
		ticker := time.NewTicker(ps.inspectPeriod)
		defer ticker.Stop()
		// also dump at exit for one final sample
		defer ps.inspectScores()
		inspectScores = ticker.C
	}

	for {
		select {
		case <-refreshScores.C:
			ps.refreshScores()

		case <-refreshIPs.C:
			ps.refreshIPs()

		case <-gcDeliveryRecords.C:
			ps.gcDeliveryRecords()

		case <-inspectScores:
			ps.inspectScores()

		case <-ctx.Done():
			return
		}
	}
}

// inspectScores dumps all tracked scores into the inspect function.
func (ps *peerScore) inspectScores() {
	if ps.inspect != nil {
		ps.inspectScoresSimple()
	}
	if ps.inspectEx != nil {
		ps.inspectScoresExtended()
	}
}

func (ps *peerScore) inspectScoresSimple() {
	ps.Lock()
	scores := make(map[peer.ID]float64, len(ps.peerStats))
	for p := range ps.peerStats {
		scores[p] = ps.score(p)
	}
	ps.Unlock()

	// Since this is a user-injected function, it could be performing I/O, and
	// we don't want to block the scorer's background loop. Therefore, we launch
	// it in a separate goroutine. If the function needs to synchronise, it
	// should do so locally.
	go ps.inspect(scores)
}

func (ps *peerScore) inspectScoresExtended() {
	ps.Lock()
	scores := make(map[peer.ID]*PeerScoreSnapshot, len(ps.peerStats))
	for p, pstats := range ps.peerStats {
		pss := new(PeerScoreSnapshot)
		pss.Score = ps.score(p)
		if len(pstats.topics) > 0 {
			pss.Topics = make(map[string]*TopicScoreSnapshot, len(pstats.topics))
			for topic, ts := range pstats.topics {
				tss := &TopicScoreSnapshot{
					FirstMessageDeliveries:   ts.firstMessageDeliveries,
					MeshMessageDeliveries:    ts.meshMessageDeliveries,
					InvalidMessageDeliveries: ts.invalidMessageDeliveries,
				}
				if ts.inMesh {
					tss.TimeInMesh = ts.meshTime
				}
				pss.Topics[topic] = tss
			}
		}
		pss.AppSpecificScore = ps.params.AppSpecificScore(p)
		pss.IPColocationFactor = ps.ipColocationFactor(p)
		pss.BehaviourPenalty = pstats.behaviourPenalty
		scores[p] = pss
	}
	ps.Unlock()

	go ps.inspectEx(scores)
}

// refreshScores decays scores, and purges score records for disconnected peers,
// once their expiry has elapsed.
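// For example, with a decay factor of 0.9 per DecayInterval and DecayToZero =
// 0.01, a counter at 1.0 that receives no further increments is zeroed after
// about 44 intervals (0.9^44 ≈ 0.0097 < 0.01).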
func (ps *peerScore) refreshScores() {
	ps.Lock()
	defer ps.Unlock()

	now := time.Now()
	for p, pstats := range ps.peerStats {
		if !pstats.connected {
			// has the retention period expired?
			if now.After(pstats.expire) {
				// yes, throw it away (but clean up the IP tracking first)
				ps.removeIPs(p, pstats.ips)
				delete(ps.peerStats, p)
			}

			// we don't decay retained scores, as the peer is not active.
			// this way the peer cannot reset a negative score by simply disconnecting and reconnecting,
			// unless the retention period has elapsed.
			// similarly, a well-behaved peer does not lose its score by getting disconnected.
			continue
		}

		for topic, tstats := range pstats.topics {
			// the topic parameters
			topicParams, ok := ps.params.Topics[topic]
			if !ok {
				// we are not scoring this topic
				continue
			}

			// decay counters
			tstats.firstMessageDeliveries *= topicParams.FirstMessageDeliveriesDecay
			if tstats.firstMessageDeliveries < ps.params.DecayToZero {
				tstats.firstMessageDeliveries = 0
			}
			tstats.meshMessageDeliveries *= topicParams.MeshMessageDeliveriesDecay
			if tstats.meshMessageDeliveries < ps.params.DecayToZero {
				tstats.meshMessageDeliveries = 0
			}
			tstats.meshFailurePenalty *= topicParams.MeshFailurePenaltyDecay
			if tstats.meshFailurePenalty < ps.params.DecayToZero {
				tstats.meshFailurePenalty = 0
			}
			tstats.invalidMessageDeliveries *= topicParams.InvalidMessageDeliveriesDecay
			if tstats.invalidMessageDeliveries < ps.params.DecayToZero {
				tstats.invalidMessageDeliveries = 0
			}
			// update mesh time and activate mesh message delivery parameter if need be
			if tstats.inMesh {
				tstats.meshTime = now.Sub(tstats.graftTime)
				if tstats.meshTime > topicParams.MeshMessageDeliveriesActivation {
					tstats.meshMessageDeliveriesActive = true
				}
			}
		}

		// decay P7 counter
		pstats.behaviourPenalty *= ps.params.BehaviourPenaltyDecay
		if pstats.behaviourPenalty < ps.params.DecayToZero {
			pstats.behaviourPenalty = 0
		}
	}
}

// refreshIPs refreshes IPs we know of peers we're tracking.
func (ps *peerScore) refreshIPs() {
	ps.Lock()
	defer ps.Unlock()

	// peer IPs may change, so we periodically refresh them
	//
	// TODO: it could be more efficient to collect connections for all peers
	// from the Network, populate a new map, and replace it in place. We are
	// incurring those allocations anyway, and possibly more, in the form of
	// slices.
	for p, pstats := range ps.peerStats {
		if pstats.connected {
			ips := ps.getIPs(p)
			ps.setIPs(p, ips, pstats.ips)
			pstats.ips = ips
		}
	}
}

func (ps *peerScore) gcDeliveryRecords() {
	ps.Lock()
	defer ps.Unlock()

	ps.deliveries.gc()
}

// tracer interface
func (ps *peerScore) AddPeer(p peer.ID, proto protocol.ID) {
	ps.Lock()
	defer ps.Unlock()

	pstats, ok := ps.peerStats[p]
	if !ok {
		pstats = &peerStats{topics: make(map[string]*topicStats)}
		ps.peerStats[p] = pstats
	}

	pstats.connected = true
	ips := ps.getIPs(p)
	ps.setIPs(p, ips, pstats.ips)
	pstats.ips = ips
}

func (ps *peerScore) RemovePeer(p peer.ID) {
	ps.Lock()
	defer ps.Unlock()

	pstats, ok := ps.peerStats[p]
	if !ok {
		return
	}

	// decide whether to retain the score; this currently only retains non-positive scores
	// to dissuade attacks on the score function.
	if ps.score(p) > 0 {
		ps.removeIPs(p, pstats.ips)
		delete(ps.peerStats, p)
		return
	}

	// furthermore, when we decide to retain the score, the firstMessageDelivery counters are
	// reset to 0 and mesh delivery penalties applied.
	for topic, tstats := range pstats.topics {
		tstats.firstMessageDeliveries = 0

		threshold := ps.params.Topics[topic].MeshMessageDeliveriesThreshold
		if tstats.inMesh && tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold {
			deficit := threshold - tstats.meshMessageDeliveries
			tstats.meshFailurePenalty += deficit * deficit
		}

		tstats.inMesh = false
	}

	pstats.connected = false
	pstats.expire = time.Now().Add(ps.params.RetainScore)
}

func (ps *peerScore) Join(topic string)  {}
func (ps *peerScore) Leave(topic string) {}

func (ps *peerScore) Graft(p peer.ID, topic string) {
	ps.Lock()
	defer ps.Unlock()

	pstats, ok := ps.peerStats[p]
	if !ok {
		return
	}

	tstats, ok := pstats.getTopicStats(topic, ps.params)
	if !ok {
		return
	}

	tstats.inMesh = true
	tstats.graftTime = time.Now()
	tstats.meshTime = 0
	tstats.meshMessageDeliveriesActive = false
}

func (ps *peerScore) Prune(p peer.ID, topic string) {
	ps.Lock()
	defer ps.Unlock()

	pstats, ok := ps.peerStats[p]
	if !ok {
		return
	}

	tstats, ok := pstats.getTopicStats(topic, ps.params)
	if !ok {
		return
	}

	// sticky mesh delivery rate failure penalty
	threshold := ps.params.Topics[topic].MeshMessageDeliveriesThreshold
	if tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold {
		deficit := threshold - tstats.meshMessageDeliveries
		tstats.meshFailurePenalty += deficit * deficit
	}

	tstats.inMesh = false
}

func (ps *peerScore) ValidateMessage(msg *Message) {
	ps.Lock()
	defer ps.Unlock()

	// the pubsub subsystem is beginning validation; create a record to track time in
	// the validation pipeline with an accurate firstSeen time.
	_ = ps.deliveries.getRecord(ps.idGen.ID(msg))
}

func (ps *peerScore) DeliverMessage(msg *Message) {
	ps.Lock()
	defer ps.Unlock()

	ps.markFirstMessageDelivery(msg.ReceivedFrom, msg)

	drec := ps.deliveries.getRecord(ps.idGen.ID(msg))

	// defensive check that this is the first delivery trace -- delivery status should be unknown
	if drec.status != deliveryUnknown {
		log.Debugf("unexpected delivery trace: message from %s was first seen %s ago and has delivery status %d", msg.ReceivedFrom, time.Since(drec.firstSeen), drec.status)
		return
	}

	// mark the message as valid and reward mesh peers that have already forwarded it to us
	drec.status = deliveryValid
	drec.validated = time.Now()
	for p := range drec.peers {
		// this check is to make sure a peer can't send us a message twice and get a double count
		// if it is a first delivery.
		if p != msg.ReceivedFrom {
			ps.markDuplicateMessageDelivery(p, msg, time.Time{})
		}
	}
}

func (ps *peerScore) RejectMessage(msg *Message, reason string) {
	ps.Lock()
	defer ps.Unlock()

	switch reason {
	// we don't track those messages, but we penalize the peer as they are clearly invalid
	case RejectMissingSignature:
		fallthrough
	case RejectInvalidSignature:
		fallthrough
	case RejectUnexpectedSignature:
		fallthrough
	case RejectUnexpectedAuthInfo:
		fallthrough
	case RejectSelfOrigin:
		ps.markInvalidMessageDelivery(msg.ReceivedFrom, msg)
		return

	// we ignore those messages, so do nothing.
	case RejectBlacklstedPeer:
		fallthrough
	case RejectBlacklistedSource:
		return

	case RejectValidationQueueFull:
		// the message was rejected before it entered the validation pipeline;
		// we don't know if this message has a valid signature, and thus we also don't know if
		// it has a valid message ID; all we can do is ignore it.
		return
	}

	drec := ps.deliveries.getRecord(ps.idGen.ID(msg))

	// defensive check that this is the first rejection trace -- delivery status should be unknown
	if drec.status != deliveryUnknown {
		log.Debugf("unexpected rejection trace: message from %s was first seen %s ago and has delivery status %d", msg.ReceivedFrom, time.Since(drec.firstSeen), drec.status)
		return
	}

	switch reason {
	case RejectValidationThrottled:
		// if we reject with "validation throttled" we don't penalize the peer(s) that forwarded it
		// because we don't know if it was valid.
		drec.status = deliveryThrottled
		// release the delivery time tracking map to free some memory early
		drec.peers = nil
		return
	case RejectValidationIgnored:
		// we were explicitly instructed by the validator to ignore the message but not to penalize
		// the peer
		drec.status = deliveryIgnored
		drec.peers = nil
		return
	}

	// mark the message as invalid and penalize peers that have already forwarded it.
	drec.status = deliveryInvalid

	ps.markInvalidMessageDelivery(msg.ReceivedFrom, msg)
	for p := range drec.peers {
		ps.markInvalidMessageDelivery(p, msg)
	}

	// release the delivery time tracking map to free some memory early
	drec.peers = nil
}

func (ps *peerScore) DuplicateMessage(msg *Message) {
	ps.Lock()
	defer ps.Unlock()

	drec := ps.deliveries.getRecord(ps.idGen.ID(msg))

	_, ok := drec.peers[msg.ReceivedFrom]
	if ok {
		// we have already seen this duplicate!
		return
	}

	switch drec.status {
	case deliveryUnknown:
		// the message is being validated; track the peer delivery and wait for
		// the Deliver/Reject notification.
		drec.peers[msg.ReceivedFrom] = struct{}{}

	case deliveryValid:
		// mark the peer delivery time to only count a duplicate delivery once.
		drec.peers[msg.ReceivedFrom] = struct{}{}
		ps.markDuplicateMessageDelivery(msg.ReceivedFrom, msg, drec.validated)

	case deliveryInvalid:
		// we no longer track delivery time
		ps.markInvalidMessageDelivery(msg.ReceivedFrom, msg)

	case deliveryThrottled:
		// the message was throttled; do nothing (we don't know if it was valid)
	case deliveryIgnored:
		// the message was ignored; do nothing
	}
}

func (ps *peerScore) ThrottlePeer(p peer.ID) {}

func (ps *peerScore) RecvRPC(rpc *RPC) {}

func (ps *peerScore) SendRPC(rpc *RPC, p peer.ID) {}

func (ps *peerScore) DropRPC(rpc *RPC, p peer.ID) {}

func (ps *peerScore) UndeliverableMessage(msg *Message) {}

// message delivery records
func (d *messageDeliveries) getRecord(id string) *deliveryRecord {
	rec, ok := d.records[id]
	if ok {
		return rec
	}

	now := time.Now()

	rec = &deliveryRecord{peers: make(map[peer.ID]struct{}), firstSeen: now}
	d.records[id] = rec

	entry := &deliveryEntry{id: id, expire: now.Add(d.seenMsgTTL)}
	if d.tail != nil {
		d.tail.next = entry
		d.tail = entry
	} else {
		d.head = entry
		d.tail = entry
	}

	return rec
}

func (d *messageDeliveries) gc() {
	if d.head == nil {
		return
	}

	now := time.Now()
	for d.head != nil && now.After(d.head.expire) {
		delete(d.records, d.head.id)
		d.head = d.head.next
	}

	if d.head == nil {
		d.tail = nil
	}
}

// getTopicStats returns existing topic stats for a given (peer, topic) tuple,
// or initialises a new topicStats object and inserts it in the peerStats,
// iff the topic is scored.
func (pstats *peerStats) getTopicStats(topic string, params *PeerScoreParams) (*topicStats, bool) {
	tstats, ok := pstats.topics[topic]
	if ok {
		return tstats, true
	}

	_, scored := params.Topics[topic]
	if !scored {
		return nil, false
	}

	tstats = &topicStats{}
	pstats.topics[topic] = tstats

	return tstats, true
}

// markInvalidMessageDelivery increments the "invalid message deliveries"
// counter for the scored topic the message is published in.
func (ps *peerScore) markInvalidMessageDelivery(p peer.ID, msg *Message) {
	pstats, ok := ps.peerStats[p]
	if !ok {
		return
	}

	topic := msg.GetTopic()
	tstats, ok := pstats.getTopicStats(topic, ps.params)
	if !ok {
		return
	}

	tstats.invalidMessageDeliveries += 1
}

// markFirstMessageDelivery increments the "first message deliveries" counter
// for the scored topic the message is published in, as well as the "mesh
// message deliveries" counter, if the peer is in the mesh for the topic.
func (ps *peerScore) markFirstMessageDelivery(p peer.ID, msg *Message) {
	pstats, ok := ps.peerStats[p]
	if !ok {
		return
	}

	topic := msg.GetTopic()
	tstats, ok := pstats.getTopicStats(topic, ps.params)
	if !ok {
		return
	}

	cap := ps.params.Topics[topic].FirstMessageDeliveriesCap
	tstats.firstMessageDeliveries += 1
	if tstats.firstMessageDeliveries > cap {
		tstats.firstMessageDeliveries = cap
	}

	if !tstats.inMesh {
		return
	}

	cap = ps.params.Topics[topic].MeshMessageDeliveriesCap
	tstats.meshMessageDeliveries += 1
	if tstats.meshMessageDeliveries > cap {
		tstats.meshMessageDeliveries = cap
	}
}

// markDuplicateMessageDelivery increments the "mesh message deliveries" counter
// for messages we've seen before, as long as the message was received within
// the P3 window.
func (ps *peerScore) markDuplicateMessageDelivery(p peer.ID, msg *Message, validated time.Time) {
	pstats, ok := ps.peerStats[p]
	if !ok {
		return
	}

	topic := msg.GetTopic()
	tstats, ok := pstats.getTopicStats(topic, ps.params)
	if !ok {
		return
	}

	if !tstats.inMesh {
		return
	}

	tparams := ps.params.Topics[topic]

	// check against the mesh delivery window -- if the validated time is passed as 0, then
	// the message was received before we finished validation and thus falls within the mesh
	// delivery window.
	if !validated.IsZero() && time.Since(validated) > tparams.MeshMessageDeliveriesWindow {
		return
	}

	cap := tparams.MeshMessageDeliveriesCap
	tstats.meshMessageDeliveries += 1
	if tstats.meshMessageDeliveries > cap {
		tstats.meshMessageDeliveries = cap
	}
}

// getIPs gets the current IPs for a peer.
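// For IPv6, both the full address and its /64 prefix are recorded, so (for an
// illustrative address) a peer at 2001:db8::1 is tracked under "2001:db8::1"
// and "2001:db8::", letting colocation be counted per /64 as well as per host.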
func (ps *peerScore) getIPs(p peer.ID) []string {
	// in unit tests this can be nil
	if ps.host == nil {
		return nil
	}

	conns := ps.host.Network().ConnsToPeer(p)
	res := make([]string, 0, 1)
	for _, c := range conns {
		if c.Stat().Limited {
			// ignore transient connections
			continue
		}

		remote := c.RemoteMultiaddr()
		ip, err := manet.ToIP(remote)
		if err != nil {
			continue
		}

		// ignore loopback addresses; loopback is used in unit tests
		if ip.IsLoopback() {
			continue
		}

		if len(ip.To4()) == 4 {
			// IPv4 address
			ip4 := ip.String()
			res = append(res, ip4)
		} else {
			// IPv6 address -- we add both the actual address and the /64 subnet
			ip6 := ip.String()
			res = append(res, ip6)

			ip6mask := ip.Mask(net.CIDRMask(64, 128)).String()
			res = append(res, ip6mask)
		}
	}

	return res
}

// setIPs adds tracking for the new IPs in the list, and removes tracking from
// the obsolete IPs.
func (ps *peerScore) setIPs(p peer.ID, newips, oldips []string) {
addNewIPs:
	// add the new IPs to the tracking
	for _, ip := range newips {
		// check if it is in the old ips list
		for _, xip := range oldips {
			if ip == xip {
				continue addNewIPs
			}
		}
		// no, it's a new one -- add it to the tracker
		peers, ok := ps.peerIPs[ip]
		if !ok {
			peers = make(map[peer.ID]struct{})
			ps.peerIPs[ip] = peers
		}
		peers[p] = struct{}{}
	}

removeOldIPs:
	// remove the obsolete old IPs from the tracking
	for _, ip := range oldips {
		// check if it is in the new ips list
		for _, xip := range newips {
			if ip == xip {
				continue removeOldIPs
			}
		}
		// no, it's obsolete -- remove it from the tracker
		peers, ok := ps.peerIPs[ip]
		if !ok {
			continue
		}
		delete(peers, p)
		if len(peers) == 0 {
			delete(ps.peerIPs, ip)
		}
	}
}

// removeIPs removes an IP list from the tracking list for a peer.
func (ps *peerScore) removeIPs(p peer.ID, ips []string) {
	for _, ip := range ips {
		peers, ok := ps.peerIPs[ip]
		if !ok {
			continue
		}

		delete(peers, p)
		if len(peers) == 0 {
			delete(ps.peerIPs, ip)
		}
	}
}