package pstoremem

import (
	"container/heap"
	"context"
	"errors"
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/record"

	logging "github.com/ipfs/go-log/v2"
	ma "github.com/multiformats/go-multiaddr"
)

var log = logging.Logger("peerstore")

type expiringAddr struct {
	Addr   ma.Multiaddr
	TTL    time.Duration
	Expiry time.Time
	Peer   peer.ID
	// heapIndex is the index in the expiry heap; -1 means the address is not in the heap.
	heapIndex int
}

func (e *expiringAddr) ExpiredBy(t time.Time) bool {
	return !t.Before(e.Expiry)
}

func (e *expiringAddr) IsConnected() bool {
	return ttlIsConnected(e.TTL)
}

// ttlIsConnected returns true if the TTL is at least as long as the connected
// TTL.
func ttlIsConnected(ttl time.Duration) bool {
	return ttl >= peerstore.ConnectedAddrTTL
}
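
// Illustrative comparison using the TTL constants from the core peerstore
// package (values are defined there, not in this file):
//
//	ttlIsConnected(peerstore.ConnectedAddrTTL) // true: connected-grade TTL
//	ttlIsConnected(peerstore.TempAddrTTL)      // false: temporary addresses expire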

type peerRecordState struct {
	Envelope *record.Envelope
	Seq      uint64
}

// Essentially Go stdlib's Priority Queue example
var _ heap.Interface = &peerAddrs{}

type peerAddrs struct {
	Addrs map[peer.ID]map[string]*expiringAddr // peer.ID -> addr.Bytes() -> *expiringAddr
	// expiringHeap only stores non-connected addresses, since connected
	// addresses effectively have an infinite TTL.
	expiringHeap []*expiringAddr
}

func newPeerAddrs() peerAddrs {
	return peerAddrs{
		Addrs: make(map[peer.ID]map[string]*expiringAddr),
	}
}

func (pa *peerAddrs) Len() int { return len(pa.expiringHeap) }
func (pa *peerAddrs) Less(i, j int) bool {
	return pa.expiringHeap[i].Expiry.Before(pa.expiringHeap[j].Expiry)
}
func (pa *peerAddrs) Swap(i, j int) {
	pa.expiringHeap[i], pa.expiringHeap[j] = pa.expiringHeap[j], pa.expiringHeap[i]
	pa.expiringHeap[i].heapIndex = i
	pa.expiringHeap[j].heapIndex = j
}
func (pa *peerAddrs) Push(x any) {
	a := x.(*expiringAddr)
	a.heapIndex = len(pa.expiringHeap)
	pa.expiringHeap = append(pa.expiringHeap, a)
}
func (pa *peerAddrs) Pop() any {
	a := pa.expiringHeap[len(pa.expiringHeap)-1]
	a.heapIndex = -1
	pa.expiringHeap = pa.expiringHeap[0 : len(pa.expiringHeap)-1]
	return a
}

func (pa *peerAddrs) Delete(a *expiringAddr) {
	if ea, ok := pa.Addrs[a.Peer][string(a.Addr.Bytes())]; ok {
		if ea.heapIndex != -1 {
			heap.Remove(pa, ea.heapIndex)
		}
		delete(pa.Addrs[a.Peer], string(a.Addr.Bytes()))
		if len(pa.Addrs[a.Peer]) == 0 {
			delete(pa.Addrs, a.Peer)
		}
	}
}

func (pa *peerAddrs) FindAddr(p peer.ID, addr ma.Multiaddr) (*expiringAddr, bool) {
	if m, ok := pa.Addrs[p]; ok {
		ea, ok := m[string(addr.Bytes())]
		return ea, ok
	}
	return nil, false
}

func (pa *peerAddrs) NextExpiry() time.Time {
	if len(pa.expiringHeap) == 0 {
		return time.Time{}
	}
	return pa.expiringHeap[0].Expiry
}

func (pa *peerAddrs) PopIfExpired(now time.Time) (*expiringAddr, bool) {
	// Use `!Before` instead of `After` to ensure that we expire *at* now, and not *just after now*.
	if len(pa.expiringHeap) > 0 && !now.Before(pa.NextExpiry()) {
		ea := heap.Pop(pa).(*expiringAddr)
		delete(pa.Addrs[ea.Peer], string(ea.Addr.Bytes()))
		if len(pa.Addrs[ea.Peer]) == 0 {
			delete(pa.Addrs, ea.Peer)
		}
		return ea, true
	}
	return nil, false
}

func (pa *peerAddrs) Update(a *expiringAddr) {
	if a.heapIndex == -1 {
		return
	}
	if a.IsConnected() {
		heap.Remove(pa, a.heapIndex)
	} else {
		heap.Fix(pa, a.heapIndex)
	}
}

func (pa *peerAddrs) Insert(a *expiringAddr) {
	a.heapIndex = -1
	if _, ok := pa.Addrs[a.Peer]; !ok {
		pa.Addrs[a.Peer] = make(map[string]*expiringAddr)
	}
	pa.Addrs[a.Peer][string(a.Addr.Bytes())] = a
	// don't add connected addresses to the heap.
	if a.IsConnected() {
		return
	}
	heap.Push(pa, a)
}

func (pa *peerAddrs) NumUnconnectedAddrs() int {
	return len(pa.expiringHeap)
}
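
// Invariant worth noting: every stored address appears in the Addrs map, but
// only addresses below the connected TTL threshold also live in expiringHeap.
// Connected addresses are therefore never expired by the gc loop, and the
// unconnected-address limit can be checked in O(1) via the heap length.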

type clock interface {
	Now() time.Time
}

type realclock struct{}

func (rc realclock) Now() time.Time {
	return time.Now()
}

const (
	defaultMaxSignedPeerRecords = 100_000
	defaultMaxUnconnectedAddrs  = 1_000_000
)

// memoryAddrBook manages addresses.
type memoryAddrBook struct {
	mu                   sync.RWMutex
	addrs                peerAddrs
	signedPeerRecords    map[peer.ID]*peerRecordState
	maxUnconnectedAddrs  int
	maxSignedPeerRecords int

	refCount sync.WaitGroup
	cancel   func()

	subManager *AddrSubManager
	clock      clock
}

var _ peerstore.AddrBook = (*memoryAddrBook)(nil)
var _ peerstore.CertifiedAddrBook = (*memoryAddrBook)(nil)

func NewAddrBook(opts ...AddrBookOption) *memoryAddrBook {
	ctx, cancel := context.WithCancel(context.Background())

	ab := &memoryAddrBook{
		addrs:                newPeerAddrs(),
		signedPeerRecords:    make(map[peer.ID]*peerRecordState),
		subManager:           NewAddrSubManager(),
		cancel:               cancel,
		clock:                realclock{},
		maxUnconnectedAddrs:  defaultMaxUnconnectedAddrs,
		maxSignedPeerRecords: defaultMaxSignedPeerRecords,
	}
	for _, opt := range opts {
		opt(ab)
	}

	ab.refCount.Add(1)
	go ab.background(ctx)
	return ab
}

type AddrBookOption func(book *memoryAddrBook) error

// WithClock replaces the real clock, which is primarily useful in tests.
func WithClock(clk clock) AddrBookOption {
	return func(ab *memoryAddrBook) error {
		ab.clock = clk
		return nil
	}
}

// WithMaxAddresses sets the maximum number of unconnected addresses to store.
// The maximum number of connected addresses is bounded by the connection
// limits in the Connection Manager and Resource Manager.
func WithMaxAddresses(n int) AddrBookOption {
	return func(ab *memoryAddrBook) error {
		ab.maxUnconnectedAddrs = n
		return nil
	}
}

// WithMaxSignedPeerRecords sets the maximum number of signed peer records to store.
func WithMaxSignedPeerRecords(n int) AddrBookOption {
	return func(ab *memoryAddrBook) error {
		ab.maxSignedPeerRecords = n
		return nil
	}
}
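
// Illustrative construction (option values are hypothetical):
//
//	ab := NewAddrBook(WithMaxAddresses(50_000), WithMaxSignedPeerRecords(10_000))
//	defer ab.Close()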

// background periodically runs a gc until the address book is closed.
func (mab *memoryAddrBook) background(ctx context.Context) {
	defer mab.refCount.Done()
	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			mab.gc()
		case <-ctx.Done():
			return
		}
	}
}

func (mab *memoryAddrBook) Close() error {
	mab.cancel()
	mab.refCount.Wait()
	return nil
}

// gc garbage collects the in-memory address book.
func (mab *memoryAddrBook) gc() {
	now := mab.clock.Now()
	mab.mu.Lock()
	defer mab.mu.Unlock()
	for {
		ea, ok := mab.addrs.PopIfExpired(now)
		if !ok {
			return
		}
		mab.maybeDeleteSignedPeerRecordUnlocked(ea.Peer)
	}
}

// PeersWithAddrs returns the IDs of all peers with at least one stored address.
func (mab *memoryAddrBook) PeersWithAddrs() peer.IDSlice {
	mab.mu.RLock()
	defer mab.mu.RUnlock()
	peers := make(peer.IDSlice, 0, len(mab.addrs.Addrs))
	for pid := range mab.addrs.Addrs {
		peers = append(peers, pid)
	}
	return peers
}

// AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
func (mab *memoryAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	mab.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// AddAddrs adds `addrs` for peer `p`, which will expire after the given `ttl`.
// This function never reduces the TTL or expiration of an address.
func (mab *memoryAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	mab.addAddrs(p, addrs, ttl)
}
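
// For example, a caller that just discovered an address might store it with a
// short-lived TTL (the constant choice here is illustrative):
//
//	ab.AddAddr(p, addr, peerstore.TempAddrTTL)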

// ConsumePeerRecord adds addresses from a signed peer.PeerRecord, which will expire after the given TTL.
// See https://godoc.org/github.com/libp2p/go-libp2p/core/peerstore#CertifiedAddrBook for more details.
func (mab *memoryAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, ttl time.Duration) (bool, error) {
	r, err := recordEnvelope.Record()
	if err != nil {
		return false, err
	}
	rec, ok := r.(*peer.PeerRecord)
	if !ok {
		return false, fmt.Errorf("unable to process envelope: not a PeerRecord")
	}
	if !rec.PeerID.MatchesPublicKey(recordEnvelope.PublicKey) {
		return false, fmt.Errorf("signing key does not match PeerID in PeerRecord")
	}

	mab.mu.Lock()
	defer mab.mu.Unlock()

	// ensure seq is greater than or equal to the last received
	lastState, found := mab.signedPeerRecords[rec.PeerID]
	if found && lastState.Seq > rec.Seq {
		return false, nil
	}
	// check if we are over the max signed peer record limit
	if !found && len(mab.signedPeerRecords) >= mab.maxSignedPeerRecords {
		return false, errors.New("too many signed peer records")
	}
	mab.signedPeerRecords[rec.PeerID] = &peerRecordState{
		Envelope: recordEnvelope,
		Seq:      rec.Seq,
	}
	mab.addAddrsUnlocked(rec.PeerID, rec.Addrs, ttl)
	return true, nil
}
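
// Sketch of a typical flow (illustrative; env and p are hypothetical): a
// signed envelope received during an identify exchange is consumed, and can
// later be served back out:
//
//	if ok, err := ab.ConsumePeerRecord(env, peerstore.RecentlyConnectedAddrTTL); err == nil && ok {
//		signed := ab.GetPeerRecord(p) // p is the peer that signed env
//		_ = signed
//	}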

// maybeDeleteSignedPeerRecordUnlocked drops the signed peer record for p once
// no addresses remain for it. Callers must hold mab.mu.
func (mab *memoryAddrBook) maybeDeleteSignedPeerRecordUnlocked(p peer.ID) {
	if len(mab.addrs.Addrs[p]) == 0 {
		delete(mab.signedPeerRecords, p)
	}
}

func (mab *memoryAddrBook) addAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	mab.mu.Lock()
	defer mab.mu.Unlock()

	mab.addAddrsUnlocked(p, addrs, ttl)
}

func (mab *memoryAddrBook) addAddrsUnlocked(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	defer mab.maybeDeleteSignedPeerRecordUnlocked(p)

	// if ttl is zero, exit. nothing to do.
	if ttl <= 0 {
		return
	}

	// we are over the limit, drop these addrs.
	if !ttlIsConnected(ttl) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
		return
	}

	exp := mab.clock.Now().Add(ttl)
	for _, addr := range addrs {
		// Remove the /p2p/peer-id suffix from the address, if present.
		addr, addrPid := peer.SplitAddr(addr)
		if addr == nil {
			log.Warnw("was passed nil multiaddr", "peer", p)
			continue
		}
		if addrPid != "" && addrPid != p {
			log.Warnf("was passed p2p address with a different peerId. found: %s, expected: %s", addrPid, p)
			continue
		}
		a, found := mab.addrs.FindAddr(p, addr)
		if !found {
			// not found, announce it.
			entry := &expiringAddr{Addr: addr, Expiry: exp, TTL: ttl, Peer: p}
			mab.addrs.Insert(entry)
			mab.subManager.BroadcastAddr(p, addr)
		} else {
			// update ttl & expiry to whichever is greater between the new and existing entry
			var changed bool
			if ttl > a.TTL {
				changed = true
				a.TTL = ttl
			}
			if exp.After(a.Expiry) {
				changed = true
				a.Expiry = exp
			}
			if changed {
				mab.addrs.Update(a)
			}
		}
	}
}

// SetAddr calls mgr.SetAddrs(p, addr, ttl)
func (mab *memoryAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	mab.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// SetAddrs sets the TTL on the given addresses, clearing any TTL that was
// previously set. This is used when we receive the best estimate of the
// validity of an address.
func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	mab.mu.Lock()
	defer mab.mu.Unlock()

	defer mab.maybeDeleteSignedPeerRecordUnlocked(p)

	exp := mab.clock.Now().Add(ttl)
	for _, addr := range addrs {
		addr, addrPid := peer.SplitAddr(addr)
		if addr == nil {
			log.Warnw("was passed nil multiaddr", "peer", p)
			continue
		}
		if addrPid != "" && addrPid != p {
			log.Warnf("was passed p2p address with a different peerId, found: %s wanted: %s", addrPid, p)
			continue
		}

		if a, found := mab.addrs.FindAddr(p, addr); found {
			if ttl > 0 {
				if a.IsConnected() && !ttlIsConnected(ttl) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
					// demoting a connected address would push us over the
					// unconnected-address limit; drop it instead.
					mab.addrs.Delete(a)
				} else {
					a.Addr = addr
					a.Expiry = exp
					a.TTL = ttl
					mab.addrs.Update(a)
					mab.subManager.BroadcastAddr(p, addr)
				}
			} else {
				mab.addrs.Delete(a)
			}
		} else {
			if ttl > 0 {
				if !ttlIsConnected(ttl) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
					continue
				}
				entry := &expiringAddr{Addr: addr, Expiry: exp, TTL: ttl, Peer: p}
				mab.addrs.Insert(entry)
				mab.subManager.BroadcastAddr(p, addr)
			}
		}
	}
}

// UpdateAddrs updates the addresses associated with the given peer that have
// the given oldTTL to have the given newTTL.
func (mab *memoryAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
	mab.mu.Lock()
	defer mab.mu.Unlock()

	defer mab.maybeDeleteSignedPeerRecordUnlocked(p)

	exp := mab.clock.Now().Add(newTTL)
	for _, a := range mab.addrs.Addrs[p] {
		if oldTTL == a.TTL {
			if newTTL == 0 {
				mab.addrs.Delete(a)
			} else {
				// We are over the limit, drop these addresses.
				if ttlIsConnected(oldTTL) && !ttlIsConnected(newTTL) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
					mab.addrs.Delete(a)
				} else {
					a.TTL = newTTL
					a.Expiry = exp
					mab.addrs.Update(a)
				}
			}
		}
	}
}

// Addrs returns all known (and valid) addresses for a given peer.
func (mab *memoryAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
	mab.mu.RLock()
	defer mab.mu.RUnlock()
	if _, ok := mab.addrs.Addrs[p]; !ok {
		return nil
	}
	return validAddrs(mab.clock.Now(), mab.addrs.Addrs[p])
}

func validAddrs(now time.Time, amap map[string]*expiringAddr) []ma.Multiaddr {
	good := make([]ma.Multiaddr, 0, len(amap))
	if amap == nil {
		return good
	}
	for _, m := range amap {
		if !m.ExpiredBy(now) {
			good = append(good, m.Addr)
		}
	}
	return good
}

// GetPeerRecord returns an Envelope containing a PeerRecord for the
// given peer id, if one exists.
// Returns nil if no signed PeerRecord exists for the peer.
func (mab *memoryAddrBook) GetPeerRecord(p peer.ID) *record.Envelope {
	mab.mu.RLock()
	defer mab.mu.RUnlock()

	if _, ok := mab.addrs.Addrs[p]; !ok {
		return nil
	}
	// The record may have expired, but not yet been garbage collected.
	if len(validAddrs(mab.clock.Now(), mab.addrs.Addrs[p])) == 0 {
		return nil
	}

	state := mab.signedPeerRecords[p]
	if state == nil {
		return nil
	}
	return state.Envelope
}

// ClearAddrs removes all previously stored addresses.
func (mab *memoryAddrBook) ClearAddrs(p peer.ID) {
	mab.mu.Lock()
	defer mab.mu.Unlock()

	delete(mab.signedPeerRecords, p)
	for _, a := range mab.addrs.Addrs[p] {
		mab.addrs.Delete(a)
	}
}

// AddrStream returns a channel on which all new addresses discovered for a
// given peer ID will be published.
func (mab *memoryAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
	var initial []ma.Multiaddr

	mab.mu.RLock()
	if m, ok := mab.addrs.Addrs[p]; ok {
		initial = make([]ma.Multiaddr, 0, len(m))
		for _, a := range m {
			initial = append(initial, a.Addr)
		}
	}
	mab.mu.RUnlock()

	return mab.subManager.AddrStream(ctx, p, initial)
}
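
// Consumption sketch (illustrative; p is hypothetical): the returned channel
// first replays known addresses, then delivers new ones; it is closed only
// after ctx is cancelled, so consume it from its own goroutine:
//
//	go func() {
//		for a := range ab.AddrStream(ctx, p) {
//			log.Debugf("addr for %s: %s", p, a)
//		}
//	}()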

type addrSub struct {
	pubch chan ma.Multiaddr
	ctx   context.Context
}

func (s *addrSub) pubAddr(a ma.Multiaddr) {
	select {
	case s.pubch <- a:
	case <-s.ctx.Done():
	}
}

// AddrSubManager is an abstracted, pub-sub manager for address streams. It is
// extracted from memoryAddrBook in order to support additional implementations.
type AddrSubManager struct {
	mu   sync.RWMutex
	subs map[peer.ID][]*addrSub
}

// NewAddrSubManager initializes an AddrSubManager.
func NewAddrSubManager() *AddrSubManager {
	return &AddrSubManager{
		subs: make(map[peer.ID][]*addrSub),
	}
}

// removeSub is used internally by the address stream goroutine to remove a
// subscription from the manager.
func (mgr *AddrSubManager) removeSub(p peer.ID, s *addrSub) {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()

	subs := mgr.subs[p]
	if len(subs) == 1 {
		if subs[0] != s {
			return
		}
		delete(mgr.subs, p)
		return
	}

	for i, v := range subs {
		if v == s {
			subs[i] = subs[len(subs)-1]
			subs[len(subs)-1] = nil
			mgr.subs[p] = subs[:len(subs)-1]
			return
		}
	}
}

// BroadcastAddr broadcasts a new address to all subscribed streams.
func (mgr *AddrSubManager) BroadcastAddr(p peer.ID, addr ma.Multiaddr) {
	mgr.mu.RLock()
	defer mgr.mu.RUnlock()

	if subs, ok := mgr.subs[p]; ok {
		for _, sub := range subs {
			sub.pubAddr(addr)
		}
	}
}

// AddrStream creates a new subscription for a given peer ID, pre-populating the
// channel with any addresses we might already have on file.
func (mgr *AddrSubManager) AddrStream(ctx context.Context, p peer.ID, initial []ma.Multiaddr) <-chan ma.Multiaddr {
	sub := &addrSub{pubch: make(chan ma.Multiaddr), ctx: ctx}
	out := make(chan ma.Multiaddr)

	mgr.mu.Lock()
	mgr.subs[p] = append(mgr.subs[p], sub)
	mgr.mu.Unlock()

	sort.Sort(addrList(initial))

	go func(buffer []ma.Multiaddr) {
		defer close(out)

		sent := make(map[string]struct{}, len(buffer))
		for _, a := range buffer {
			sent[string(a.Bytes())] = struct{}{}
		}

		var outch chan ma.Multiaddr
		var next ma.Multiaddr
		if len(buffer) > 0 {
			next = buffer[0]
			buffer = buffer[1:]
			outch = out
		}

		for {
			select {
			case outch <- next:
				if len(buffer) > 0 {
					next = buffer[0]
					buffer = buffer[1:]
				} else {
					outch = nil
					next = nil
				}
			case naddr := <-sub.pubch:
				if _, ok := sent[string(naddr.Bytes())]; ok {
					continue
				}
				sent[string(naddr.Bytes())] = struct{}{}

				if next == nil {
					next = naddr
					outch = out
				} else {
					buffer = append(buffer, naddr)
				}
			case <-ctx.Done():
				mgr.removeSub(p, sub)
				return
			}
		}
	}(initial)

	return out
}
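
// Design note on the goroutine above: outch is set to nil whenever the buffer
// is empty, which disables the send case of the select until a fresh address
// arrives; the sent set deduplicates addresses across the pre-seeded buffer
// and live broadcasts.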