package rcmgr

import (
	"strings"

	"github.com/libp2p/go-libp2p/p2p/metricshelper"

	"github.com/prometheus/client_golang/prometheus"
)

const metricNamespace = "libp2p_rcmgr"
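// Metric names below are prefixed with this namespace; for example, the
// "connections" gauge is exported as "libp2p_rcmgr_connections".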

var (

	// Conns
	conns = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name:      "connections",
		Help:      "Number of Connections",
	}, []string{"dir", "scope"})

	connsInboundSystem     = conns.With(prometheus.Labels{"dir": "inbound", "scope": "system"})
	connsInboundTransient  = conns.With(prometheus.Labels{"dir": "inbound", "scope": "transient"})
	connsOutboundSystem    = conns.With(prometheus.Labels{"dir": "outbound", "scope": "system"})
	connsOutboundTransient = conns.With(prometheus.Labels{"dir": "outbound", "scope": "transient"})

	oneTenThenExpDistributionBuckets = []float64{
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32, 64, 128, 256,
	}

	// PeerConns
	peerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "peer_connections",
		Buckets:   oneTenThenExpDistributionBuckets,
		Help:      "Number of connections this peer has",
	}, []string{"dir"})
	peerConnsInbound  = peerConns.With(prometheus.Labels{"dir": "inbound"})
	peerConnsOutbound = peerConns.With(prometheus.Labels{"dir": "outbound"})

	// Lets us build a histogram of our current state. See https://github.com/libp2p/go-libp2p-resource-manager/pull/54#discussion_r911244757 for more information.
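	// For illustration (PromQL sketch, not part of this package): the current
	// per-peer distribution can be approximated by subtracting the two histograms, e.g.
	//   libp2p_rcmgr_peer_connections_bucket - libp2p_rcmgr_previous_peer_connections_bucket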
	previousPeerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "previous_peer_connections",
		Buckets:   oneTenThenExpDistributionBuckets,
		Help:      "Number of connections this peer previously had. This is used to get the current connection number per peer histogram by subtracting this from the peer_connections histogram",
	}, []string{"dir"})
	previousPeerConnsInbound  = previousPeerConns.With(prometheus.Labels{"dir": "inbound"})
	previousPeerConnsOutbound = previousPeerConns.With(prometheus.Labels{"dir": "outbound"})

	// Streams
	streams = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name:      "streams",
		Help:      "Number of Streams",
	}, []string{"dir", "scope", "protocol"})

	peerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "peer_streams",
		Buckets:   oneTenThenExpDistributionBuckets,
		Help:      "Number of streams this peer has",
	}, []string{"dir"})
	peerStreamsInbound  = peerStreams.With(prometheus.Labels{"dir": "inbound"})
	peerStreamsOutbound = peerStreams.With(prometheus.Labels{"dir": "outbound"})

	previousPeerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "previous_peer_streams",
		Buckets:   oneTenThenExpDistributionBuckets,
		Help:      "Number of streams this peer previously had. This is used to get the current stream number per peer histogram by subtracting this from the peer_streams histogram",
	}, []string{"dir"})
	previousPeerStreamsInbound  = previousPeerStreams.With(prometheus.Labels{"dir": "inbound"})
	previousPeerStreamsOutbound = previousPeerStreams.With(prometheus.Labels{"dir": "outbound"})

	// Memory
	memoryTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name:      "memory",
		Help:      "Amount of memory reserved as reported to the Resource Manager",
	}, []string{"scope", "protocol"})

	// PeerMemory
	peerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "peer_memory",
		Buckets:   memDistribution,
		Help:      "How many peers have reserved this bucket of memory, as reported to the Resource Manager",
	})
	previousPeerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "previous_peer_memory",
		Buckets:   memDistribution,
		Help:      "How many peers have previously reserved this bucket of memory, as reported to the Resource Manager",
	})

	// ConnMemory
	connMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "conn_memory",
		Buckets:   memDistribution,
		Help:      "How many conns have reserved this bucket of memory, as reported to the Resource Manager",
	})
	previousConnMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name:      "previous_conn_memory",
		Buckets:   memDistribution,
		Help:      "How many conns have previously reserved this bucket of memory, as reported to the Resource Manager",
	})

	// FDs
	fds = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name:      "fds",
		Help:      "Number of file descriptors reserved as reported to the Resource Manager",
	}, []string{"scope"})

	fdsSystem    = fds.With(prometheus.Labels{"scope": "system"})
	fdsTransient = fds.With(prometheus.Labels{"scope": "transient"})

	// Blocked resources
	blockedResources = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name:      "blocked_resources",
		Help:      "Number of blocked resources",
	}, []string{"dir", "scope", "resource"})
)

var (
	memDistribution = []float64{
		1 << 10,   // 1KB
		4 << 10,   // 4KB
		32 << 10,  // 32KB
		1 << 20,   // 1MB
		32 << 20,  // 32MB
		256 << 20, // 256MB
		512 << 20, // 512MB
		1 << 30,   // 1GB
		2 << 30,   // 2GB
		4 << 30,   // 4GB
	}
)

// MustRegisterWith registers the resource manager's metric collectors with the
// given Prometheus registerer.
func MustRegisterWith(reg prometheus.Registerer) {
	metricshelper.RegisterCollectors(reg,
		conns,
		peerConns,
		previousPeerConns,
		streams,
		peerStreams,
		previousPeerStreams,
		memoryTotal,
		peerMemory,
		previousPeerMemory,
		connMemory,
		previousConnMemory,
		fds,
		blockedResources,
	)
}

// WithMetricsDisabled disables metrics collection by the resource manager.
func WithMetricsDisabled() Option {
	return func(r *resourceManager) error {
		r.disableMetrics = true
		return nil
	}
}

// StatsTraceReporter reports stats on the resource manager using its traces.
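//
// A minimal wiring sketch (assuming the default Prometheus registry and an
// already constructed limiter; see NewResourceManager and WithTraceReporter):
//
//	rcmgr.MustRegisterWith(prometheus.DefaultRegisterer)
//	str, err := rcmgr.NewStatsTraceReporter()
//	if err != nil {
//		// handle the error
//	}
//	rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithTraceReporter(str))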
type StatsTraceReporter struct{}

func NewStatsTraceReporter() (StatsTraceReporter, error) {
	// TODO tell prometheus the system limits
	return StatsTraceReporter{}, nil
}

// ConsumeEvent updates the Prometheus metrics from a single trace event.
func (r StatsTraceReporter) ConsumeEvent(evt TraceEvt) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	r.consumeEventWithLabelSlice(evt, tags)
}

// Separate function so that we can test that it does not allocate. The sync.Pool may allocate.
func (r StatsTraceReporter) consumeEventWithLabelSlice(evt TraceEvt, tags *[]string) {
	switch evt.Type {
	case TraceAddStreamEvt, TraceRemoveStreamEvt:
		if p := PeerStrInScopeName(evt.Name); p != "" {
			// Aggregated peer stats. Counts how many peers have N streams open.
			// Uses two bucket aggregations: one to count how many streams the
			// peer has now, the other to count how many streams the peer used
			// to have. When looking at the data you take the difference of the two.

			oldStreamsOut := int64(evt.StreamsOut - evt.DeltaOut)
			peerStreamsOut := int64(evt.StreamsOut)
			if oldStreamsOut != peerStreamsOut {
				if oldStreamsOut != 0 {
					previousPeerStreamsOutbound.Observe(float64(oldStreamsOut))
				}
				if peerStreamsOut != 0 {
					peerStreamsOutbound.Observe(float64(peerStreamsOut))
				}
			}

			oldStreamsIn := int64(evt.StreamsIn - evt.DeltaIn)
			peerStreamsIn := int64(evt.StreamsIn)
			if oldStreamsIn != peerStreamsIn {
				if oldStreamsIn != 0 {
					previousPeerStreamsInbound.Observe(float64(oldStreamsIn))
				}
				if peerStreamsIn != 0 {
					peerStreamsInbound.Observe(float64(peerStreamsIn))
				}
			}
		} else {
			if evt.DeltaOut != 0 {
				if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
					*tags = (*tags)[:0]
					*tags = append(*tags, "outbound", evt.Name, "")
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
				} else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
					*tags = (*tags)[:0]
					*tags = append(*tags, "outbound", "protocol", proto)
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
				} else {
					// Not measuring the service scope, conn scope, servicepeer and
					// protocolpeer scopes. That is a lot of data, and you can use the
					// aggregated peer stats + service stats to infer it.
					break
				}
			}

			if evt.DeltaIn != 0 {
				if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
					*tags = (*tags)[:0]
					*tags = append(*tags, "inbound", evt.Name, "")
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
				} else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
					*tags = (*tags)[:0]
					*tags = append(*tags, "inbound", "protocol", proto)
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
				} else {
					// Not measuring the service scope, conn scope, servicepeer and
					// protocolpeer scopes. That is a lot of data, and you can use the
					// aggregated peer stats + service stats to infer it.
					break
				}
			}
		}

	case TraceAddConnEvt, TraceRemoveConnEvt:
		if p := PeerStrInScopeName(evt.Name); p != "" {
			// Aggregated peer stats. Counts how many peers have N connections.
			// Uses two bucket aggregations: one to count how many connections the
			// peer has now, the other to count how many connections the peer used
			// to have. When looking at the data you take the difference of the two.

			oldConnsOut := int64(evt.ConnsOut - evt.DeltaOut)
			connsOut := int64(evt.ConnsOut)
			if oldConnsOut != connsOut {
				if oldConnsOut != 0 {
					previousPeerConnsOutbound.Observe(float64(oldConnsOut))
				}
				if connsOut != 0 {
					peerConnsOutbound.Observe(float64(connsOut))
				}
			}

			oldConnsIn := int64(evt.ConnsIn - evt.DeltaIn)
			connsIn := int64(evt.ConnsIn)
			if oldConnsIn != connsIn {
				if oldConnsIn != 0 {
					previousPeerConnsInbound.Observe(float64(oldConnsIn))
				}
				if connsIn != 0 {
					peerConnsInbound.Observe(float64(connsIn))
				}
			}
		} else {
			if IsConnScope(evt.Name) {
				// Not measuring this. I don't think it's useful.
				break
			}

			if IsSystemScope(evt.Name) {
				connsInboundSystem.Set(float64(evt.ConnsIn))
				connsOutboundSystem.Set(float64(evt.ConnsOut))
			} else if IsTransientScope(evt.Name) {
				connsInboundTransient.Set(float64(evt.ConnsIn))
				connsOutboundTransient.Set(float64(evt.ConnsOut))
			}

			// evt.Delta represents the change in file descriptors.
			if evt.Delta != 0 {
				if IsSystemScope(evt.Name) {
					fdsSystem.Set(float64(evt.FD))
				} else if IsTransientScope(evt.Name) {
					fdsTransient.Set(float64(evt.FD))
				}
			}
		}

	case TraceReserveMemoryEvt, TraceReleaseMemoryEvt:
		if p := PeerStrInScopeName(evt.Name); p != "" {
			oldMem := evt.Memory - evt.Delta
			if oldMem != evt.Memory {
				if oldMem != 0 {
					previousPeerMemory.Observe(float64(oldMem))
				}
				if evt.Memory != 0 {
					peerMemory.Observe(float64(evt.Memory))
				}
			}
		} else if IsConnScope(evt.Name) {
			oldMem := evt.Memory - evt.Delta
			if oldMem != evt.Memory {
				if oldMem != 0 {
					previousConnMemory.Observe(float64(oldMem))
				}
				if evt.Memory != 0 {
					connMemory.Observe(float64(evt.Memory))
				}
			}
		} else {
			if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
				*tags = (*tags)[:0]
				*tags = append(*tags, evt.Name, "")
				memoryTotal.WithLabelValues(*tags...).Set(float64(evt.Memory))
			} else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
				*tags = (*tags)[:0]
				*tags = append(*tags, "protocol", proto)
				memoryTotal.WithLabelValues(*tags...).Set(float64(evt.Memory))
			} else {
				// Not measuring the conn scope, servicepeer and protocolpeer scopes.
				// That is a lot of data, and you can use the aggregated peer stats +
				// service stats to infer it.
				break
			}
		}

	case TraceBlockAddConnEvt, TraceBlockAddStreamEvt, TraceBlockReserveMemoryEvt:
		var resource string
		if evt.Type == TraceBlockAddConnEvt {
			resource = "connection"
		} else if evt.Type == TraceBlockAddStreamEvt {
			resource = "stream"
		} else {
			resource = "memory"
		}

		scopeName := evt.Name
		// Only keep the top scope name. We don't want the peer id here.
		// Using indexes and slices to avoid allocating.
		scopeSplitIdx := strings.IndexByte(scopeName, ':')
		if scopeSplitIdx != -1 {
			scopeName = evt.Name[0:scopeSplitIdx]
		}
		// Drop the connection or stream id.
		idSplitIdx := strings.IndexByte(scopeName, '-')
		if idSplitIdx != -1 {
			scopeName = scopeName[0:idSplitIdx]
		}
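		// Illustrative (assumed) scope names: "peer:12D3Koo..." reduces to "peer"
		// and "conn-42" reduces to "conn", so blocked resources are counted per
		// top-level scope only.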

		if evt.DeltaIn != 0 {
			*tags = (*tags)[:0]
			*tags = append(*tags, "inbound", scopeName, resource)
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaIn))
		}

		if evt.DeltaOut != 0 {
			*tags = (*tags)[:0]
			*tags = append(*tags, "outbound", scopeName, resource)
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaOut))
		}

		if evt.Delta != 0 && resource == "connection" {
			// This represents blocked file descriptors.
			*tags = (*tags)[:0]
			*tags = append(*tags, "", scopeName, "fd")
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
		} else if evt.Delta != 0 {
			*tags = (*tags)[:0]
			*tags = append(*tags, "", scopeName, resource)
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
		}
	}
}