// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package log // import "go.opentelemetry.io/otel/sdk/log"

import (
	"slices"
	"strings"
	"sync"
	"time"
	"unicode/utf8"

	"go.opentelemetry.io/otel/internal/global"
	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/resource"
	"go.opentelemetry.io/otel/trace"
)

// attributesInlineCount is the number of attributes that are efficiently
// stored in an array within a Record. This value is borrowed from slog which
// performed a quantitative survey of log library use and found this value to
// cover 95% of all use-cases (https://go.dev/blog/slog#performance).
const attributesInlineCount = 5

var logAttrDropped = sync.OnceFunc(func() {
	global.Warn("limit reached: dropping log Record attributes")
})

// indexPool is a pool of index maps used for de-duplication.
var indexPool = sync.Pool{
	New: func() any { return make(map[string]int) },
}

func getIndex() map[string]int {
	return indexPool.Get().(map[string]int)
}

func putIndex(index map[string]int) {
	clear(index)
	indexPool.Put(index)
}

// Record is a log record emitted by the Logger.
// A log record with non-empty event name is interpreted as an event record.
//
// Do not create instances of Record on your own in production code.
// You can use [go.opentelemetry.io/otel/sdk/log/logtest.RecordFactory]
// for testing purposes.
type Record struct {
	// Do not embed the log.Record. Attributes need to be overwrite-able and
	// deep-copying needs to be possible.

	eventName         string
	timestamp         time.Time
	observedTimestamp time.Time
	severity          log.Severity
	severityText      string
	body              log.Value

	// The fields below are for optimizing the implementation of Attributes and
	// AddAttributes. This design is borrowed from the slog Record type:
	// https://cs.opensource.google/go/go/+/refs/tags/go1.22.0:src/log/slog/record.go;l=20

	// Allocation optimization: an inline array sized to hold
	// the majority of log calls (based on examination of open-source
	// code). It holds the start of the list of attributes.
	front [attributesInlineCount]log.KeyValue

	// The number of attributes in front.
	nFront int

	// The list of attributes except for those in front.
	//
	// Invariants:
	//   - len(back) > 0 if nFront == len(front)
	//   - Unused array elements are zero-ed. Used to detect mistakes.
	back []log.KeyValue

	// dropped is the count of attributes that have been dropped when limits
	// were reached.
	dropped int

	traceID    trace.TraceID
	spanID     trace.SpanID
	traceFlags trace.TraceFlags

	// resource represents the entity that collected the log.
	resource *resource.Resource

	// scope is the Scope that the Logger was created with.
	scope *instrumentation.Scope

	attributeValueLengthLimit int
	attributeCountLimit       int

	// allowDupKeys specifies whether duplicate keys are allowed in key-value
	// collections (true) or should be deduplicated (false).
	allowDupKeys bool

	noCmp [0]func() //nolint: unused  // This is indeed used.
}

func (r *Record) addDropped(n int) {
	logAttrDropped()
	r.dropped += n
}

func (r *Record) setDropped(n int) {
	logAttrDropped()
	r.dropped = n
}
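// The sketch below is illustrative only and is not part of the upstream
// record.go: it shows how attributes are split between the inline front array
// and the heap-allocated back slice. A zero-value Record is used purely for
// demonstration (its limits are left at their zero values); production code
// should not construct Records directly.
func exampleFrontBackSplit() {
	var r Record
	r.AddAttributes(
		log.String("k1", "v1"),
		log.String("k2", "v2"),
		log.String("k3", "v3"),
		log.String("k4", "v4"),
		log.String("k5", "v5"),
		log.String("k6", "v6"),
	)
	// The first attributesInlineCount (5) attributes fill r.front without any
	// slice allocation; the sixth spills into r.back.
	_ = r.nFront          // 5
	_ = len(r.back)       // 1
	_ = r.AttributesLen() // 6
}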
// EventName returns the event name.
// A log record with non-empty event name is interpreted as an event record.
func (r *Record) EventName() string {
	return r.eventName
}

// SetEventName sets the event name.
// A log record with non-empty event name is interpreted as an event record.
func (r *Record) SetEventName(name string) {
	r.eventName = name
}

// Timestamp returns the time when the log record occurred.
func (r *Record) Timestamp() time.Time {
	return r.timestamp
}

// SetTimestamp sets the time when the log record occurred.
func (r *Record) SetTimestamp(t time.Time) {
	r.timestamp = t
}

// ObservedTimestamp returns the time when the log record was observed.
func (r *Record) ObservedTimestamp() time.Time {
	return r.observedTimestamp
}

// SetObservedTimestamp sets the time when the log record was observed.
func (r *Record) SetObservedTimestamp(t time.Time) {
	r.observedTimestamp = t
}

// Severity returns the severity of the log record.
func (r *Record) Severity() log.Severity {
	return r.severity
}

// SetSeverity sets the severity level of the log record.
func (r *Record) SetSeverity(level log.Severity) {
	r.severity = level
}

// SeverityText returns severity (also known as log level) text. This is the
// original string representation of the severity as it is known at the source.
func (r *Record) SeverityText() string {
	return r.severityText
}

// SetSeverityText sets severity (also known as log level) text. This is the
// original string representation of the severity as it is known at the source.
func (r *Record) SetSeverityText(text string) {
	r.severityText = text
}

// Body returns the body of the log record.
func (r *Record) Body() log.Value {
	return r.body
}

// SetBody sets the body of the log record.
func (r *Record) SetBody(v log.Value) {
	if !r.allowDupKeys {
		r.body = r.dedupeBodyCollections(v)
	} else {
		r.body = v
	}
}

// WalkAttributes walks all attributes the log record holds by calling f on
// each [log.KeyValue] in the [Record]. Iteration stops if f returns false.
func (r *Record) WalkAttributes(f func(log.KeyValue) bool) {
	for i := 0; i < r.nFront; i++ {
		if !f(r.front[i]) {
			return
		}
	}
	for _, a := range r.back {
		if !f(a) {
			return
		}
	}
}

// AddAttributes adds attributes to the log record.
// Attributes in attrs will overwrite any attribute already added to r with the same key.
func (r *Record) AddAttributes(attrs ...log.KeyValue) {
	n := r.AttributesLen()
	if n == 0 {
		// Avoid the more complex duplicate map lookups below.
		var dropped int
		if !r.allowDupKeys {
			attrs, dropped = dedup(attrs)
			r.setDropped(dropped)
		}

		attrs, dropped = head(attrs, r.attributeCountLimit)
		r.addDropped(dropped)

		r.addAttrs(attrs)
		return
	}

	if !r.allowDupKeys {
		// Used to find duplicates between attrs and existing attributes in r.
		rIndex := r.attrIndex()
		defer putIndex(rIndex)

		// Unique attrs that need to be added to r. This uses the same underlying
		// array as attrs.
		//
		// Note, do not iterate attrs twice by just calling dedup(attrs) here.
		unique := attrs[:0]
		// Used to find duplicates within attrs itself. The index value is the
		// index of the element in unique.
		uIndex := getIndex()
		defer putIndex(uIndex)

		// Deduplicate attrs within the scope of all existing attributes.
		for _, a := range attrs {
			// Last-value-wins for any duplicates in attrs.
			idx, found := uIndex[a.Key]
			if found {
				r.addDropped(1)
				unique[idx] = a
				continue
			}

			idx, found = rIndex[a.Key]
			if found {
				// New attrs overwrite any existing with the same key.
				r.addDropped(1)
				if idx < 0 {
					r.front[-(idx + 1)] = a
				} else {
					r.back[idx] = a
				}
			} else {
				// Unique attribute.
				unique = append(unique, a)
				uIndex[a.Key] = len(unique) - 1
			}
		}
		attrs = unique
	}

	if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit {
		// Truncate the now unique attributes to comply with limit.
		//
		// Do not use head(attrs, r.attributeCountLimit - n) here. If
		// (r.attributeCountLimit - n) <= 0 attrs needs to be emptied.
		last := max(0, r.attributeCountLimit-n)
		r.addDropped(len(attrs) - last)
		attrs = attrs[:last]
	}

	r.addAttrs(attrs)
}

// attrIndex returns an index map for all attributes in the Record r. The index
// maps the attribute key to the location where the attribute is stored. If the
// value is < 0 then -(value + 1) (e.g. -1 -> 0, -2 -> 1, -3 -> 2) is the index
// in r.front. Otherwise, the value is the exact index in r.back.
//
// The returned index is taken from the indexPool. It is the caller's
// responsibility to return the index to that pool (putIndex) when done.
func (r *Record) attrIndex() map[string]int {
	index := getIndex()
	for i := 0; i < r.nFront; i++ {
		key := r.front[i].Key
		index[key] = -i - 1 // stored in front: negative index.
	}
	for i := 0; i < len(r.back); i++ {
		key := r.back[i].Key
		index[key] = i // stored in back: positive index.
	}
	return index
}

// addAttrs adds attrs to the Record r. This does not validate any limits or
// duplication of attributes; these tasks are left to the caller to handle
// prior to calling.
func (r *Record) addAttrs(attrs []log.KeyValue) {
	var i int
	for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
		a := attrs[i]
		r.front[r.nFront] = r.applyAttrLimits(a)
		r.nFront++
	}

	for j, a := range attrs[i:] {
		attrs[i+j] = r.applyAttrLimits(a)
	}
	r.back = slices.Grow(r.back, len(attrs[i:]))
	r.back = append(r.back, attrs[i:]...)
}

// SetAttributes sets (and overrides) attributes to the log record.
func (r *Record) SetAttributes(attrs ...log.KeyValue) {
	var dropped int
	r.setDropped(0)
	if !r.allowDupKeys {
		attrs, dropped = dedup(attrs)
		r.setDropped(dropped)
	}

	attrs, dropped = head(attrs, r.attributeCountLimit)
	r.addDropped(dropped)

	r.nFront = 0
	var i int
	for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
		a := attrs[i]
		r.front[r.nFront] = r.applyAttrLimits(a)
		r.nFront++
	}

	r.back = slices.Clone(attrs[i:])
	for j, a := range r.back {
		r.back[j] = r.applyAttrLimits(a)
	}
}

// head returns the first n values of kvs along with the number of elements
// dropped. If n is less than or equal to zero, kvs is returned with 0.
func head(kvs []log.KeyValue, n int) ([]log.KeyValue, int) {
	if n > 0 && len(kvs) > n {
		return kvs[:n], len(kvs) - n
	}
	return kvs, 0
}

// dedup deduplicates kvs front-to-back with the last value saved.
func dedup(kvs []log.KeyValue) (unique []log.KeyValue, dropped int) {
	index := getIndex()
	defer putIndex(index)

	unique = kvs[:0] // Use the same underlying array as kvs.
	for _, a := range kvs {
		idx, found := index[a.Key]
		if found {
			dropped++
			unique[idx] = a
		} else {
			unique = append(unique, a)
			index[a.Key] = len(unique) - 1
		}
	}
	return unique, dropped
}
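// Illustrative sketch, not part of the upstream record.go: it demonstrates
// the last-value-wins deduplication performed by AddAttributes (via dedup)
// and how each overwritten value is counted as dropped. The zero-value Record
// is used for demonstration only.
func exampleLastValueWins() {
	var r Record // zero value: deduplication enabled, no count limit.
	r.AddAttributes(log.Int("k", 1), log.Int("k", 2))

	// Only one "k" attribute remains and it holds the last value, 2. The
	// overwritten value is reported as dropped.
	_ = r.AttributesLen()     // 1
	_ = r.DroppedAttributes() // 1
}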
// AttributesLen returns the number of attributes in the log record.
func (r *Record) AttributesLen() int {
	return r.nFront + len(r.back)
}

// DroppedAttributes returns the number of attributes dropped due to limits
// being reached.
func (r *Record) DroppedAttributes() int {
	return r.dropped
}

// TraceID returns the trace ID or empty array.
func (r *Record) TraceID() trace.TraceID {
	return r.traceID
}

// SetTraceID sets the trace ID.
func (r *Record) SetTraceID(id trace.TraceID) {
	r.traceID = id
}

// SpanID returns the span ID or empty array.
func (r *Record) SpanID() trace.SpanID {
	return r.spanID
}

// SetSpanID sets the span ID.
func (r *Record) SetSpanID(id trace.SpanID) {
	r.spanID = id
}

// TraceFlags returns the trace flags.
func (r *Record) TraceFlags() trace.TraceFlags {
	return r.traceFlags
}

// SetTraceFlags sets the trace flags.
func (r *Record) SetTraceFlags(flags trace.TraceFlags) {
	r.traceFlags = flags
}

// Resource returns the entity that collected the log.
func (r *Record) Resource() *resource.Resource {
	return r.resource
}

// InstrumentationScope returns the scope that the Logger was created with.
func (r *Record) InstrumentationScope() instrumentation.Scope {
	if r.scope == nil {
		return instrumentation.Scope{}
	}
	return *r.scope
}

// Clone returns a copy of the record with no shared state. The original record
// and the clone can both be modified without interfering with each other.
func (r *Record) Clone() Record {
	res := *r
	res.back = slices.Clone(r.back)
	return res
}

func (r *Record) applyAttrLimits(attr log.KeyValue) log.KeyValue {
	attr.Value = r.applyValueLimits(attr.Value)
	return attr
}

func (r *Record) applyValueLimits(val log.Value) log.Value {
	switch val.Kind() {
	case log.KindString:
		s := val.AsString()
		if len(s) > r.attributeValueLengthLimit {
			val = log.StringValue(truncate(r.attributeValueLengthLimit, s))
		}
	case log.KindSlice:
		sl := val.AsSlice()
		for i := range sl {
			sl[i] = r.applyValueLimits(sl[i])
		}
		val = log.SliceValue(sl...)
	case log.KindMap:
		kvs := val.AsMap()
		if !r.allowDupKeys {
			// Deduplicate then truncate. Do not do at the same time to avoid
			// wasted truncation operations.
			var dropped int
			kvs, dropped = dedup(kvs)
			r.addDropped(dropped)
		}
		for i := range kvs {
			kvs[i] = r.applyAttrLimits(kvs[i])
		}
		val = log.MapValue(kvs...)
	}
	return val
}

// dedupeBodyCollections deduplicates the keys of any map values found in the
// body v, recursing into nested slice and map values.
func (r *Record) dedupeBodyCollections(v log.Value) log.Value {
	switch v.Kind() {
	case log.KindSlice:
		sl := v.AsSlice()
		for i := range sl {
			sl[i] = r.dedupeBodyCollections(sl[i])
		}
		v = log.SliceValue(sl...)
	case log.KindMap:
		kvs, _ := dedup(v.AsMap())
		for i := range kvs {
			kvs[i].Value = r.dedupeBodyCollections(kvs[i].Value)
		}
		v = log.MapValue(kvs...)
	}
	return v
}
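// Illustrative sketch, not part of the upstream record.go: SetBody walks any
// map values in the body and removes duplicate keys (last value wins) when
// deduplication is enabled. The zero-value Record is for demonstration only.
func exampleBodyMapDedup() {
	var r Record // zero value: deduplication enabled.
	r.SetBody(log.MapValue(
		log.Int("k", 1),
		log.Int("k", 2),
	))
	// The stored body map contains a single "k" entry holding the value 2.
	_ = r.Body()
}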
// truncate returns a truncated version of s such that it contains at most the
// limit number of characters. Truncation is applied by returning the first
// limit valid characters contained in s.
//
// If limit is negative, it returns the original string.
//
// UTF-8 is supported. When truncating, all invalid characters are dropped
// before applying truncation.
//
// If s already contains no more than the limit number of bytes, it is returned
// unchanged. No invalid characters are removed.
func truncate(limit int, s string) string {
	// This prioritizes performance in the following order based on the most
	// common expected use-cases.
	//
	//  - Short values less than the default limit (128).
	//  - Strings with valid encodings that exceed the limit.
	//  - No limit.
	//  - Strings with invalid encodings that exceed the limit.
	if limit < 0 || len(s) <= limit {
		return s
	}

	// Optimistically, assume all valid UTF-8.
	var b strings.Builder
	count := 0
	for i, c := range s {
		if c != utf8.RuneError {
			count++
			if count > limit {
				return s[:i]
			}
			continue
		}

		_, size := utf8.DecodeRuneInString(s[i:])
		if size == 1 {
			// Invalid encoding.
			b.Grow(len(s) - 1)
			_, _ = b.WriteString(s[:i])
			s = s[i:]
			break
		}
	}

	// Fast-path, no invalid input.
	if b.Cap() == 0 {
		return s
	}

	// Truncate while validating UTF-8.
	for i := 0; i < len(s) && count < limit; {
		c := s[i]
		if c < utf8.RuneSelf {
			// Optimization for single byte runes (common case).
			_ = b.WriteByte(c)
			i++
			count++
			continue
		}

		_, size := utf8.DecodeRuneInString(s[i:])
		if size == 1 {
			// We checked for all 1-byte runes above, this is a RuneError.
			i++
			continue
		}

		_, _ = b.WriteString(s[i : i+size])
		i += size
		count++
	}

	return b.String()
}
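// Illustrative sketch, not part of the upstream record.go: truncate limits
// strings by rune count, not byte count, so multi-byte UTF-8 characters are
// never split in half.
func exampleTruncateRunes() {
	_ = truncate(2, "€€€")  // "€€": the first 2 valid runes (6 bytes) are kept.
	_ = truncate(-1, "€€€") // Negative limit: the input is returned unchanged.
	_ = truncate(5, "ab")   // Already within the limit: returned unchanged.
}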