// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"encoding/binary"
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// Compact performs garbage collection on a profile to remove any
// unreferenced fields. This is useful to reduce the size of a profile
// after samples or locations have been removed.
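//
// A minimal usage sketch (p is assumed to be a *Profile whose samples were
// already filtered elsewhere):
//
//	p = p.Compact()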
func (p *Profile) Compact() *Profile {
	p, _ = Merge([]*Profile{p})
	return p
}

// Merge merges all the profiles in srcs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one. Merges are
// associative, with the caveat that the first profile receives some
// specialization in how headers are combined. There may be other
// subtleties now or in the future regarding associativity.
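//
// A usage sketch (assuming p1 and p2 are *Profile values parsed elsewhere,
// e.g. with ParseData):
//
//	merged, err := Merge([]*Profile{p1, p2})
//	if err != nil {
//		// The profiles had mismatched sample or period types.
//		return err
//	}
//	_ = merged // merged is independent of p1 and p2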
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}

	pm := &profileMerger{
		p:         p,
		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}

	for _, src := range srcs {
		// Clear the profile-specific hash tables
		pm.locationsByID = makeLocationIDMap(len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}

		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}

	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}

	return p, nil
}

// Normalize normalizes the source profile by multiplying each value in the
// profile by the ratio of the sum of the base profile's values of that sample
// type to the sum of the source profile's values of that sample type.
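//
// For example (illustrative numbers only): if the base profile's values for a
// sample type sum to 100 and the source profile's values for that type sum to
// 400, every value of that type in the source is scaled by 100/400 = 0.25.
//
// A usage sketch (pBase is assumed to be a compatible *Profile):
//
//	if err := p.Normalize(pBase); err != nil {
//		return err
//	}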
func (p *Profile) Normalize(pb *Profile) error {
	if err := p.compatible(pb); err != nil {
		return err
	}

	baseVals := make([]int64, len(p.SampleType))
	for _, s := range pb.Sample {
		for i, v := range s.Value {
			baseVals[i] += v
		}
	}

	srcVals := make([]int64, len(p.SampleType))
	for _, s := range p.Sample {
		for i, v := range s.Value {
			srcVals[i] += v
		}
	}

	normScale := make([]float64, len(baseVals))
	for i := range baseVals {
		if srcVals[i] == 0 {
			normScale[i] = 0.0
		} else {
			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
		}
	}
	p.ScaleN(normScale)
	return nil
}

func isZeroSample(s *Sample) bool {
	for _, v := range s.Value {
		if v != 0 {
			return false
		}
	}
	return true
}

type profileMerger struct {
	p *Profile

	// Memoization tables within a profile.
	locationsByID locationIDMap
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

type mapInfo struct {
	m      *Mapping
	offset int64
}

func (pm *profileMerger) mapSample(src *Sample) *Sample {
	// Check memoization table
	k := pm.sampleKey(src)
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}

	// Make new sample.
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		NumUnit:  make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	for key, vals := range src.Label {
		newVals := make([]string, len(vals))
		copy(newVals, vals)
		s.Label[key] = newVals
	}
	for key, vals := range src.NumLabel {
		units := src.NumUnit[key]
		newVals := make([]int64, len(vals))
		newUnits := make([]string, len(units))
		copy(newVals, vals)
		copy(newUnits, units)
		s.NumLabel[key] = newVals
		s.NumUnit[key] = newUnits
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}

func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
	// Accumulate contents into a string.
	var buf strings.Builder
	buf.Grow(64) // Heuristic to avoid extra allocs

	// encode a number
	putNumber := func(v uint64) {
		var num [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(num[:], v)
		buf.Write(num[:n])
	}

	// encode a string prefixed with its length.
	putDelimitedString := func(s string) {
		putNumber(uint64(len(s)))
		buf.WriteString(s)
	}

	for _, l := range sample.Location {
		// Get the location in the merged profile, which may have a different ID.
		if loc := pm.mapLocation(l); loc != nil {
			putNumber(loc.ID)
		}
	}
	putNumber(0) // Delimiter

	for _, l := range sortedKeys1(sample.Label) {
		putDelimitedString(l)
		values := sample.Label[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putDelimitedString(v)
		}
	}

	for _, l := range sortedKeys2(sample.NumLabel) {
		putDelimitedString(l)
		values := sample.NumLabel[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putNumber(uint64(v))
		}
		units := sample.NumUnit[l]
		putNumber(uint64(len(units)))
		for _, v := range units {
			putDelimitedString(v)
		}
	}

	return sampleKey(buf.String())
}

type sampleKey string

// sortedKeys1 returns the sorted keys found in a string->[]string map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys2 and made into a generic function.
func sortedKeys1(m map[string][]string) []string {
	if len(m) == 0 {
		return nil
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys1 and made into a generic function.
func sortedKeys2(m map[string][]int64) []string {
	if len(m) == 0 {
		return nil
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
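
// A possible shape for the merged generic helper mentioned in the notes above,
// once the golint constraint is lifted. A sketch only, not compiled into this
// file; the name sortedKeys is hypothetical:
//
//	func sortedKeys[V any](m map[string]V) []string {
//		if len(m) == 0 {
//			return nil
//		}
//		keys := make([]string, 0, len(m))
//		for k := range m {
//			keys = append(keys, k)
//		}
//		sort.Strings(keys)
//		return keys
//	}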

func (pm *profileMerger) mapLocation(src *Location) *Location {
	if src == nil {
		return nil
	}

	if l := pm.locationsByID.get(src.ID); l != nil {
		return l
	}

	mi := pm.mapMapping(src.Mapping)
	l := &Location{
		ID:       uint64(len(pm.p.Location) + 1),
		Mapping:  mi.m,
		Address:  uint64(int64(src.Address) + mi.offset),
		Line:     make([]Line, len(src.Line)),
		IsFolded: src.IsFolded,
	}
	for i, ln := range src.Line {
		l.Line[i] = pm.mapLine(ln)
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping ID.
	k := l.key()
	if ll, ok := pm.locations[k]; ok {
		pm.locationsByID.set(src.ID, ll)
		return ll
	}
	pm.locationsByID.set(src.ID, l)
	pm.locations[k] = l
	pm.p.Location = append(pm.p.Location, l)
	return l
}

// key generates a locationKey to be used as a key for maps.
func (l *Location) key() locationKey {
	key := locationKey{
		addr:     l.Address,
		isFolded: l.IsFolded,
	}
	if l.Mapping != nil {
		// Normalizes address to handle address space randomization.
		key.addr -= l.Mapping.Start
		key.mappingID = l.Mapping.ID
	}
	// Three entries per line: function ID, line number, column number.
	lines := make([]string, len(l.Line)*3)
	for i, line := range l.Line {
		if line.Function != nil {
			lines[i*3] = strconv.FormatUint(line.Function.ID, 16)
		}
		lines[i*3+1] = strconv.FormatInt(line.Line, 16)
		lines[i*3+2] = strconv.FormatInt(line.Column, 16)
	}
	key.lines = strings.Join(lines, "|")
	return key
}

type locationKey struct {
	addr, mappingID uint64
	lines           string
	isFolded        bool
}

func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
	if src == nil {
		return mapInfo{}
	}

	if mi, ok := pm.mappingsByID[src.ID]; ok {
		return mi
	}

	// Check memoization tables.
	mk := src.key()
	if m, ok := pm.mappings[mk]; ok {
		mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
		pm.mappingsByID[src.ID] = mi
		return mi
	}
	m := &Mapping{
		ID:                     uint64(len(pm.p.Mapping) + 1),
		Start:                  src.Start,
		Limit:                  src.Limit,
		Offset:                 src.Offset,
		File:                   src.File,
		KernelRelocationSymbol: src.KernelRelocationSymbol,
		BuildID:                src.BuildID,
		HasFunctions:           src.HasFunctions,
		HasFilenames:           src.HasFilenames,
		HasLineNumbers:         src.HasLineNumbers,
		HasInlineFrames:        src.HasInlineFrames,
	}
	pm.p.Mapping = append(pm.p.Mapping, m)

	// Update memoization tables.
	pm.mappings[mk] = m
	mi := mapInfo{m, 0}
	pm.mappingsByID[src.ID] = mi
	return mi
}

// key generates a mappingKey to be used as a key for maps.
func (m *Mapping) key() mappingKey {
	// Normalize addresses to handle address space randomization.
	// Round up to next 4K boundary to avoid minor discrepancies.
	const mapsizeRounding = 0x1000

	size := m.Limit - m.Start
	size = size + mapsizeRounding - 1
	size = size - (size % mapsizeRounding)
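	// Illustrative example (hypothetical addresses): a mapping covering
	// 0x401000-0x463210 has a raw size of 0x62210 bytes, which the rounding
	// above bumps up to 0x63000.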
	key := mappingKey{
		size:   size,
		offset: m.Offset,
	}

	switch {
	case m.BuildID != "":
		key.buildIDOrFile = m.BuildID
	case m.File != "":
		key.buildIDOrFile = m.File
	default:
		// A mapping containing neither build ID nor file name is a fake mapping. A
		// key with empty buildIDOrFile is used for fake mappings so that they are
		// treated as the same mapping during merging.
	}
	return key
}

type mappingKey struct {
	size, offset  uint64
	buildIDOrFile string
}

func (pm *profileMerger) mapLine(src Line) Line {
	ln := Line{
		Function: pm.mapFunction(src.Function),
		Line:     src.Line,
		Column:   src.Column,
	}
	return ln
}

func (pm *profileMerger) mapFunction(src *Function) *Function {
	if src == nil {
		return nil
	}
	if f, ok := pm.functionsByID[src.ID]; ok {
		return f
	}
	k := src.key()
	if f, ok := pm.functions[k]; ok {
		pm.functionsByID[src.ID] = f
		return f
	}
	f := &Function{
		ID:         uint64(len(pm.p.Function) + 1),
		Name:       src.Name,
		SystemName: src.SystemName,
		Filename:   src.Filename,
		StartLine:  src.StartLine,
	}
	pm.functions[k] = f
	pm.functionsByID[src.ID] = f
	pm.p.Function = append(pm.p.Function, f)
	return f
}

// key generates a struct to be used as a key for maps.
func (f *Function) key() functionKey {
	return functionKey{
		f.StartLine,
		f.Name,
		f.SystemName,
		f.Filename,
	}
}

type functionKey struct {
	startLine                  int64
	name, systemName, fileName string
}

// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}

	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var defaultSampleType string
	var docURL string
	for _, s := range srcs {
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		durationNanos += s.DurationNanos
		if period == 0 || period < s.Period {
			period = s.Period
		}
		for _, c := range s.Comments {
			if seen := seenComments[c]; !seen {
				comments = append(comments, c)
				seenComments[c] = true
			}
		}
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
		if docURL == "" {
			docURL = s.DocURL
		}
	}

	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),

		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,

		TimeNanos:     timeNanos,
		DurationNanos: durationNanos,
		PeriodType:    srcs[0].PeriodType,
		Period:        period,

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
		DocURL:            docURL,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}

// compatible determines if two profiles can be compared/merged.
// It returns nil if the profiles are compatible; otherwise an error with
// details on the incompatibility.
func (p *Profile) compatible(pb *Profile) error {
	if !equalValueType(p.PeriodType, pb.PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
	}

	if len(p.SampleType) != len(pb.SampleType) {
		return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
	}

	for i := range p.SampleType {
		if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
			return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
		}
	}
	return nil
}

// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
	return st1.Type == st2.Type && st1.Unit == st2.Unit
}

// locationIDMap is like a map[uint64]*Location, but provides efficiency for
// ids that are densely numbered, which is often the case.
type locationIDMap struct {
	dense  []*Location          // indexed by id for id < len(dense)
	sparse map[uint64]*Location // indexed by id for id >= len(dense)
}

func makeLocationIDMap(n int) locationIDMap {
	return locationIDMap{
		dense:  make([]*Location, n),
		sparse: map[uint64]*Location{},
	}
}

func (lm locationIDMap) get(id uint64) *Location {
	if id < uint64(len(lm.dense)) {
		return lm.dense[int(id)]
	}
	return lm.sparse[id]
}

func (lm locationIDMap) set(id uint64, loc *Location) {
	if id < uint64(len(lm.dense)) {
		lm.dense[id] = loc
		return
	}
	lm.sparse[id] = loc
}

// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
// keeps only the sample types that appear in all profiles and drops/reorders
// the sample types as necessary.
//
// If the sample type order differs between profiles, the order is derived from
// the first profile.
//
// Profiles are modified in-place.
//
// It returns an error if the intersection of sample types is empty.
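//
// A usage sketch (assuming ps holds profiles whose sample types overlap, e.g.
// heap profiles collected by different tool versions):
//
//	if err := CompatibilizeSampleTypes(ps); err != nil {
//		return err
//	}
//	merged, err := Merge(ps)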
func CompatibilizeSampleTypes(ps []*Profile) error {
	sTypes := commonSampleTypes(ps)
	if len(sTypes) == 0 {
		return fmt.Errorf("profiles have empty common sample type list")
	}
	for _, p := range ps {
		if err := compatibilizeSampleTypes(p, sTypes); err != nil {
			return err
		}
	}
	return nil
}

// commonSampleTypes returns the sample types that appear in all profiles, in
// the order in which they appear in the first profile.
func commonSampleTypes(ps []*Profile) []string {
	if len(ps) == 0 {
		return nil
	}
	sTypes := map[string]int{}
	for _, p := range ps {
		for _, st := range p.SampleType {
			sTypes[st.Type]++
		}
	}
	var res []string
	for _, st := range ps[0].SampleType {
		if sTypes[st.Type] == len(ps) {
			res = append(res, st.Type)
		}
	}
	return res
}

// compatibilizeSampleTypes drops sample types that are not present in the
// sTypes list and reorders the remaining ones if needed.
//
// It sets DefaultSampleType to sTypes[0] if the current default is not in the
// sTypes list.
//
// It assumes that all sample types from the sTypes list are present in the
// given profile; otherwise it returns an error.
func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
	if len(sTypes) == 0 {
		return fmt.Errorf("sample type list is empty")
	}
	defaultSampleType := sTypes[0]
	reMap, needToModify := make([]int, len(sTypes)), false
	for i, st := range sTypes {
		if st == p.DefaultSampleType {
			defaultSampleType = p.DefaultSampleType
		}
		idx := searchValueType(p.SampleType, st)
		if idx < 0 {
			return fmt.Errorf("%q sample type is not found in profile", st)
		}
		reMap[i] = idx
		if idx != i {
			needToModify = true
		}
	}
	if !needToModify && len(sTypes) == len(p.SampleType) {
		return nil
	}
	p.DefaultSampleType = defaultSampleType
	oldTypes := p.SampleType
	p.SampleType = make([]*ValueType, len(sTypes))
	for i, idx := range reMap {
		p.SampleType[i] = oldTypes[idx]
	}
	values := make([]int64, len(sTypes))
	for _, s := range p.Sample {
		for i, idx := range reMap {
			values[i] = s.Value[idx]
		}
		s.Value = s.Value[:len(values)]
		copy(s.Value, values)
	}
	return nil
}

func searchValueType(vts []*ValueType, s string) int {
	for i, vt := range vts {
		if vt.Type == s {
			return i
		}
	}
	return -1
}