package goja

import (
	"errors"
	"io"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/google/pprof/profile"
)

// profInterval is the sampling period: how often the profiler asks a VM to
// take a stack sample (also recorded as the pprof Period).
const profInterval = 10 * time.Millisecond

// profMaxStackDepth caps the number of stack frames captured per sample.
const profMaxStackDepth = 64

// States of profTracker.req, the request cell used to hand sampling work
// between the profiler goroutine and a VM (accessed atomically).
const (
	profReqNone int32 = iota
	profReqDoSample    // profiler asks the VM to capture a stack sample
	profReqSampleReady // the VM has filled in the sample; profiler may collect it
	profReqStop        // profiling has stopped; the VM should stop tracing
)

// _globalProfiler ties the process-wide profiler to the output writer
// supplied via StartProfile.
type _globalProfiler struct {
	p profiler
	w io.Writer

	enabled int32 // non-zero while profiling is active; accessed atomically
}

// globalProfiler is the single profiler instance shared by all Runtimes in
// the process (see StartProfile/StopProfile).
var globalProfiler _globalProfiler

// profTracker is the per-VM cell through which the profiler goroutine and a
// running VM exchange sampling requests and results. req and finished are
// accessed atomically; start/stop bracket the sampled interval and
// frames[:numFrames] holds the captured stack.
type profTracker struct {
	req, finished int32
	start, stop   time.Time
	numFrames     int
	frames        [profMaxStackDepth]StackFrame
}

// profiler coordinates sampling across all registered VMs.
// mu guards trackers, buf and running.
type profiler struct {
	mu       sync.Mutex
	trackers []*profTracker
	buf      *profBuffer
	running  bool
}

// profFunc accumulates profile data for a single compiled Program: its pprof
// Function record plus one Location per sampled program counter.
type profFunc struct {
	f    profile.Function
	locs map[int32]*profile.Location
}

// profSampleNode is a node in a prefix tree of sampled call stacks; each
// root-to-node path is one distinct stack, and sample (if non-nil) holds the
// accumulated counts for stacks ending at this node.
type profSampleNode struct {
	loc      *profile.Location
	sample   *profile.Sample
	parent   *profSampleNode
	children map[*profile.Location]*profSampleNode
}

// profBuffer accumulates samples between start() and stop(): funcs indexes
// per-Program data, root anchors the prefix tree of sampled stacks.
type profBuffer struct {
	funcs map[*Program]*profFunc
	root  profSampleNode
}

func ( *profBuffer) ( *profTracker) {
	 := .frames[:.numFrames]
	 := &.root
	for  := len() - 1;  >= 0; -- {
		 := []
		if .prg == nil {
			continue
		}
		var  *profFunc
		if  = .funcs[.prg];  == nil {
			 = &profFunc{
				locs: make(map[int32]*profile.Location),
			}
			if .funcs == nil {
				.funcs = make(map[*Program]*profFunc)
			}
			.funcs[.prg] = 
		}
		var  *profile.Location
		if  = .locs[int32(.pc)];  == nil {
			 = &profile.Location{}
			.locs[int32(.pc)] = 
		}
		if  := .children[];  == nil {
			if .children == nil {
				.children = make(map[*profile.Location]*profSampleNode, 1)
			}
			 = &profSampleNode{
				parent: ,
				loc:    ,
			}
			.children[] = 
			 = 
		} else {
			 = 
		}
	}
	 := .sample
	if  == nil {
		 := make([]*profile.Location, 0, len())
		for  := ; .loc != nil;  = .parent {
			 = append(, .loc)
		}
		 = &profile.Sample{
			Location: ,
			Value:    make([]int64, 2),
		}
		.sample = 
	}
	.Value[0]++
	.Value[1] += int64(.stop.Sub(.start))
}

func ( *profBuffer) () *profile.Profile {
	 := profile.Profile{}
	.SampleType = []*profile.ValueType{
		{Type: "samples", Unit: "count"},
		{Type: "cpu", Unit: "nanoseconds"},
	}
	.PeriodType = .SampleType[1]
	.Period = int64(profInterval)
	 := &profile.Mapping{
		ID:   1,
		File: "[ECMAScript code]",
	}
	.Mapping = make([]*profile.Mapping, 1, len(.funcs)+1)
	.Mapping[0] = 

	.Function = make([]*profile.Function, 0, len(.funcs))
	 := make(map[string]struct{})
	var ,  uint64
	for ,  := range .funcs {
		 := .src.Name()
		++
		.f.ID = 
		.f.Filename = 
		var  string
		if .funcName != "" {
			 = .funcName.String()
		} else {
			 = "<anonymous>"
		}
		// Make sure the function name is unique, otherwise the graph display merges them into one node, even
		// if they are in different mappings.
		if ,  := [];  {
			 += "." + strconv.FormatUint(.f.ID, 10)
		} else {
			[] = struct{}{}
		}
		.f.Name = 
		.Function = append(.Function, &.f)
		for ,  := range .locs {
			++
			.ID = 
			 := .src.Position(.sourceOffset(int()))
			.Line = []profile.Line{
				{
					Function: &.f,
					Line:     int64(.Line),
				},
			}

			.Mapping = 
			.Location = append(.Location, )
		}
	}
	.addSamples(&, &.root)
	return &
}

func ( *profBuffer) ( *profile.Profile,  *profSampleNode) {
	if .sample != nil {
		.Sample = append(.Sample, .sample)
	}
	for ,  := range .children {
		.(, )
	}
}

func ( *profiler) () {
	 := time.NewTicker(profInterval)
	 := 0

	for  := range .C {
		.mu.Lock()
		 := len(.trackers)
		if  == 0 {
			break
		}
		for {
			// This loop runs until either one of the VMs is signalled or all of the VMs are scanned and found
			// busy or deleted.
			if  >= len(.trackers) {
				 = 0
			}
			 := .trackers[]
			 := atomic.LoadInt32(&.req)
			if  == profReqSampleReady {
				.buf.addSample()
			}
			if atomic.LoadInt32(&.finished) != 0 {
				.trackers[] = .trackers[len(.trackers)-1]
				.trackers[len(.trackers)-1] = nil
				.trackers = .trackers[:len(.trackers)-1]
			} else {
				++
				if  != profReqDoSample {
					// signal the VM to take a sample
					.start = 
					atomic.StoreInt32(&.req, profReqDoSample)
					break
				}
			}
			--
			if  <= 0 {
				// all VMs are busy
				break
			}
		}
		.mu.Unlock()
	}
	.Stop()
	.running = false
	.mu.Unlock()
}

func ( *profiler) () *profTracker {
	 := new(profTracker)
	.mu.Lock()
	if .buf != nil {
		.trackers = append(.trackers, )
		if !.running {
			go .run()
			.running = true
		}
	} else {
		.req = profReqStop
	}
	.mu.Unlock()
	return 
}

func ( *profiler) () error {
	.mu.Lock()
	if .buf != nil {
		.mu.Unlock()
		return errors.New("profiler is already active")
	}
	.buf = new(profBuffer)
	.mu.Unlock()
	return nil
}

func ( *profiler) () *profile.Profile {
	.mu.Lock()
	,  := .trackers, .buf
	.trackers, .buf = nil, nil
	.mu.Unlock()
	if  != nil {
		 := 0
		for ,  := range  {
			 := atomic.LoadInt32(&.req)
			if  == profReqSampleReady {
				.addSample()
			} else if  == profReqDoSample {
				// In case the VM is requested to do a sample, there is a small chance of a race
				// where we set profReqStop in between the read and the write, so that the req
				// ends up being set to profReqSampleReady. It's no such a big deal if we do nothing,
				// it just means the VM remains in tracing mode until it finishes the current run,
				// but we do an extra cleanup step later just in case.
				if  !=  {
					[] = []
				}
				++
			}
			atomic.StoreInt32(&.req, profReqStop)
		}

		if  > 0 {
			 = [:]
			go func() {
				// Make sure all VMs are requested to stop tracing.
				for {
					 := 0
					for ,  := range  {
						 := atomic.LoadInt32(&.req)
						if  != profReqStop {
							atomic.StoreInt32(&.req, profReqStop)
							if  !=  {
								[] = []
							}
							++
						}
					}

					if  == 0 {
						return
					}
					 = [:]
					time.Sleep(100 * time.Millisecond)
				}
			}()
		}
		return .profile()
	}
	return nil
}

/*
StartProfile enables execution time profiling for all Runtimes within the current process.
This works similar to pprof.StartCPUProfile and produces the same format which can be consumed by `go tool pprof`.
There are, however, a few notable differences. Firstly, it's not a CPU profile, rather "execution time" profile.
It measures the time the VM spends executing an instruction. If this instruction happens to be a call to a
blocking Go function, the waiting time will be measured. Secondly, the 'cpu' sample isn't simply `count*period`,
it's the time interval between when sampling was requested and when the instruction has finished. If a VM is still
executing the same instruction when the time comes for the next sample, the sampling is skipped (i.e. `count` doesn't
grow).

If there are multiple functions with the same name, their names get a '.N' suffix, where N is a unique number,
because otherwise the graph view merges them together (even if they are in different mappings). This includes
"<anonymous>" functions.

The sampling period is set to 10ms.

It returns an error if profiling is already active.
*/
func ( io.Writer) error {
	 := globalProfiler.p.start()
	if  != nil {
		return 
	}
	globalProfiler.w = 
	atomic.StoreInt32(&globalProfiler.enabled, 1)
	return nil
}

/*
StopProfile stops the current profile initiated by StartProfile, if any.
*/
func () {
	atomic.StoreInt32(&globalProfiler.enabled, 0)
	 := globalProfiler.p.stop()
	if  != nil {
		_ = .Write(globalProfiler.w)
	}
	globalProfiler.w = nil
}