package amd64

import (
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
)

var (
	executionContextPtrReg = raxVReg

	// The following are callee-saved registers. They can be used freely in the entry preamble
	// since the preamble is called via a Go assembly function, which uses a stack-based ABI.

	// savedExecutionContextPtr must also be a callee-saved register so that it can be used in both the prologue and the epilogue.
	savedExecutionContextPtr = rdxVReg
	// paramResultSlicePtr must match with entrypoint function in abi_entry_amd64.s.
	paramResultSlicePtr = r12VReg
	// goAllocatedStackPtr must match with entrypoint function in abi_entry_amd64.s.
	goAllocatedStackPtr = r13VReg
	// functionExecutable must match with entrypoint function in abi_entry_amd64.s.
	functionExecutable = r14VReg
	tmpIntReg          = r15VReg
	tmpXmmReg          = xmm15VReg
)

// CompileEntryPreamble implements backend.Machine.
func (m *machine) CompileEntryPreamble(sig *ssa.Signature) []byte {
	root := m.compileEntryPreamble(sig)
	m.encodeWithoutSSA(root)
	return m.c.Buf()
}

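// compileEntryPreamble builds the linked list of instructions for the entry preamble: switch from
// the original Go stack to the Go-allocated stack, copy the arguments out of the params/results
// slice, call the actual function, copy the results back, and restore the original RSP/RBP.
// The generated code looks roughly like the following sketch (exact offsets and the stack
// adjustment depend on the signature):
//
//	mov %rax, %rdx                            // save the execution context pointer
//	mov %rbp, <originalFramePointer>(%rax)    // save the original RBP/RSP into the execution context
//	mov %rsp, <originalStackPointer>(%rax)
//	mov %r13, %rsp                            // switch RSP to the Go-allocated stack
//	sub $stackSlotSize, %rsp                  // reserve the arg/result stack slots, if any
//	...                                       // pass the arguments from the slice pointed to by %r12
//	xor %rbp, %rbp                            // zero RBP so stack unwinding can detect the stack end
//	call *%r14                                // call the actual function
//	...                                       // store the results into the slice pointed to by %r12
//	mov <originalFramePointer>(%rdx), %rbp    // restore the original RBP/RSP
//	mov <originalStackPointer>(%rdx), %rsp
//	ret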
func (m *machine) compileEntryPreamble(sig *ssa.Signature) *instruction {
	abi := backend.FunctionABI{}
	abi.Init(sig, intArgResultRegs, floatArgResultRegs)

	root := m.allocateNop()

	//// ----------------------------------- prologue ----------------------------------- ////

	// First, we save executionContextPtrReg into a callee-saved register so that it can be used in epilogue as well.
	// 		mov %executionContextPtrReg, %savedExecutionContextPtr
	cur := m.move64(executionContextPtrReg, savedExecutionContextPtr, root)

	// Next is to save the original RBP and RSP into the execution context.
	cur = m.saveOriginalRSPRBP(cur)

	// Now set the RSP to the Go-allocated stack pointer.
	// 		mov %goAllocatedStackPtr, %rsp
	cur = m.move64(goAllocatedStackPtr, rspVReg, cur)

	if stackSlotSize := abi.AlignedArgResultStackSlotSize(); stackSlotSize > 0 {
		// Allocate stack slots for the arguments and return values.
		// 		sub $stackSlotSize, %rsp
		spDec := m.allocateInstr().asAluRmiR(aluRmiROpcodeSub, newOperandImm32(uint32(stackSlotSize)), rspVReg, true)
		cur = linkInstr(cur, spDec)
	}

	var offset uint32
	for i := range abi.Args {
		if i < 2 {
			// Module context ptr and execution context ptr are passed in rax and rbx by the Go assembly function.
			continue
		}
		arg := &abi.Args[i]
		cur = m.goEntryPreamblePassArg(cur, paramResultSlicePtr, offset, arg)
		if arg.Type == ssa.TypeV128 {
			offset += 16
		} else {
			offset += 8
		}
	}

	// Zero out RBP so that the unwind/stack growth code can correctly detect the end of the stack.
	zerosRbp := m.allocateInstr().asAluRmiR(aluRmiROpcodeXor, newOperandReg(rbpVReg), rbpVReg, true)
	cur = linkInstr(cur, zerosRbp)

	// Now we are ready to call the actual function. Note that at this point the stack pointer is already
	// set to the Go-allocated stack, which is aligned to 16 bytes.
	call := m.allocateInstr().asCallIndirect(newOperandReg(functionExecutable), &abi)
	cur = linkInstr(cur, call)

	//// ----------------------------------- epilogue ----------------------------------- ////

	// Read the results from the registers and the stack, and write them into the slice pointed to by paramResultSlicePtr.
	offset = 0
	for i := range abi.Rets {
		r := &abi.Rets[i]
		cur = m.goEntryPreamblePassResult(cur, paramResultSlicePtr, offset, r, uint32(abi.ArgStackSize))
		if r.Type == ssa.TypeV128 {
			offset += 16
		} else {
			offset += 8
		}
	}

	// Finally, restore the original RBP and RSP.
	cur = m.restoreOriginalRSPRBP(cur)

	ret := m.allocateInstr().asRet()
	linkInstr(cur, ret)
	return root
}

// saveOriginalRSPRBP saves the original RSP and RBP into the execution context.
func (m *machine) saveOriginalRSPRBP(cur *instruction) *instruction {
	// 		mov %rbp, wazevoapi.ExecutionContextOffsetOriginalFramePointer(%executionContextPtrReg)
	// 		mov %rsp, wazevoapi.ExecutionContextOffsetOriginalStackPointer(%executionContextPtrReg)
	cur = m.loadOrStore64AtExecutionCtx(executionContextPtrReg, wazevoapi.ExecutionContextOffsetOriginalFramePointer, rbpVReg, true, cur)
	cur = m.loadOrStore64AtExecutionCtx(executionContextPtrReg, wazevoapi.ExecutionContextOffsetOriginalStackPointer, rspVReg, true, cur)
	return cur
}

// restoreOriginalRSPRBP restores the original RSP and RBP from the execution context.
func (m *machine) restoreOriginalRSPRBP(cur *instruction) *instruction {
	// 		mov wazevoapi.ExecutionContextOffsetOriginalFramePointer(%executionContextPtrReg), %rbp
	// 		mov wazevoapi.ExecutionContextOffsetOriginalStackPointer(%executionContextPtrReg), %rsp
	cur = m.loadOrStore64AtExecutionCtx(savedExecutionContextPtr, wazevoapi.ExecutionContextOffsetOriginalFramePointer, rbpVReg, false, cur)
	cur = m.loadOrStore64AtExecutionCtx(savedExecutionContextPtr, wazevoapi.ExecutionContextOffsetOriginalStackPointer, rspVReg, false, cur)
	return cur
}

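// move64 appends a 64-bit register-to-register move after prev and returns the new tail instruction.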
func (m *machine) move64(src, dst regalloc.VReg, prev *instruction) *instruction {
	mov := m.allocateInstr().asMovRR(src, dst, true)
	return linkInstr(prev, mov)
}

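// loadOrStore64AtExecutionCtx appends a 64-bit store of r into (store=true), or a 64-bit load of r
// from (store=false), the field at the given offset in the execution context.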
func (m *machine) loadOrStore64AtExecutionCtx(execCtx regalloc.VReg, offset wazevoapi.Offset, r regalloc.VReg, store bool, prev *instruction) *instruction {
	mem := newOperandMem(m.newAmodeImmReg(offset.U32(), execCtx))
	instr := m.allocateInstr()
	if store {
		instr.asMovRM(r, mem, 8)
	} else {
		instr.asMov64MR(mem, r)
	}
	return linkInstr(prev, instr)
}

// linkUD2 appends a UD2 instruction, which traps if reached. This is for debugging.
func (m *machine) linkUD2(cur *instruction) *instruction { //nolint
	return linkInstr(cur, m.allocateInstr().asUD2())
}

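// goEntryPreamblePassArg loads the argument at offsetInParamSlice in the params/results slice and
// passes it according to the ABI: either directly into its designated register, or, for a
// stack-passed argument, via a temporary register into its stack slot.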
func (m *machine) goEntryPreamblePassArg(cur *instruction, paramSlicePtr regalloc.VReg, offsetInParamSlice uint32, arg *backend.ABIArg) *instruction {
	var dst regalloc.VReg
	argTyp := arg.Type
	if arg.Kind == backend.ABIArgKindStack {
		// A stack-passed argument must go through a temporary register before being stored to its stack slot.
		switch argTyp {
		case ssa.TypeI32, ssa.TypeI64:
			dst = tmpIntReg
		case ssa.TypeF32, ssa.TypeF64, ssa.TypeV128:
			dst = tmpXmmReg
		default:
			panic("BUG")
		}
	} else {
		dst = arg.Reg
	}

	load := m.allocateInstr()
	mem := newOperandMem(m.newAmodeImmReg(offsetInParamSlice, paramSlicePtr))
	switch arg.Type {
	case ssa.TypeI32:
		load.asMovzxRmR(extModeLQ, mem, dst)
	case ssa.TypeI64:
		load.asMov64MR(mem, dst)
	case ssa.TypeF32:
		load.asXmmUnaryRmR(sseOpcodeMovss, mem, dst)
	case ssa.TypeF64:
		load.asXmmUnaryRmR(sseOpcodeMovsd, mem, dst)
	case ssa.TypeV128:
		load.asXmmUnaryRmR(sseOpcodeMovdqu, mem, dst)
	}

	cur = linkInstr(cur, load)
	if arg.Kind == backend.ABIArgKindStack {
		// Store back to the stack slot reserved for this argument.
		store := m.allocateInstr()
		slot := newOperandMem(m.newAmodeImmReg(uint32(arg.Offset), rspVReg))
		switch arg.Type {
		case ssa.TypeI32:
			store.asMovRM(dst, slot, 4)
		case ssa.TypeI64:
			store.asMovRM(dst, slot, 8)
		case ssa.TypeF32:
			store.asXmmMovRM(sseOpcodeMovss, dst, slot)
		case ssa.TypeF64:
			store.asXmmMovRM(sseOpcodeMovsd, dst, slot)
		case ssa.TypeV128:
			store.asXmmMovRM(sseOpcodeMovdqu, dst, slot)
		}
		cur = linkInstr(cur, store)
	}
	return cur
}

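// goEntryPreamblePassResult writes the result at offsetInResultSlice in the params/results slice:
// either directly from its designated register, or, for a stack-returned result, via a temporary
// register loaded from its stack slot (the result slots begin at resultStackSlotBeginOffset from RSP).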
func (m *machine) goEntryPreamblePassResult(cur *instruction, resultSlicePtr regalloc.VReg, offsetInResultSlice uint32, result *backend.ABIArg, resultStackSlotBeginOffset uint32) *instruction {
	var r regalloc.VReg
	if result.Kind == backend.ABIArgKindStack {
		// Load the value into a temporary register.
		load := m.allocateInstr()
		offset := resultStackSlotBeginOffset + uint32(result.Offset)
		mem := newOperandMem(m.newAmodeImmReg(offset, rspVReg))
		switch result.Type {
		case ssa.TypeI32:
			r = tmpIntReg
			load.asMovzxRmR(extModeLQ, mem, r)
		case ssa.TypeI64:
			r = tmpIntReg
			load.asMov64MR(mem, r)
		case ssa.TypeF32:
			r = tmpXmmReg
			load.asXmmUnaryRmR(sseOpcodeMovss, mem, r)
		case ssa.TypeF64:
			r = tmpXmmReg
			load.asXmmUnaryRmR(sseOpcodeMovsd, mem, r)
		case ssa.TypeV128:
			r = tmpXmmReg
			load.asXmmUnaryRmR(sseOpcodeMovdqu, mem, r)
		default:
			panic("BUG")
		}
		cur = linkInstr(cur, load)
	} else {
		r = result.Reg
	}

	store := m.allocateInstr()
	mem := newOperandMem(m.newAmodeImmReg(offsetInResultSlice, resultSlicePtr))
	switch result.Type {
	case ssa.TypeI32:
		store.asMovRM(r, mem, 4)
	case ssa.TypeI64:
		store.asMovRM(r, mem, 8)
	case ssa.TypeF32:
		store.asXmmMovRM(sseOpcodeMovss, r, mem)
	case ssa.TypeF64:
		store.asXmmMovRM(sseOpcodeMovsd, r, mem)
	case ssa.TypeV128:
		store.asXmmMovRM(sseOpcodeMovdqu, r, mem)
	}

	return linkInstr(cur, store)
}