package amd64

import (
	"fmt"
	"unsafe"

	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
)

type operand struct {
	kind operandKind
	data uint64
}

type operandKind byte

const (
	// operandKindReg is an operand which is an integer Register.
	operandKindReg operandKind = iota + 1

	// operandKindMem is a value in memory, i.e. a 32-, 64-, or 128-bit value
	// addressed by an amode.
	operandKindMem

	// operandKindImm32 is a signed-32-bit integer immediate value.
	operandKindImm32

	// operandKindLabel is a label.
	operandKindLabel
)

// String implements fmt.Stringer.
func (o operandKind) String() string {
	switch o {
	case operandKindReg:
		return "reg"
	case operandKindMem:
		return "mem"
	case operandKindImm32:
		return "imm32"
	case operandKindLabel:
		return "label"
	default:
		panic("BUG: invalid operand kind")
	}
}

// format returns the string representation of the operand.
// _64 only applies when the operand is an integer register: it indicates whether
// the register should be printed with its 64-bit or 32-bit name.
func (o *operand) format(_64 bool) string {
	switch o.kind {
	case operandKindReg:
		return formatVRegSized(o.reg(), _64)
	case operandKindMem:
		return o.addressMode().String()
	case operandKindImm32:
		return fmt.Sprintf("$%d", int32(o.imm32()))
	case operandKindLabel:
		return label(o.imm32()).String()
	default:
		panic(fmt.Sprintf("BUG: invalid operand: %s", o.kind))
	}
}

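// reg returns the operand's payload as a virtual register.
// Only valid when kind is operandKindReg.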
//go:inline
func (o *operand) reg() regalloc.VReg {
	return regalloc.VReg(o.data)
}

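// setReg replaces the operand's payload with the given virtual register.
// Only valid when kind is operandKindReg.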
//go:inline
func (o *operand) setReg(r regalloc.VReg) {
	o.data = uint64(r)
}

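// addressMode returns the operand's payload as a pointer to the amode it refers to.
// Only valid when kind is operandKindMem.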
//go:inline
func (o *operand) addressMode() *amode {
	return wazevoapi.PtrFromUintptr[amode](uintptr(o.data))
}

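// imm32 returns the operand's payload as a 32-bit immediate.
// Used for both operandKindImm32 and operandKindLabel.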
//go:inline
func (o *operand) imm32() uint32 {
	return uint32(o.data)
}

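// label returns the label this operand refers to: either the label itself
// (operandKindLabel) or the target of a RIP-relative memory operand (operandKindMem).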
func (o *operand) label() label {
	switch o.kind {
	case operandKindLabel:
		return label(o.data)
	case operandKindMem:
		a := o.addressMode()
		if a.kind() != amodeRipRel {
			panic("BUG: invalid label")
		}
		return label(a.imm32)
	default:
		panic("BUG: invalid operand kind")
	}
}

func newOperandLabel(l label) operand {
	return operand{kind: operandKindLabel, data: uint64(l)}
}

func newOperandReg(r regalloc.VReg) operand {
	return operand{kind: operandKindReg, data: uint64(r)}
}

func newOperandImm32(imm32 uint32) operand {
	return operand{kind: operandKindImm32, data: uint64(imm32)}
}

func newOperandMem(a *amode) operand {
	return operand{kind: operandKindMem, data: uint64(uintptr(unsafe.Pointer(a)))}
}

// amode is a memory operand (addressing mode).
type amode struct {
	kindWithShift uint32
	imm32         uint32
	base          regalloc.VReg

	// For amodeRegRegShift:
	index regalloc.VReg
}

type amodeKind byte

const (
	// amodeImmReg calculates sign-extend-32-to-64(Immediate) + base
	amodeImmReg amodeKind = iota + 1

	// amodeImmRBP is the same as amodeImmReg, except that the base register is fixed to RBP.
	// The only difference is that it doesn't report RBP as a use to the register allocator,
	// since exposing that fixed register there would only be a distraction.
	amodeImmRBP

	// amodeRegRegShift calculates sign-extend-32-to-64(Immediate) + base + (index << shift)
	amodeRegRegShift

	// amodeRipRel is a RIP-relative addressing mode specified by the label.
	amodeRipRel

	// TODO: there are other addressing modes such as the one without base register.
)
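// As an example of the encodings above: an amodeRegRegShift with imm32=16, base=%rax,
// index=%rcx, and shift=3 computes 16 + %rax + (%rcx << 3), which String below renders
// as "16(%rax,%rcx,8)".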

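// kind extracts the amodeKind from the low 8 bits of kindWithShift.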
func (a *amode) kind() amodeKind {
	return amodeKind(a.kindWithShift & 0xff)
}

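// shift extracts the scale shift (0-3) from bits 8-15 of kindWithShift.
// Only meaningful for amodeRegRegShift.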
func (a *amode) shift() byte {
	return byte(a.kindWithShift >> 8)
}

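// uses appends the allocatable virtual registers read by this addressing mode to regs.
// amodeImmRBP and amodeRipRel contribute none, since RBP is fixed and RIP-relative
// addressing needs no register.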
func (a *amode) uses(regs *[]regalloc.VReg) {
	switch a.kind() {
	case amodeImmReg:
		*regs = append(*regs, a.base)
	case amodeRegRegShift:
		*regs = append(*regs, a.base, a.index)
	case amodeImmRBP, amodeRipRel:
	default:
		panic("BUG: invalid amode kind")
	}
}

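// nregs returns the number of registers that uses would append for this addressing mode.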
func (a *amode) nregs() int {
	switch a.kind() {
	case amodeImmReg:
		return 1
	case amodeRegRegShift:
		return 2
	case amodeImmRBP, amodeRipRel:
		return 0
	default:
		panic("BUG: invalid amode kind")
	}
}

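// assignUses rewrites the i-th register use of this addressing mode (base first,
// then index) with reg, typically after register allocation.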
func (a *amode) assignUses(i int, reg regalloc.VReg) {
	switch a.kind() {
	case amodeImmReg:
		if i == 0 {
			a.base = reg
		} else {
			panic("BUG: invalid amode assignment")
		}
	case amodeRegRegShift:
		if i == 0 {
			a.base = reg
		} else if i == 1 {
			a.index = reg
		} else {
			panic("BUG: invalid amode assignment")
		}
	default:
		panic("BUG: invalid amode assignment")
	}
}

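// newAmodeImmReg allocates an amodeImmReg (imm32 + base) from the machine's amode pool.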
func (m *machine) newAmodeImmReg(imm32 uint32, base regalloc.VReg) *amode {
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeImmReg), imm32: imm32, base: base}
	return ret
}

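// newAmodeImmRBPReg is like newAmodeImmReg, but with the base register fixed to RBP.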
func (m *machine) newAmodeImmRBPReg(imm32 uint32) *amode {
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeImmRBP), imm32: imm32, base: rbpVReg}
	return ret
}

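// newAmodeRegRegShift allocates an amodeRegRegShift (imm32 + base + (index << shift));
// shift must be in the range [0, 3].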
func (m *machine) newAmodeRegRegShift(imm32 uint32, base, index regalloc.VReg, shift byte) *amode {
	if shift > 3 {
		panic(fmt.Sprintf("BUG: invalid shift (must be <= 3): %d", shift))
	}
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeRegRegShift) | uint32(shift)<<8, imm32: imm32, base: base, index: index}
	return ret
}

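// newAmodeRipRel allocates a RIP-relative amode that resolves to the given label.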
func (m *machine) newAmodeRipRel(l label) *amode {
	ret := m.amodePool.Allocate()
	*ret = amode{kindWithShift: uint32(amodeRipRel), imm32: uint32(l)}
	return ret
}

// String implements fmt.Stringer.
func (a *amode) String() string {
	switch a.kind() {
	case amodeImmReg, amodeImmRBP:
		if a.imm32 == 0 {
			return fmt.Sprintf("(%s)", formatVRegSized(a.base, true))
		}
		return fmt.Sprintf("%d(%s)", int32(a.imm32), formatVRegSized(a.base, true))
	case amodeRegRegShift:
		shift := 1 << a.shift()
		if a.imm32 == 0 {
			return fmt.Sprintf(
				"(%s,%s,%d)",
				formatVRegSized(a.base, true), formatVRegSized(a.index, true), shift)
		}
		return fmt.Sprintf(
			"%d(%s,%s,%d)",
			int32(a.imm32), formatVRegSized(a.base, true), formatVRegSized(a.index, true), shift)
	case amodeRipRel:
		return fmt.Sprintf("%s(%%rip)", label(a.imm32))
	default:
		panic("BUG: invalid amode kind")
	}
}

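// getOperand_Mem_Reg returns an operand for the given value definition, folding the
// defining load into a memory operand when possible and falling back to a register otherwise.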
func (m *machine) getOperand_Mem_Reg(def backend.SSAValueDefinition) (op operand) {
	if !def.IsFromInstr() {
		return newOperandReg(m.c.VRegOf(def.V))
	}

	if def.V.Type() == ssa.TypeV128 {
		// SIMD instructions require strict memory alignment, so we don't support the memory operand for V128 at the moment.
		return m.getOperand_Reg(def)
	}

	if m.c.MatchInstr(def, ssa.OpcodeLoad) {
		instr := def.Instr
		ptr, offset, _ := instr.LoadData()
		op = newOperandMem(m.lowerToAddressMode(ptr, offset))
		instr.MarkLowered()
		return op
	}
	return m.getOperand_Reg(def)
}

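// getOperand_Mem_Imm32_Reg returns an operand for the given value definition, trying,
// in order: a memory operand (by folding a load), a 32-bit immediate, and a register.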
func (m *machine) getOperand_Mem_Imm32_Reg(def backend.SSAValueDefinition) (op operand) {
	if !def.IsFromInstr() {
		return newOperandReg(m.c.VRegOf(def.V))
	}

	if m.c.MatchInstr(def, ssa.OpcodeLoad) {
		instr := def.Instr
		ptr, offset, _ := instr.LoadData()
		op = newOperandMem(m.lowerToAddressMode(ptr, offset))
		instr.MarkLowered()
		return op
	}
	return m.getOperand_Imm32_Reg(def)
}

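// getOperand_Imm32_Reg returns an operand for the given value definition, using a 32-bit
// immediate when the value is a constant that can be encoded as one, and a register otherwise.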
func (m *machine) getOperand_Imm32_Reg(def backend.SSAValueDefinition) (op operand) {
	if !def.IsFromInstr() {
		return newOperandReg(m.c.VRegOf(def.V))
	}

	instr := def.Instr
	if instr.Constant() {
		// x64 sign-extends a 32-bit immediate to 64 bits when the operation is 64-bit.
		// Therefore, the constant can be encoded as an immediate only if it fits in 32 bits and,
		// for 64-bit operations, its sign bit is not set; otherwise we fall back to a register.
		if imm32Op, ok := asImm32Operand(instr.ConstantVal(), instr.Return().Type() == ssa.TypeI32); ok {
			instr.MarkLowered()
			return imm32Op
		}
	}
	return m.getOperand_Reg(def)
}

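// asImm32Operand wraps asImm32 into an immediate operand, reporting whether the value
// was encodable.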
func asImm32Operand(val uint64, allowSignExt bool) (operand, bool) {
	if imm32, ok := asImm32(val, allowSignExt); ok {
		return newOperandImm32(imm32), true
	}
	return operand{}, false
}

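// asImm32 returns val as a 32-bit immediate if it fits. When allowSignExt is false
// (i.e. the consuming operation is 64-bit), values with bit 31 set are also rejected
// because the CPU would sign-extend them; e.g. 0x8000_0000 would become
// 0xffff_ffff_8000_0000 in a 64-bit operation.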
func asImm32(val uint64, allowSignExt bool) (uint32, bool) {
	u32val := uint32(val)
	if uint64(u32val) != val {
		return 0, false
	}
	if !allowSignExt && u32val&0x80000000 != 0 {
		return 0, false
	}
	return u32val, true
}

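// getOperand_Reg returns the given value definition as a register operand, materializing
// constants into a register as needed.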
func (m *machine) getOperand_Reg(def backend.SSAValueDefinition) (op operand) {
	var v regalloc.VReg
	if instr := def.Instr; instr != nil && instr.Constant() {
		// We inline all the constant instructions so that we can reduce register pressure.
		v = m.lowerConstant(instr)
		instr.MarkLowered()
	} else {
		v = m.c.VRegOf(def.V)
	}
	return newOperandReg(v)
}