package amd64

import (
	"fmt"

	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
)

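// addendsMatchOpcodes is the set of opcodes that can be absorbed into an address mode
// as an addend (base register, scaled index, or constant displacement).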
var addendsMatchOpcodes = [...]ssa.Opcode{ssa.OpcodeUExtend, ssa.OpcodeSExtend, ssa.OpcodeIadd, ssa.OpcodeIconst, ssa.OpcodeIshl}

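// addend is a single term of an address computation: either a register r (optionally
// scaled by 1<<shift) or a constant offset off. off is only meaningful when r is
// regalloc.VRegInvalid; when r is valid, off is always 0.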
type addend struct {
	r     regalloc.VReg
	off   int64
	shift byte
}

func (a addend) String() string {
	return fmt.Sprintf("addend{r=%s, off=%d, shift=%d}", a.r, a.off, a.shift)
}

// lowerToAddressMode converts a pointer to an addressMode that can be used as an operand for load/store instructions.
func (m *machine) lowerToAddressMode(ptr ssa.Value, offsetBase uint32) (am *amode) {
	def := m.c.ValueDefinition(ptr)

	if offsetBase&0x80000000 != 0 {
		// Special case the huge base offset whose MSB is set. In x64, the immediate is always
		// sign-extended, but our IR semantics require the offset base to always be unsigned.
		// Note that this should be extremely rare or may never be hit by a real application,
		// so we don't bother optimizing this case.

		a := m.lowerAddend(def)
		off64 := a.off + int64(offsetBase)
		offsetBaseReg := m.c.AllocateVReg(ssa.TypeI64)
		m.lowerIconst(offsetBaseReg, uint64(off64), true)
		if a.r != regalloc.VRegInvalid {
			return m.newAmodeRegRegShift(0, offsetBaseReg, a.r, a.shift)
		} else {
			return m.newAmodeImmReg(0, offsetBaseReg)
		}
	}

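	// If the pointer is defined by an Iadd, try to fold both of its operands into the
	// address mode (base + index*scale + displacement).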
	if op := m.c.MatchInstrOneOf(def, addendsMatchOpcodes[:]); op == ssa.OpcodeIadd {
		add := def.Instr
		x, y := add.Arg2()
		xDef, yDef := m.c.ValueDefinition(x), m.c.ValueDefinition(y)
		ax := m.lowerAddend(xDef)
		ay := m.lowerAddend(yDef)
		add.MarkLowered()
		return m.lowerAddendsToAmode(ax, ay, offsetBase)
	} else {
		// If it is not an Iadd, we lower the single addend.
		a := m.lowerAddend(def)
		// a.off is always 0 if a.r is valid.
		if a.r != regalloc.VRegInvalid {
			if a.shift != 0 {
				// A lone shifted register needs a zero base register for the reg+reg*scale form.
				tmpReg := m.c.AllocateVReg(ssa.TypeI64)
				m.lowerIconst(tmpReg, 0, true)
				return m.newAmodeRegRegShift(offsetBase, tmpReg, a.r, a.shift)
			}
			return m.newAmodeImmReg(offsetBase, a.r)
		} else {
			off64 := a.off + int64(offsetBase)
			tmpReg := m.c.AllocateVReg(ssa.TypeI64)
			m.lowerIconst(tmpReg, uint64(off64), true)
			return m.newAmodeImmReg(0, tmpReg)
		}
	}
}

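// lowerAddendsToAmode combines two lowered addends and a static offset base into a single
// address mode, materializing the offset into a register when it cannot be encoded as imm32.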
func (m *machine) lowerAddendsToAmode(x, y addend, offBase uint32) *amode {
	if x.r != regalloc.VRegInvalid && x.off != 0 || y.r != regalloc.VRegInvalid && y.off != 0 {
		panic("invalid input")
	}

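	// Fold the constant addends and the offset base into one displacement. If it does not
	// fit in a signed 32-bit immediate, materialize it into a temporary register instead.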
	u64 := uint64(x.off+y.off) + uint64(offBase)
	if u64 != 0 {
		if _, ok := asImm32(u64, false); !ok {
			tmpReg := m.c.AllocateVReg(ssa.TypeI64)
			m.lowerIconst(tmpReg, u64, true)
			// Blank u64 as it has already been lowered.
			u64 = 0

			// We already know that either x.r or y.r is invalid,
			// so we overwrite the invalid one with the temporary register.
			if x.r == regalloc.VRegInvalid {
				x.r = tmpReg
			} else if y.r == regalloc.VRegInvalid {
				y.r = tmpReg
			} else {
				panic("BUG")
			}
		}
	}

	u32 := uint32(u64)
	switch {
	// We assume x.r, y.r are valid iff x.off, y.off are 0.
	case x.r != regalloc.VRegInvalid && y.r != regalloc.VRegInvalid:
		switch {
		case x.shift != 0 && y.shift != 0:
			// Cannot absorb two shifted registers into one amode; lower one of the shifts
			// to an explicit shift instruction.
			shift := m.allocateInstr()
			shift.asShiftR(shiftROpShiftLeft, newOperandImm32(uint32(x.shift)), x.r, true)
			m.insert(shift)

			return m.newAmodeRegRegShift(u32, x.r, y.r, y.shift)
		case x.shift != 0 && y.shift == 0:
			// Swap base and index so that the shifted register becomes the index.
			x, y = y, x
			fallthrough
		default:
			return m.newAmodeRegRegShift(u32, x.r, y.r, y.shift)
		}
	case x.r == regalloc.VRegInvalid && y.r != regalloc.VRegInvalid:
		x, y = y, x
		fallthrough
	case x.r != regalloc.VRegInvalid && y.r == regalloc.VRegInvalid:
		if x.shift != 0 {
			// A lone shifted register needs a zero base register for the reg+reg*scale form.
			zero := m.c.AllocateVReg(ssa.TypeI64)
			m.lowerIconst(zero, 0, true)
			return m.newAmodeRegRegShift(u32, zero, x.r, x.shift)
		}
		return m.newAmodeImmReg(u32, x.r)
	default: // Both are invalid: use the offset alone.
		tmpReg := m.c.AllocateVReg(ssa.TypeI64)
		m.lowerIconst(tmpReg, u64, true)
		return m.newAmodeImmReg(0, tmpReg)
	}
}

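// lowerAddend lowers a single SSA value definition into an addend: either the register
// holding the value, or a constant offset when the defining instruction can be folded.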
func (m *machine) lowerAddend(x backend.SSAValueDefinition) addend {
	if !x.IsFromInstr() {
		return addend{m.c.VRegOf(x.V), 0, 0}
	}
	// Ensure the addend is not referenced in multiple places; we will discard nested Iadds.
	op := m.c.MatchInstrOneOf(x, addendsMatchOpcodes[:])
	if op != ssa.OpcodeInvalid && op != ssa.OpcodeIadd {
		return m.lowerAddendFromInstr(x.Instr)
	}
	p := m.getOperand_Reg(x)
	return addend{p.reg(), 0, 0}
}

// lowerAddendFromInstr takes an instruction and returns a VReg and an offset that can be used in an address mode.
// The VReg is regalloc.VRegInvalid if the addend cannot be lowered to a register.
// The offset is 0 if the addend can be lowered to a register.
func (m *machine) lowerAddendFromInstr(instr *ssa.Instruction) addend {
	instr.MarkLowered()
	switch op := instr.Opcode(); op {
	case ssa.OpcodeIconst:
		u64 := instr.ConstantVal()
		if instr.Return().Type().Bits() == 32 {
			return addend{regalloc.VRegInvalid, int64(int32(u64)), 0} // sign-extend.
		} else {
			return addend{regalloc.VRegInvalid, int64(u64), 0}
		}
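	// A 32-bit extend of a constant can be folded directly into the offset;
	// otherwise the extended value is taken from a register.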
	case ssa.OpcodeUExtend, ssa.OpcodeSExtend:
		input := instr.Arg()
		inputDef := m.c.ValueDefinition(input)
		if input.Type().Bits() != 32 {
			panic("BUG: invalid input type " + input.Type().String())
		}
		constInst := inputDef.IsFromInstr() && inputDef.Instr.Constant()
		switch {
		case constInst && op == ssa.OpcodeSExtend:
			return addend{regalloc.VRegInvalid, int64(uint32(inputDef.Instr.ConstantVal())), 0}
		case constInst && op == ssa.OpcodeUExtend:
			return addend{regalloc.VRegInvalid, int64(int32(inputDef.Instr.ConstantVal())), 0} // sign-extend!
		default:
			r := m.getOperand_Reg(inputDef)
			return addend{r.reg(), 0, 0}
		}
	case ssa.OpcodeIshl:
		// If the addend is a shift, we can only absorb it if the shift amount is a constant <= 3.
		x, amount := instr.Arg2()
		amountDef := m.c.ValueDefinition(amount)
		if amountDef.IsFromInstr() && amountDef.Instr.Constant() && amountDef.Instr.ConstantVal() <= 3 {
			r := m.getOperand_Reg(m.c.ValueDefinition(x))
			return addend{r.reg(), 0, uint8(amountDef.Instr.ConstantVal())}
		}
		r := m.getOperand_Reg(m.c.ValueDefinition(x))
		return addend{r.reg(), 0, 0}
	}
	panic("BUG: invalid opcode")
}