package backend

import (
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
)

// Lower implements Compiler.Lower.
func ( *compiler) () {
	.assignVirtualRegisters()
	.mach.SetCurrentABI(.GetFunctionABI(.ssaBuilder.Signature()))
	.mach.StartLoweringFunction(.ssaBuilder.BlockIDMax())
	.lowerBlocks()
}

// lowerBlocks lowers each block in the ssa.Builder.
func ( *compiler) () {
	 := .ssaBuilder
	for  := .BlockIteratorReversePostOrderBegin();  != nil;  = .BlockIteratorReversePostOrderNext() {
		.lowerBlock()
	}

	// After lowering all blocks, we need to link adjacent blocks to layout one single instruction list.
	var  ssa.BasicBlock
	for  := .BlockIteratorReversePostOrderBegin();  != nil;  = .BlockIteratorReversePostOrderNext() {
		if  != nil {
			.mach.LinkAdjacentBlocks(, )
		}
		 = 
	}
}

func ( *compiler) ( ssa.BasicBlock) {
	 := .mach
	.StartBlock()

	// We traverse the instructions in reverse order because we might want to lower multiple
	// instructions together.
	 := .Tail()

	// First gather the branching instructions at the end of the blocks.
	var ,  *ssa.Instruction
	if .IsBranching() {
		 = 
		 = .Prev()
		if  != nil && .IsBranching() {
			 = 
			 = .Prev()
		}
	}

	if  != nil {
		.lowerBranches(, )
	}

	if  != nil &&  == nil {
		panic("BUG? when a block has conditional branch but doesn't end with an unconditional branch?")
	}

	// Now start lowering the non-branching instructions.
	for ;  != nil;  = .Prev() {
		.setCurrentGroupID(.GroupID())
		if .Lowered() {
			continue
		}

		switch .Opcode() {
		case ssa.OpcodeReturn:
			 := .ReturnVals()
			if len() > 0 {
				.mach.LowerReturns()
			}
			.mach.InsertReturn()
		default:
			.LowerInstr()
		}
		.FlushPendingInstructions()
	}

	// Finally, if this is the entry block, we have to insert copies of arguments from the real location to the VReg.
	if .EntryBlock() {
		.lowerFunctionArguments()
	}

	.EndBlock()
}

// lowerBranches is called right after StartBlock and before any LowerInstr call if
// there are branches to the given block. br0 is the very end of the block and b1 is the before the br0 if it exists.
// At least br0 is not nil, but br1 can be nil if there's no branching before br0.
//
// See ssa.Instruction IsBranching, and the comment on ssa.BasicBlock.
func ( *compiler) (,  *ssa.Instruction) {
	 := .mach

	.setCurrentGroupID(.GroupID())
	.mach.LowerSingleBranch()
	.FlushPendingInstructions()
	if  != nil {
		.setCurrentGroupID(.GroupID())
		.mach.LowerConditionalBranch()
		.FlushPendingInstructions()
	}

	if .Opcode() == ssa.OpcodeJump {
		, ,  := .BranchData()
		 := len() != 0
		if  &&  != nil {
			panic("BUG: critical edge split failed")
		}
		 := .ssaBuilder.BasicBlock()
		if  && .ReturnBlock() {
			if len() > 0 {
				.mach.LowerReturns()
			}
		} else if  {
			.lowerBlockArguments(, )
		}
	}
	.FlushPendingInstructions()
}

func ( *compiler) ( ssa.BasicBlock) {
	 := .mach

	.tmpVals = .tmpVals[:0]
	 := .ssaBuilder.ValuesInfo()
	for  := 0;  < .Params(); ++ {
		 := .Param()
		if [.ID()].RefCount > 0 {
			.tmpVals = append(.tmpVals, )
		} else {
			// If the argument is not used, we can just pass an invalid value.
			.tmpVals = append(.tmpVals, ssa.ValueInvalid)
		}
	}
	.LowerParams(.tmpVals)
	.FlushPendingInstructions()
}

// lowerBlockArguments lowers how to pass arguments to the given successor block.
func ( *compiler) ( []ssa.Value,  ssa.BasicBlock) {
	if len() != .Params() {
		panic("BUG: mismatched number of arguments")
	}

	.varEdges = .varEdges[:0]
	.varEdgeTypes = .varEdgeTypes[:0]
	.constEdges = .constEdges[:0]
	for  := 0;  < len(); ++ {
		 := .Param()
		 := []

		 := .VRegOf()
		 := .ssaBuilder.InstructionOfValue()
		if  != nil && .Constant() {
			.constEdges = append(.constEdges, struct {
				 *ssa.Instruction
				   regalloc.VReg
			}{: , : })
		} else {
			 := .VRegOf()
			// Even when the src=dst, insert the move so that we can keep such registers keep-alive.
			.varEdges = append(.varEdges, [2]regalloc.VReg{, })
			.varEdgeTypes = append(.varEdgeTypes, .Type())
		}
	}

	// Check if there's an overlap among the dsts and srcs in varEdges.
	.vRegIDs = .vRegIDs[:0]
	for ,  := range .varEdges {
		 := [0].ID()
		if int() >= len(.vRegSet) {
			.vRegSet = append(.vRegSet, make([]bool, +1)...)
		}
		.vRegSet[] = true
		.vRegIDs = append(.vRegIDs, )
	}
	 := true
	for ,  := range .varEdges {
		 := [1].ID()
		if int() >= len(.vRegSet) {
			.vRegSet = append(.vRegSet, make([]bool, +1)...)
		} else {
			if .vRegSet[] {
				 = false
				break
			}
		}
	}
	for ,  := range .vRegIDs {
		.vRegSet[] = false // reset for the next use.
	}

	if  {
		// If there's no overlap, we can simply move the source to destination.
		for ,  := range .varEdges {
			,  := [0], [1]
			.mach.InsertMove(, , .varEdgeTypes[])
		}
	} else {
		// Otherwise, we allocate a temporary registers and move the source to the temporary register,
		//
		// First move all of them to temporary registers.
		.tempRegs = .tempRegs[:0]
		for ,  := range .varEdges {
			 := [0]
			 := .varEdgeTypes[]
			 := .AllocateVReg()
			.tempRegs = append(.tempRegs, )
			.mach.InsertMove(, , )
		}
		// Then move the temporary registers to the destination.
		for ,  := range .varEdges {
			 := .tempRegs[]
			 := [1]
			.mach.InsertMove(, , .varEdgeTypes[])
		}
	}

	// Finally, move the constants.
	for ,  := range .constEdges {
		,  := .cInst, .dst
		.mach.InsertLoadConstantBlockArg(, )
	}
}