package frontend

import (
	"encoding/binary"
	"fmt"
	"strings"

	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
	"github.com/tetratelabs/wazero/internal/leb128"
	"github.com/tetratelabs/wazero/internal/wasm"
)

type (
	// loweringState is used to keep the state of lowering.
	loweringState struct {
		// values holds the values on the Wasm stack.
		values           []ssa.Value
		controlFrames    []controlFrame
		unreachable      bool
		unreachableDepth int
		tmpForBrTable    []uint32
		pc               int
	}
	controlFrame struct {
		kind controlFrameKind
		// originalStackLenWithoutParam holds the number of values on the Wasm stack
		// when we start executing this control frame, minus the params for the block.
		originalStackLenWithoutParam int
		// blk is the loop header if this is a loop frame, and the else-block if this is an if frame.
		blk,
		// followingBlock is the basic block we enter if we reach "end" of block.
		followingBlock ssa.BasicBlock
		blockType *wasm.FunctionType
		// clonedArgs holds the arguments to the Else block.
		clonedArgs ssa.Values
	}

	controlFrameKind byte
)

// String implements fmt.Stringer for debugging.
func (l *loweringState) String() string {
	var str []string
	for _, v := range l.values {
		str = append(str, fmt.Sprintf("v%v", v.ID()))
	}
	var frames []string
	for i := range l.controlFrames {
		frames = append(frames, l.controlFrames[i].kind.String())
	}
	return fmt.Sprintf("\n\tunreachable=%v(depth=%d)\n\tstack: %s\n\tcontrol frames: %s",
		l.unreachable, l.unreachableDepth,
		strings.Join(str, ", "),
		strings.Join(frames, ", "),
	)
}

const (
	controlFrameKindFunction controlFrameKind = iota + 1
	controlFrameKindLoop
	controlFrameKindIfWithElse
	controlFrameKindIfWithoutElse
	controlFrameKindBlock
)

// String implements fmt.Stringer for debugging.
func (kind controlFrameKind) String() string {
	switch kind {
	case controlFrameKindFunction:
		return "function"
	case controlFrameKindLoop:
		return "loop"
	case controlFrameKindIfWithElse:
		return "if_with_else"
	case controlFrameKindIfWithoutElse:
		return "if_without_else"
	case controlFrameKindBlock:
		return "block"
	default:
		panic(kind)
	}
}

// isLoop returns true if this is a loop frame.
func (ctrl *controlFrame) isLoop() bool {
	return ctrl.kind == controlFrameKindLoop
}

// reset resets the state of loweringState for reuse.
func (l *loweringState) reset() {
	l.values = l.values[:0]
	l.controlFrames = l.controlFrames[:0]
	l.pc = 0
	l.unreachable = false
	l.unreachableDepth = 0
}

// peek returns the value at the top of the Wasm value stack without removing it.
func (l *loweringState) peek() (ret ssa.Value) {
	tail := len(l.values) - 1
	return l.values[tail]
}

// pop removes and returns the value at the top of the Wasm value stack.
func (l *loweringState) pop() (ret ssa.Value) {
	tail := len(l.values) - 1
	ret = l.values[tail]
	l.values = l.values[:tail]
	return
}

// push adds the given value to the top of the Wasm value stack.
func (l *loweringState) push(ret ssa.Value) {
	l.values = append(l.values, ret)
}

// nPeekDup copies the top n values on the Wasm value stack into a freshly
// allocated ssa.Values without removing them from the stack.
func (c *Compiler) nPeekDup(n int) ssa.Values {
	if n == 0 {
		return ssa.ValuesNil
	}

	state := c.state()
	tail := len(state.values)

	args := c.allocateVarLengthValues(n)
	args = args.Append(c.ssaBuilder.VarLengthPool(), state.values[tail-n:]...)
	return args
}

// ctrlPop removes and returns the control frame at the top of the control frame stack.
func (l *loweringState) ctrlPop() (ret controlFrame) {
	tail := len(l.controlFrames) - 1
	ret = l.controlFrames[tail]
	l.controlFrames = l.controlFrames[:tail]
	return
}

// ctrlPush adds the given control frame to the top of the control frame stack.
func (l *loweringState) ctrlPush(ret controlFrame) {
	l.controlFrames = append(l.controlFrames, ret)
}

// ctrlPeekAt returns the control frame located at the given depth from the top
// of the control frame stack (0 being the top).
func (l *loweringState) ctrlPeekAt(n int) (ret *controlFrame) {
	tail := len(l.controlFrames) - 1
	return &l.controlFrames[tail-n]
}

// lowerBody lowers the body of the Wasm function to the SSA form.
func (c *Compiler) lowerBody(entryBlk ssa.BasicBlock) {
	c.ssaBuilder.Seal(entryBlk)

	if c.needListener {
		c.callListenerBefore()
	}

	// Pushes the empty control frame which corresponds to the function return.
	c.loweringState.ctrlPush(controlFrame{
		kind:           controlFrameKindFunction,
		blockType:      c.wasmFunctionTyp,
		followingBlock: c.ssaBuilder.ReturnBlock(),
	})

	for c.loweringState.pc < len(c.wasmFunctionBody) {
		blkBeforeLowering := c.ssaBuilder.CurrentBlock()
		c.lowerCurrentOpcode()
		blkAfterLowering := c.ssaBuilder.CurrentBlock()
		if blkBeforeLowering != blkAfterLowering {
			// In Wasm, once we have exited a block, compilation of that block is done.
			// Therefore, we finalize the known bounds at the end of the exiting block,
			c.finalizeKnownSafeBoundsAtTheEndOfBlock(blkBeforeLowering.ID())
			// and then initialize the known bounds for the new compilation target block.
			c.initializeCurrentBlockKnownBounds()
		}
	}
}

// state returns the current lowering state of the Compiler.
func (c *Compiler) state() *loweringState {
	return &c.loweringState
}

func (c *Compiler) lowerCurrentOpcode() {
	op := c.wasmFunctionBody[c.loweringState.pc]

	if c.needSourceOffsetInfo {
		c.ssaBuilder.SetCurrentSourceOffset(
			ssa.SourceOffset(c.loweringState.pc) + ssa.SourceOffset(c.wasmFunctionBodyOffsetInCodeSection),
		)
	}

	builder := c.ssaBuilder
	state := c.state()
	switch op {
	case wasm.OpcodeI32Const:
		imm := c.readI32s()
		if state.unreachable {
			break
		}

		iconst := builder.AllocateInstruction().AsIconst32(uint32(imm)).Insert(builder)
		value := iconst.Return()
		state.push(value)
	case wasm.OpcodeI64Const:
		 := .readI64s()
		if .unreachable {
			break
		}
		 := .AllocateInstruction().AsIconst64(uint64()).Insert()
		 := .Return()
		.push()
	case wasm.OpcodeF32Const:
		 := .readF32()
		if .unreachable {
			break
		}
		 := .AllocateInstruction().
			AsF32const().
			Insert().
			Return()
		.push()
	case wasm.OpcodeF64Const:
		 := .readF64()
		if .unreachable {
			break
		}
		 := .AllocateInstruction().
			AsF64const().
			Insert().
			Return()
		.push()
	case wasm.OpcodeI32Add, wasm.OpcodeI64Add:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsIadd(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Sub, wasm.OpcodeI64Sub:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsIsub(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeF32Add, wasm.OpcodeF64Add:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsFadd(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Mul, wasm.OpcodeI64Mul:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsImul(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeF32Sub, wasm.OpcodeF64Sub:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsFsub(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeF32Mul, wasm.OpcodeF64Mul:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsFmul(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeF32Div, wasm.OpcodeF64Div:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsFdiv(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeF32Max, wasm.OpcodeF64Max:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsFmax(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeF32Min, wasm.OpcodeF64Min:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsFmin(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI64Extend8S:
		if .unreachable {
			break
		}
		.insertIntegerExtend(true, 8, 64)
	case wasm.OpcodeI64Extend16S:
		if .unreachable {
			break
		}
		.insertIntegerExtend(true, 16, 64)
	case wasm.OpcodeI64Extend32S, wasm.OpcodeI64ExtendI32S:
		if .unreachable {
			break
		}
		.insertIntegerExtend(true, 32, 64)
	case wasm.OpcodeI64ExtendI32U:
		if .unreachable {
			break
		}
		.insertIntegerExtend(false, 32, 64)
	case wasm.OpcodeI32Extend8S:
		if .unreachable {
			break
		}
		.insertIntegerExtend(true, 8, 32)
	case wasm.OpcodeI32Extend16S:
		if .unreachable {
			break
		}
		.insertIntegerExtend(true, 16, 32)
	case wasm.OpcodeI32Eqz, wasm.OpcodeI64Eqz:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction()
		if  == wasm.OpcodeI32Eqz {
			.AsIconst32(0)
		} else {
			.AsIconst64(0)
		}
		.InsertInstruction()
		 := .AllocateInstruction().
			AsIcmp(, .Return(), ssa.IntegerCmpCondEqual).
			Insert().
			Return()
		.push()
	case wasm.OpcodeI32Eq, wasm.OpcodeI64Eq:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondEqual)
	case wasm.OpcodeI32Ne, wasm.OpcodeI64Ne:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondNotEqual)
	case wasm.OpcodeI32LtS, wasm.OpcodeI64LtS:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondSignedLessThan)
	case wasm.OpcodeI32LtU, wasm.OpcodeI64LtU:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondUnsignedLessThan)
	case wasm.OpcodeI32GtS, wasm.OpcodeI64GtS:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondSignedGreaterThan)
	case wasm.OpcodeI32GtU, wasm.OpcodeI64GtU:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondUnsignedGreaterThan)
	case wasm.OpcodeI32LeS, wasm.OpcodeI64LeS:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondSignedLessThanOrEqual)
	case wasm.OpcodeI32LeU, wasm.OpcodeI64LeU:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondUnsignedLessThanOrEqual)
	case wasm.OpcodeI32GeS, wasm.OpcodeI64GeS:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondSignedGreaterThanOrEqual)
	case wasm.OpcodeI32GeU, wasm.OpcodeI64GeU:
		if .unreachable {
			break
		}
		.insertIcmp(ssa.IntegerCmpCondUnsignedGreaterThanOrEqual)

	case wasm.OpcodeF32Eq, wasm.OpcodeF64Eq:
		if .unreachable {
			break
		}
		.insertFcmp(ssa.FloatCmpCondEqual)
	case wasm.OpcodeF32Ne, wasm.OpcodeF64Ne:
		if .unreachable {
			break
		}
		.insertFcmp(ssa.FloatCmpCondNotEqual)
	case wasm.OpcodeF32Lt, wasm.OpcodeF64Lt:
		if .unreachable {
			break
		}
		.insertFcmp(ssa.FloatCmpCondLessThan)
	case wasm.OpcodeF32Gt, wasm.OpcodeF64Gt:
		if .unreachable {
			break
		}
		.insertFcmp(ssa.FloatCmpCondGreaterThan)
	case wasm.OpcodeF32Le, wasm.OpcodeF64Le:
		if .unreachable {
			break
		}
		.insertFcmp(ssa.FloatCmpCondLessThanOrEqual)
	case wasm.OpcodeF32Ge, wasm.OpcodeF64Ge:
		if .unreachable {
			break
		}
		.insertFcmp(ssa.FloatCmpCondGreaterThanOrEqual)
	case wasm.OpcodeF32Neg, wasm.OpcodeF64Neg:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsFneg().Insert().Return()
		.push()
	case wasm.OpcodeF32Sqrt, wasm.OpcodeF64Sqrt:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsSqrt().Insert().Return()
		.push()
	case wasm.OpcodeF32Abs, wasm.OpcodeF64Abs:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsFabs().Insert().Return()
		.push()
	case wasm.OpcodeF32Copysign, wasm.OpcodeF64Copysign:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction().AsFcopysign(, ).Insert().Return()
		.push()

	case wasm.OpcodeF32Ceil, wasm.OpcodeF64Ceil:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsCeil().Insert().Return()
		.push()
	case wasm.OpcodeF32Floor, wasm.OpcodeF64Floor:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsFloor().Insert().Return()
		.push()
	case wasm.OpcodeF32Trunc, wasm.OpcodeF64Trunc:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsTrunc().Insert().Return()
		.push()
	case wasm.OpcodeF32Nearest, wasm.OpcodeF64Nearest:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsNearest().Insert().Return()
		.push()
	case wasm.OpcodeI64TruncF64S, wasm.OpcodeI64TruncF32S,
		wasm.OpcodeI32TruncF64S, wasm.OpcodeI32TruncF32S,
		wasm.OpcodeI64TruncF64U, wasm.OpcodeI64TruncF32U,
		wasm.OpcodeI32TruncF64U, wasm.OpcodeI32TruncF32U:
		if .unreachable {
			break
		}
		 := .AllocateInstruction().AsFcvtToInt(
			.pop(),
			.execCtxPtrValue,
			 == wasm.OpcodeI64TruncF64S ||  == wasm.OpcodeI64TruncF32S ||  == wasm.OpcodeI32TruncF32S ||  == wasm.OpcodeI32TruncF64S,
			 == wasm.OpcodeI64TruncF64S ||  == wasm.OpcodeI64TruncF32S ||  == wasm.OpcodeI64TruncF64U ||  == wasm.OpcodeI64TruncF32U,
			false,
		).Insert().Return()
		.push()
	case wasm.OpcodeMiscPrefix:
		.pc++
		// A misc opcode is encoded as a LEB128-encoded unsigned 32-bit integer following the 0xFC prefix.
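		// For example (illustrative, value per the Wasm spec): memory.copy is encoded as 0xFC 0x0A, so the
		// LEB128 read below yields 10, i.e. wasm.OpcodeMiscMemoryCopy.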
		, ,  := leb128.LoadUint32(.wasmFunctionBody[.pc:])
		if  != nil {
			// Under normal conditions this never happens because the function has already passed validation.
			panic(fmt.Sprintf("failed to read misc opcode: %v", ))
		}
		.pc += int( - 1)
		 := wasm.OpcodeMisc()
		switch  {
		case wasm.OpcodeMiscI64TruncSatF64S, wasm.OpcodeMiscI64TruncSatF32S,
			wasm.OpcodeMiscI32TruncSatF64S, wasm.OpcodeMiscI32TruncSatF32S,
			wasm.OpcodeMiscI64TruncSatF64U, wasm.OpcodeMiscI64TruncSatF32U,
			wasm.OpcodeMiscI32TruncSatF64U, wasm.OpcodeMiscI32TruncSatF32U:
			if .unreachable {
				break
			}
			 := .AllocateInstruction().AsFcvtToInt(
				.pop(),
				.execCtxPtrValue,
				 == wasm.OpcodeMiscI64TruncSatF64S ||  == wasm.OpcodeMiscI64TruncSatF32S ||  == wasm.OpcodeMiscI32TruncSatF32S ||  == wasm.OpcodeMiscI32TruncSatF64S,
				 == wasm.OpcodeMiscI64TruncSatF64S ||  == wasm.OpcodeMiscI64TruncSatF32S ||  == wasm.OpcodeMiscI64TruncSatF64U ||  == wasm.OpcodeMiscI64TruncSatF32U,
				true,
			).Insert().Return()
			.push()

		case wasm.OpcodeMiscTableSize:
			 := .readI32u()
			if .unreachable {
				break
			}

			// Load the table.
			 := .AllocateInstruction()
			.AsLoad(.moduleCtxPtrValue, .offset.TableOffset(int()).U32(), ssa.TypeI64)
			.InsertInstruction()
			 := .Return()

			// Load the table's length.
			 := .AllocateInstruction().
				AsLoad(, tableInstanceLenOffset, ssa.TypeI32).
				Insert()
			.push(.Return())

		case wasm.OpcodeMiscTableGrow:
			 := .readI32u()
			if .unreachable {
				break
			}

			.storeCallerModuleContext()

			 := .AllocateInstruction().AsIconst32().Insert().Return()

			 := .pop()
			 := .pop()

			 := .AllocateInstruction().
				AsLoad(.execCtxPtrValue,
					wazevoapi.ExecutionContextOffsetTableGrowTrampolineAddress.U32(),
					ssa.TypeI64,
				).Insert().Return()

			 := .allocateVarLengthValues(4, .execCtxPtrValue, , , )
			 := .
				AllocateInstruction().
				AsCallIndirect(, &.tableGrowSig, ).
				Insert().Return()
			.push()

		case wasm.OpcodeMiscTableCopy:
			 := .readI32u()
			 := .readI32u()
			if .unreachable {
				break
			}

			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()

			// Out of bounds check.
			 := .boundsCheckInTable(, , )
			 := .boundsCheckInTable(, , )

			 := .loadTableBaseAddr()
			 := .loadTableBaseAddr()

			 := .AllocateInstruction().AsIconst64(3).Insert().Return()

			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()
			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			.callMemmove(, , )

		case wasm.OpcodeMiscMemoryCopy:
			.pc += 2 // +2 to skip two memory indexes which are fixed to zero.
			if .unreachable {
				break
			}

			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()

			// Out of bounds check.
			 := .getMemoryLenValue(false)
			.boundsCheckInMemory(, , )
			.boundsCheckInMemory(, , )

			 := .getMemoryBaseValue(false)
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			.callMemmove(, , )

		case wasm.OpcodeMiscTableFill:
			 := .readI32u()
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .pop()

			 := .
				AllocateInstruction().AsUExtend(, 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(, 32, 64).Insert().Return()
			 := .boundsCheckInTable(, , )

			 := .AllocateInstruction().AsIconst64(3).Insert().Return()
			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			 := .AllocateInstruction().AsIshl(, ).Insert().Return()

			// Calculate the base address of the table.
			 := .loadTableBaseAddr()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			// Prepare the loop and following block.
			 := .AllocateBasicBlock()
			 := .AllocateBasicBlock()
			 := .AddParam(, ssa.TypeI64)
			 := .AllocateBasicBlock()

			// Uses the same copy trick as memory.fill for faster buffer filling, but in this case we copy
			// 8 bytes (one table element) at a time:
			// 	buf := tableBuffer[offset : offset+fillSizeInBytes]
			// 	buf[0:8] = value
			// 	for i := 8; i < fillSizeInBytes; i *= 2 { // Begin with 8 bytes.
			// 		copy(buf[i:], buf[:i])
			// 	}
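			// For illustration only (hypothetical size, not taken from the surrounding code): with 40 bytes
			// to fill, the first assignment initializes bytes [0,8), the loop then copies [0,8)->[8,16) and
			// [0,16)->[16,32), and the final iteration is clamped (via the Select lowered below) so that only
			// the remaining 8 bytes land in [32,40).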

			// Insert the jump to the beforeLoop block; if the fillSize is zero, jump straight to the following block to skip the entire logic.
			 := .AllocateInstruction().AsIconst64(0).Insert().Return()
			 := .AllocateInstruction().AsIcmp(, , ssa.IntegerCmpCondEqual).
				Insert().Return()
			.AllocateInstruction().AsBrnz(, ssa.ValuesNil, ).Insert()
			.insertJumpToBlock(ssa.ValuesNil, )

			// buf[0:8] = value
			.SetCurrentBlock()
			.AllocateInstruction().AsStore(ssa.OpcodeStore, , , 0).Insert()
			 := .AllocateInstruction().AsIconst64(8).Insert().Return()
			.insertJumpToBlock(.allocateVarLengthValues(1, ), )

			.SetCurrentBlock()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			// If loopVar*2 > fillSizeInBytes, then count must be fillSizeInBytes-loopVar.
			var  ssa.Value
			{
				 := .AllocateInstruction().AsIadd(, ).Insert().Return()
				 := .
					AllocateInstruction().AsIcmp(, , ssa.IntegerCmpCondUnsignedGreaterThanOrEqual).
					Insert().Return()
				 := .AllocateInstruction().AsIsub(, ).Insert().Return()
				 = .AllocateInstruction().AsSelect(, , ).Insert().Return()
			}

			.callMemmove(, , )

			 := .AllocateInstruction().AsIconst64(1).Insert().Return()
			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			 := .AllocateInstruction().
				AsIcmp(, , ssa.IntegerCmpCondUnsignedLessThan).Insert().Return()

			.AllocateInstruction().
				AsBrnz(, .allocateVarLengthValues(1, ), ).
				Insert()

			.insertJumpToBlock(ssa.ValuesNil, )
			.SetCurrentBlock()

			.Seal()
			.Seal()
			.Seal()

		case wasm.OpcodeMiscMemoryFill:
			.pc++ // Skip the memory index which is fixed to zero.
			if .unreachable {
				break
			}

			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .pop()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()

			// Out of bounds check.
			.boundsCheckInMemory(.getMemoryLenValue(false), , )

			// Calculate the base address:
			 := .AllocateInstruction().AsIadd(.getMemoryBaseValue(false), ).Insert().Return()

			// Uses the copy trick for faster buffer filling: https://gist.github.com/taylorza/df2f89d5f9ab3ffd06865062a4cf015d
			// 	buf := memoryInst.Buffer[offset : offset+fillSize]
			// 	buf[0] = value
			// 	for i := 1; i < fillSize; i *= 2 {
			// 		copy(buf[i:], buf[:i])
			// 	}
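			// For illustration only (hypothetical size, not taken from the surrounding code): with fillSize=5,
			// buf[0] is written first, the loop then copies [0,1)->[1,2) and [0,2)->[2,4), and the final
			// iteration is clamped to fillSize-loopVar (via the Select lowered below) so only [0,1)->[4,5)
			// is copied.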

			// Prepare the loop and following block.
			 := .AllocateBasicBlock()
			 := .AllocateBasicBlock()
			 := .AddParam(, ssa.TypeI64)
			 := .AllocateBasicBlock()

			// Insert the jump to the beforeLoop block; if the fillSize is zero, jump straight to the following block to skip the entire logic.
			 := .AllocateInstruction().AsIconst64(0).Insert().Return()
			 := .AllocateInstruction().AsIcmp(, , ssa.IntegerCmpCondEqual).
				Insert().Return()
			.AllocateInstruction().AsBrnz(, ssa.ValuesNil, ).Insert()
			.insertJumpToBlock(ssa.ValuesNil, )

			// buf[0] = value
			.SetCurrentBlock()
			.AllocateInstruction().AsStore(ssa.OpcodeIstore8, , , 0).Insert()
			 := .AllocateInstruction().AsIconst64(1).Insert().Return()
			.insertJumpToBlock(.allocateVarLengthValues(1, ), )

			.SetCurrentBlock()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			// If loopVar*2 > fillSizeExt, then count must be fillSizeExt-loopVar.
			var  ssa.Value
			{
				 := .AllocateInstruction().AsIadd(, ).Insert().Return()
				 := .
					AllocateInstruction().AsIcmp(, , ssa.IntegerCmpCondUnsignedGreaterThanOrEqual).
					Insert().Return()
				 := .AllocateInstruction().AsIsub(, ).Insert().Return()
				 = .AllocateInstruction().AsSelect(, , ).Insert().Return()
			}

			.callMemmove(, , )

			 := .AllocateInstruction().AsIconst64(1).Insert().Return()
			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			 := .AllocateInstruction().
				AsIcmp(, , ssa.IntegerCmpCondUnsignedLessThan).Insert().Return()

			.AllocateInstruction().
				AsBrnz(, .allocateVarLengthValues(1, ), ).
				Insert()

			.insertJumpToBlock(ssa.ValuesNil, )
			.SetCurrentBlock()

			.Seal()
			.Seal()
			.Seal()

		case wasm.OpcodeMiscMemoryInit:
			 := .readI32u()
			.pc++ // Skip the memory index which is fixed to zero.
			if .unreachable {
				break
			}

			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()

			 := .dataOrElementInstanceAddr(, .offset.DataInstances1stElement)

			// Bounds check.
			.boundsCheckInMemory(.getMemoryLenValue(false), , )
			.boundsCheckInDataOrElementInstance(, , , wazevoapi.ExitCodeMemoryOutOfBounds)

			 := .AllocateInstruction().AsLoad(, 0, ssa.TypeI64).Insert().Return()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			 := .getMemoryBaseValue(false)
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			.callMemmove(, , )

		case wasm.OpcodeMiscTableInit:
			 := .readI32u()
			 := .readI32u()
			if .unreachable {
				break
			}

			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()
			 := .
				AllocateInstruction().AsUExtend(.pop(), 32, 64).Insert().Return()

			 := .dataOrElementInstanceAddr(, .offset.ElementInstances1stElement)

			// Bounds check.
			 := .boundsCheckInTable(, , )
			.boundsCheckInDataOrElementInstance(, , , wazevoapi.ExitCodeTableOutOfBounds)

			 := .AllocateInstruction().AsIconst64(3).Insert().Return()
			// Calculates the destination address in the table.
			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			 := .loadTableBaseAddr()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			// Calculates the source address in the element instance.
			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			 := .AllocateInstruction().AsLoad(, 0, ssa.TypeI64).Insert().Return()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()

			 := .AllocateInstruction().AsIshl(, ).Insert().Return()
			.callMemmove(, , )

		case wasm.OpcodeMiscElemDrop:
			 := .readI32u()
			if .unreachable {
				break
			}

			.dropDataOrElementInstance(, .offset.ElementInstances1stElement)

		case wasm.OpcodeMiscDataDrop:
			 := .readI32u()
			if .unreachable {
				break
			}
			.dropDataOrElementInstance(, .offset.DataInstances1stElement)

		default:
			panic("Unknown MiscOp " + wasm.MiscInstructionName())
		}

	case wasm.OpcodeI32ReinterpretF32:
		if .unreachable {
			break
		}
		 := .AllocateInstruction().
			AsBitcast(.pop(), ssa.TypeI32).
			Insert().Return()
		.push()

	case wasm.OpcodeI64ReinterpretF64:
		if .unreachable {
			break
		}
		 := .AllocateInstruction().
			AsBitcast(.pop(), ssa.TypeI64).
			Insert().Return()
		.push()

	case wasm.OpcodeF32ReinterpretI32:
		if .unreachable {
			break
		}
		 := .AllocateInstruction().
			AsBitcast(.pop(), ssa.TypeF32).
			Insert().Return()
		.push()

	case wasm.OpcodeF64ReinterpretI64:
		if .unreachable {
			break
		}
		 := .AllocateInstruction().
			AsBitcast(.pop(), ssa.TypeF64).
			Insert().Return()
		.push()

	case wasm.OpcodeI32DivS, wasm.OpcodeI64DivS:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction().AsSDiv(, , .execCtxPtrValue).Insert().Return()
		.push()

	case wasm.OpcodeI32DivU, wasm.OpcodeI64DivU:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction().AsUDiv(, , .execCtxPtrValue).Insert().Return()
		.push()

	case wasm.OpcodeI32RemS, wasm.OpcodeI64RemS:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction().AsSRem(, , .execCtxPtrValue).Insert().Return()
		.push()

	case wasm.OpcodeI32RemU, wasm.OpcodeI64RemU:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction().AsURem(, , .execCtxPtrValue).Insert().Return()
		.push()

	case wasm.OpcodeI32And, wasm.OpcodeI64And:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsBand(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Or, wasm.OpcodeI64Or:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsBor(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Xor, wasm.OpcodeI64Xor:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsBxor(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Shl, wasm.OpcodeI64Shl:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsIshl(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32ShrU, wasm.OpcodeI64ShrU:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsUshr(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32ShrS, wasm.OpcodeI64ShrS:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsSshr(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Rotl, wasm.OpcodeI64Rotl:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsRotl(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Rotr, wasm.OpcodeI64Rotr:
		if .unreachable {
			break
		}
		,  := .pop(), .pop()
		 := .AllocateInstruction()
		.AsRotr(, )
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Clz, wasm.OpcodeI64Clz:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction()
		.AsClz()
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Ctz, wasm.OpcodeI64Ctz:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction()
		.AsCtz()
		.InsertInstruction()
		 := .Return()
		.push()
	case wasm.OpcodeI32Popcnt, wasm.OpcodeI64Popcnt:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction()
		.AsPopcnt()
		.InsertInstruction()
		 := .Return()
		.push()

	case wasm.OpcodeI32WrapI64:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsIreduce(, ssa.TypeI32).Insert().Return()
		.push()
	case wasm.OpcodeGlobalGet:
		 := .readI32u()
		if .unreachable {
			break
		}
		 := .getWasmGlobalValue(, false)
		.push()
	case wasm.OpcodeGlobalSet:
		 := .readI32u()
		if .unreachable {
			break
		}
		 := .pop()
		.setWasmGlobalValue(, )
	case wasm.OpcodeLocalGet:
		 := .readI32u()
		if .unreachable {
			break
		}
		 := .localVariable()
		.push(.MustFindValue())

	case wasm.OpcodeLocalSet:
		 := .readI32u()
		if .unreachable {
			break
		}
		 := .localVariable()
		 := .pop()
		.DefineVariableInCurrentBB(, )

	case wasm.OpcodeLocalTee:
		 := .readI32u()
		if .unreachable {
			break
		}
		 := .localVariable()
		 := .peek()
		.DefineVariableInCurrentBB(, )

	case wasm.OpcodeSelect, wasm.OpcodeTypedSelect:
		if  == wasm.OpcodeTypedSelect {
			.pc += 2 // ignores the type which is only needed during validation.
		}

		if .unreachable {
			break
		}

		 := .pop()
		 := .pop()
		 := .pop()

		 := .AllocateInstruction().
			AsSelect(, , ).
			Insert().
			Return()
		.push()

	case wasm.OpcodeMemorySize:
		.pc++ // skips the memory index.
		if .unreachable {
			break
		}

		var  ssa.Value
		if .offset.LocalMemoryBegin < 0 {
			 := .AllocateInstruction().
				AsLoad(.moduleCtxPtrValue, .offset.ImportedMemoryBegin.U32(), ssa.TypeI64).
				Insert().
				Return()

			 = .AllocateInstruction().
				AsLoad(, memoryInstanceBufSizeOffset, ssa.TypeI32).
				Insert().
				Return()
		} else {
			 = .AllocateInstruction().
				AsLoad(.moduleCtxPtrValue, .offset.LocalMemoryLen().U32(), ssa.TypeI32).
				Insert().
				Return()
		}

		 := .AllocateInstruction()
		.AsIconst32(uint32(wasm.MemoryPageSizeInBits))
		.InsertInstruction()
		 := .AllocateInstruction().
			AsUshr(, .Return()).
			Insert().
			Return()
		.push()

	case wasm.OpcodeMemoryGrow:
		.pc++ // skips the memory index.
		if .unreachable {
			break
		}

		.storeCallerModuleContext()

		 := .pop()
		 := .AllocateInstruction().
			AsLoad(.execCtxPtrValue,
				wazevoapi.ExecutionContextOffsetMemoryGrowTrampolineAddress.U32(),
				ssa.TypeI64,
			).Insert().Return()

		 := .allocateVarLengthValues(1, .execCtxPtrValue, )
		 := .
			AllocateInstruction().
			AsCallIndirect(, &.memoryGrowSig, ).
			Insert().Return()
		.push()

		// After the memory grow, reload the cached memory base and len.
		.reloadMemoryBaseLen()

	case wasm.OpcodeI32Store,
		wasm.OpcodeI64Store,
		wasm.OpcodeF32Store,
		wasm.OpcodeF64Store,
		wasm.OpcodeI32Store8,
		wasm.OpcodeI32Store16,
		wasm.OpcodeI64Store8,
		wasm.OpcodeI64Store16,
		wasm.OpcodeI64Store32:

		,  := .readMemArg()
		if .unreachable {
			break
		}
		var  uint64
		var  ssa.Opcode
		switch  {
		case wasm.OpcodeI32Store, wasm.OpcodeF32Store:
			 = ssa.OpcodeStore
			 = 4
		case wasm.OpcodeI64Store, wasm.OpcodeF64Store:
			 = ssa.OpcodeStore
			 = 8
		case wasm.OpcodeI32Store8, wasm.OpcodeI64Store8:
			 = ssa.OpcodeIstore8
			 = 1
		case wasm.OpcodeI32Store16, wasm.OpcodeI64Store16:
			 = ssa.OpcodeIstore16
			 = 2
		case wasm.OpcodeI64Store32:
			 = ssa.OpcodeIstore32
			 = 4
		default:
			panic("BUG")
		}

		 := .pop()
		 := .pop()
		 := .memOpSetup(, uint64(), )
		.AllocateInstruction().
			AsStore(, , , ).
			Insert()

	case wasm.OpcodeI32Load,
		wasm.OpcodeI64Load,
		wasm.OpcodeF32Load,
		wasm.OpcodeF64Load,
		wasm.OpcodeI32Load8S,
		wasm.OpcodeI32Load8U,
		wasm.OpcodeI32Load16S,
		wasm.OpcodeI32Load16U,
		wasm.OpcodeI64Load8S,
		wasm.OpcodeI64Load8U,
		wasm.OpcodeI64Load16S,
		wasm.OpcodeI64Load16U,
		wasm.OpcodeI64Load32S,
		wasm.OpcodeI64Load32U:
		,  := .readMemArg()
		if .unreachable {
			break
		}

		var  uint64
		switch  {
		case wasm.OpcodeI32Load, wasm.OpcodeF32Load:
			 = 4
		case wasm.OpcodeI64Load, wasm.OpcodeF64Load:
			 = 8
		case wasm.OpcodeI32Load8S, wasm.OpcodeI32Load8U:
			 = 1
		case wasm.OpcodeI32Load16S, wasm.OpcodeI32Load16U:
			 = 2
		case wasm.OpcodeI64Load8S, wasm.OpcodeI64Load8U:
			 = 1
		case wasm.OpcodeI64Load16S, wasm.OpcodeI64Load16U:
			 = 2
		case wasm.OpcodeI64Load32S, wasm.OpcodeI64Load32U:
			 = 4
		default:
			panic("BUG")
		}

		 := .pop()
		 := .memOpSetup(, uint64(), )
		 := .AllocateInstruction()
		switch  {
		case wasm.OpcodeI32Load:
			.AsLoad(, , ssa.TypeI32)
		case wasm.OpcodeI64Load:
			.AsLoad(, , ssa.TypeI64)
		case wasm.OpcodeF32Load:
			.AsLoad(, , ssa.TypeF32)
		case wasm.OpcodeF64Load:
			.AsLoad(, , ssa.TypeF64)
		case wasm.OpcodeI32Load8S:
			.AsExtLoad(ssa.OpcodeSload8, , , false)
		case wasm.OpcodeI32Load8U:
			.AsExtLoad(ssa.OpcodeUload8, , , false)
		case wasm.OpcodeI32Load16S:
			.AsExtLoad(ssa.OpcodeSload16, , , false)
		case wasm.OpcodeI32Load16U:
			.AsExtLoad(ssa.OpcodeUload16, , , false)
		case wasm.OpcodeI64Load8S:
			.AsExtLoad(ssa.OpcodeSload8, , , true)
		case wasm.OpcodeI64Load8U:
			.AsExtLoad(ssa.OpcodeUload8, , , true)
		case wasm.OpcodeI64Load16S:
			.AsExtLoad(ssa.OpcodeSload16, , , true)
		case wasm.OpcodeI64Load16U:
			.AsExtLoad(ssa.OpcodeUload16, , , true)
		case wasm.OpcodeI64Load32S:
			.AsExtLoad(ssa.OpcodeSload32, , , true)
		case wasm.OpcodeI64Load32U:
			.AsExtLoad(ssa.OpcodeUload32, , , true)
		default:
			panic("BUG")
		}
		.InsertInstruction()
		.push(.Return())
	case wasm.OpcodeBlock:
		// Note: we do not need to create a BB for this, as it would always have exactly one predecessor,
		// namely the current BB, so it is always safe to merge the two.

		 := .readBlockType()

		if .unreachable {
			.unreachableDepth++
			break
		}

		 := .AllocateBasicBlock()
		.addBlockParamsFromWasmTypes(.Results, )

		.ctrlPush(controlFrame{
			kind:                         controlFrameKindBlock,
			originalStackLenWithoutParam: len(.values) - len(.Params),
			followingBlock:               ,
			blockType:                    ,
		})
	case wasm.OpcodeLoop:
		 := .readBlockType()

		if .unreachable {
			.unreachableDepth++
			break
		}

		,  := .AllocateBasicBlock(), .AllocateBasicBlock()
		.addBlockParamsFromWasmTypes(.Params, )
		.addBlockParamsFromWasmTypes(.Results, )

		 := len(.values) - len(.Params)
		.ctrlPush(controlFrame{
			originalStackLenWithoutParam: ,
			kind:                         controlFrameKindLoop,
			blk:                          ,
			followingBlock:               ,
			blockType:                    ,
		})

		 := .allocateVarLengthValues()
		 = .Append(.VarLengthPool(), .values[:]...)

		// Insert the jump to the header of loop.
		 := .AllocateInstruction()
		.AsJump(, )
		.InsertInstruction()

		.switchTo(, )

		if .ensureTermination {
			 := .AllocateInstruction().
				AsLoad(.execCtxPtrValue,
					wazevoapi.ExecutionContextOffsetCheckModuleExitCodeTrampolineAddress.U32(),
					ssa.TypeI64,
				).Insert().Return()

			 := .allocateVarLengthValues(1, .execCtxPtrValue)
			.AllocateInstruction().
				AsCallIndirect(, &.checkModuleExitCodeSig, ).
				Insert()
		}
	case wasm.OpcodeIf:
		 := .readBlockType()

		if .unreachable {
			.unreachableDepth++
			break
		}

		 := .pop()
		, ,  := .AllocateBasicBlock(), .AllocateBasicBlock(), .AllocateBasicBlock()

		// We do not turn the Wasm-level block parameters into SSA-level block params for if-else blocks,
		// since they will not be PHIs and their definition is unique.

		// On the other hand, the following block after if-else-end will likely have
		// multiple definitions (one in Then and another in Else blocks).
		.addBlockParamsFromWasmTypes(.Results, )

		 := .allocateVarLengthValues(len(.Params))
		 = .Append(.VarLengthPool(), .values[len(.values)-len(.Params):]...)

		// Insert the conditional jump to the Else block.
		 := .AllocateInstruction()
		.AsBrz(, ssa.ValuesNil, )
		.InsertInstruction()

		// Then, insert the jump to the Then block.
		 := .AllocateInstruction()
		.AsJump(ssa.ValuesNil, )
		.InsertInstruction()

		.ctrlPush(controlFrame{
			kind:                         controlFrameKindIfWithoutElse,
			originalStackLenWithoutParam: len(.values) - len(.Params),
			blk:                          ,
			followingBlock:               ,
			blockType:                    ,
			clonedArgs:                   ,
		})

		.SetCurrentBlock()

		// Then and Else (if exists) have only one predecessor.
		.Seal()
		.Seal()
	case wasm.OpcodeElse:
		 := .ctrlPeekAt(0)
		if  := .unreachable;  && .unreachableDepth > 0 {
			// If we are currently in an unreachable state and this is a nested if,
			// we simply skip the entire else block.
			break
		}

		.kind = controlFrameKindIfWithElse
		if !.unreachable {
			// If this Then block is currently reachable, we have to insert the branching to the following BB.
			 := .followingBlock // == the BB after if-then-else.
			 := .nPeekDup(len(.blockType.Results))
			.insertJumpToBlock(, )
		} else {
			.unreachable = false
		}

		// Reset the stack so that we can correctly handle the else block.
		.values = .values[:.originalStackLenWithoutParam]
		 := .blk
		for ,  := range .clonedArgs.View() {
			.push()
		}

		.SetCurrentBlock()

	case wasm.OpcodeEnd:
		if .unreachableDepth > 0 {
			.unreachableDepth--
			break
		}

		 := .ctrlPop()
		 := .followingBlock

		 := .unreachable
		if ! {
			// The top n values will be used as the results of the current control frame.
			 := .nPeekDup(len(.blockType.Results))

			// Insert the unconditional branch to the target.
			.insertJumpToBlock(, )
		} else { // recover from the unreachable state.
			.unreachable = false
		}

		switch .kind {
		case controlFrameKindFunction:
			break // This is the very end of function.
		case controlFrameKindLoop:
			// The loop header block can be reached from any br/br_table contained in the loop,
			// so now that we've reached the end of the loop, we can seal it.
			.Seal(.blk)
		case controlFrameKindIfWithoutElse:
			// If this is the end of Then block, we have to emit the empty Else block.
			 := .blk
			.SetCurrentBlock()
			.insertJumpToBlock(.clonedArgs, )
		}

		.Seal()

		// Ready to start translating the following block.
		.switchTo(.originalStackLenWithoutParam, )

	case wasm.OpcodeBr:
		 := .readI32u()
		if .unreachable {
			break
		}

		,  := .brTargetArgNumFor()
		 := .nPeekDup()
		.insertJumpToBlock(, )

		.unreachable = true

	case wasm.OpcodeBrIf:
		 := .readI32u()
		if .unreachable {
			break
		}

		 := .pop()

		,  := .brTargetArgNumFor()
		 := .nPeekDup()
		var  bool
		if .needListener && .ReturnBlock() { // In this case, we have to call the listener before returning.
			// Save the currently active block.
			 := .CurrentBlock()

			// Allocate the trampoline block to the return where we call the listener.
			 = .AllocateBasicBlock()
			.SetCurrentBlock()
			 = true

			.callListenerAfter()

			 := .AllocateInstruction()
			.AsReturn()
			.InsertInstruction()

			 = ssa.ValuesNil

			// Revert the current block.
			.SetCurrentBlock()
		}

		// Insert the conditional jump to the target block.
		 := .AllocateInstruction()
		.AsBrnz(, , )
		.InsertInstruction()

		if  {
			.Seal()
		}

		// Insert the unconditional jump to the Else block, which corresponds to the code after br_if.
		 := .AllocateBasicBlock()
		.insertJumpToBlock(ssa.ValuesNil, )

		// Now start translating the instructions after br_if.
		.Seal() // Else of br_if has the current block as the only one successor.
		.SetCurrentBlock()

	case wasm.OpcodeBrTable:
		 := .tmpForBrTable[:0]
		 := .readI32u()
		for  := 0;  < int(); ++ {
			 = append(, .readI32u())
		}
		 = append(, .readI32u()) // default label.
		if .unreachable {
			break
		}

		 := .pop()
		if  == 0 { // If this br_table is empty, we can just emit the unconditional jump.
			,  := .brTargetArgNumFor([0])
			 := .nPeekDup()
			.insertJumpToBlock(, )
		} else {
			.lowerBrTable(, )
		}
		.tmpForBrTable =  // reuse the temporary slice for next use.
		.unreachable = true

	case wasm.OpcodeNop:
	case wasm.OpcodeReturn:
		if .unreachable {
			break
		}
		if .needListener {
			.callListenerAfter()
		}

		 := .nPeekDup(.results())
		 := .AllocateInstruction()

		.AsReturn()
		.InsertInstruction()
		.unreachable = true

	case wasm.OpcodeUnreachable:
		if .unreachable {
			break
		}
		 := .AllocateInstruction()
		.AsExitWithCode(.execCtxPtrValue, wazevoapi.ExitCodeUnreachable)
		.InsertInstruction()
		.unreachable = true

	case wasm.OpcodeCallIndirect:
		 := .readI32u()
		 := .readI32u()
		if .unreachable {
			break
		}
		.lowerCallIndirect(, )

	case wasm.OpcodeCall:
		 := .readI32u()
		if .unreachable {
			break
		}

		var  wasm.Index
		if  < .m.ImportFunctionCount {
			// Before transferring control to the callee, we have to store the current module's moduleContextPtr
			// into execContext.callerModuleContextPtr in case the callee is a Go function.
			.storeCallerModuleContext()
			var  int
			for  := range .m.ImportSection {
				 := &.m.ImportSection[]
				if .Type == wasm.ExternTypeFunc {
					if  == int() {
						 = .DescFunc
						break
					}
					++
				}
			}
		} else {
			 = .m.FunctionSection[-.m.ImportFunctionCount]
		}
		 := &.m.TypeSection[]

		 := len(.Params)
		 := len(.values) - 
		 := .values[:]
		.values = .values[:]
		 := .allocateVarLengthValues(2+len(), .execCtxPtrValue)

		 := .signatures[]
		 := .AllocateInstruction()
		if  >= .m.ImportFunctionCount {
			 = .Append(.VarLengthPool(), .moduleCtxPtrValue) // This case the callee module is itself.
			 = .Append(.VarLengthPool(), ...)
			.AsCall(FunctionIndexToFuncRef(), , )
			.InsertInstruction()
		} else {
			// This case we have to read the address of the imported function from the module context.
			 := .moduleCtxPtrValue
			,  := .AllocateInstruction(), .AllocateInstruction()
			, ,  := .offset.ImportedFunctionOffset()
			.AsLoad(, .U32(), ssa.TypeI64)
			.AsLoad(, .U32(), ssa.TypeI64)
			.InsertInstruction()
			.InsertInstruction()

			 = .Append(.VarLengthPool(), .Return())
			 = .Append(.VarLengthPool(), ...)
			.AsCallIndirect(.Return(), , )
			.InsertInstruction()
		}

		,  := .Returns()
		if .Valid() {
			.push()
		}
		for ,  := range  {
			.push()
		}

		.reloadAfterCall()

	case wasm.OpcodeDrop:
		if .unreachable {
			break
		}
		_ = .pop()
	case wasm.OpcodeF64ConvertI32S, wasm.OpcodeF64ConvertI64S, wasm.OpcodeF64ConvertI32U, wasm.OpcodeF64ConvertI64U:
		if .unreachable {
			break
		}
		 := .AllocateInstruction().AsFcvtFromInt(
			.pop(),
			 == wasm.OpcodeF64ConvertI32S ||  == wasm.OpcodeF64ConvertI64S,
			true,
		).Insert().Return()
		.push()
	case wasm.OpcodeF32ConvertI32S, wasm.OpcodeF32ConvertI64S, wasm.OpcodeF32ConvertI32U, wasm.OpcodeF32ConvertI64U:
		if .unreachable {
			break
		}
		 := .AllocateInstruction().AsFcvtFromInt(
			.pop(),
			 == wasm.OpcodeF32ConvertI32S ||  == wasm.OpcodeF32ConvertI64S,
			false,
		).Insert().Return()
		.push()
	case wasm.OpcodeF32DemoteF64:
		if .unreachable {
			break
		}
		 := .AllocateInstruction()
		.AsFdemote(.pop())
		.InsertInstruction()
		.push(.Return())
	case wasm.OpcodeF64PromoteF32:
		if .unreachable {
			break
		}
		 := .AllocateInstruction()
		.AsFpromote(.pop())
		.InsertInstruction()
		.push(.Return())

	case wasm.OpcodeVecPrefix:
		.pc++
		 := .wasmFunctionBody[.pc]
		switch  {
		case wasm.OpcodeVecV128Const:
			.pc++
			 := binary.LittleEndian.Uint64(.wasmFunctionBody[.pc:])
			.pc += 8
			 := binary.LittleEndian.Uint64(.wasmFunctionBody[.pc:])
			.pc += 7
			if .unreachable {
				break
			}
			 := .AllocateInstruction().AsVconst(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecV128Load:
			,  := .readMemArg()
			if .unreachable {
				break
			}
			 := .pop()
			 := .memOpSetup(, uint64(), 16)
			 := .AllocateInstruction()
			.AsLoad(, , ssa.TypeV128)
			.InsertInstruction()
			.push(.Return())
		case wasm.OpcodeVecV128Load8Lane, wasm.OpcodeVecV128Load16Lane, wasm.OpcodeVecV128Load32Lane:
			,  := .readMemArg()
			.pc++
			if .unreachable {
				break
			}
			var  ssa.VecLane
			var  ssa.Opcode
			var  uint64
			switch  {
			case wasm.OpcodeVecV128Load8Lane:
				, ,  = ssa.OpcodeUload8, ssa.VecLaneI8x16, 1
			case wasm.OpcodeVecV128Load16Lane:
				, ,  = ssa.OpcodeUload16, ssa.VecLaneI16x8, 2
			case wasm.OpcodeVecV128Load32Lane:
				, ,  = ssa.OpcodeUload32, ssa.VecLaneI32x4, 4
			}
			 := .wasmFunctionBody[.pc]
			 := .pop()
			 := .pop()
			 := .memOpSetup(, uint64(), )
			 := .AllocateInstruction().
				AsExtLoad(, , , false).
				Insert().Return()
			 := .AllocateInstruction().
				AsInsertlane(, , , ).
				Insert().Return()
			.push()
		case wasm.OpcodeVecV128Load64Lane:
			,  := .readMemArg()
			.pc++
			if .unreachable {
				break
			}
			 := .wasmFunctionBody[.pc]
			 := .pop()
			 := .pop()
			 := .memOpSetup(, uint64(), 8)
			 := .AllocateInstruction().
				AsLoad(, , ssa.TypeI64).
				Insert().Return()
			 := .AllocateInstruction().
				AsInsertlane(, , , ssa.VecLaneI64x2).
				Insert().Return()
			.push()

		case wasm.OpcodeVecV128Load32zero, wasm.OpcodeVecV128Load64zero:
			,  := .readMemArg()
			if .unreachable {
				break
			}

			var  ssa.Type
			switch  {
			case wasm.OpcodeVecV128Load32zero:
				 = ssa.TypeF32
			case wasm.OpcodeVecV128Load64zero:
				 = ssa.TypeF64
			}

			 := .pop()
			 := .memOpSetup(, uint64(), uint64(.Size()))

			 := .AllocateInstruction().
				AsVZeroExtLoad(, , ).
				Insert().Return()
			.push()

		case wasm.OpcodeVecV128Load8x8u, wasm.OpcodeVecV128Load8x8s,
			wasm.OpcodeVecV128Load16x4u, wasm.OpcodeVecV128Load16x4s,
			wasm.OpcodeVecV128Load32x2u, wasm.OpcodeVecV128Load32x2s:
			,  := .readMemArg()
			if .unreachable {
				break
			}
			var  ssa.VecLane
			var  bool
			switch  {
			case wasm.OpcodeVecV128Load8x8s:
				 = true
				fallthrough
			case wasm.OpcodeVecV128Load8x8u:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecV128Load16x4s:
				 = true
				fallthrough
			case wasm.OpcodeVecV128Load16x4u:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecV128Load32x2s:
				 = true
				fallthrough
			case wasm.OpcodeVecV128Load32x2u:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .memOpSetup(, uint64(), 8)
			 := .AllocateInstruction().
				AsLoad(, , ssa.TypeF64).
				Insert().Return()
			 := .AllocateInstruction().
				AsWiden(, , , true).
				Insert().Return()
			.push()
		case wasm.OpcodeVecV128Load8Splat, wasm.OpcodeVecV128Load16Splat,
			wasm.OpcodeVecV128Load32Splat, wasm.OpcodeVecV128Load64Splat:
			,  := .readMemArg()
			if .unreachable {
				break
			}
			var  ssa.VecLane
			var  uint64
			switch  {
			case wasm.OpcodeVecV128Load8Splat:
				,  = ssa.VecLaneI8x16, 1
			case wasm.OpcodeVecV128Load16Splat:
				,  = ssa.VecLaneI16x8, 2
			case wasm.OpcodeVecV128Load32Splat:
				,  = ssa.VecLaneI32x4, 4
			case wasm.OpcodeVecV128Load64Splat:
				,  = ssa.VecLaneI64x2, 8
			}
			 := .pop()
			 := .memOpSetup(, uint64(), )
			 := .AllocateInstruction().
				AsLoadSplat(, , ).
				Insert().Return()
			.push()
		case wasm.OpcodeVecV128Store:
			,  := .readMemArg()
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .memOpSetup(, uint64(), 16)
			.AllocateInstruction().
				AsStore(ssa.OpcodeStore, , , ).
				Insert()
		case wasm.OpcodeVecV128Store8Lane, wasm.OpcodeVecV128Store16Lane,
			wasm.OpcodeVecV128Store32Lane, wasm.OpcodeVecV128Store64Lane:
			,  := .readMemArg()
			.pc++
			if .unreachable {
				break
			}
			 := .wasmFunctionBody[.pc]
			var  ssa.Opcode
			var  ssa.VecLane
			var  uint64
			switch  {
			case wasm.OpcodeVecV128Store8Lane:
				, ,  = ssa.OpcodeIstore8, ssa.VecLaneI8x16, 1
			case wasm.OpcodeVecV128Store16Lane:
				, ,  = ssa.OpcodeIstore16, ssa.VecLaneI16x8, 2
			case wasm.OpcodeVecV128Store32Lane:
				, ,  = ssa.OpcodeIstore32, ssa.VecLaneI32x4, 4
			case wasm.OpcodeVecV128Store64Lane:
				, ,  = ssa.OpcodeStore, ssa.VecLaneI64x2, 8
			}
			 := .pop()
			 := .pop()
			 := .memOpSetup(, uint64(), )
			 := .AllocateInstruction().
				AsExtractlane(, , , false).
				Insert().Return()
			.AllocateInstruction().
				AsStore(, , , ).
				Insert()
		case wasm.OpcodeVecV128Not:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().AsVbnot().Insert().Return()
			.push()
		case wasm.OpcodeVecV128And:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVband(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecV128AndNot:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVbandnot(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecV128Or:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVbor(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecV128Xor:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVbxor(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecV128Bitselect:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVbitselect(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecV128AnyTrue:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().AsVanyTrue().Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16AllTrue, wasm.OpcodeVecI16x8AllTrue, wasm.OpcodeVecI32x4AllTrue, wasm.OpcodeVecI64x2AllTrue:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16AllTrue:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8AllTrue:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4AllTrue:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2AllTrue:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVallTrue(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16BitMask, wasm.OpcodeVecI16x8BitMask, wasm.OpcodeVecI32x4BitMask, wasm.OpcodeVecI64x2BitMask:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16BitMask:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8BitMask:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4BitMask:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2BitMask:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVhighBits(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16Abs, wasm.OpcodeVecI16x8Abs, wasm.OpcodeVecI32x4Abs, wasm.OpcodeVecI64x2Abs:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Abs:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Abs:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Abs:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Abs:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVIabs(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16Neg, wasm.OpcodeVecI16x8Neg, wasm.OpcodeVecI32x4Neg, wasm.OpcodeVecI64x2Neg:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Neg:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Neg:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Neg:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Neg:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVIneg(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16Popcnt:
			if .unreachable {
				break
			}
			 := ssa.VecLaneI8x16
			 := .pop()

			 := .AllocateInstruction().AsVIpopcnt(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16Add, wasm.OpcodeVecI16x8Add, wasm.OpcodeVecI32x4Add, wasm.OpcodeVecI64x2Add:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Add:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Add:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Add:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Add:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVIadd(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16AddSatS, wasm.OpcodeVecI16x8AddSatS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16AddSatS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8AddSatS:
				 = ssa.VecLaneI16x8
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVSaddSat(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16AddSatU, wasm.OpcodeVecI16x8AddSatU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16AddSatU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8AddSatU:
				 = ssa.VecLaneI16x8
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVUaddSat(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16SubSatS, wasm.OpcodeVecI16x8SubSatS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16SubSatS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8SubSatS:
				 = ssa.VecLaneI16x8
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVSsubSat(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16SubSatU, wasm.OpcodeVecI16x8SubSatU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16SubSatU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8SubSatU:
				 = ssa.VecLaneI16x8
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVUsubSat(, , ).Insert().Return()
			.push()

		case wasm.OpcodeVecI8x16Sub, wasm.OpcodeVecI16x8Sub, wasm.OpcodeVecI32x4Sub, wasm.OpcodeVecI64x2Sub:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Sub:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Sub:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Sub:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Sub:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVIsub(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16MinS, wasm.OpcodeVecI16x8MinS, wasm.OpcodeVecI32x4MinS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16MinS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8MinS:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4MinS:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVImin(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16MinU, wasm.OpcodeVecI16x8MinU, wasm.OpcodeVecI32x4MinU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16MinU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8MinU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4MinU:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVUmin(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16MaxS, wasm.OpcodeVecI16x8MaxS, wasm.OpcodeVecI32x4MaxS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16MaxS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8MaxS:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4MaxS:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVImax(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16MaxU, wasm.OpcodeVecI16x8MaxU, wasm.OpcodeVecI32x4MaxU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16MaxU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8MaxU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4MaxU:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVUmax(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16AvgrU, wasm.OpcodeVecI16x8AvgrU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16AvgrU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8AvgrU:
				 = ssa.VecLaneI16x8
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVAvgRound(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI16x8Mul, wasm.OpcodeVecI32x4Mul, wasm.OpcodeVecI64x2Mul:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI16x8Mul:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Mul:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Mul:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVImul(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI16x8Q15mulrSatS:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsSqmulRoundSat(, , ssa.VecLaneI16x8).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16Eq, wasm.OpcodeVecI16x8Eq, wasm.OpcodeVecI32x4Eq, wasm.OpcodeVecI64x2Eq:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Eq:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Eq:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Eq:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Eq:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16Ne, wasm.OpcodeVecI16x8Ne, wasm.OpcodeVecI32x4Ne, wasm.OpcodeVecI64x2Ne:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Ne:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Ne:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Ne:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Ne:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondNotEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16LtS, wasm.OpcodeVecI16x8LtS, wasm.OpcodeVecI32x4LtS, wasm.OpcodeVecI64x2LtS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16LtS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8LtS:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4LtS:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2LtS:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondSignedLessThan, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16LtU, wasm.OpcodeVecI16x8LtU, wasm.OpcodeVecI32x4LtU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16LtU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8LtU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4LtU:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondUnsignedLessThan, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16LeS, wasm.OpcodeVecI16x8LeS, wasm.OpcodeVecI32x4LeS, wasm.OpcodeVecI64x2LeS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16LeS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8LeS:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4LeS:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2LeS:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondSignedLessThanOrEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16LeU, wasm.OpcodeVecI16x8LeU, wasm.OpcodeVecI32x4LeU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16LeU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8LeU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4LeU:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondUnsignedLessThanOrEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16GtS, wasm.OpcodeVecI16x8GtS, wasm.OpcodeVecI32x4GtS, wasm.OpcodeVecI64x2GtS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16GtS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8GtS:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4GtS:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2GtS:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondSignedGreaterThan, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16GtU, wasm.OpcodeVecI16x8GtU, wasm.OpcodeVecI32x4GtU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16GtU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8GtU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4GtU:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondUnsignedGreaterThan, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16GeS, wasm.OpcodeVecI16x8GeS, wasm.OpcodeVecI32x4GeS, wasm.OpcodeVecI64x2GeS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16GeS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8GeS:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4GeS:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2GeS:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondSignedGreaterThanOrEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16GeU, wasm.OpcodeVecI16x8GeU, wasm.OpcodeVecI32x4GeU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16GeU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8GeU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4GeU:
				 = ssa.VecLaneI32x4
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVIcmp(, , ssa.IntegerCmpCondUnsignedGreaterThanOrEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Max, wasm.OpcodeVecF64x2Max:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Max:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Max:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVFmax(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Abs, wasm.OpcodeVecF64x2Abs:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Abs:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Abs:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVFabs(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Min, wasm.OpcodeVecF64x2Min:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Min:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Min:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVFmin(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Neg, wasm.OpcodeVecF64x2Neg:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Neg:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Neg:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVFneg(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Sqrt, wasm.OpcodeVecF64x2Sqrt:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Sqrt:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Sqrt:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVSqrt(, ).Insert().Return()
			.push()

		case wasm.OpcodeVecF32x4Add, wasm.OpcodeVecF64x2Add:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Add:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Add:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVFadd(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Sub, wasm.OpcodeVecF64x2Sub:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Sub:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Sub:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVFsub(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Mul, wasm.OpcodeVecF64x2Mul:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Mul:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Mul:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVFmul(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Div, wasm.OpcodeVecF64x2Div:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Div:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Div:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVFdiv(, , ).Insert().Return()
			.push()

		case wasm.OpcodeVecI16x8ExtaddPairwiseI8x16S, wasm.OpcodeVecI16x8ExtaddPairwiseI8x16U:
			if .unreachable {
				break
			}
			 := .pop()
			 :=  == wasm.OpcodeVecI16x8ExtaddPairwiseI8x16S
			 := .AllocateInstruction().AsExtIaddPairwise(, ssa.VecLaneI8x16, ).Insert().Return()
			.push()

		case wasm.OpcodeVecI32x4ExtaddPairwiseI16x8S, wasm.OpcodeVecI32x4ExtaddPairwiseI16x8U:
			if .unreachable {
				break
			}
			 := .pop()
			 :=  == wasm.OpcodeVecI32x4ExtaddPairwiseI16x8S
			 := .AllocateInstruction().AsExtIaddPairwise(, ssa.VecLaneI16x8, ).Insert().Return()
			.push()

		case wasm.OpcodeVecI16x8ExtMulLowI8x16S, wasm.OpcodeVecI16x8ExtMulLowI8x16U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .lowerExtMul(
				, ,
				ssa.VecLaneI8x16, ssa.VecLaneI16x8,
				 == wasm.OpcodeVecI16x8ExtMulLowI8x16S, true)
			.push()

		case wasm.OpcodeVecI16x8ExtMulHighI8x16S, wasm.OpcodeVecI16x8ExtMulHighI8x16U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .lowerExtMul(
				, ,
				ssa.VecLaneI8x16, ssa.VecLaneI16x8,
				 == wasm.OpcodeVecI16x8ExtMulHighI8x16S, false)
			.push()

		case wasm.OpcodeVecI32x4ExtMulLowI16x8S, wasm.OpcodeVecI32x4ExtMulLowI16x8U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .lowerExtMul(
				, ,
				ssa.VecLaneI16x8, ssa.VecLaneI32x4,
				 == wasm.OpcodeVecI32x4ExtMulLowI16x8S, true)
			.push()

		case wasm.OpcodeVecI32x4ExtMulHighI16x8S, wasm.OpcodeVecI32x4ExtMulHighI16x8U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .lowerExtMul(
				, ,
				ssa.VecLaneI16x8, ssa.VecLaneI32x4,
				 == wasm.OpcodeVecI32x4ExtMulHighI16x8S, false)
			.push()
		case wasm.OpcodeVecI64x2ExtMulLowI32x4S, wasm.OpcodeVecI64x2ExtMulLowI32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .lowerExtMul(
				, ,
				ssa.VecLaneI32x4, ssa.VecLaneI64x2,
				 == wasm.OpcodeVecI64x2ExtMulLowI32x4S, true)
			.push()

		case wasm.OpcodeVecI64x2ExtMulHighI32x4S, wasm.OpcodeVecI64x2ExtMulHighI32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .lowerExtMul(
				, ,
				ssa.VecLaneI32x4, ssa.VecLaneI64x2,
				 == wasm.OpcodeVecI64x2ExtMulHighI32x4S, false)
			.push()

		case wasm.OpcodeVecI32x4DotI16x8S:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()

			 := .AllocateInstruction().AsWideningPairwiseDotProductS(, ).Insert().Return()
			.push()

		case wasm.OpcodeVecF32x4Eq, wasm.OpcodeVecF64x2Eq:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Eq:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Eq:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVFcmp(, , ssa.FloatCmpCondEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Ne, wasm.OpcodeVecF64x2Ne:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Ne:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Ne:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVFcmp(, , ssa.FloatCmpCondNotEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Lt, wasm.OpcodeVecF64x2Lt:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Lt:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Lt:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVFcmp(, , ssa.FloatCmpCondLessThan, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Le, wasm.OpcodeVecF64x2Le:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Le:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Le:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVFcmp(, , ssa.FloatCmpCondLessThanOrEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Gt, wasm.OpcodeVecF64x2Gt:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Gt:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Gt:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVFcmp(, , ssa.FloatCmpCondGreaterThan, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Ge, wasm.OpcodeVecF64x2Ge:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Ge:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Ge:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsVFcmp(, , ssa.FloatCmpCondGreaterThanOrEqual, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Ceil, wasm.OpcodeVecF64x2Ceil:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Ceil:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Ceil:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVCeil(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Floor, wasm.OpcodeVecF64x2Floor:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Floor:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Floor:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVFloor(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Trunc, wasm.OpcodeVecF64x2Trunc:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Trunc:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Trunc:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVTrunc(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Nearest, wasm.OpcodeVecF64x2Nearest:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Nearest:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Nearest:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsVNearest(, ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Pmin, wasm.OpcodeVecF64x2Pmin:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Pmin:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Pmin:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVMinPseudo(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4Pmax, wasm.OpcodeVecF64x2Pmax:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecF32x4Pmax:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Pmax:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVMaxPseudo(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI32x4TruncSatF32x4S, wasm.OpcodeVecI32x4TruncSatF32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsVFcvtToIntSat(, ssa.VecLaneF32x4,  == wasm.OpcodeVecI32x4TruncSatF32x4S).Insert().Return()
			.push()
		case wasm.OpcodeVecI32x4TruncSatF64x2SZero, wasm.OpcodeVecI32x4TruncSatF64x2UZero:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsVFcvtToIntSat(, ssa.VecLaneF64x2,  == wasm.OpcodeVecI32x4TruncSatF64x2SZero).Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4ConvertI32x4S, wasm.OpcodeVecF32x4ConvertI32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsVFcvtFromInt(, ssa.VecLaneF32x4,  == wasm.OpcodeVecF32x4ConvertI32x4S).Insert().Return()
			.push()
		case wasm.OpcodeVecF64x2ConvertLowI32x4S, wasm.OpcodeVecF64x2ConvertLowI32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			if runtime.GOARCH == "arm64" {
				// TODO: this is weird. fix.
				 = .AllocateInstruction().
					AsWiden(, ssa.VecLaneI32x4,  == wasm.OpcodeVecF64x2ConvertLowI32x4S, true).Insert().Return()
			}
			 := .AllocateInstruction().
				AsVFcvtFromInt(, ssa.VecLaneF64x2,  == wasm.OpcodeVecF64x2ConvertLowI32x4S).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16NarrowI16x8S, wasm.OpcodeVecI8x16NarrowI16x8U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsNarrow(, , ssa.VecLaneI16x8,  == wasm.OpcodeVecI8x16NarrowI16x8S).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI16x8NarrowI32x4S, wasm.OpcodeVecI16x8NarrowI32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().
				AsNarrow(, , ssa.VecLaneI32x4,  == wasm.OpcodeVecI16x8NarrowI32x4S).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI16x8ExtendLowI8x16S, wasm.OpcodeVecI16x8ExtendLowI8x16U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsWiden(, ssa.VecLaneI8x16,  == wasm.OpcodeVecI16x8ExtendLowI8x16S, true).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI16x8ExtendHighI8x16S, wasm.OpcodeVecI16x8ExtendHighI8x16U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsWiden(, ssa.VecLaneI8x16,  == wasm.OpcodeVecI16x8ExtendHighI8x16S, false).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI32x4ExtendLowI16x8S, wasm.OpcodeVecI32x4ExtendLowI16x8U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsWiden(, ssa.VecLaneI16x8,  == wasm.OpcodeVecI32x4ExtendLowI16x8S, true).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI32x4ExtendHighI16x8S, wasm.OpcodeVecI32x4ExtendHighI16x8U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsWiden(, ssa.VecLaneI16x8,  == wasm.OpcodeVecI32x4ExtendHighI16x8S, false).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI64x2ExtendLowI32x4S, wasm.OpcodeVecI64x2ExtendLowI32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsWiden(, ssa.VecLaneI32x4,  == wasm.OpcodeVecI64x2ExtendLowI32x4S, true).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI64x2ExtendHighI32x4S, wasm.OpcodeVecI64x2ExtendHighI32x4U:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsWiden(, ssa.VecLaneI32x4,  == wasm.OpcodeVecI64x2ExtendHighI32x4S, false).
				Insert().Return()
			.push()

		case wasm.OpcodeVecF64x2PromoteLowF32x4Zero:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsFvpromoteLow(, ssa.VecLaneF32x4).
				Insert().Return()
			.push()
		case wasm.OpcodeVecF32x4DemoteF64x2Zero:
			if .unreachable {
				break
			}
			 := .pop()
			 := .AllocateInstruction().
				AsFvdemote(, ssa.VecLaneF64x2).
				Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16Shl, wasm.OpcodeVecI16x8Shl, wasm.OpcodeVecI32x4Shl, wasm.OpcodeVecI64x2Shl:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Shl:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Shl:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Shl:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Shl:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVIshl(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16ShrS, wasm.OpcodeVecI16x8ShrS, wasm.OpcodeVecI32x4ShrS, wasm.OpcodeVecI64x2ShrS:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16ShrS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8ShrS:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4ShrS:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2ShrS:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVSshr(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16ShrU, wasm.OpcodeVecI16x8ShrU, wasm.OpcodeVecI32x4ShrU, wasm.OpcodeVecI64x2ShrU:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16ShrU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8ShrU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4ShrU:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2ShrU:
				 = ssa.VecLaneI64x2
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsVUshr(, , ).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16ExtractLaneS, wasm.OpcodeVecI16x8ExtractLaneS:
			.pc++
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16ExtractLaneS:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8ExtractLaneS:
				 = ssa.VecLaneI16x8
			}
			 := .pop()
			 := .wasmFunctionBody[.pc]
			 := .AllocateInstruction().AsExtractlane(, , , true).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16ExtractLaneU, wasm.OpcodeVecI16x8ExtractLaneU,
			wasm.OpcodeVecI32x4ExtractLane, wasm.OpcodeVecI64x2ExtractLane,
			wasm.OpcodeVecF32x4ExtractLane, wasm.OpcodeVecF64x2ExtractLane:
			.pc++ // Skip the immediate value.
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16ExtractLaneU:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8ExtractLaneU:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4ExtractLane:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2ExtractLane:
				 = ssa.VecLaneI64x2
			case wasm.OpcodeVecF32x4ExtractLane:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2ExtractLane:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .wasmFunctionBody[.pc]
			 := .AllocateInstruction().AsExtractlane(, , , false).Insert().Return()
			.push()
		case wasm.OpcodeVecI8x16ReplaceLane, wasm.OpcodeVecI16x8ReplaceLane,
			wasm.OpcodeVecI32x4ReplaceLane, wasm.OpcodeVecI64x2ReplaceLane,
			wasm.OpcodeVecF32x4ReplaceLane, wasm.OpcodeVecF64x2ReplaceLane:
			.pc++
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16ReplaceLane:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8ReplaceLane:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4ReplaceLane:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2ReplaceLane:
				 = ssa.VecLaneI64x2
			case wasm.OpcodeVecF32x4ReplaceLane:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2ReplaceLane:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .pop()
			 := .wasmFunctionBody[.pc]
			 := .AllocateInstruction().AsInsertlane(, , , ).Insert().Return()
			.push()
		case wasm.OpcodeVecV128i8x16Shuffle:
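			// i8x16.shuffle carries a 16-byte lane-selector immediate: read all 16 bytes here, then
			// advance pc to the last immediate byte so the trailing pc++ at the end of this opcode
			// moves past it.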
			.pc++
			 := .wasmFunctionBody[.pc : .pc+16]
			.pc += 15
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsShuffle(, , ).Insert().Return()
			.push()

		case wasm.OpcodeVecI8x16Swizzle:
			if .unreachable {
				break
			}
			 := .pop()
			 := .pop()
			 := .AllocateInstruction().AsSwizzle(, , ssa.VecLaneI8x16).Insert().Return()
			.push()

		case wasm.OpcodeVecI8x16Splat,
			wasm.OpcodeVecI16x8Splat,
			wasm.OpcodeVecI32x4Splat,
			wasm.OpcodeVecI64x2Splat,
			wasm.OpcodeVecF32x4Splat,
			wasm.OpcodeVecF64x2Splat:
			if .unreachable {
				break
			}
			var  ssa.VecLane
			switch  {
			case wasm.OpcodeVecI8x16Splat:
				 = ssa.VecLaneI8x16
			case wasm.OpcodeVecI16x8Splat:
				 = ssa.VecLaneI16x8
			case wasm.OpcodeVecI32x4Splat:
				 = ssa.VecLaneI32x4
			case wasm.OpcodeVecI64x2Splat:
				 = ssa.VecLaneI64x2
			case wasm.OpcodeVecF32x4Splat:
				 = ssa.VecLaneF32x4
			case wasm.OpcodeVecF64x2Splat:
				 = ssa.VecLaneF64x2
			}
			 := .pop()
			 := .AllocateInstruction().AsSplat(, ).Insert().Return()
			.push()

		default:
			panic("TODO: unsupported vector instruction: " + wasm.VectorInstructionName())
		}
	case wasm.OpcodeAtomicPrefix:
		.pc++
		 := .wasmFunctionBody[.pc]
		switch  {
		case wasm.OpcodeAtomicMemoryWait32, wasm.OpcodeAtomicMemoryWait64:
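			// memory.atomic.wait32/64 are implemented by calling out to a Go trampoline whose
			// address is stored in the execution context.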
			,  := .readMemArg()
			if .unreachable {
				break
			}

			.storeCallerModuleContext()

			var  uint64
			var  wazevoapi.Offset
			var  *ssa.Signature
			switch  {
			case wasm.OpcodeAtomicMemoryWait32:
				 = 4
				 = wazevoapi.ExecutionContextOffsetMemoryWait32TrampolineAddress
				 = &.memoryWait32Sig
			case wasm.OpcodeAtomicMemoryWait64:
				 = 8
				 = wazevoapi.ExecutionContextOffsetMemoryWait64TrampolineAddress
				 = &.memoryWait64Sig
			}

			 := .pop()
			 := .pop()
			 := .pop()
			 := .atomicMemOpSetup(, uint64(), )

			 := .AllocateInstruction().
				AsLoad(.execCtxPtrValue,
					.U32(),
					ssa.TypeI64,
				).Insert().Return()

			 := .allocateVarLengthValues(3, .execCtxPtrValue, , , )
			 := .AllocateInstruction().
				AsCallIndirect(, , ).
				Insert().Return()
			.push()
		case wasm.OpcodeAtomicMemoryNotify:
			,  := .readMemArg()
			if .unreachable {
				break
			}

			.storeCallerModuleContext()
			 := .pop()
			 := .pop()
			 := .atomicMemOpSetup(, uint64(), 4)

			 := .AllocateInstruction().
				AsLoad(.execCtxPtrValue,
					wazevoapi.ExecutionContextOffsetMemoryNotifyTrampolineAddress.U32(),
					ssa.TypeI64,
				).Insert().Return()
			 := .allocateVarLengthValues(2, .execCtxPtrValue, , )
			 := .AllocateInstruction().
				AsCallIndirect(, &.memoryNotifySig, ).
				Insert().Return()
			.push()
		case wasm.OpcodeAtomicI32Load, wasm.OpcodeAtomicI64Load, wasm.OpcodeAtomicI32Load8U, wasm.OpcodeAtomicI32Load16U, wasm.OpcodeAtomicI64Load8U, wasm.OpcodeAtomicI64Load16U, wasm.OpcodeAtomicI64Load32U:
			,  := .readMemArg()
			if .unreachable {
				break
			}

			 := .pop()

			var  uint64
			switch  {
			case wasm.OpcodeAtomicI64Load:
				 = 8
			case wasm.OpcodeAtomicI32Load, wasm.OpcodeAtomicI64Load32U:
				 = 4
			case wasm.OpcodeAtomicI32Load16U, wasm.OpcodeAtomicI64Load16U:
				 = 2
			case wasm.OpcodeAtomicI32Load8U, wasm.OpcodeAtomicI64Load8U:
				 = 1
			}

			var  ssa.Type
			switch  {
			case wasm.OpcodeAtomicI64Load, wasm.OpcodeAtomicI64Load32U, wasm.OpcodeAtomicI64Load16U, wasm.OpcodeAtomicI64Load8U:
				 = ssa.TypeI64
			case wasm.OpcodeAtomicI32Load, wasm.OpcodeAtomicI32Load16U, wasm.OpcodeAtomicI32Load8U:
				 = ssa.TypeI32
			}

			 := .atomicMemOpSetup(, uint64(), )
			 := .AllocateInstruction().AsAtomicLoad(, , ).Insert().Return()
			.push()
		case wasm.OpcodeAtomicI32Store, wasm.OpcodeAtomicI64Store, wasm.OpcodeAtomicI32Store8, wasm.OpcodeAtomicI32Store16, wasm.OpcodeAtomicI64Store8, wasm.OpcodeAtomicI64Store16, wasm.OpcodeAtomicI64Store32:
			,  := .readMemArg()
			if .unreachable {
				break
			}

			 := .pop()
			 := .pop()

			var  uint64
			switch  {
			case wasm.OpcodeAtomicI64Store:
				 = 8
			case wasm.OpcodeAtomicI32Store, wasm.OpcodeAtomicI64Store32:
				 = 4
			case wasm.OpcodeAtomicI32Store16, wasm.OpcodeAtomicI64Store16:
				 = 2
			case wasm.OpcodeAtomicI32Store8, wasm.OpcodeAtomicI64Store8:
				 = 1
			}

			 := .atomicMemOpSetup(, uint64(), )
			.AllocateInstruction().AsAtomicStore(, , ).Insert()
		case wasm.OpcodeAtomicI32RmwAdd, wasm.OpcodeAtomicI64RmwAdd, wasm.OpcodeAtomicI32Rmw8AddU, wasm.OpcodeAtomicI32Rmw16AddU, wasm.OpcodeAtomicI64Rmw8AddU, wasm.OpcodeAtomicI64Rmw16AddU, wasm.OpcodeAtomicI64Rmw32AddU,
			wasm.OpcodeAtomicI32RmwSub, wasm.OpcodeAtomicI64RmwSub, wasm.OpcodeAtomicI32Rmw8SubU, wasm.OpcodeAtomicI32Rmw16SubU, wasm.OpcodeAtomicI64Rmw8SubU, wasm.OpcodeAtomicI64Rmw16SubU, wasm.OpcodeAtomicI64Rmw32SubU,
			wasm.OpcodeAtomicI32RmwAnd, wasm.OpcodeAtomicI64RmwAnd, wasm.OpcodeAtomicI32Rmw8AndU, wasm.OpcodeAtomicI32Rmw16AndU, wasm.OpcodeAtomicI64Rmw8AndU, wasm.OpcodeAtomicI64Rmw16AndU, wasm.OpcodeAtomicI64Rmw32AndU,
			wasm.OpcodeAtomicI32RmwOr, wasm.OpcodeAtomicI64RmwOr, wasm.OpcodeAtomicI32Rmw8OrU, wasm.OpcodeAtomicI32Rmw16OrU, wasm.OpcodeAtomicI64Rmw8OrU, wasm.OpcodeAtomicI64Rmw16OrU, wasm.OpcodeAtomicI64Rmw32OrU,
			wasm.OpcodeAtomicI32RmwXor, wasm.OpcodeAtomicI64RmwXor, wasm.OpcodeAtomicI32Rmw8XorU, wasm.OpcodeAtomicI32Rmw16XorU, wasm.OpcodeAtomicI64Rmw8XorU, wasm.OpcodeAtomicI64Rmw16XorU, wasm.OpcodeAtomicI64Rmw32XorU,
			wasm.OpcodeAtomicI32RmwXchg, wasm.OpcodeAtomicI64RmwXchg, wasm.OpcodeAtomicI32Rmw8XchgU, wasm.OpcodeAtomicI32Rmw16XchgU, wasm.OpcodeAtomicI64Rmw8XchgU, wasm.OpcodeAtomicI64Rmw16XchgU, wasm.OpcodeAtomicI64Rmw32XchgU:
			,  := .readMemArg()
			if .unreachable {
				break
			}

			 := .pop()
			 := .pop()

			var  ssa.AtomicRmwOp
			var  uint64
			switch  {
			case wasm.OpcodeAtomicI32RmwAdd, wasm.OpcodeAtomicI64RmwAdd, wasm.OpcodeAtomicI32Rmw8AddU, wasm.OpcodeAtomicI32Rmw16AddU, wasm.OpcodeAtomicI64Rmw8AddU, wasm.OpcodeAtomicI64Rmw16AddU, wasm.OpcodeAtomicI64Rmw32AddU:
				 = ssa.AtomicRmwOpAdd
				switch  {
				case wasm.OpcodeAtomicI64RmwAdd:
					 = 8
				case wasm.OpcodeAtomicI32RmwAdd, wasm.OpcodeAtomicI64Rmw32AddU:
					 = 4
				case wasm.OpcodeAtomicI32Rmw16AddU, wasm.OpcodeAtomicI64Rmw16AddU:
					 = 2
				case wasm.OpcodeAtomicI32Rmw8AddU, wasm.OpcodeAtomicI64Rmw8AddU:
					 = 1
				}
			case wasm.OpcodeAtomicI32RmwSub, wasm.OpcodeAtomicI64RmwSub, wasm.OpcodeAtomicI32Rmw8SubU, wasm.OpcodeAtomicI32Rmw16SubU, wasm.OpcodeAtomicI64Rmw8SubU, wasm.OpcodeAtomicI64Rmw16SubU, wasm.OpcodeAtomicI64Rmw32SubU:
				 = ssa.AtomicRmwOpSub
				switch  {
				case wasm.OpcodeAtomicI64RmwSub:
					 = 8
				case wasm.OpcodeAtomicI32RmwSub, wasm.OpcodeAtomicI64Rmw32SubU:
					 = 4
				case wasm.OpcodeAtomicI32Rmw16SubU, wasm.OpcodeAtomicI64Rmw16SubU:
					 = 2
				case wasm.OpcodeAtomicI32Rmw8SubU, wasm.OpcodeAtomicI64Rmw8SubU:
					 = 1
				}
			case wasm.OpcodeAtomicI32RmwAnd, wasm.OpcodeAtomicI64RmwAnd, wasm.OpcodeAtomicI32Rmw8AndU, wasm.OpcodeAtomicI32Rmw16AndU, wasm.OpcodeAtomicI64Rmw8AndU, wasm.OpcodeAtomicI64Rmw16AndU, wasm.OpcodeAtomicI64Rmw32AndU:
				 = ssa.AtomicRmwOpAnd
				switch  {
				case wasm.OpcodeAtomicI64RmwAnd:
					 = 8
				case wasm.OpcodeAtomicI32RmwAnd, wasm.OpcodeAtomicI64Rmw32AndU:
					 = 4
				case wasm.OpcodeAtomicI32Rmw16AndU, wasm.OpcodeAtomicI64Rmw16AndU:
					 = 2
				case wasm.OpcodeAtomicI32Rmw8AndU, wasm.OpcodeAtomicI64Rmw8AndU:
					 = 1
				}
			case wasm.OpcodeAtomicI32RmwOr, wasm.OpcodeAtomicI64RmwOr, wasm.OpcodeAtomicI32Rmw8OrU, wasm.OpcodeAtomicI32Rmw16OrU, wasm.OpcodeAtomicI64Rmw8OrU, wasm.OpcodeAtomicI64Rmw16OrU, wasm.OpcodeAtomicI64Rmw32OrU:
				 = ssa.AtomicRmwOpOr
				switch  {
				case wasm.OpcodeAtomicI64RmwOr:
					 = 8
				case wasm.OpcodeAtomicI32RmwOr, wasm.OpcodeAtomicI64Rmw32OrU:
					 = 4
				case wasm.OpcodeAtomicI32Rmw16OrU, wasm.OpcodeAtomicI64Rmw16OrU:
					 = 2
				case wasm.OpcodeAtomicI32Rmw8OrU, wasm.OpcodeAtomicI64Rmw8OrU:
					 = 1
				}
			case wasm.OpcodeAtomicI32RmwXor, wasm.OpcodeAtomicI64RmwXor, wasm.OpcodeAtomicI32Rmw8XorU, wasm.OpcodeAtomicI32Rmw16XorU, wasm.OpcodeAtomicI64Rmw8XorU, wasm.OpcodeAtomicI64Rmw16XorU, wasm.OpcodeAtomicI64Rmw32XorU:
				 = ssa.AtomicRmwOpXor
				switch  {
				case wasm.OpcodeAtomicI64RmwXor:
					 = 8
				case wasm.OpcodeAtomicI32RmwXor, wasm.OpcodeAtomicI64Rmw32XorU:
					 = 4
				case wasm.OpcodeAtomicI32Rmw16XorU, wasm.OpcodeAtomicI64Rmw16XorU:
					 = 2
				case wasm.OpcodeAtomicI32Rmw8XorU, wasm.OpcodeAtomicI64Rmw8XorU:
					 = 1
				}
			case wasm.OpcodeAtomicI32RmwXchg, wasm.OpcodeAtomicI64RmwXchg, wasm.OpcodeAtomicI32Rmw8XchgU, wasm.OpcodeAtomicI32Rmw16XchgU, wasm.OpcodeAtomicI64Rmw8XchgU, wasm.OpcodeAtomicI64Rmw16XchgU, wasm.OpcodeAtomicI64Rmw32XchgU:
				 = ssa.AtomicRmwOpXchg
				switch  {
				case wasm.OpcodeAtomicI64RmwXchg:
					 = 8
				case wasm.OpcodeAtomicI32RmwXchg, wasm.OpcodeAtomicI64Rmw32XchgU:
					 = 4
				case wasm.OpcodeAtomicI32Rmw16XchgU, wasm.OpcodeAtomicI64Rmw16XchgU:
					 = 2
				case wasm.OpcodeAtomicI32Rmw8XchgU, wasm.OpcodeAtomicI64Rmw8XchgU:
					 = 1
				}
			}

			 := .atomicMemOpSetup(, uint64(), )
			 := .AllocateInstruction().AsAtomicRmw(, , , ).Insert().Return()
			.push()
		case wasm.OpcodeAtomicI32RmwCmpxchg, wasm.OpcodeAtomicI64RmwCmpxchg, wasm.OpcodeAtomicI32Rmw8CmpxchgU, wasm.OpcodeAtomicI32Rmw16CmpxchgU, wasm.OpcodeAtomicI64Rmw8CmpxchgU, wasm.OpcodeAtomicI64Rmw16CmpxchgU, wasm.OpcodeAtomicI64Rmw32CmpxchgU:
			,  := .readMemArg()
			if .unreachable {
				break
			}

			 := .pop()
			 := .pop()
			 := .pop()

			var  uint64
			switch  {
			case wasm.OpcodeAtomicI64RmwCmpxchg:
				 = 8
			case wasm.OpcodeAtomicI32RmwCmpxchg, wasm.OpcodeAtomicI64Rmw32CmpxchgU:
				 = 4
			case wasm.OpcodeAtomicI32Rmw16CmpxchgU, wasm.OpcodeAtomicI64Rmw16CmpxchgU:
				 = 2
			case wasm.OpcodeAtomicI32Rmw8CmpxchgU, wasm.OpcodeAtomicI64Rmw8CmpxchgU:
				 = 1
			}
			 := .atomicMemOpSetup(, uint64(), )
			 := .AllocateInstruction().AsAtomicCas(, , , ).Insert().Return()
			.push()
		case wasm.OpcodeAtomicFence:
			 := .readByte()
			if .unreachable {
				break
			}
			if .needMemory {
				.AllocateInstruction().AsFence().Insert()
			}
		default:
			panic("TODO: unsupported atomic instruction: " + wasm.AtomicInstructionName())
		}
	case wasm.OpcodeRefFunc:
		 := .readI32u()
		if .unreachable {
			break
		}

		.storeCallerModuleContext()

		 := .AllocateInstruction().AsIconst32().Insert().Return()

		 := .AllocateInstruction().
			AsLoad(.execCtxPtrValue,
				wazevoapi.ExecutionContextOffsetRefFuncTrampolineAddress.U32(),
				ssa.TypeI64,
			).Insert().Return()

		 := .allocateVarLengthValues(2, .execCtxPtrValue, )
		 := .
			AllocateInstruction().
			AsCallIndirect(, &.refFuncSig, ).
			Insert().Return()
		.push()

	case wasm.OpcodeRefNull:
		.loweringState.pc++ // Skip the reference type byte, as we treat both funcref and externref as i64(0).
		if .unreachable {
			break
		}
		 := .AllocateInstruction().AsIconst64(0).Insert().Return()
		.push()
	case wasm.OpcodeRefIsNull:
		if .unreachable {
			break
		}
		 := .pop()
		 := .AllocateInstruction().AsIconst64(0).Insert()
		 := .AllocateInstruction().
			AsIcmp(, .Return(), ssa.IntegerCmpCondEqual).
			Insert().
			Return()
		.push()
	case wasm.OpcodeTableSet:
		 := .readI32u()
		if .unreachable {
			break
		}
		 := .pop()
		 := .pop()

		 := .lowerAccessTableWithBoundsCheck(, )
		.AllocateInstruction().AsStore(ssa.OpcodeStore, , , 0).Insert()

	case wasm.OpcodeTableGet:
		 := .readI32u()
		if .unreachable {
			break
		}
		 := .pop()
		 := .lowerAccessTableWithBoundsCheck(, )
		 := .AllocateInstruction().AsLoad(, 0, ssa.TypeI64).Insert().Return()
		.push()
	default:
		panic("TODO: unsupported in wazevo yet: " + wasm.InstructionName())
	}

	if wazevoapi.FrontEndLoggingEnabled {
		fmt.Println("--------- Translated " + wasm.InstructionName() + " --------")
		fmt.Println("state: " + .loweringState.String())
		fmt.Println(.formatBuilder())
		fmt.Println("--------------------------")
	}
	.loweringState.pc++
}

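// lowerExtMul lowers the vector extended-multiplication opcodes (e.g. i16x8.extmul_low_i8x16_s):
// both operands are widened from the source lane shape to the destination lane shape (taking either
// the low or the high half), and the widened vectors are then multiplied lane-wise.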
func ( *Compiler) (,  ssa.Value, ,  ssa.VecLane, ,  bool) ssa.Value {
	// TODO: The sequence `Widen; Widen; VIMul` can be substituted for a single instruction on some ISAs.
	 := .ssaBuilder

	 := .AllocateInstruction().AsWiden(, , , ).Insert().Return()
	 := .AllocateInstruction().AsWiden(, , , ).Insert().Return()

	return .AllocateInstruction().AsVImul(, , ).Insert().Return()
}

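// Offsets into the runtime representation of a table instance: the base address of the
// wasm.TableInstance.References slice comes first, followed by its length (a slice header on
// 64-bit platforms stores the data pointer at offset 0 and the length at offset 8).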
const (
	tableInstanceBaseAddressOffset = 0
	tableInstanceLenOffset         = tableInstanceBaseAddressOffset + 8
)

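// lowerAccessTableWithBoundsCheck loads the table instance for the given table index, traps with
// ExitCodeTableOutOfBounds unless the target index is smaller than the table length, and returns
// the address of the target entry inside the table's References slice (base + index*8).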
func ( *Compiler) ( uint32,  ssa.Value) ( ssa.Value) {
	 := .ssaBuilder

	// Load the table.
	 := .AllocateInstruction()
	.AsLoad(.moduleCtxPtrValue, .offset.TableOffset(int()).U32(), ssa.TypeI64)
	.InsertInstruction()
	 := .Return()

	// Load the table's length.
	 := .AllocateInstruction()
	.AsLoad(, tableInstanceLenOffset, ssa.TypeI32)
	.InsertInstruction()
	 := .Return()

	// Compare the length and the target, and trap if out of bounds.
	 := .AllocateInstruction()
	.AsIcmp(, , ssa.IntegerCmpCondUnsignedGreaterThanOrEqual)
	.InsertInstruction()
	 := .AllocateInstruction()
	.AsExitIfTrueWithCode(.execCtxPtrValue, .Return(), wazevoapi.ExitCodeTableOutOfBounds)
	.InsertInstruction()

	// Get the base address of wasm.TableInstance.References.
	 := .AllocateInstruction()
	.AsLoad(, tableInstanceBaseAddressOffset, ssa.TypeI64)
	.InsertInstruction()
	 := .Return()

	// Calculate the address of the target function. First we need to multiply targetOffsetInTable by 8 (pointer size).
	 := .AllocateInstruction()
	 := .AllocateInstruction()
	.AsIconst64(3)
	.InsertInstruction()
	.AsIshl(, .Return())
	.InsertInstruction()
	 := .Return()

	// Then, add the multiplied value to the base, which results in the address of the target function (*wazevo.functionInstance).
	 := .AllocateInstruction()
	.AsIadd(, )
	.InsertInstruction()
	return .Return()
}

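// Lowers call_indirect for the given type index and table index: the target entry is looked up via
// lowerAccessTableWithBoundsCheck, checked against the null pointer and against the expected type ID,
// and then invoked through an indirect call using the executable address and the opaque module
// context pointer stored in the function instance.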
func ( *Compiler) (,  uint32) {
	 := .ssaBuilder
	 := .state()

	 := .pop()
	 := .lowerAccessTableWithBoundsCheck(, )
	 := .AllocateInstruction()
	.AsLoad(, 0, ssa.TypeI64)
	.InsertInstruction()
	 := .Return()

	// Trap if the loaded function instance pointer is the null pointer.
	 := .AllocateInstruction()
	.AsIconst64(0)
	.InsertInstruction()
	 := .AllocateInstruction()
	.AsIcmp(, .Return(), ssa.IntegerCmpCondEqual)
	.InsertInstruction()
	 := .AllocateInstruction()
	.AsExitIfTrueWithCode(.execCtxPtrValue, .Return(), wazevoapi.ExitCodeIndirectCallNullPointer)
	.InsertInstruction()

	// We need to do the type check. First, load the target function instance's typeID.
	 := .AllocateInstruction()
	.AsLoad(, wazevoapi.FunctionInstanceTypeIDOffset, ssa.TypeI32)
	.InsertInstruction()
	 := .Return()

	// Next, we load the expected TypeID:
	 := .AllocateInstruction()
	.AsLoad(.moduleCtxPtrValue, .offset.TypeIDs1stElement.U32(), ssa.TypeI64)
	.InsertInstruction()
	 := .Return()

	 := .AllocateInstruction()
	.AsLoad(, uint32()*4 /* size of wasm.FunctionTypeID */, ssa.TypeI32)
	.InsertInstruction()
	 := .Return()

	// Check if the type ID matches.
	 := .AllocateInstruction()
	.AsIcmp(, , ssa.IntegerCmpCondNotEqual)
	.InsertInstruction()
	 := .AllocateInstruction()
	.AsExitIfTrueWithCode(.execCtxPtrValue, .Return(), wazevoapi.ExitCodeIndirectCallTypeMismatch)
	.InsertInstruction()

	// Now ready to call the function. Load the executable and moduleContextOpaquePtr from the function instance.
	 := .AllocateInstruction()
	.AsLoad(, wazevoapi.FunctionInstanceExecutableOffset, ssa.TypeI64)
	.InsertInstruction()
	 := .Return()
	 := .AllocateInstruction()
	.AsLoad(, wazevoapi.FunctionInstanceModuleContextOpaquePtrOffset, ssa.TypeI64)
	.InsertInstruction()
	 := .Return()

	 := &.m.TypeSection[]
	 := len(.values) - len(.Params)
	 := .values[:]
	.values = .values[:]
	 := .allocateVarLengthValues(2+len(), .execCtxPtrValue, )
	 = .Append(.VarLengthPool(), ...)

	// Before transferring control to the callee, we have to store the current module's moduleContextPtr
	// into execContext.callerModuleContextPtr in case the callee is a Go function.
	.storeCallerModuleContext()

	 := .AllocateInstruction()
	.AsCallIndirect(, .signatures[], )
	.InsertInstruction()

	,  := .Returns()
	if .Valid() {
		.push()
	}
	for ,  := range  {
		.push()
	}

	.reloadAfterCall()
}

// memOpSetup inserts the bounds check and calculates the address of the memory operation (loads/stores).
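// The bounds check caches a "known safe bound" per base address so that subsequent accesses with a
// smaller or equal ceil in the same block can skip the check and reuse the computed absolute address.
// Roughly (a sketch in terms of the SSA opcodes emitted below), for ceil = offset + operationSizeInBytes:
//
//	extBaseAddr      = UExtend(baseAddr, 32->64)
//	baseAddrPlusCeil = Iadd(extBaseAddr, Iconst64(ceil))
//	ExitIfTrue(Icmp(memLen, baseAddrPlusCeil, ult), ExitCodeMemoryOutOfBounds)
//	addr             = Iadd(memBase, extBaseAddr)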
func ( *Compiler) ( ssa.Value, ,  uint64) ( ssa.Value) {
	 = ssa.ValueInvalid
	 := .ssaBuilder

	 := .ID()
	 :=  + 
	if  := .getKnownSafeBound(); .valid() {
		// We reuse the calculated absolute address even if the bound is not known to be safe.
		 = .absoluteAddr
		if  <= .bound {
			if !.Valid() {
				// The bound is known to be safe, but the memory base might have changed,
				// so we re-calculate the address.
				 := .getMemoryBaseValue(false)
				 := .AllocateInstruction().
					AsUExtend(, 32, 64).
					Insert().
					Return()
				 = .AllocateInstruction().
					AsIadd(, ).Insert().Return()
				.absoluteAddr =  // Update the absolute address for the subsequent memory access.
			}
			return
		}
	}

	 := .AllocateInstruction()
	.AsIconst64()
	.InsertInstruction()

	// We calculate the offset in 64-bit space.
	 := .AllocateInstruction().
		AsUExtend(, 32, 64).
		Insert().
		Return()

	// Note: memLen is already zero-extended to 64-bit space at load time.
	 := .getMemoryLenValue(false)

	// baseAddrPlusCeil = baseAddr + ceil
	 := .AllocateInstruction()
	.AsIadd(, .Return())
	.InsertInstruction()

	// Check for out-of-bounds memory access: trap unless `memLen >= baseAddrPlusCeil`.
	 := .AllocateInstruction()
	.AsIcmp(, .Return(), ssa.IntegerCmpCondUnsignedLessThan)
	.InsertInstruction()
	 := .AllocateInstruction()
	.AsExitIfTrueWithCode(.execCtxPtrValue, .Return(), wazevoapi.ExitCodeMemoryOutOfBounds)
	.InsertInstruction()

	// Compute the absolute address memBase + extBaseAddr, unless it was already calculated
	// (and reused) from a previously recorded safe bound above.
	if  == ssa.ValueInvalid {
		 := .getMemoryBaseValue(false)
		 = .AllocateInstruction().
			AsIadd(, ).Insert().Return()
	}

	// Record that the bound ceil is known to be safe for this baseAddr, so subsequent memory accesses in the same block can skip the check.
	.recordKnownSafeBound(, , )
	return
}

// atomicMemOpSetup inserts the bounds check and calculates the address of the atomic memory operation
// (loads/stores), adds the constant offset, and performs an alignment check on the final address.
func ( *Compiler) ( ssa.Value, ,  uint64) ( ssa.Value) {
	 := .ssaBuilder

	 := .memOpSetup(, , )
	var  ssa.Value
	if  == 0 {
		 = 
	} else {
		 := .AllocateInstruction().AsIconst64().Insert().Return()
		 = .AllocateInstruction().AsIadd(, ).Insert().Return()
	}

	.memAlignmentCheck(, )

	return 
}

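// memAlignmentCheck traps with ExitCodeUnalignedAtomic when the given address is not aligned to the
// access size. Byte-sized accesses need no check; otherwise the low bits are masked and compared to zero.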
func ( *Compiler) ( ssa.Value,  uint64) {
	if  == 1 {
		return // No alignment restrictions when accessing a byte
	}
	var  uint64
	switch  {
	case 2:
		 = 0b1
	case 4:
		 = 0b11
	case 8:
		 = 0b111
	}

	 := .ssaBuilder

	 := .AllocateInstruction().AsIconst64().Insert().Return()
	 := .AllocateInstruction().AsBand(, ).Insert().Return()
	 := .AllocateInstruction().AsIconst64(0).Insert().Return()
	 := .AllocateInstruction().AsIcmp(, , ssa.IntegerCmpCondNotEqual).Insert().Return()
	.AllocateInstruction().AsExitIfTrueWithCode(.execCtxPtrValue, , wazevoapi.ExitCodeUnalignedAtomic).Insert()
}

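// Emits a call to the Go runtime memmove trampoline stored in the execution context, passing
// (dst, src, size); the size operand must be an i64.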
func ( *Compiler) (, ,  ssa.Value) {
	 := .allocateVarLengthValues(3, , , )
	if .Type() != ssa.TypeI64 {
		panic("TODO: memmove size must be i64")
	}

	 := .ssaBuilder
	 := .AllocateInstruction().
		AsLoad(.execCtxPtrValue,
			wazevoapi.ExecutionContextOffsetMemmoveAddress.U32(),
			ssa.TypeI64,
		).Insert().Return()
	.AllocateInstruction().AsCallGoRuntimeMemmove(, &.memmoveSig, ).Insert()
}

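// reloadAfterCall re-establishes the cached memory base/length and the values of mutable globals
// after any call, since the callee may have grown the memory or mutated the globals.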
func ( *Compiler) () {
	// Note that when these are not used by the following instructions, they will be optimized out,
	// so we can safely (re)define them unconditionally.

	// After calling any function, the memory buffer might have changed, so we need to re-define the variables.
	// However, if the memory is shared, we don't need to reload the memory base and length, as the base never changes.
	if .needMemory && !.memoryShared {
		.reloadMemoryBaseLen()
	}

	// Also, any mutable Global can change.
	for ,  := range .mutableGlobalVariablesIndexes {
		_ = .getWasmGlobalValue(, true)
	}
}

func ( *Compiler) () {
	_ = .getMemoryBaseValue(true)
	_ = .getMemoryLenValue(true)

	// This function being called means that the memory base might have changed.
	// Therefore, we need to clear the absolute addresses recorded in the known safe bounds
	// because we cache the absolute address of the memory access per base offset.
	.resetAbsoluteAddressInSafeBounds()
}

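// Stores the given value into the Wasm global at the given index: imported globals are written through
// the pointer loaded from the module context, while local globals live directly in the module context.
// The new value is also recorded as the current definition of the corresponding SSA variable.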
func ( *Compiler) ( wasm.Index,  ssa.Value) {
	 := .globalVariables[]
	 := .offset.GlobalInstanceOffset()

	 := .ssaBuilder
	if  < .m.ImportGlobalCount {
		 := .AllocateInstruction()
		.AsLoad(.moduleCtxPtrValue, uint32(), ssa.TypeI64)
		.InsertInstruction()

		 := .AllocateInstruction()
		.AsStore(ssa.OpcodeStore, , .Return(), uint32(0))
		.InsertInstruction()

	} else {
		 := .AllocateInstruction()
		.AsStore(ssa.OpcodeStore, , .moduleCtxPtrValue, uint32())
		.InsertInstruction()
	}

	// The global's value has changed, so record the new value as the current definition of the variable.
	.DefineVariableInCurrentBB(, )
}

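// getWasmGlobalValue returns the current value of the Wasm global at the given index. Unless forced
// to reload, it reuses the SSA value already defined on the linear path; otherwise it loads the value
// from the module context (indirectly for imported globals) and records it as the new definition.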
func ( *Compiler) ( wasm.Index,  bool) ssa.Value {
	 := .globalVariables[]
	 := .globalVariablesTypes[]
	 := .offset.GlobalInstanceOffset()

	 := .ssaBuilder
	if ! {
		if  := .FindValueInLinearPath(); .Valid() {
			return 
		}
	}

	var  *ssa.Instruction
	if  < .m.ImportGlobalCount {
		 := .AllocateInstruction()
		.AsLoad(.moduleCtxPtrValue, uint32(), ssa.TypeI64)
		.InsertInstruction()
		 = .AllocateInstruction().
			AsLoad(.Return(), uint32(0), )
	} else {
		 = .AllocateInstruction().
			AsLoad(.moduleCtxPtrValue, uint32(), )
	}

	 := .Insert().Return()
	.DefineVariableInCurrentBB(, )
	return 
}

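// Offsets into the runtime representation of a memory instance: the buffer's data pointer comes
// first, followed by its length (again a 64-bit slice header layout).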
const (
	memoryInstanceBufOffset     = 0
	memoryInstanceBufSizeOffset = memoryInstanceBufOffset + 8
)

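// getMemoryBaseValue returns the base address of the memory buffer, either reusing the value already
// defined on the linear path or reloading it from the module context (via the imported memory
// instance pointer when the memory is not local to this module).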
func ( *Compiler) ( bool) ssa.Value {
	 := .ssaBuilder
	 := .memoryBaseVariable
	if ! {
		if  := .FindValueInLinearPath(); .Valid() {
			return 
		}
	}

	var  ssa.Value
	if .offset.LocalMemoryBegin < 0 {
		 := .AllocateInstruction()
		.AsLoad(.moduleCtxPtrValue, .offset.ImportedMemoryBegin.U32(), ssa.TypeI64)
		.InsertInstruction()
		 := .Return()

		 := .AllocateInstruction()
		.AsLoad(, memoryInstanceBufOffset, ssa.TypeI64)
		.InsertInstruction()
		 = .Return()
	} else {
		 := .AllocateInstruction()
		.AsLoad(.moduleCtxPtrValue, .offset.LocalMemoryBase().U32(), ssa.TypeI64)
		.InsertInstruction()
		 = .Return()
	}

	.DefineVariableInCurrentBB(, )
	return 
}

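// getMemoryLenValue returns the current length of the memory buffer in bytes. For shared memories the
// length is always (re)loaded with an atomic load, since another thread may grow the memory concurrently.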
func ( *Compiler) ( bool) ssa.Value {
	 := .memoryLenVariable
	 := .ssaBuilder
	if ! && !.memoryShared {
		if  := .FindValueInLinearPath(); .Valid() {
			return 
		}
	}

	var  ssa.Value
	if .offset.LocalMemoryBegin < 0 {
		 := .AllocateInstruction()
		.AsLoad(.moduleCtxPtrValue, .offset.ImportedMemoryBegin.U32(), ssa.TypeI64)
		.InsertInstruction()
		 := .Return()

		 := .AllocateInstruction()
		if .memoryShared {
			 := .AllocateInstruction().AsIconst64(memoryInstanceBufSizeOffset).Insert().Return()
			 := .AllocateInstruction().AsIadd(, ).Insert().Return()
			.AsAtomicLoad(, 8, ssa.TypeI64)
		} else {
			.AsLoad(, memoryInstanceBufSizeOffset, ssa.TypeI64)
		}
		.InsertInstruction()

		 = .Return()
	} else {
		 := .AllocateInstruction()
		if .memoryShared {
			 := .AllocateInstruction().AsIconst64(.offset.LocalMemoryLen().U64()).Insert().Return()
			 := .AllocateInstruction().AsIadd(.moduleCtxPtrValue, ).Insert().Return()
			.AsAtomicLoad(, 8, ssa.TypeI64)
		} else {
			.AsExtLoad(ssa.OpcodeUload32, .moduleCtxPtrValue, .offset.LocalMemoryLen().U32(), true)
		}
		.InsertInstruction()
		 = .Return()
	}

	.DefineVariableInCurrentBB(, )
	return 
}

func ( *Compiler) ( ssa.IntegerCmpCond) {
	,  := .state(), .ssaBuilder
	,  := .pop(), .pop()
	 := .AllocateInstruction()
	.AsIcmp(, , )
	.InsertInstruction()
	 := .Return()
	.push()
}

func ( *Compiler) ( ssa.FloatCmpCond) {
	,  := .state(), .ssaBuilder
	,  := .pop(), .pop()
	 := .AllocateInstruction()
	.AsFcmp(, , )
	.InsertInstruction()
	 := .Return()
	.push()
}

// storeCallerModuleContext stores the current module's moduleContextPtr into execContext.callerModuleContextPtr.
func ( *Compiler) () {
	 := .ssaBuilder
	 := .execCtxPtrValue
	 := .AllocateInstruction()
	.AsStore(ssa.OpcodeStore,
		.moduleCtxPtrValue, , wazevoapi.ExecutionContextOffsetCallerModuleContextPtr.U32())
	.InsertInstruction()
}

func ( *Compiler) () byte {
	 := .wasmFunctionBody[.loweringState.pc+1]
	.loweringState.pc++
	return 
}

func ( *Compiler) () uint32 {
	, ,  := leb128.LoadUint32(.wasmFunctionBody[.loweringState.pc+1:])
	if  != nil {
		panic() // shouldn't be reached since compilation comes after validation.
	}
	.loweringState.pc += int()
	return 
}

func ( *Compiler) () int32 {
	, ,  := leb128.LoadInt32(.wasmFunctionBody[.loweringState.pc+1:])
	if  != nil {
		panic() // shouldn't be reached since compilation comes after validation.
	}
	.loweringState.pc += int()
	return 
}

func ( *Compiler) () int64 {
	, ,  := leb128.LoadInt64(.wasmFunctionBody[.loweringState.pc+1:])
	if  != nil {
		panic() // shouldn't be reached since compilation comes after validation.
	}
	.loweringState.pc += int()
	return 
}

func ( *Compiler) () float32 {
	 := math.Float32frombits(binary.LittleEndian.Uint32(.wasmFunctionBody[.loweringState.pc+1:]))
	.loweringState.pc += 4
	return 
}

func ( *Compiler) () float64 {
	 := math.Float64frombits(binary.LittleEndian.Uint64(.wasmFunctionBody[.loweringState.pc+1:]))
	.loweringState.pc += 8
	return 
}

// readBlockType reads the block type from the current position of the bytecode reader.
func ( *Compiler) () *wasm.FunctionType {
	 := .state()

	.br.Reset(.wasmFunctionBody[.pc+1:])
	, ,  := wasm.DecodeBlockType(.m.TypeSection, .br, api.CoreFeaturesV2)
	if  != nil {
		panic() // shouldn't be reached since compilation comes after validation.
	}
	.pc += int()

	return 
}

func ( *Compiler) () (,  uint32) {
	 := .state()

	, ,  := leb128.LoadUint32(.wasmFunctionBody[.pc+1:])
	if  != nil {
		panic(fmt.Errorf("read memory align: %v", ))
	}

	.pc += int()
	, ,  = leb128.LoadUint32(.wasmFunctionBody[.pc+1:])
	if  != nil {
		panic(fmt.Errorf("read memory offset: %v", ))
	}

	.pc += int()
	return , 
}

// insertJumpToBlock inserts a jump instruction to the given block in the current block.
func ( *Compiler) ( ssa.Values,  ssa.BasicBlock) {
	if .ReturnBlock() {
		if .needListener {
			.callListenerAfter()
		}
	}

	 := .ssaBuilder
	 := .AllocateInstruction()
	.AsJump(, )
	.InsertInstruction()
}

func ( *Compiler) ( bool, ,  byte) {
	 := .state()
	 := .ssaBuilder
	 := .pop()
	 := .AllocateInstruction()
	if  {
		.AsSExtend(, , )
	} else {
		.AsUExtend(, , )
	}
	.InsertInstruction()
	 := .Return()
	.push()
}

func ( *Compiler) ( int,  ssa.BasicBlock) {
	if .Preds() == 0 {
		.loweringState.unreachable = true
	}

	// Now we should adjust the stack and start translating the continuation block.
	.loweringState.values = .loweringState.values[:]

	.ssaBuilder.SetCurrentBlock()

	// At this point, the block's params consist only of the Wasm-level parameters
	// (extra params are added only when we try to resolve a variable *inside* this block).
	for  := 0;  < .Params(); ++ {
		 := .Param()
		.loweringState.push()
	}
}

// results returns the number of results of the current function.
func ( *Compiler) () int {
	return len(.wasmFunctionTyp.Results)
}

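// Lowers br_table for the given labels, with the index value already popped by the caller: each label
// gets its own trampoline block that performs the per-target argument moves, and the br_table
// instruction itself only targets those trampolines.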
func ( *Compiler) ( []uint32,  ssa.Value) {
	 := .state()
	 := .ssaBuilder

	 := .ctrlPeekAt(int([0]))
	var  int
	if .isLoop() {
		 = len(.blockType.Params)
	} else {
		 = len(.blockType.Results)
	}

	 := .VarLengthPool()
	 := .Allocate(len())

	// We need trampoline blocks since, depending on the target block structure, we might end up inserting moves before jumps,
	// which cannot be done with br_table. Instead, we can do such per-target moves in the trampoline blocks.
	// At the linking phase (the very end of the backend), the unnecessary jumps are removed, so there is no runtime overhead.
	 := .CurrentBlock()
	for ,  := range  {
		// Args are always on the top of the stack. Note that we should not share the args slice
		// among the jump instructions since the args are modified during passes (e.g. redundant phi elimination).
		 := .nPeekDup()
		,  := .brTargetArgNumFor()
		 := .AllocateBasicBlock()
		.SetCurrentBlock()
		.insertJumpToBlock(, )
		 = .Append(.VarLengthPool(), ssa.Value(.ID()))
	}
	.SetCurrentBlock()

	// Emit the br_table itself; it only targets the trampoline blocks allocated above.
	 := .AllocateInstruction()
	.AsBrTable(, )
	.InsertInstruction()

	for ,  := range .View() {
		.Seal(.BasicBlock(ssa.BasicBlockID()))
	}
}

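// brTargetArgNumFor returns the branch target block and the number of branch arguments for the given
// relative label: the loop header and its parameter count for a loop frame, otherwise the following
// block and its result count.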
func ( *loweringState) ( uint32) ( ssa.BasicBlock,  int) {
	 := .ctrlPeekAt(int())
	if .isLoop() {
		,  = .blk, len(.blockType.Params)
	} else {
		,  = .followingBlock, len(.blockType.Results)
	}
	return
}

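// Emits the call to the "before" listener trampoline for this function: the trampoline address is
// loaded from the module context, and the call receives the execution context, the function index,
// and the function's parameters taken from the entry block (skipping the first two special params).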
func ( *Compiler) () {
	.storeCallerModuleContext()

	 := .ssaBuilder
	 := .AllocateInstruction().
		AsLoad(.moduleCtxPtrValue,
			.offset.BeforeListenerTrampolines1stElement.U32(),
			ssa.TypeI64,
		).Insert().Return()

	 := .AllocateInstruction().
		AsLoad(, uint32(.wasmFunctionTypeIndex)*8 /* 8 bytes per index */, ssa.TypeI64).Insert().Return()

	 := .EntryBlock()
	 := .Params()

	 := .allocateVarLengthValues(, .execCtxPtrValue,
		.AllocateInstruction().AsIconst32(.wasmLocalFunctionIndex).Insert().Return())
	for  := 2;  < ; ++ {
		 = .Append(.VarLengthPool(), .Param())
	}

	 := .listenerSignatures[.wasmFunctionTyp][0]
	.AllocateInstruction().
		AsCallIndirect(, , ).
		Insert()
}

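// callListenerAfter emits the call to the "after" listener trampoline, passing the execution context,
// the function index, and the function's results taken from the top of the value stack.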
func ( *Compiler) () {
	.storeCallerModuleContext()

	 := .ssaBuilder
	 := .AllocateInstruction().
		AsLoad(.moduleCtxPtrValue,
			.offset.AfterListenerTrampolines1stElement.U32(),
			ssa.TypeI64,
		).Insert().Return()

	 := .AllocateInstruction().
		AsLoad(,
			uint32(.wasmFunctionTypeIndex)*8 /* 8 bytes per index */, ssa.TypeI64).
		Insert().
		Return()

	 := .listenerSignatures[.wasmFunctionTyp][1]
	 := .allocateVarLengthValues(
		.results()+2,
		.execCtxPtrValue,
		.AllocateInstruction().AsIconst32(.wasmLocalFunctionIndex).Insert().Return(),
	)

	 := .state()
	 := len(.values)
	 = .Append(.ssaBuilder.VarLengthPool(), .values[-.results():]...)
	.AllocateInstruction().
		AsCallIndirect(, , ).
		Insert()
}

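// Each data/element instance is stored as a slice header (24 bytes on 64-bit platforms), with its
// length at offset 8 within the instance.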
const (
	elementOrDataInstanceLenOffset = 8
	elementOrDataInstanceSize      = 24
)

// dropInstance inserts instructions to drop the element/data instance specified by the given index.
func ( *Compiler) ( uint32,  wazevoapi.Offset) {
	 := .ssaBuilder
	 := .dataOrElementInstanceAddr(, )

	 := .AllocateInstruction().AsIconst64(0).Insert().Return()

	// Clear the instance.
	.AllocateInstruction().AsStore(ssa.OpcodeStore, , , 0).Insert()
	.AllocateInstruction().AsStore(ssa.OpcodeStore, , , elementOrDataInstanceLenOffset).Insert()
	.AllocateInstruction().AsStore(ssa.OpcodeStore, , , elementOrDataInstanceLenOffset+8).Insert()
}

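// dataOrElementInstanceAddr returns the address of the index-th data/element instance, computed as the
// base of the instances array (loaded from the module context) plus index * elementOrDataInstanceSize.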
func ( *Compiler) ( uint32,  wazevoapi.Offset) ssa.Value {
	 := .ssaBuilder

	 := .
		AllocateInstruction().
		AsLoad(.moduleCtxPtrValue, .U32(), ssa.TypeI64).
		Insert().Return()

	// Each data/element instance is a slice header, so we multiply index by elementOrDataInstanceSize (24 bytes) to get the offset of the target instance.
	 =  * elementOrDataInstanceSize
	 := .AllocateInstruction().AsIconst64(uint64()).Insert().Return()
	// Then, add the offset to the address of the instance.
	 := .AllocateInstruction().AsIadd(, ).Insert().Return()
	return 
}

func ( *Compiler) (, ,  ssa.Value,  wazevoapi.ExitCode) {
	 := .ssaBuilder
	 := .AllocateInstruction().
		AsLoad(, elementOrDataInstanceLenOffset, ssa.TypeI64).
		Insert().Return()
	 := .AllocateInstruction().AsIadd(, ).Insert().Return()
	 := .AllocateInstruction().
		AsIcmp(, , ssa.IntegerCmpCondUnsignedLessThan).
		Insert().
		Return()
	.AllocateInstruction().
		AsExitIfTrueWithCode(.execCtxPtrValue, , ).
		Insert()
}

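// Traps with ExitCodeTableOutOfBounds unless offset+count fits within the length of the given table,
// and returns the loaded table instance pointer for further use by the caller (presumably the
// table.init/copy lowering).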
func ( *Compiler) ( uint32, ,  ssa.Value) ( ssa.Value) {
	 := .ssaBuilder
	 := .AllocateInstruction().AsIadd(, ).Insert().Return()

	// Load the table.
	 = .AllocateInstruction().
		AsLoad(.moduleCtxPtrValue, .offset.TableOffset(int()).U32(), ssa.TypeI64).
		Insert().Return()

	// Load the table's length.
	 := .AllocateInstruction().
		AsLoad(, tableInstanceLenOffset, ssa.TypeI32).Insert().Return()
	 := .AllocateInstruction().AsUExtend(, 32, 64).Insert().Return()

	// Compare the length and the target, and trap if out of bounds.
	 := .AllocateInstruction()
	.AsIcmp(, , ssa.IntegerCmpCondUnsignedLessThan)
	.InsertInstruction()
	 := .AllocateInstruction()
	.AsExitIfTrueWithCode(.execCtxPtrValue, .Return(), wazevoapi.ExitCodeTableOutOfBounds)
	.InsertInstruction()
	return
}

func ( *Compiler) ( ssa.Value) ssa.Value {
	 := .ssaBuilder
	 := .
		AllocateInstruction().
		AsLoad(, tableInstanceBaseAddressOffset, ssa.TypeI64).
		Insert()
	return .Return()
}

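// Traps with ExitCodeMemoryOutOfBounds unless offset+count fits within the given memory length;
// presumably used by the bulk memory operations (memory.init/copy/fill).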
func ( *Compiler) (, ,  ssa.Value) {
	 := .ssaBuilder
	 := .AllocateInstruction().AsIadd(, ).Insert().Return()
	 := .AllocateInstruction().
		AsIcmp(, , ssa.IntegerCmpCondUnsignedLessThan).
		Insert().
		Return()
	.AllocateInstruction().
		AsExitIfTrueWithCode(.execCtxPtrValue, , wazevoapi.ExitCodeMemoryOutOfBounds).
		Insert()
}