package interpreter

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
	"strings"

	"github.com/tetratelabs/wazero/api"
	"github.com/tetratelabs/wazero/internal/leb128"
	"github.com/tetratelabs/wazero/internal/wasm"
)

// controlFrameKind classifies the control frames pushed while lowering the
// structured control instructions (block/loop/if) and the function body itself.
type controlFrameKind byte

const (
	// controlFrameKindBlockWithContinuationLabel is a block frame whose continuation
	// label is (or may be) branched to, so the continuation label must be emitted
	// when the End instruction of this frame is reached.
	controlFrameKindBlockWithContinuationLabel controlFrameKind = iota
	// controlFrameKindBlockWithoutContinuationLabel is a block frame with no branch
	// targeting its continuation so far; it can be upgraded to the kind above.
	controlFrameKindBlockWithoutContinuationLabel
	// controlFrameKindFunction is the outermost frame representing the function body.
	controlFrameKindFunction
	// controlFrameKindLoop is a frame created by the loop instruction.
	controlFrameKindLoop
	// controlFrameKindIfWithElse is an if frame that has a corresponding else block.
	controlFrameKindIfWithElse
	// controlFrameKindIfWithoutElse is an if frame without an else block (so far).
	controlFrameKindIfWithoutElse
)

type (
	// controlFrame records the compile-time state for a single structured control
	// construct (function body, block, loop or if) while lowering Wasm to interpreterir.
	controlFrame struct {
		// frameID uniquely identifies this frame and is used to derive its labels.
		frameID uint32
		// originalStackLenWithoutParam holds the number of values on the stack
		// when we start executing this control frame minus params for the block.
		originalStackLenWithoutParam int
		// originalStackLenWithoutParamUint64 is almost the same as originalStackLenWithoutParam
		// except that it holds the number of values on the stack in uint64.
		originalStackLenWithoutParamUint64 int
		// blockType is the function type describing this frame's params and results.
		blockType                          *wasm.FunctionType
		// kind tells how to emit labels/continuations when this frame ends.
		kind                               controlFrameKind
	}
	// controlFrames is a LIFO stack of controlFrame.
	controlFrames struct{ frames []controlFrame }
)

func ( *controlFrame) () {
	// Make sure that if the frame is block and doesn't have continuation,
	// change the Kind so we can emit the continuation block
	// later when we reach the End instruction of this frame.
	if .kind == controlFrameKindBlockWithoutContinuationLabel {
		.kind = controlFrameKindBlockWithContinuationLabel
	}
}

func ( *controlFrame) () label {
	switch .kind {
	case controlFrameKindBlockWithContinuationLabel,
		controlFrameKindBlockWithoutContinuationLabel:
		return newLabel(labelKindContinuation, .frameID)
	case controlFrameKindLoop:
		return newLabel(labelKindHeader, .frameID)
	case controlFrameKindFunction:
		return newLabel(labelKindReturn, 0)
	case controlFrameKindIfWithElse,
		controlFrameKindIfWithoutElse:
		return newLabel(labelKindContinuation, .frameID)
	}
	panic(fmt.Sprintf("unreachable: a bug in interpreterir implementation: %v", .kind))
}

func ( *controlFrames) () *controlFrame {
	// No need to check stack bound
	// as we can assume that all the operations
	// are valid thanks to validateFunction
	// at module validation phase.
	return &.frames[0]
}

func ( *controlFrames) ( int) *controlFrame {
	// No need to check stack bound
	// as we can assume that all the operations
	// are valid thanks to validateFunction
	// at module validation phase.
	return &.frames[len(.frames)--1]
}

func ( *controlFrames) () *controlFrame {
	// No need to check stack bound
	// as we can assume that all the operations
	// are valid thanks to validateFunction
	// at module validation phase.
	return &.frames[len(.frames)-1]
}

func ( *controlFrames) () bool {
	return len(.frames) == 0
}

func ( *controlFrames) () ( *controlFrame) {
	// No need to check stack bound
	// as we can assume that all the operations
	// are valid thanks to validateFunction
	// at module validation phase.
	 = .top()
	.frames = .frames[:len(.frames)-1]
	return
}

func ( *controlFrames) ( controlFrame) {
	.frames = append(.frames, )
}

func ( *compiler) () {
	// Reuse the existing slice.
	.localIndexToStackHeightInUint64 = .localIndexToStackHeightInUint64[:0]
	var  int
	for ,  := range .sig.Params {
		.localIndexToStackHeightInUint64 = append(.localIndexToStackHeightInUint64, )
		if  == wasm.ValueTypeV128 {
			++
		}
		++
	}

	if .callFrameStackSizeInUint64 > 0 {
		// We reserve the stack slots for result values below the return call frame slots.
		if  := .sig.ResultNumInUint64 - .sig.ParamNumInUint64;  > 0 {
			 += 
		}
	}

	// Non-func param locals Start after the return call frame.
	 += .callFrameStackSizeInUint64

	for ,  := range .localTypes {
		.localIndexToStackHeightInUint64 = append(.localIndexToStackHeightInUint64, )
		if  == wasm.ValueTypeV128 {
			++
		}
		++
	}

	// Push function arguments.
	for ,  := range .sig.Params {
		.stackPush(wasmValueTypeTounsignedType())
	}

	if .callFrameStackSizeInUint64 > 0 {
		// Reserve the stack slots for results.
		for  := 0;  < .sig.ResultNumInUint64-.sig.ParamNumInUint64; ++ {
			.stackPush(unsignedTypeI64)
		}

		// Reserve the stack slots for call frame.
		for  := 0;  < .callFrameStackSizeInUint64; ++ {
			.stackPush(unsignedTypeI64)
		}
	}
}

// compiler is in charge of lowering raw Wasm function body to get compilationResult.
// This is created per *wasm.Module and reused for all functions in it to reduce memory allocations.
type compiler struct {
	// module is the module containing the function currently being compiled.
	module                     *wasm.Module
	// enabledFeatures is the set of enabled WebAssembly core features.
	enabledFeatures            api.CoreFeatures
	// callFrameStackSizeInUint64 is the number of uint64 slots reserved for a call frame.
	callFrameStackSizeInUint64 int
	// stack tracks the types of the values currently on the operand stack.
	stack                      []unsignedType
	// stackLenInUint64 is the length of the stack in uint64.
	stackLenInUint64 int
	// currentFrameID is the last frame ID handed out via the frame-ID counter.
	currentFrameID   uint32
	// controlFrames is the stack of open structured-control frames.
	controlFrames    controlFrames
	// unreachableState tracks whether we are in stack-polymorphic (unreachable)
	// code and how deeply nested the skipped blocks are.
	unreachableState struct {
		on    bool
		depth int
	}
	// pc is the read offset into body; currentOpPC is the pc of the opcode being handled.
	pc, currentOpPC uint64
	// result accumulates the compilation output for the current function.
	result          compilationResult

	// body holds the code for the function's body where Wasm instructions are stored.
	body []byte
	// sig is the function type of the target function.
	sig *wasm.FunctionType
	// localTypes holds the target function locals' value types except function params.
	localTypes []wasm.ValueType
	// localIndexToStackHeightInUint64 maps the local index (starting with function params) to the stack height
	// where the local is placed. This is the necessary mapping for functions who contain vector type locals.
	localIndexToStackHeightInUint64 []int

	// types hold all the function types in the module where the target function exists.
	types []wasm.FunctionType
	// funcs holds the type indexes for all declared functions in the module where the target function exists.
	funcs []uint32
	// globals holds the global types for all declared globals in the module where the target function exists.
	globals []wasm.GlobalType

	// needSourceOffset is true if this module requires DWARF based stack trace.
	needSourceOffset bool
	// bodyOffsetInCodeSection is the offset of the body of this function in the original Wasm binary's code section.
	bodyOffsetInCodeSection uint64

	// ensureTermination is true if exit-code checks are inserted on loop headers.
	ensureTermination bool
	// Pre-allocated bytes.Reader to be used in various places.
	br             *bytes.Reader
	funcTypeToSigs funcTypeToIRSignatures

	// next is the index of the next function to compile (see Next).
	next int
}

//lint:ignore U1000 for debugging only.
func ( *compiler) () string {
	 := make([]string, 0, len(.stack))
	for ,  := range .stack {
		 = append(, .String())
	}
	return "[" + strings.Join(, ", ") + "]"
}

func ( *compiler) () {
	.unreachableState.on = true
}

func ( *compiler) () {
	.unreachableState.on = false
}

// memoryType is the type of memory in a compiled module: none, standard
// (non-shared), or shared.
type memoryType byte

const (
	// memoryTypeNone indicates there is no memory.
	memoryTypeNone memoryType = iota
	// memoryTypeStandard indicates there is a non-shared memory.
	memoryTypeStandard
	// memoryTypeShared indicates there is a shared memory.
	memoryTypeShared
)

// compilationResult is the output of compiling one Wasm function into
// interpreterir operations, plus per-module metadata shared by all functions.
type compilationResult struct {
	// Operations holds interpreterir operations compiled from Wasm instructions in a Wasm function.
	Operations []unionOperation

	// IROperationSourceOffsetsInWasmBinary is index-correlated with Operation and maps each operation to the corresponding source instruction's
	// offset in the original WebAssembly binary.
	// Non-nil only when the given Wasm module has the DWARF section.
	IROperationSourceOffsetsInWasmBinary []uint64

	// LabelCallers maps label to the number of callers to that label.
	// Here "callers" means that the call-sites which jumps to the label with br, br_if or br_table
	// instructions.
	//
	// Note: zero is possible and allowed in wasm. e.g.
	//
	//	(block
	//	  (br 0)
	//	  (block i32.const 1111)
	//	)
	//
	// In this example, the label corresponding to `(block i32.const 1111)` is never reached at runtime because `br 0` exits the function before we reach there.
	LabelCallers map[label]uint32
	// UsesMemory is true if this function might use memory.
	UsesMemory bool

	// The following fields are per-module values, not per-function.

	// Globals holds all the declarations of globals in the module from which this function is compiled.
	Globals []wasm.GlobalType
	// Functions holds all the declarations of function in the module from which this function is compiled, including itself.
	Functions []wasm.Index
	// Types holds all the types in the module from which this function is compiled.
	Types []wasm.FunctionType
	// Memory indicates the type of memory of the module.
	Memory memoryType
	// HasTable is true if the module from which this function is compiled has table declaration.
	HasTable bool
	// HasDataInstances is true if the module has data instances which might be used by memory.init or data.drop instructions.
	HasDataInstances bool
	// HasElementInstances is true if the module has element instances which might be used by table.init or elem.drop instructions.
	HasElementInstances bool
}

// newCompiler returns the new *compiler for the given parameters.
// Use compiler.Next function to get compilation result per function.
func newCompiler( api.CoreFeatures,  int,  *wasm.Module,  bool) (*compiler, error) {
	, , , ,  := .AllDeclarations()
	if  != nil {
		return nil, 
	}

	, ,  := len() > 0,
		len(.DataSection) > 0, len(.ElementSection) > 0

	var  memoryType
	switch {
	case  == nil:
		 = memoryTypeNone
	case .IsShared:
		 = memoryTypeShared
	default:
		 = memoryTypeStandard
	}

	 := .TypeSection

	 := &compiler{
		module:                     ,
		enabledFeatures:            ,
		controlFrames:              controlFrames{},
		callFrameStackSizeInUint64: ,
		result: compilationResult{
			Globals:             ,
			Functions:           ,
			Types:               ,
			Memory:              ,
			HasTable:            ,
			HasDataInstances:    ,
			HasElementInstances: ,
			LabelCallers:        map[label]uint32{},
		},
		globals:           ,
		funcs:             ,
		types:             ,
		ensureTermination: ,
		br:                bytes.NewReader(nil),
		funcTypeToSigs: funcTypeToIRSignatures{
			indirectCalls: make([]*signature, len()),
			directCalls:   make([]*signature, len()),
			wasmTypes:     ,
		},
		needSourceOffset: .DWARFLines != nil,
	}
	return , nil
}

// Next returns the next compilationResult for this compiler.
func ( *compiler) () (*compilationResult, error) {
	 := .next
	 := &.module.CodeSection[]
	 := &.types[.module.FunctionSection[]]

	// Reset the previous result.
	.result.Operations = .result.Operations[:0]
	.result.IROperationSourceOffsetsInWasmBinary = .result.IROperationSourceOffsetsInWasmBinary[:0]
	.result.UsesMemory = false
	// Clears the existing entries in LabelCallers.
	for  := uint32(0);  <= .currentFrameID; ++ {
		for  := labelKind(0);  < labelKindNum; ++ {
			delete(.result.LabelCallers, newLabel(, ))
		}
	}
	// Reset the previous states.
	.pc = 0
	.currentOpPC = 0
	.currentFrameID = 0
	.stackLenInUint64 = 0
	.unreachableState.on, .unreachableState.depth = false, 0

	if  := .compile(, .Body, .LocalTypes, .BodyOffsetInCodeSection);  != nil {
		return nil, 
	}
	.next++
	return &.result, nil
}

// Compile lowers given function instance into interpreterir operations
// so that the resulting operations can be consumed by the interpreter
// or the compiler compilation engine.
func ( *compiler) ( *wasm.FunctionType,  []byte,  []wasm.ValueType,  uint64) error {
	// Set function specific fields.
	.body = 
	.localTypes = 
	.sig = 
	.bodyOffsetInCodeSection = 

	// Reuses the underlying slices.
	.stack = .stack[:0]
	.controlFrames.frames = .controlFrames.frames[:0]

	.initializeStack()

	// Emit const expressions for locals.
	// Note that here we don't take function arguments
	// into account, meaning that callers must push
	// arguments before entering into the function body.
	for ,  := range .localTypes {
		.emitDefaultValue()
	}

	// Insert the function control frame.
	.controlFrames.push(controlFrame{
		frameID:   .nextFrameID(),
		blockType: .sig,
		kind:      controlFrameKindFunction,
	})

	// Now, enter the function body.
	for !.controlFrames.empty() && .pc < uint64(len(.body)) {
		if  := .handleInstruction();  != nil {
			return fmt.Errorf("handling instruction: %w", )
		}
	}
	return nil
}

// Translate the current Wasm instruction to interpreterir's operations,
// and emit the results into c.results.
func ( *compiler) () error {
	 := .body[.pc]
	.currentOpPC = .pc
	if false {
		var  string
		if  == wasm.OpcodeVecPrefix {
			 = wasm.VectorInstructionName(.body[.pc+1])
		} else if  == wasm.OpcodeAtomicPrefix {
			 = wasm.AtomicInstructionName(.body[.pc+1])
		} else if  == wasm.OpcodeMiscPrefix {
			 = wasm.MiscInstructionName(.body[.pc+1])
		} else {
			 = wasm.InstructionName()
		}
		fmt.Printf("handling %s, unreachable_state(on=%v,depth=%d), stack=%v\n",
			, .unreachableState.on, .unreachableState.depth, .stack,
		)
	}

	var  unsignedType
	if len(.stack) > 0 {
		 = .stackPeek()
	}

	// Modify the stack according the current instruction.
	// Note that some instructions will read "index" in
	// applyToStack and advance c.pc inside the function.
	,  := .applyToStack()
	if  != nil {
		return fmt.Errorf("apply stack failed for %s: %w", wasm.InstructionName(), )
	}
	// Now we handle each instruction, and
	// emit the corresponding interpreterir operations to the results.
:
	switch  {
	case wasm.OpcodeUnreachable:
		.emit(newOperationUnreachable())
		.markUnreachable()
	case wasm.OpcodeNop:
		// Nop is noop!
	case wasm.OpcodeBlock:
		.br.Reset(.body[.pc+1:])
		, ,  := wasm.DecodeBlockType(.types, .br, .enabledFeatures)
		if  != nil {
			return fmt.Errorf("reading block type for block instruction: %w", )
		}
		.pc += 

		if .unreachableState.on {
			// If it is currently in unreachable,
			// just remove the entire block.
			.unreachableState.depth++
			break 
		}

		// Create a new frame -- entering this block.
		 := controlFrame{
			frameID:                            .nextFrameID(),
			originalStackLenWithoutParam:       len(.stack) - len(.Params),
			originalStackLenWithoutParamUint64: .stackLenInUint64 - .ParamNumInUint64,
			kind:                               controlFrameKindBlockWithoutContinuationLabel,
			blockType:                          ,
		}
		.controlFrames.push()

	case wasm.OpcodeLoop:
		.br.Reset(.body[.pc+1:])
		, ,  := wasm.DecodeBlockType(.types, .br, .enabledFeatures)
		if  != nil {
			return fmt.Errorf("reading block type for loop instruction: %w", )
		}
		.pc += 

		if .unreachableState.on {
			// If it is currently in unreachable,
			// just remove the entire block.
			.unreachableState.depth++
			break 
		}

		// Create a new frame -- entering loop.
		 := controlFrame{
			frameID:                            .nextFrameID(),
			originalStackLenWithoutParam:       len(.stack) - len(.Params),
			originalStackLenWithoutParamUint64: .stackLenInUint64 - .ParamNumInUint64,
			kind:                               controlFrameKindLoop,
			blockType:                          ,
		}
		.controlFrames.push()

		// Prep labels for inside and the continuation of this loop.
		 := newLabel(labelKindHeader, .frameID)
		.result.LabelCallers[]++

		// Emit the branch operation to enter inside the loop.
		.emit(newOperationBr())
		.emit(newOperationLabel())

		// Insert the exit code check on the loop header, which is the only necessary point in the function body
		// to prevent infinite loop.
		//
		// Note that this is a little aggressive: this checks the exit code regardless the loop header is actually
		// the loop. In other words, this checks even when no br/br_if/br_table instructions jumping to this loop
		// exist. However, in reality, that shouldn't be an issue since such "noop" loop header will highly likely be
		// optimized out by almost all guest language compilers which have the control flow optimization passes.
		if .ensureTermination {
			.emit(newOperationBuiltinFunctionCheckExitCode())
		}
	case wasm.OpcodeIf:
		.br.Reset(.body[.pc+1:])
		, ,  := wasm.DecodeBlockType(.types, .br, .enabledFeatures)
		if  != nil {
			return fmt.Errorf("reading block type for if instruction: %w", )
		}
		.pc += 

		if .unreachableState.on {
			// If it is currently in unreachable,
			// just remove the entire block.
			.unreachableState.depth++
			break 
		}

		// Create a new frame -- entering if.
		 := controlFrame{
			frameID:                            .nextFrameID(),
			originalStackLenWithoutParam:       len(.stack) - len(.Params),
			originalStackLenWithoutParamUint64: .stackLenInUint64 - .ParamNumInUint64,
			// Note this will be set to controlFrameKindIfWithElse
			// when else opcode found later.
			kind:      controlFrameKindIfWithoutElse,
			blockType: ,
		}
		.controlFrames.push()

		// Prep labels for if and else of this if.
		 := newLabel(labelKindHeader, .frameID)
		 := newLabel(labelKindElse, .frameID)
		.result.LabelCallers[]++
		.result.LabelCallers[]++

		// Emit the branch operation to enter the then block.
		.emit(newOperationBrIf(, , nopinclusiveRange))
		.emit(newOperationLabel())
	case wasm.OpcodeElse:
		 := .controlFrames.top()
		if .unreachableState.on && .unreachableState.depth > 0 {
			// If it is currently in unreachable, and the nested if,
			// just remove the entire else block.
			break 
		} else if .unreachableState.on {
			// If it is currently in unreachable, and the non-nested if,
			// reset the stack so we can correctly handle the else block.
			 := .controlFrames.top()
			.stackSwitchAt()
			.kind = controlFrameKindIfWithElse

			// Re-push the parameters to the if block so that else block can use them.
			for ,  := range .blockType.Params {
				.stackPush(wasmValueTypeTounsignedType())
			}

			// We are no longer unreachable in else frame,
			// so emit the correct label, and reset the unreachable state.
			 := newLabel(labelKindElse, .frameID)
			.resetUnreachable()
			.emit(
				newOperationLabel(),
			)
			break 
		}

		// Change the Kind of this If block, indicating that
		// the if has else block.
		.kind = controlFrameKindIfWithElse

		// We need to reset the stack so that
		// the values pushed inside the then block
		// do not affect the else block.
		 := newOperationDrop(.getFrameDropRange(, false))

		// Reset the stack manipulated by the then block, and re-push the block param types to the stack.

		.stackSwitchAt()
		for ,  := range .blockType.Params {
			.stackPush(wasmValueTypeTounsignedType())
		}

		// Prep labels for else and the continuation of this if block.
		 := newLabel(labelKindElse, .frameID)
		 := newLabel(labelKindContinuation, .frameID)
		.result.LabelCallers[]++

		// Emit the instructions for exiting the if loop,
		// and then the initiation of else block.
		.emit()
		// Jump to the continuation of this block.
		.emit(newOperationBr())
		// Initiate the else block.
		.emit(newOperationLabel())
	case wasm.OpcodeEnd:
		if .unreachableState.on && .unreachableState.depth > 0 {
			.unreachableState.depth--
			break 
		} else if .unreachableState.on {
			.resetUnreachable()

			 := .controlFrames.pop()
			if .controlFrames.empty() {
				return nil
			}

			.stackSwitchAt()
			for ,  := range .blockType.Results {
				.stackPush(wasmValueTypeTounsignedType())
			}

			 := newLabel(labelKindContinuation, .frameID)
			if .kind == controlFrameKindIfWithoutElse {
				// Emit the else label.
				 := newLabel(labelKindElse, .frameID)
				.result.LabelCallers[]++
				.emit(newOperationLabel())
				.emit(newOperationBr())
				.emit(newOperationLabel())
			} else {
				.emit(
					newOperationLabel(),
				)
			}

			break 
		}

		 := .controlFrames.pop()

		// We need to reset the stack so that
		// the values pushed inside the block.
		 := newOperationDrop(.getFrameDropRange(, true))
		.stackSwitchAt()

		// Push the result types onto the stack.
		for ,  := range .blockType.Results {
			.stackPush(wasmValueTypeTounsignedType())
		}

		// Emit the instructions according to the Kind of the current control frame.
		switch .kind {
		case controlFrameKindFunction:
			if !.controlFrames.empty() {
				// Should never happen. If so, there's a bug in the translation.
				panic("bug: found more function control frames")
			}
			// Return from function.
			.emit()
			.emit(newOperationBr(newLabel(labelKindReturn, 0)))
		case controlFrameKindIfWithoutElse:
			// This case we have to emit "empty" else label.
			 := newLabel(labelKindElse, .frameID)
			 := newLabel(labelKindContinuation, .frameID)
			.result.LabelCallers[] += 2
			.emit()
			.emit(newOperationBr())
			// Emit the else which soon branches into the continuation.
			.emit(newOperationLabel())
			.emit(newOperationBr())
			// Initiate the continuation.
			.emit(newOperationLabel())
		case controlFrameKindBlockWithContinuationLabel,
			controlFrameKindIfWithElse:
			 := newLabel(labelKindContinuation, .frameID)
			.result.LabelCallers[]++
			.emit()
			.emit(newOperationBr())
			.emit(newOperationLabel())
		case controlFrameKindLoop, controlFrameKindBlockWithoutContinuationLabel:
			.emit(
				,
			)
		default:
			// Should never happen. If so, there's a bug in the translation.
			panic(fmt.Errorf("bug: invalid control frame Kind: 0x%x", .kind))
		}

	case wasm.OpcodeBr:
		, ,  := leb128.LoadUint32(.body[.pc+1:])
		if  != nil {
			return fmt.Errorf("read the target for br_if: %w", )
		}
		.pc += 

		if .unreachableState.on {
			// If it is currently in unreachable, br is no-op.
			break 
		}

		 := .controlFrames.get(int())
		.ensureContinuation()
		 := newOperationDrop(.getFrameDropRange(, false))
		 := .asLabel()
		.result.LabelCallers[]++
		.emit()
		.emit(newOperationBr())
		// Br operation is stack-polymorphic, and mark the state as unreachable.
		// That means subsequent instructions in the current control frame are "unreachable"
		// and can be safely removed.
		.markUnreachable()
	case wasm.OpcodeBrIf:
		, ,  := leb128.LoadUint32(.body[.pc+1:])
		if  != nil {
			return fmt.Errorf("read the target for br_if: %w", )
		}
		.pc += 

		if .unreachableState.on {
			// If it is currently in unreachable, br-if is no-op.
			break 
		}

		 := .controlFrames.get(int())
		.ensureContinuation()
		 := .getFrameDropRange(, false)
		 := .asLabel()
		.result.LabelCallers[]++

		 := newLabel(labelKindHeader, .nextFrameID())
		.result.LabelCallers[]++
		.emit(newOperationBrIf(, , ))
		// Start emitting else block operations.
		.emit(newOperationLabel())
	case wasm.OpcodeBrTable:
		.br.Reset(.body[.pc+1:])
		 := .br
		, ,  := leb128.DecodeUint32()
		if  != nil {
			return fmt.Errorf("error reading number of targets in br_table: %w", )
		}
		.pc += 

		if .unreachableState.on {
			// If it is currently in unreachable, br_table is no-op.
			// But before proceeding to the next instruction, we must advance the pc
			// according to the number of br_table targets.
			for  := uint32(0);  <= ; ++ { // inclusive as we also need to read the index of default target.
				, ,  := leb128.DecodeUint32()
				if  != nil {
					return fmt.Errorf("error reading target %d in br_table: %w", , )
				}
				.pc += 
			}
			break 
		}

		// Read the branch targets.
		 :=  * 2
		 := make([]uint64, 2+) // (label, inclusiveRange) * (default+numTargets)
		for  := uint32(0);  < ;  += 2 {
			, ,  := leb128.DecodeUint32()
			if  != nil {
				return fmt.Errorf("error reading target %d in br_table: %w", , )
			}
			.pc += 
			 := .controlFrames.get(int())
			.ensureContinuation()
			 := .getFrameDropRange(, false)
			 := .asLabel()
			[] = uint64()
			[+1] = .AsU64()
			.result.LabelCallers[]++
		}

		// Prep default target control frame.
		, ,  := leb128.DecodeUint32()
		if  != nil {
			return fmt.Errorf("error reading default target of br_table: %w", )
		}
		.pc += 
		 := .controlFrames.get(int())
		.ensureContinuation()
		 := .getFrameDropRange(, false)
		 := .asLabel()
		.result.LabelCallers[]++
		[] = uint64()
		[+1] = .AsU64()
		.emit(newOperationBrTable())

		// br_table operation is stack-polymorphic, and mark the state as unreachable.
		// That means subsequent instructions in the current control frame are "unreachable"
		// and can be safely removed.
		.markUnreachable()
	case wasm.OpcodeReturn:
		 := .controlFrames.functionFrame()
		 := newOperationDrop(.getFrameDropRange(, false))

		// Cleanup the stack and then jmp to function frame's continuation (meaning return).
		.emit()
		.emit(newOperationBr(.asLabel()))

		// Return operation is stack-polymorphic, and mark the state as unreachable.
		// That means subsequent instructions in the current control frame are "unreachable"
		// and can be safely removed.
		.markUnreachable()
	case wasm.OpcodeCall:
		.emit(
			newOperationCall(),
		)
	case wasm.OpcodeCallIndirect:
		 := 
		, ,  := leb128.LoadUint32(.body[.pc+1:])
		if  != nil {
			return fmt.Errorf("read target for br_table: %w", )
		}
		.pc += 
		.emit(
			newOperationCallIndirect(, ),
		)
	case wasm.OpcodeDrop:
		 := inclusiveRange{Start: 0, End: 0}
		if  == unsignedTypeV128 {
			// inclusiveRange is the range in uint64 representation, so dropping a vector value on top
			// should be translated as drop [0..1] inclusively.
			.End++
		}
		.emit(newOperationDrop())
	case wasm.OpcodeSelect:
		// If it is on the unreachable state, ignore the instruction.
		if .unreachableState.on {
			break 
		}
		 := .stackPeek() == unsignedTypeV128
		.emit(
			newOperationSelect(),
		)
	case wasm.OpcodeTypedSelect:
		// Skips two bytes: vector size fixed to 1, and the value type for select.
		.pc += 2
		// If it is on the unreachable state, ignore the instruction.
		if .unreachableState.on {
			break 
		}
		// Typed select is semantically equivalent to select at runtime.
		 := .stackPeek() == unsignedTypeV128
		.emit(
			newOperationSelect(),
		)
	case wasm.OpcodeLocalGet:
		 := .localDepth()
		if  := .localType() == wasm.ValueTypeV128; ! {
			.emit(
				// -1 because we already manipulated the stack before
				// called localDepth ^^.
				newOperationPick(-1, ),
			)
		} else {
			.emit(
				// -2 because we already manipulated the stack before
				// called localDepth ^^.
				newOperationPick(-2, ),
			)
		}
	case wasm.OpcodeLocalSet:
		 := .localDepth()

		 := .localType() == wasm.ValueTypeV128
		if  {
			.emit(
				// +2 because we already popped the operands for this operation from the c.stack before
				// called localDepth ^^,
				newOperationSet(+2, ),
			)
		} else {
			.emit(
				// +1 because we already popped the operands for this operation from the c.stack before
				// called localDepth ^^,
				newOperationSet(+1, ),
			)
		}
	case wasm.OpcodeLocalTee:
		 := .localDepth()
		 := .localType() == wasm.ValueTypeV128
		if  {
			.emit(newOperationPick(1, ))
			.emit(newOperationSet(+2, ))
		} else {
			.emit(
				newOperationPick(0, ))
			.emit(newOperationSet(+1, ))
		}
	case wasm.OpcodeGlobalGet:
		.emit(
			newOperationGlobalGet(),
		)
	case wasm.OpcodeGlobalSet:
		.emit(
			newOperationGlobalSet(),
		)
	case wasm.OpcodeI32Load:
		,  := .readMemoryArg(wasm.OpcodeI32LoadName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad(unsignedTypeI32, ))
	case wasm.OpcodeI64Load:
		,  := .readMemoryArg(wasm.OpcodeI64LoadName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad(unsignedTypeI64, ))
	case wasm.OpcodeF32Load:
		,  := .readMemoryArg(wasm.OpcodeF32LoadName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad(unsignedTypeF32, ))
	case wasm.OpcodeF64Load:
		,  := .readMemoryArg(wasm.OpcodeF64LoadName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad(unsignedTypeF64, ))
	case wasm.OpcodeI32Load8S:
		,  := .readMemoryArg(wasm.OpcodeI32Load8SName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad8(signedInt32, ))
	case wasm.OpcodeI32Load8U:
		,  := .readMemoryArg(wasm.OpcodeI32Load8UName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad8(signedUint32, ))
	case wasm.OpcodeI32Load16S:
		,  := .readMemoryArg(wasm.OpcodeI32Load16SName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad16(signedInt32, ))
	case wasm.OpcodeI32Load16U:
		,  := .readMemoryArg(wasm.OpcodeI32Load16UName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad16(signedUint32, ))
	case wasm.OpcodeI64Load8S:
		,  := .readMemoryArg(wasm.OpcodeI64Load8SName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad8(signedInt64, ))
	case wasm.OpcodeI64Load8U:
		,  := .readMemoryArg(wasm.OpcodeI64Load8UName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad8(signedUint64, ))
	case wasm.OpcodeI64Load16S:
		,  := .readMemoryArg(wasm.OpcodeI64Load16SName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad16(signedInt64, ))
	case wasm.OpcodeI64Load16U:
		,  := .readMemoryArg(wasm.OpcodeI64Load16UName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad16(signedUint64, ))
	case wasm.OpcodeI64Load32S:
		,  := .readMemoryArg(wasm.OpcodeI64Load32SName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad32(true, ))
	case wasm.OpcodeI64Load32U:
		,  := .readMemoryArg(wasm.OpcodeI64Load32UName)
		if  != nil {
			return 
		}
		.emit(newOperationLoad32(false, ))
	case wasm.OpcodeI32Store:
		,  := .readMemoryArg(wasm.OpcodeI32StoreName)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore(unsignedTypeI32, ),
		)
	case wasm.OpcodeI64Store:
		,  := .readMemoryArg(wasm.OpcodeI64StoreName)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore(unsignedTypeI64, ),
		)
	case wasm.OpcodeF32Store:
		,  := .readMemoryArg(wasm.OpcodeF32StoreName)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore(unsignedTypeF32, ),
		)
	case wasm.OpcodeF64Store:
		,  := .readMemoryArg(wasm.OpcodeF64StoreName)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore(unsignedTypeF64, ),
		)
	case wasm.OpcodeI32Store8:
		,  := .readMemoryArg(wasm.OpcodeI32Store8Name)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore8(),
		)
	case wasm.OpcodeI32Store16:
		,  := .readMemoryArg(wasm.OpcodeI32Store16Name)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore16(),
		)
	case wasm.OpcodeI64Store8:
		,  := .readMemoryArg(wasm.OpcodeI64Store8Name)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore8(),
		)
	case wasm.OpcodeI64Store16:
		,  := .readMemoryArg(wasm.OpcodeI64Store16Name)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore16(),
		)
	case wasm.OpcodeI64Store32:
		,  := .readMemoryArg(wasm.OpcodeI64Store32Name)
		if  != nil {
			return 
		}
		.emit(
			newOperationStore32(),
		)
	case wasm.OpcodeMemorySize:
		.result.UsesMemory = true
		.pc++ // Skip the reserved one byte.
		.emit(
			newOperationMemorySize(),
		)
	case wasm.OpcodeMemoryGrow:
		.result.UsesMemory = true
		.pc++ // Skip the reserved one byte.
		.emit(
			newOperationMemoryGrow(),
		)
	case wasm.OpcodeI32Const:
		, ,  := leb128.LoadInt32(.body[.pc+1:])
		if  != nil {
			return fmt.Errorf("reading i32.const value: %v", )
		}
		.pc += 
		.emit(
			newOperationConstI32(uint32()),
		)
	case wasm.OpcodeI64Const:
		, ,  := leb128.LoadInt64(.body[.pc+1:])
		if  != nil {
			return fmt.Errorf("reading i64.const value: %v", )
		}
		.pc += 
		.emit(
			newOperationConstI64(uint64()),
		)
	case wasm.OpcodeF32Const:
		 := math.Float32frombits(binary.LittleEndian.Uint32(.body[.pc+1:]))
		.pc += 4
		.emit(
			newOperationConstF32(),
		)
	case wasm.OpcodeF64Const:
		 := math.Float64frombits(binary.LittleEndian.Uint64(.body[.pc+1:]))
		.pc += 8
		.emit(
			newOperationConstF64(),
		)
	case wasm.OpcodeI32Eqz:
		.emit(
			newOperationEqz(unsignedInt32),
		)
	case wasm.OpcodeI32Eq:
		.emit(
			newOperationEq(unsignedTypeI32),
		)
	case wasm.OpcodeI32Ne:
		.emit(
			newOperationNe(unsignedTypeI32),
		)
	case wasm.OpcodeI32LtS:
		.emit(
			newOperationLt(signedTypeInt32),
		)
	case wasm.OpcodeI32LtU:
		.emit(
			newOperationLt(signedTypeUint32),
		)
	case wasm.OpcodeI32GtS:
		.emit(
			newOperationGt(signedTypeInt32),
		)
	case wasm.OpcodeI32GtU:
		.emit(
			newOperationGt(signedTypeUint32),
		)
	case wasm.OpcodeI32LeS:
		.emit(
			newOperationLe(signedTypeInt32),
		)
	case wasm.OpcodeI32LeU:
		.emit(
			newOperationLe(signedTypeUint32),
		)
	case wasm.OpcodeI32GeS:
		.emit(
			newOperationGe(signedTypeInt32),
		)
	case wasm.OpcodeI32GeU:
		.emit(
			newOperationGe(signedTypeUint32),
		)
	case wasm.OpcodeI64Eqz:
		.emit(
			newOperationEqz(unsignedInt64),
		)
	case wasm.OpcodeI64Eq:
		.emit(
			newOperationEq(unsignedTypeI64),
		)
	case wasm.OpcodeI64Ne:
		.emit(
			newOperationNe(unsignedTypeI64),
		)
	case wasm.OpcodeI64LtS:
		.emit(
			newOperationLt(signedTypeInt64),
		)
	case wasm.OpcodeI64LtU:
		.emit(
			newOperationLt(signedTypeUint64),
		)
	case wasm.OpcodeI64GtS:
		.emit(
			newOperationGt(signedTypeInt64),
		)
	case wasm.OpcodeI64GtU:
		.emit(
			newOperationGt(signedTypeUint64),
		)
	case wasm.OpcodeI64LeS:
		.emit(
			newOperationLe(signedTypeInt64),
		)
	case wasm.OpcodeI64LeU:
		.emit(
			newOperationLe(signedTypeUint64),
		)
	case wasm.OpcodeI64GeS:
		.emit(
			newOperationGe(signedTypeInt64),
		)
	case wasm.OpcodeI64GeU:
		.emit(
			newOperationGe(signedTypeUint64),
		)
	case wasm.OpcodeF32Eq:
		.emit(
			newOperationEq(unsignedTypeF32),
		)
	case wasm.OpcodeF32Ne:
		.emit(
			newOperationNe(unsignedTypeF32),
		)
	case wasm.OpcodeF32Lt:
		.emit(
			newOperationLt(signedTypeFloat32),
		)
	case wasm.OpcodeF32Gt:
		.emit(
			newOperationGt(signedTypeFloat32),
		)
	case wasm.OpcodeF32Le:
		.emit(
			newOperationLe(signedTypeFloat32),
		)
	case wasm.OpcodeF32Ge:
		.emit(
			newOperationGe(signedTypeFloat32),
		)
	case wasm.OpcodeF64Eq:
		.emit(
			newOperationEq(unsignedTypeF64),
		)
	case wasm.OpcodeF64Ne:
		.emit(
			newOperationNe(unsignedTypeF64),
		)
	case wasm.OpcodeF64Lt:
		.emit(
			newOperationLt(signedTypeFloat64),
		)
	case wasm.OpcodeF64Gt:
		.emit(
			newOperationGt(signedTypeFloat64),
		)
	case wasm.OpcodeF64Le:
		.emit(
			newOperationLe(signedTypeFloat64),
		)
	case wasm.OpcodeF64Ge:
		.emit(
			newOperationGe(signedTypeFloat64),
		)
	case wasm.OpcodeI32Clz:
		.emit(
			newOperationClz(unsignedInt32),
		)
	case wasm.OpcodeI32Ctz:
		.emit(
			newOperationCtz(unsignedInt32),
		)
	case wasm.OpcodeI32Popcnt:
		.emit(
			newOperationPopcnt(unsignedInt32),
		)
	case wasm.OpcodeI32Add:
		.emit(
			newOperationAdd(unsignedTypeI32),
		)
	case wasm.OpcodeI32Sub:
		.emit(
			newOperationSub(unsignedTypeI32),
		)
	case wasm.OpcodeI32Mul:
		.emit(
			newOperationMul(unsignedTypeI32),
		)
	case wasm.OpcodeI32DivS:
		.emit(
			newOperationDiv(signedTypeInt32),
		)
	case wasm.OpcodeI32DivU:
		.emit(
			newOperationDiv(signedTypeUint32),
		)
	case wasm.OpcodeI32RemS:
		.emit(
			newOperationRem(signedInt32),
		)
	case wasm.OpcodeI32RemU:
		.emit(
			newOperationRem(signedUint32),
		)
	case wasm.OpcodeI32And:
		.emit(
			newOperationAnd(unsignedInt32),
		)
	case wasm.OpcodeI32Or:
		.emit(
			newOperationOr(unsignedInt32),
		)
	case wasm.OpcodeI32Xor:
		.emit(
			newOperationXor(unsignedInt64),
		)
	case wasm.OpcodeI32Shl:
		.emit(
			newOperationShl(unsignedInt32),
		)
	case wasm.OpcodeI32ShrS:
		.emit(
			newOperationShr(signedInt32),
		)
	case wasm.OpcodeI32ShrU:
		.emit(
			newOperationShr(signedUint32),
		)
	case wasm.OpcodeI32Rotl:
		.emit(
			newOperationRotl(unsignedInt32),
		)
	case wasm.OpcodeI32Rotr:
		.emit(
			newOperationRotr(unsignedInt32),
		)
	case wasm.OpcodeI64Clz:
		.emit(
			newOperationClz(unsignedInt64),
		)
	case wasm.OpcodeI64Ctz:
		.emit(
			newOperationCtz(unsignedInt64),
		)
	case wasm.OpcodeI64Popcnt:
		.emit(
			newOperationPopcnt(unsignedInt64),
		)
	case wasm.OpcodeI64Add:
		.emit(
			newOperationAdd(unsignedTypeI64),
		)
	case wasm.OpcodeI64Sub:
		.emit(
			newOperationSub(unsignedTypeI64),
		)
	case wasm.OpcodeI64Mul:
		.emit(
			newOperationMul(unsignedTypeI64),
		)
	case wasm.OpcodeI64DivS:
		.emit(
			newOperationDiv(signedTypeInt64),
		)
	case wasm.OpcodeI64DivU:
		.emit(
			newOperationDiv(signedTypeUint64),
		)
	case wasm.OpcodeI64RemS:
		.emit(
			newOperationRem(signedInt64),
		)
	case wasm.OpcodeI64RemU:
		.emit(
			newOperationRem(signedUint64),
		)
	case wasm.OpcodeI64And:
		.emit(
			newOperationAnd(unsignedInt64),
		)
	case wasm.OpcodeI64Or:
		.emit(
			newOperationOr(unsignedInt64),
		)
	case wasm.OpcodeI64Xor:
		.emit(
			newOperationXor(unsignedInt64),
		)
	case wasm.OpcodeI64Shl:
		.emit(
			newOperationShl(unsignedInt64),
		)
	case wasm.OpcodeI64ShrS:
		.emit(
			newOperationShr(signedInt64),
		)
	case wasm.OpcodeI64ShrU:
		.emit(
			newOperationShr(signedUint64),
		)
	case wasm.OpcodeI64Rotl:
		.emit(
			newOperationRotl(unsignedInt64),
		)
	case wasm.OpcodeI64Rotr:
		.emit(
			newOperationRotr(unsignedInt64),
		)
	case wasm.OpcodeF32Abs:
		.emit(
			newOperationAbs(f32),
		)
	case wasm.OpcodeF32Neg:
		.emit(
			newOperationNeg(f32),
		)
	case wasm.OpcodeF32Ceil:
		.emit(
			newOperationCeil(f32),
		)
	case wasm.OpcodeF32Floor:
		.emit(
			newOperationFloor(f32),
		)
	case wasm.OpcodeF32Trunc:
		.emit(
			newOperationTrunc(f32),
		)
	case wasm.OpcodeF32Nearest:
		.emit(
			newOperationNearest(f32),
		)
	case wasm.OpcodeF32Sqrt:
		.emit(
			newOperationSqrt(f32),
		)
	case wasm.OpcodeF32Add:
		.emit(
			newOperationAdd(unsignedTypeF32),
		)
	case wasm.OpcodeF32Sub:
		.emit(
			newOperationSub(unsignedTypeF32),
		)
	case wasm.OpcodeF32Mul:
		.emit(
			newOperationMul(unsignedTypeF32),
		)
	case wasm.OpcodeF32Div:
		.emit(
			newOperationDiv(signedTypeFloat32),
		)
	case wasm.OpcodeF32Min:
		.emit(
			newOperationMin(f32),
		)
	case wasm.OpcodeF32Max:
		.emit(
			newOperationMax(f32),
		)
	case wasm.OpcodeF32Copysign:
		.emit(
			newOperationCopysign(f32),
		)
	case wasm.OpcodeF64Abs:
		.emit(
			newOperationAbs(f64),
		)
	case wasm.OpcodeF64Neg:
		.emit(
			newOperationNeg(f64),
		)
	case wasm.OpcodeF64Ceil:
		.emit(
			newOperationCeil(f64),
		)
	case wasm.OpcodeF64Floor:
		.emit(
			newOperationFloor(f64),
		)
	case wasm.OpcodeF64Trunc:
		.emit(
			newOperationTrunc(f64),
		)
	case wasm.OpcodeF64Nearest:
		.emit(
			newOperationNearest(f64),
		)
	case wasm.OpcodeF64Sqrt:
		.emit(
			newOperationSqrt(f64),
		)
	case wasm.OpcodeF64Add:
		.emit(
			newOperationAdd(unsignedTypeF64),
		)
	case wasm.OpcodeF64Sub:
		.emit(
			newOperationSub(unsignedTypeF64),
		)
	case wasm.OpcodeF64Mul:
		.emit(
			newOperationMul(unsignedTypeF64),
		)
	case wasm.OpcodeF64Div:
		.emit(
			newOperationDiv(signedTypeFloat64),
		)
	case wasm.OpcodeF64Min:
		.emit(
			newOperationMin(f64),
		)
	case wasm.OpcodeF64Max:
		.emit(
			newOperationMax(f64),
		)
	case wasm.OpcodeF64Copysign:
		.emit(
			newOperationCopysign(f64),
		)
	case wasm.OpcodeI32WrapI64:
		.emit(
			newOperationI32WrapFromI64(),
		)
	case wasm.OpcodeI32TruncF32S:
		.emit(
			newOperationITruncFromF(f32, signedInt32, false),
		)
	case wasm.OpcodeI32TruncF32U:
		.emit(
			newOperationITruncFromF(f32, signedUint32, false),
		)
	case wasm.OpcodeI32TruncF64S:
		.emit(
			newOperationITruncFromF(f64, signedInt32, false),
		)
	case wasm.OpcodeI32TruncF64U:
		.emit(
			newOperationITruncFromF(f64, signedUint32, false),
		)
	case wasm.OpcodeI64ExtendI32S:
		.emit(
			newOperationExtend(true),
		)
	case wasm.OpcodeI64ExtendI32U:
		.emit(
			newOperationExtend(false),
		)
	case wasm.OpcodeI64TruncF32S:
		.emit(
			newOperationITruncFromF(f32, signedInt64, false),
		)
	case wasm.OpcodeI64TruncF32U:
		.emit(
			newOperationITruncFromF(f32, signedUint64, false),
		)
	case wasm.OpcodeI64TruncF64S:
		.emit(
			newOperationITruncFromF(f64, signedInt64, false),
		)
	case wasm.OpcodeI64TruncF64U:
		.emit(
			newOperationITruncFromF(f64, signedUint64, false),
		)
	case wasm.OpcodeF32ConvertI32S:
		.emit(
			newOperationFConvertFromI(signedInt32, f32),
		)
	case wasm.OpcodeF32ConvertI32U:
		.emit(
			newOperationFConvertFromI(signedUint32, f32),
		)
	case wasm.OpcodeF32ConvertI64S:
		.emit(
			newOperationFConvertFromI(signedInt64, f32),
		)
	case wasm.OpcodeF32ConvertI64U:
		.emit(
			newOperationFConvertFromI(signedUint64, f32),
		)
	case wasm.OpcodeF32DemoteF64:
		.emit(
			newOperationF32DemoteFromF64(),
		)
	case wasm.OpcodeF64ConvertI32S:
		.emit(
			newOperationFConvertFromI(signedInt32, f64),
		)
	case wasm.OpcodeF64ConvertI32U:
		.emit(
			newOperationFConvertFromI(signedUint32, f64),
		)
	case wasm.OpcodeF64ConvertI64S:
		.emit(
			newOperationFConvertFromI(signedInt64, f64),
		)
	case wasm.OpcodeF64ConvertI64U:
		.emit(
			newOperationFConvertFromI(signedUint64, f64),
		)
	case wasm.OpcodeF64PromoteF32:
		.emit(
			newOperationF64PromoteFromF32(),
		)
	case wasm.OpcodeI32ReinterpretF32:
		.emit(
			newOperationI32ReinterpretFromF32(),
		)
	case wasm.OpcodeI64ReinterpretF64:
		.emit(
			newOperationI64ReinterpretFromF64(),
		)
	case wasm.OpcodeF32ReinterpretI32:
		.emit(
			newOperationF32ReinterpretFromI32(),
		)
	case wasm.OpcodeF64ReinterpretI64:
		.emit(
			newOperationF64ReinterpretFromI64(),
		)
	case wasm.OpcodeI32Extend8S:
		.emit(
			newOperationSignExtend32From8(),
		)
	case wasm.OpcodeI32Extend16S:
		.emit(
			newOperationSignExtend32From16(),
		)
	case wasm.OpcodeI64Extend8S:
		.emit(
			newOperationSignExtend64From8(),
		)
	case wasm.OpcodeI64Extend16S:
		.emit(
			newOperationSignExtend64From16(),
		)
	case wasm.OpcodeI64Extend32S:
		.emit(
			newOperationSignExtend64From32(),
		)
	case wasm.OpcodeRefFunc:
		.pc++
		, ,  := leb128.LoadUint32(.body[.pc:])
		if  != nil {
			return fmt.Errorf("failed to read function index for ref.func: %v", )
		}
		.pc +=  - 1
		.emit(
			newOperationRefFunc(),
		)
	case wasm.OpcodeRefNull:
		.pc++ // Skip the type of reftype as every ref value is opaque pointer.
		.emit(
			newOperationConstI64(0),
		)
	case wasm.OpcodeRefIsNull:
		// Simply compare the opaque pointer (i64) with zero.
		.emit(
			newOperationEqz(unsignedInt64),
		)
	case wasm.OpcodeTableGet:
		.pc++
		, ,  := leb128.LoadUint32(.body[.pc:])
		if  != nil {
			return fmt.Errorf("failed to read function index for table.get: %v", )
		}
		.pc +=  - 1
		.emit(
			newOperationTableGet(),
		)
	case wasm.OpcodeTableSet:
		.pc++
		, ,  := leb128.LoadUint32(.body[.pc:])
		if  != nil {
			return fmt.Errorf("failed to read function index for table.set: %v", )
		}
		.pc +=  - 1
		.emit(
			newOperationTableSet(),
		)
	case wasm.OpcodeMiscPrefix:
		.pc++
		// A misc opcode is encoded as an unsigned variable 32-bit integer.
		, ,  := leb128.LoadUint32(.body[.pc:])
		if  != nil {
			return fmt.Errorf("failed to read misc opcode: %v", )
		}
		.pc +=  - 1
		switch byte() {
		case wasm.OpcodeMiscI32TruncSatF32S:
			.emit(
				newOperationITruncFromF(f32, signedInt32, true),
			)
		case wasm.OpcodeMiscI32TruncSatF32U:
			.emit(
				newOperationITruncFromF(f32, signedUint32, true),
			)
		case wasm.OpcodeMiscI32TruncSatF64S:
			.emit(
				newOperationITruncFromF(f64, signedInt32, true),
			)
		case wasm.OpcodeMiscI32TruncSatF64U:
			.emit(
				newOperationITruncFromF(f64, signedUint32, true),
			)
		case wasm.OpcodeMiscI64TruncSatF32S:
			.emit(
				newOperationITruncFromF(f32, signedInt64, true),
			)
		case wasm.OpcodeMiscI64TruncSatF32U:
			.emit(
				newOperationITruncFromF(f32, signedUint64, true),
			)
		case wasm.OpcodeMiscI64TruncSatF64S:
			.emit(
				newOperationITruncFromF(f64, signedInt64, true),
			)
		case wasm.OpcodeMiscI64TruncSatF64U:
			.emit(
				newOperationITruncFromF(f64, signedUint64, true),
			)
		case wasm.OpcodeMiscMemoryInit:
			.result.UsesMemory = true
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc +=  + 1 // +1 to skip the memory index which is fixed to zero.
			.emit(
				newOperationMemoryInit(),
			)
		case wasm.OpcodeMiscDataDrop:
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			.emit(
				newOperationDataDrop(),
			)
		case wasm.OpcodeMiscMemoryCopy:
			.result.UsesMemory = true
			.pc += 2 // +2 to skip two memory indexes which are fixed to zero.
			.emit(
				newOperationMemoryCopy(),
			)
		case wasm.OpcodeMiscMemoryFill:
			.result.UsesMemory = true
			.pc += 1 // +1 to skip the memory index which is fixed to zero.
			.emit(
				newOperationMemoryFill(),
			)
		case wasm.OpcodeMiscTableInit:
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			// Read table index which is fixed to zero currently.
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			.emit(
				newOperationTableInit(, ),
			)
		case wasm.OpcodeMiscElemDrop:
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			.emit(
				newOperationElemDrop(),
			)
		case wasm.OpcodeMiscTableCopy:
			// Read the source table index.
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			// Read the destination table index.
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			.emit(
				newOperationTableCopy(, ),
			)
		case wasm.OpcodeMiscTableGrow:
			// Read the table index.
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			.emit(
				newOperationTableGrow(),
			)
		case wasm.OpcodeMiscTableSize:
			// Read the table index.
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			.emit(
				newOperationTableSize(),
			)
		case wasm.OpcodeMiscTableFill:
			// Read the source table index.
			, ,  := leb128.LoadUint32(.body[.pc+1:])
			if  != nil {
				return fmt.Errorf("reading i32.const value: %v", )
			}
			.pc += 
			.emit(
				newOperationTableFill(),
			)
		default:
			return fmt.Errorf("unsupported misc instruction in interpreterir: 0x%x", )
		}
	case wasm.OpcodeVecPrefix:
		.pc++
		switch  := .body[.pc];  {
		case wasm.OpcodeVecV128Const:
			.pc++
			 := binary.LittleEndian.Uint64(.body[.pc : .pc+8])
			.pc += 8
			 := binary.LittleEndian.Uint64(.body[.pc : .pc+8])
			.emit(
				newOperationV128Const(, ),
			)
			.pc += 7
		case wasm.OpcodeVecV128Load:
			,  := .readMemoryArg(wasm.OpcodeI32LoadName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType128, ),
			)
		case wasm.OpcodeVecV128Load8x8s:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load8x8SName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType8x8s, ),
			)
		case wasm.OpcodeVecV128Load8x8u:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load8x8UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType8x8u, ),
			)
		case wasm.OpcodeVecV128Load16x4s:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load16x4SName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType16x4s, ),
			)
		case wasm.OpcodeVecV128Load16x4u:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load16x4UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType16x4u, ),
			)
		case wasm.OpcodeVecV128Load32x2s:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load32x2SName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType32x2s, ),
			)
		case wasm.OpcodeVecV128Load32x2u:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load32x2UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType32x2u, ),
			)
		case wasm.OpcodeVecV128Load8Splat:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load8SplatName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType8Splat, ),
			)
		case wasm.OpcodeVecV128Load16Splat:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load16SplatName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType16Splat, ),
			)
		case wasm.OpcodeVecV128Load32Splat:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load32SplatName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType32Splat, ),
			)
		case wasm.OpcodeVecV128Load64Splat:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load64SplatName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType64Splat, ),
			)
		case wasm.OpcodeVecV128Load32zero:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load32zeroName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType32zero, ),
			)
		case wasm.OpcodeVecV128Load64zero:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load64zeroName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Load(v128LoadType64zero, ),
			)
		case wasm.OpcodeVecV128Load8Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load8LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128LoadLane(, 8, ),
			)
		case wasm.OpcodeVecV128Load16Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load16LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128LoadLane(, 16, ),
			)
		case wasm.OpcodeVecV128Load32Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load32LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128LoadLane(, 32, ),
			)
		case wasm.OpcodeVecV128Load64Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Load64LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128LoadLane(, 64, ),
			)
		case wasm.OpcodeVecV128Store:
			,  := .readMemoryArg(wasm.OpcodeVecV128StoreName)
			if  != nil {
				return 
			}
			.emit(
				newOperationV128Store(),
			)
		case wasm.OpcodeVecV128Store8Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Store8LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128StoreLane(, 8, ),
			)
		case wasm.OpcodeVecV128Store16Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Store16LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128StoreLane(, 16, ),
			)
		case wasm.OpcodeVecV128Store32Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Store32LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128StoreLane(, 32, ),
			)
		case wasm.OpcodeVecV128Store64Lane:
			,  := .readMemoryArg(wasm.OpcodeVecV128Store64LaneName)
			if  != nil {
				return 
			}
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128StoreLane(, 64, ),
			)
		case wasm.OpcodeVecI8x16ExtractLaneS:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, true, shapeI8x16),
			)
		case wasm.OpcodeVecI8x16ExtractLaneU:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, false, shapeI8x16),
			)
		case wasm.OpcodeVecI16x8ExtractLaneS:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, true, shapeI16x8),
			)
		case wasm.OpcodeVecI16x8ExtractLaneU:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, false, shapeI16x8),
			)
		case wasm.OpcodeVecI32x4ExtractLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, false, shapeI32x4),
			)
		case wasm.OpcodeVecI64x2ExtractLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, false, shapeI64x2),
			)
		case wasm.OpcodeVecF32x4ExtractLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, false, shapeF32x4),
			)
		case wasm.OpcodeVecF64x2ExtractLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ExtractLane(, false, shapeF64x2),
			)
		case wasm.OpcodeVecI8x16ReplaceLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ReplaceLane(, shapeI8x16),
			)
		case wasm.OpcodeVecI16x8ReplaceLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ReplaceLane(, shapeI16x8),
			)
		case wasm.OpcodeVecI32x4ReplaceLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ReplaceLane(, shapeI32x4),
			)
		case wasm.OpcodeVecI64x2ReplaceLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ReplaceLane(, shapeI64x2),
			)
		case wasm.OpcodeVecF32x4ReplaceLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ReplaceLane(, shapeF32x4),
			)
		case wasm.OpcodeVecF64x2ReplaceLane:
			.pc++
			 := .body[.pc]
			.emit(
				newOperationV128ReplaceLane(, shapeF64x2),
			)
		case wasm.OpcodeVecI8x16Splat:
			.emit(
				newOperationV128Splat(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8Splat:
			.emit(
				newOperationV128Splat(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4Splat:
			.emit(
				newOperationV128Splat(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2Splat:
			.emit(
				newOperationV128Splat(shapeI64x2),
			)
		case wasm.OpcodeVecF32x4Splat:
			.emit(
				newOperationV128Splat(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Splat:
			.emit(
				newOperationV128Splat(shapeF64x2),
			)
		case wasm.OpcodeVecI8x16Swizzle:
			.emit(
				newOperationV128Swizzle(),
			)
		case wasm.OpcodeVecV128i8x16Shuffle:
			.pc++
			 := make([]uint64, 16)
			for  := uint64(0);  < 16; ++ {
				[] = uint64(.body[.pc+])
			}
			 := newOperationV128Shuffle()
			.emit()
			.pc += 15
		case wasm.OpcodeVecV128AnyTrue:
			.emit(
				newOperationV128AnyTrue(),
			)
		case wasm.OpcodeVecI8x16AllTrue:
			.emit(
				newOperationV128AllTrue(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8AllTrue:
			.emit(
				newOperationV128AllTrue(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4AllTrue:
			.emit(
				newOperationV128AllTrue(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2AllTrue:
			.emit(
				newOperationV128AllTrue(shapeI64x2),
			)
		case wasm.OpcodeVecI8x16BitMask:
			.emit(
				newOperationV128BitMask(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8BitMask:
			.emit(
				newOperationV128BitMask(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4BitMask:
			.emit(
				newOperationV128BitMask(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2BitMask:
			.emit(
				newOperationV128BitMask(shapeI64x2),
			)
		case wasm.OpcodeVecV128And:
			.emit(
				newOperationV128And(),
			)
		case wasm.OpcodeVecV128Not:
			.emit(
				newOperationV128Not(),
			)
		case wasm.OpcodeVecV128Or:
			.emit(
				newOperationV128Or(),
			)
		case wasm.OpcodeVecV128Xor:
			.emit(
				newOperationV128Xor(),
			)
		case wasm.OpcodeVecV128Bitselect:
			.emit(
				newOperationV128Bitselect(),
			)
		case wasm.OpcodeVecV128AndNot:
			.emit(
				newOperationV128AndNot(),
			)
		case wasm.OpcodeVecI8x16Shl:
			.emit(
				newOperationV128Shl(shapeI8x16),
			)
		case wasm.OpcodeVecI8x16ShrS:
			.emit(
				newOperationV128Shr(shapeI8x16, true),
			)
		case wasm.OpcodeVecI8x16ShrU:
			.emit(
				newOperationV128Shr(shapeI8x16, false),
			)
		case wasm.OpcodeVecI16x8Shl:
			.emit(
				newOperationV128Shl(shapeI16x8),
			)
		case wasm.OpcodeVecI16x8ShrS:
			.emit(
				newOperationV128Shr(shapeI16x8, true),
			)
		case wasm.OpcodeVecI16x8ShrU:
			.emit(
				newOperationV128Shr(shapeI16x8, false),
			)
		case wasm.OpcodeVecI32x4Shl:
			.emit(
				newOperationV128Shl(shapeI32x4),
			)
		case wasm.OpcodeVecI32x4ShrS:
			.emit(
				newOperationV128Shr(shapeI32x4, true),
			)
		case wasm.OpcodeVecI32x4ShrU:
			.emit(
				newOperationV128Shr(shapeI32x4, false),
			)
		case wasm.OpcodeVecI64x2Shl:
			.emit(
				newOperationV128Shl(shapeI64x2),
			)
		case wasm.OpcodeVecI64x2ShrS:
			.emit(
				newOperationV128Shr(shapeI64x2, true),
			)
		case wasm.OpcodeVecI64x2ShrU:
			.emit(
				newOperationV128Shr(shapeI64x2, false),
			)
		case wasm.OpcodeVecI8x16Eq:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16Eq),
			)
		case wasm.OpcodeVecI8x16Ne:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16Ne),
			)
		case wasm.OpcodeVecI8x16LtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16LtS),
			)
		case wasm.OpcodeVecI8x16LtU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16LtU),
			)
		case wasm.OpcodeVecI8x16GtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16GtS),
			)
		case wasm.OpcodeVecI8x16GtU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16GtU),
			)
		case wasm.OpcodeVecI8x16LeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16LeS),
			)
		case wasm.OpcodeVecI8x16LeU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16LeU),
			)
		case wasm.OpcodeVecI8x16GeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16GeS),
			)
		case wasm.OpcodeVecI8x16GeU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI8x16GeU),
			)
		case wasm.OpcodeVecI16x8Eq:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8Eq),
			)
		case wasm.OpcodeVecI16x8Ne:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8Ne),
			)
		case wasm.OpcodeVecI16x8LtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8LtS),
			)
		case wasm.OpcodeVecI16x8LtU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8LtU),
			)
		case wasm.OpcodeVecI16x8GtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8GtS),
			)
		case wasm.OpcodeVecI16x8GtU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8GtU),
			)
		case wasm.OpcodeVecI16x8LeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8LeS),
			)
		case wasm.OpcodeVecI16x8LeU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8LeU),
			)
		case wasm.OpcodeVecI16x8GeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8GeS),
			)
		case wasm.OpcodeVecI16x8GeU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI16x8GeU),
			)
		case wasm.OpcodeVecI32x4Eq:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4Eq),
			)
		case wasm.OpcodeVecI32x4Ne:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4Ne),
			)
		case wasm.OpcodeVecI32x4LtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4LtS),
			)
		case wasm.OpcodeVecI32x4LtU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4LtU),
			)
		case wasm.OpcodeVecI32x4GtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4GtS),
			)
		case wasm.OpcodeVecI32x4GtU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4GtU),
			)
		case wasm.OpcodeVecI32x4LeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4LeS),
			)
		case wasm.OpcodeVecI32x4LeU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4LeU),
			)
		case wasm.OpcodeVecI32x4GeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4GeS),
			)
		case wasm.OpcodeVecI32x4GeU:
			.emit(
				newOperationV128Cmp(v128CmpTypeI32x4GeU),
			)
		case wasm.OpcodeVecI64x2Eq:
			.emit(
				newOperationV128Cmp(v128CmpTypeI64x2Eq),
			)
		case wasm.OpcodeVecI64x2Ne:
			.emit(
				newOperationV128Cmp(v128CmpTypeI64x2Ne),
			)
		case wasm.OpcodeVecI64x2LtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI64x2LtS),
			)
		case wasm.OpcodeVecI64x2GtS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI64x2GtS),
			)
		case wasm.OpcodeVecI64x2LeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI64x2LeS),
			)
		case wasm.OpcodeVecI64x2GeS:
			.emit(
				newOperationV128Cmp(v128CmpTypeI64x2GeS),
			)
		case wasm.OpcodeVecF32x4Eq:
			.emit(
				newOperationV128Cmp(v128CmpTypeF32x4Eq),
			)
		case wasm.OpcodeVecF32x4Ne:
			.emit(
				newOperationV128Cmp(v128CmpTypeF32x4Ne),
			)
		case wasm.OpcodeVecF32x4Lt:
			.emit(
				newOperationV128Cmp(v128CmpTypeF32x4Lt),
			)
		case wasm.OpcodeVecF32x4Gt:
			.emit(
				newOperationV128Cmp(v128CmpTypeF32x4Gt),
			)
		case wasm.OpcodeVecF32x4Le:
			.emit(
				newOperationV128Cmp(v128CmpTypeF32x4Le),
			)
		case wasm.OpcodeVecF32x4Ge:
			.emit(
				newOperationV128Cmp(v128CmpTypeF32x4Ge),
			)
		case wasm.OpcodeVecF64x2Eq:
			.emit(
				newOperationV128Cmp(v128CmpTypeF64x2Eq),
			)
		case wasm.OpcodeVecF64x2Ne:
			.emit(
				newOperationV128Cmp(v128CmpTypeF64x2Ne),
			)
		case wasm.OpcodeVecF64x2Lt:
			.emit(
				newOperationV128Cmp(v128CmpTypeF64x2Lt),
			)
		case wasm.OpcodeVecF64x2Gt:
			.emit(
				newOperationV128Cmp(v128CmpTypeF64x2Gt),
			)
		case wasm.OpcodeVecF64x2Le:
			.emit(
				newOperationV128Cmp(v128CmpTypeF64x2Le),
			)
		case wasm.OpcodeVecF64x2Ge:
			.emit(
				newOperationV128Cmp(v128CmpTypeF64x2Ge),
			)
		case wasm.OpcodeVecI8x16Neg:
			.emit(
				newOperationV128Neg(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8Neg:
			.emit(
				newOperationV128Neg(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4Neg:
			.emit(
				newOperationV128Neg(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2Neg:
			.emit(
				newOperationV128Neg(shapeI64x2),
			)
		case wasm.OpcodeVecF32x4Neg:
			.emit(
				newOperationV128Neg(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Neg:
			.emit(
				newOperationV128Neg(shapeF64x2),
			)
		case wasm.OpcodeVecI8x16Add:
			.emit(
				newOperationV128Add(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8Add:
			.emit(
				newOperationV128Add(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4Add:
			.emit(
				newOperationV128Add(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2Add:
			.emit(
				newOperationV128Add(shapeI64x2),
			)
		case wasm.OpcodeVecF32x4Add:
			.emit(
				newOperationV128Add(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Add:
			.emit(
				newOperationV128Add(shapeF64x2),
			)
		case wasm.OpcodeVecI8x16Sub:
			.emit(
				newOperationV128Sub(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8Sub:
			.emit(
				newOperationV128Sub(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4Sub:
			.emit(
				newOperationV128Sub(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2Sub:
			.emit(
				newOperationV128Sub(shapeI64x2),
			)
		case wasm.OpcodeVecF32x4Sub:
			.emit(
				newOperationV128Sub(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Sub:
			.emit(
				newOperationV128Sub(shapeF64x2),
			)
		case wasm.OpcodeVecI8x16AddSatS:
			.emit(
				newOperationV128AddSat(shapeI8x16, true),
			)
		case wasm.OpcodeVecI8x16AddSatU:
			.emit(
				newOperationV128AddSat(shapeI8x16, false),
			)
		case wasm.OpcodeVecI16x8AddSatS:
			.emit(
				newOperationV128AddSat(shapeI16x8, true),
			)
		case wasm.OpcodeVecI16x8AddSatU:
			.emit(
				newOperationV128AddSat(shapeI16x8, false),
			)
		case wasm.OpcodeVecI8x16SubSatS:
			.emit(
				newOperationV128SubSat(shapeI8x16, true),
			)
		case wasm.OpcodeVecI8x16SubSatU:
			.emit(
				newOperationV128SubSat(shapeI8x16, false),
			)
		case wasm.OpcodeVecI16x8SubSatS:
			.emit(
				newOperationV128SubSat(shapeI16x8, true),
			)
		case wasm.OpcodeVecI16x8SubSatU:
			.emit(
				newOperationV128SubSat(shapeI16x8, false),
			)
		case wasm.OpcodeVecI16x8Mul:
			.emit(
				newOperationV128Mul(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4Mul:
			.emit(
				newOperationV128Mul(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2Mul:
			.emit(
				newOperationV128Mul(shapeI64x2),
			)
		case wasm.OpcodeVecF32x4Mul:
			.emit(
				newOperationV128Mul(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Mul:
			.emit(
				newOperationV128Mul(shapeF64x2),
			)
		case wasm.OpcodeVecF32x4Sqrt:
			.emit(
				newOperationV128Sqrt(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Sqrt:
			.emit(
				newOperationV128Sqrt(shapeF64x2),
			)
		case wasm.OpcodeVecF32x4Div:
			.emit(
				newOperationV128Div(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Div:
			.emit(
				newOperationV128Div(shapeF64x2),
			)
		case wasm.OpcodeVecI8x16Abs:
			.emit(
				newOperationV128Abs(shapeI8x16),
			)
		case wasm.OpcodeVecI8x16Popcnt:
			.emit(
				newOperationV128Popcnt(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8Abs:
			.emit(
				newOperationV128Abs(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4Abs:
			.emit(
				newOperationV128Abs(shapeI32x4),
			)
		case wasm.OpcodeVecI64x2Abs:
			.emit(
				newOperationV128Abs(shapeI64x2),
			)
		case wasm.OpcodeVecF32x4Abs:
			.emit(
				newOperationV128Abs(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Abs:
			.emit(
				newOperationV128Abs(shapeF64x2),
			)
		case wasm.OpcodeVecI8x16MinS:
			.emit(
				newOperationV128Min(shapeI8x16, true),
			)
		case wasm.OpcodeVecI8x16MinU:
			.emit(
				newOperationV128Min(shapeI8x16, false),
			)
		case wasm.OpcodeVecI8x16MaxS:
			.emit(
				newOperationV128Max(shapeI8x16, true),
			)
		case wasm.OpcodeVecI8x16MaxU:
			.emit(
				newOperationV128Max(shapeI8x16, false),
			)
		case wasm.OpcodeVecI8x16AvgrU:
			.emit(
				newOperationV128AvgrU(shapeI8x16),
			)
		case wasm.OpcodeVecI16x8MinS:
			.emit(
				newOperationV128Min(shapeI16x8, true),
			)
		case wasm.OpcodeVecI16x8MinU:
			.emit(
				newOperationV128Min(shapeI16x8, false),
			)
		case wasm.OpcodeVecI16x8MaxS:
			.emit(
				newOperationV128Max(shapeI16x8, true),
			)
		case wasm.OpcodeVecI16x8MaxU:
			.emit(
				newOperationV128Max(shapeI16x8, false),
			)
		case wasm.OpcodeVecI16x8AvgrU:
			.emit(
				newOperationV128AvgrU(shapeI16x8),
			)
		case wasm.OpcodeVecI32x4MinS:
			.emit(
				newOperationV128Min(shapeI32x4, true),
			)
		case wasm.OpcodeVecI32x4MinU:
			.emit(
				newOperationV128Min(shapeI32x4, false),
			)
		case wasm.OpcodeVecI32x4MaxS:
			.emit(
				newOperationV128Max(shapeI32x4, true),
			)
		case wasm.OpcodeVecI32x4MaxU:
			.emit(
				newOperationV128Max(shapeI32x4, false),
			)
		case wasm.OpcodeVecF32x4Min:
			.emit(
				newOperationV128Min(shapeF32x4, false),
			)
		case wasm.OpcodeVecF32x4Max:
			.emit(
				newOperationV128Max(shapeF32x4, false),
			)
		case wasm.OpcodeVecF64x2Min:
			.emit(
				newOperationV128Min(shapeF64x2, false),
			)
		case wasm.OpcodeVecF64x2Max:
			.emit(
				newOperationV128Max(shapeF64x2, false),
			)
		case wasm.OpcodeVecF32x4Pmin:
			.emit(
				newOperationV128Pmin(shapeF32x4),
			)
		case wasm.OpcodeVecF32x4Pmax:
			.emit(
				newOperationV128Pmax(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Pmin:
			.emit(
				newOperationV128Pmin(shapeF64x2),
			)
		case wasm.OpcodeVecF64x2Pmax:
			.emit(
				newOperationV128Pmax(shapeF64x2),
			)
		case wasm.OpcodeVecF32x4Ceil:
			.emit(
				newOperationV128Ceil(shapeF32x4),
			)
		case wasm.OpcodeVecF32x4Floor:
			.emit(
				newOperationV128Floor(shapeF32x4),
			)
		case wasm.OpcodeVecF32x4Trunc:
			.emit(
				newOperationV128Trunc(shapeF32x4),
			)
		case wasm.OpcodeVecF32x4Nearest:
			.emit(
				newOperationV128Nearest(shapeF32x4),
			)
		case wasm.OpcodeVecF64x2Ceil:
			.emit(
				newOperationV128Ceil(shapeF64x2),
			)
		case wasm.OpcodeVecF64x2Floor:
			.emit(
				newOperationV128Floor(shapeF64x2),
			)
		case wasm.OpcodeVecF64x2Trunc:
			.emit(
				newOperationV128Trunc(shapeF64x2),
			)
		case wasm.OpcodeVecF64x2Nearest:
			.emit(
				newOperationV128Nearest(shapeF64x2),
			)
		case wasm.OpcodeVecI16x8ExtendLowI8x16S:
			.emit(
				newOperationV128Extend(shapeI8x16, true, true),
			)
		case wasm.OpcodeVecI16x8ExtendHighI8x16S:
			.emit(
				newOperationV128Extend(shapeI8x16, true, false),
			)
		case wasm.OpcodeVecI16x8ExtendLowI8x16U:
			.emit(
				newOperationV128Extend(shapeI8x16, false, true),
			)
		case wasm.OpcodeVecI16x8ExtendHighI8x16U:
			.emit(
				newOperationV128Extend(shapeI8x16, false, false),
			)
		case wasm.OpcodeVecI32x4ExtendLowI16x8S:
			.emit(
				newOperationV128Extend(shapeI16x8, true, true),
			)
		case wasm.OpcodeVecI32x4ExtendHighI16x8S:
			.emit(
				newOperationV128Extend(shapeI16x8, true, false),
			)
		case wasm.OpcodeVecI32x4ExtendLowI16x8U:
			.emit(
				newOperationV128Extend(shapeI16x8, false, true),
			)
		case wasm.OpcodeVecI32x4ExtendHighI16x8U:
			.emit(
				newOperationV128Extend(shapeI16x8, false, false),
			)
		case wasm.OpcodeVecI64x2ExtendLowI32x4S:
			.emit(
				newOperationV128Extend(shapeI32x4, true, true),
			)
		case wasm.OpcodeVecI64x2ExtendHighI32x4S:
			.emit(
				newOperationV128Extend(shapeI32x4, true, false),
			)
		case wasm.OpcodeVecI64x2ExtendLowI32x4U:
			.emit(
				newOperationV128Extend(shapeI32x4, false, true),
			)
		case wasm.OpcodeVecI64x2ExtendHighI32x4U:
			.emit(
				newOperationV128Extend(shapeI32x4, false, false),
			)
		case wasm.OpcodeVecI16x8Q15mulrSatS:
			.emit(
				newOperationV128Q15mulrSatS(),
			)
		case wasm.OpcodeVecI16x8ExtMulLowI8x16S:
			.emit(
				newOperationV128ExtMul(shapeI8x16, true, true),
			)
		case wasm.OpcodeVecI16x8ExtMulHighI8x16S:
			.emit(
				newOperationV128ExtMul(shapeI8x16, true, false),
			)
		case wasm.OpcodeVecI16x8ExtMulLowI8x16U:
			.emit(
				newOperationV128ExtMul(shapeI8x16, false, true),
			)
		case wasm.OpcodeVecI16x8ExtMulHighI8x16U:
			.emit(
				newOperationV128ExtMul(shapeI8x16, false, false),
			)
		case wasm.OpcodeVecI32x4ExtMulLowI16x8S:
			.emit(
				newOperationV128ExtMul(shapeI16x8, true, true),
			)
		case wasm.OpcodeVecI32x4ExtMulHighI16x8S:
			.emit(
				newOperationV128ExtMul(shapeI16x8, true, false),
			)
		case wasm.OpcodeVecI32x4ExtMulLowI16x8U:
			.emit(
				newOperationV128ExtMul(shapeI16x8, false, true),
			)
		case wasm.OpcodeVecI32x4ExtMulHighI16x8U:
			.emit(
				newOperationV128ExtMul(shapeI16x8, false, false),
			)
		case wasm.OpcodeVecI64x2ExtMulLowI32x4S:
			.emit(
				newOperationV128ExtMul(shapeI32x4, true, true),
			)
		case wasm.OpcodeVecI64x2ExtMulHighI32x4S:
			.emit(
				newOperationV128ExtMul(shapeI32x4, true, false),
			)
		case wasm.OpcodeVecI64x2ExtMulLowI32x4U:
			.emit(
				newOperationV128ExtMul(shapeI32x4, false, true),
			)
		case wasm.OpcodeVecI64x2ExtMulHighI32x4U:
			.emit(
				newOperationV128ExtMul(shapeI32x4, false, false),
			)
		case wasm.OpcodeVecI16x8ExtaddPairwiseI8x16S:
			.emit(
				newOperationV128ExtAddPairwise(shapeI8x16, true),
			)
		case wasm.OpcodeVecI16x8ExtaddPairwiseI8x16U:
			.emit(
				newOperationV128ExtAddPairwise(shapeI8x16, false),
			)
		case wasm.OpcodeVecI32x4ExtaddPairwiseI16x8S:
			.emit(
				newOperationV128ExtAddPairwise(shapeI16x8, true),
			)
		case wasm.OpcodeVecI32x4ExtaddPairwiseI16x8U:
			.emit(
				newOperationV128ExtAddPairwise(shapeI16x8, false),
			)
		case wasm.OpcodeVecF64x2PromoteLowF32x4Zero:
			.emit(
				newOperationV128FloatPromote(),
			)
		case wasm.OpcodeVecF32x4DemoteF64x2Zero:
			.emit(
				newOperationV128FloatDemote(),
			)
		case wasm.OpcodeVecF32x4ConvertI32x4S:
			.emit(
				newOperationV128FConvertFromI(shapeF32x4, true),
			)
		case wasm.OpcodeVecF32x4ConvertI32x4U:
			.emit(
				newOperationV128FConvertFromI(shapeF32x4, false),
			)
		case wasm.OpcodeVecF64x2ConvertLowI32x4S:
			.emit(
				newOperationV128FConvertFromI(shapeF64x2, true),
			)
		case wasm.OpcodeVecF64x2ConvertLowI32x4U:
			.emit(
				newOperationV128FConvertFromI(shapeF64x2, false),
			)
		case wasm.OpcodeVecI32x4DotI16x8S:
			.emit(
				newOperationV128Dot(),
			)
		case wasm.OpcodeVecI8x16NarrowI16x8S:
			.emit(
				newOperationV128Narrow(shapeI16x8, true),
			)
		case wasm.OpcodeVecI8x16NarrowI16x8U:
			.emit(
				newOperationV128Narrow(shapeI16x8, false),
			)
		case wasm.OpcodeVecI16x8NarrowI32x4S:
			.emit(
				newOperationV128Narrow(shapeI32x4, true),
			)
		case wasm.OpcodeVecI16x8NarrowI32x4U:
			.emit(
				newOperationV128Narrow(shapeI32x4, false),
			)
		case wasm.OpcodeVecI32x4TruncSatF32x4S:
			.emit(
				newOperationV128ITruncSatFromF(shapeF32x4, true),
			)
		case wasm.OpcodeVecI32x4TruncSatF32x4U:
			.emit(
				newOperationV128ITruncSatFromF(shapeF32x4, false),
			)
		case wasm.OpcodeVecI32x4TruncSatF64x2SZero:
			.emit(
				newOperationV128ITruncSatFromF(shapeF64x2, true),
			)
		case wasm.OpcodeVecI32x4TruncSatF64x2UZero:
			.emit(
				newOperationV128ITruncSatFromF(shapeF64x2, false),
			)
		default:
			return fmt.Errorf("unsupported vector instruction in interpreterir: %s", wasm.VectorInstructionName())
		}
	case wasm.OpcodeAtomicPrefix:
		.pc++
		 := .body[.pc]
		switch  {
		case wasm.OpcodeAtomicMemoryWait32:
			,  := .readMemoryArg(wasm.OpcodeAtomicMemoryWait32Name)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicMemoryWait(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicMemoryWait64:
			,  := .readMemoryArg(wasm.OpcodeAtomicMemoryWait64Name)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicMemoryWait(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicMemoryNotify:
			,  := .readMemoryArg(wasm.OpcodeAtomicMemoryNotifyName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicMemoryNotify(),
			)
		case wasm.OpcodeAtomicFence:
			// Skip immediate value
			.pc++
			_ = .body[.pc]
			.emit(
				newOperationAtomicFence(),
			)
		case wasm.OpcodeAtomicI32Load:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32LoadName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicLoad(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI64Load:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64LoadName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicLoad(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI32Load8U:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Load8UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicLoad8(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI32Load16U:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Load16UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicLoad16(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI64Load8U:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Load8UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicLoad8(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI64Load16U:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Load16UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicLoad16(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI64Load32U:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Load32UName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicLoad(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI32Store:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32StoreName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicStore(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI32Store8:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Store8Name)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicStore8(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI32Store16:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Store16Name)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicStore16(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI64Store:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64StoreName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicStore(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI64Store8:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Store8Name)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicStore8(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI64Store16:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Store16Name)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicStore16(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI64Store32:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Store32Name)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicStore(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI32RmwAdd:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32RmwAddName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpAdd),
			)
		case wasm.OpcodeAtomicI64RmwAdd:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64RmwAddName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI64, , atomicArithmeticOpAdd),
			)
		case wasm.OpcodeAtomicI32Rmw8AddU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw8AddUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI32, , atomicArithmeticOpAdd),
			)
		case wasm.OpcodeAtomicI64Rmw8AddU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw8AddUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI64, , atomicArithmeticOpAdd),
			)
		case wasm.OpcodeAtomicI32Rmw16AddU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw16AddUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI32, , atomicArithmeticOpAdd),
			)
		case wasm.OpcodeAtomicI64Rmw16AddU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw16AddUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI64, , atomicArithmeticOpAdd),
			)
		case wasm.OpcodeAtomicI64Rmw32AddU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw32AddUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpAdd),
			)
		case wasm.OpcodeAtomicI32RmwSub:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32RmwSubName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpSub),
			)
		case wasm.OpcodeAtomicI64RmwSub:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64RmwSubName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI64, , atomicArithmeticOpSub),
			)
		case wasm.OpcodeAtomicI32Rmw8SubU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw8SubUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI32, , atomicArithmeticOpSub),
			)
		case wasm.OpcodeAtomicI64Rmw8SubU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw8SubUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI64, , atomicArithmeticOpSub),
			)
		case wasm.OpcodeAtomicI32Rmw16SubU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw16SubUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI32, , atomicArithmeticOpSub),
			)
		case wasm.OpcodeAtomicI64Rmw16SubU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw16SubUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI64, , atomicArithmeticOpSub),
			)
		case wasm.OpcodeAtomicI64Rmw32SubU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw32SubUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpSub),
			)
		case wasm.OpcodeAtomicI32RmwAnd:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32RmwAndName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpAnd),
			)
		case wasm.OpcodeAtomicI64RmwAnd:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64RmwAndName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI64, , atomicArithmeticOpAnd),
			)
		case wasm.OpcodeAtomicI32Rmw8AndU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw8AndUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI32, , atomicArithmeticOpAnd),
			)
		case wasm.OpcodeAtomicI64Rmw8AndU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw8AndUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI64, , atomicArithmeticOpAnd),
			)
		case wasm.OpcodeAtomicI32Rmw16AndU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw16AndUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI32, , atomicArithmeticOpAnd),
			)
		case wasm.OpcodeAtomicI64Rmw16AndU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw16AndUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI64, , atomicArithmeticOpAnd),
			)
		case wasm.OpcodeAtomicI64Rmw32AndU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw32AndUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpAnd),
			)
		case wasm.OpcodeAtomicI32RmwOr:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32RmwOrName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpOr),
			)
		case wasm.OpcodeAtomicI64RmwOr:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64RmwOrName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI64, , atomicArithmeticOpOr),
			)
		case wasm.OpcodeAtomicI32Rmw8OrU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw8OrUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI32, , atomicArithmeticOpOr),
			)
		case wasm.OpcodeAtomicI64Rmw8OrU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw8OrUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI64, , atomicArithmeticOpOr),
			)
		case wasm.OpcodeAtomicI32Rmw16OrU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw16OrUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI32, , atomicArithmeticOpOr),
			)
		case wasm.OpcodeAtomicI64Rmw16OrU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw16OrUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI64, , atomicArithmeticOpOr),
			)
		case wasm.OpcodeAtomicI64Rmw32OrU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw32OrUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpOr),
			)
		case wasm.OpcodeAtomicI32RmwXor:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32RmwXorName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpXor),
			)
		case wasm.OpcodeAtomicI64RmwXor:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64RmwXorName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI64, , atomicArithmeticOpXor),
			)
		case wasm.OpcodeAtomicI32Rmw8XorU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw8XorUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI32, , atomicArithmeticOpXor),
			)
		case wasm.OpcodeAtomicI64Rmw8XorU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw8XorUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI64, , atomicArithmeticOpXor),
			)
		case wasm.OpcodeAtomicI32Rmw16XorU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw16XorUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI32, , atomicArithmeticOpXor),
			)
		case wasm.OpcodeAtomicI64Rmw16XorU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw16XorUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI64, , atomicArithmeticOpXor),
			)
		case wasm.OpcodeAtomicI64Rmw32XorU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw32XorUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpXor),
			)
		case wasm.OpcodeAtomicI32RmwXchg:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32RmwXchgName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpNop),
			)
		case wasm.OpcodeAtomicI64RmwXchg:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64RmwXchgName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI64, , atomicArithmeticOpNop),
			)
		case wasm.OpcodeAtomicI32Rmw8XchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw8XchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI32, , atomicArithmeticOpNop),
			)
		case wasm.OpcodeAtomicI64Rmw8XchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw8XchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8(unsignedTypeI64, , atomicArithmeticOpNop),
			)
		case wasm.OpcodeAtomicI32Rmw16XchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw16XchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI32, , atomicArithmeticOpNop),
			)
		case wasm.OpcodeAtomicI64Rmw16XchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw16XchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16(unsignedTypeI64, , atomicArithmeticOpNop),
			)
		case wasm.OpcodeAtomicI64Rmw32XchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw32XchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW(unsignedTypeI32, , atomicArithmeticOpNop),
			)
		case wasm.OpcodeAtomicI32RmwCmpxchg:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32RmwCmpxchgName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMWCmpxchg(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI64RmwCmpxchg:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64RmwCmpxchgName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMWCmpxchg(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI32Rmw8CmpxchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw8CmpxchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8Cmpxchg(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI64Rmw8CmpxchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw8CmpxchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW8Cmpxchg(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI32Rmw16CmpxchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI32Rmw16CmpxchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16Cmpxchg(unsignedTypeI32, ),
			)
		case wasm.OpcodeAtomicI64Rmw16CmpxchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw16CmpxchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMW16Cmpxchg(unsignedTypeI64, ),
			)
		case wasm.OpcodeAtomicI64Rmw32CmpxchgU:
			,  := .readMemoryArg(wasm.OpcodeAtomicI64Rmw32CmpxchgUName)
			if  != nil {
				return 
			}
			.emit(
				newOperationAtomicRMWCmpxchg(unsignedTypeI32, ),
			)
		default:
			return fmt.Errorf("unsupported atomic instruction in interpreterir: %s", wasm.AtomicInstructionName())
		}
	default:
		return fmt.Errorf("unsupported instruction in interpreterir: 0x%x", )
	}

	// Move the program counter to point to the next instruction.
	.pc++
	return nil
}

// Allocates a fresh, unique control-frame ID by advancing the compiler's
// monotonically increasing counter and returning its new value.
// NOTE(review): identifiers are stripped throughout this file; the receiver
// and result names cannot be recovered from this view — confirm upstream.
func ( *compiler) () ( uint32) {
	// New ID is the successor of the last issued one; IDs therefore start at 1.
	 = .currentFrameID + 1
	.currentFrameID++
	return
}

// For the given opcode, reads the "index" immediate when the opcode has one
// that affects its signature (call, call_indirect, local.*, global.*), then
// simulates the opcode's effect on the compiler's type stack: pops the input
// types declared by the signature (validating them) and pushes the result
// types. Returns the decoded index (zero for index-free opcodes) and any
// decoding/type-mismatch error.
// NOTE(review): identifiers are stripped in this file; the description of the
// stripped locals below is inferred from structure — confirm upstream.
func ( *compiler) ( wasm.Opcode) ( uint32,  error) {
	switch  {
	case
		// These are the opcodes that are coupled with an "index" immediate,
		// and the index DOES affect the signature of the opcode.
		wasm.OpcodeCall,
		wasm.OpcodeCallIndirect,
		wasm.OpcodeLocalGet,
		wasm.OpcodeLocalSet,
		wasm.OpcodeLocalTee,
		wasm.OpcodeGlobalGet,
		wasm.OpcodeGlobalSet:
		// We are positioned at the opcode byte itself, so decode the LEB128
		// uint32 immediate starting at pc+1.
		, ,  := leb128.LoadUint32(.body[.pc+1:])
		if  != nil {
			return 0, fmt.Errorf("reading immediates: %w", )
		}
		// Advance pc past the immediate (by the number of bytes consumed).
		.pc += 
		 = 
	default:
		// Other opcodes are free of an index immediate, or their index
		// doesn't affect the signature of the opcode. In other words, the
		// "index" argument of wasmOpcodeSignature is ignored for them.
	}

	// While in unreachable (dead-code) state, the stack is not tracked.
	if .unreachableState.on {
		return 0, nil
	}

	// Retrieve the signature of the opcode.
	,  := .wasmOpcodeSignature(, )
	if  != nil {
		return 0, 
	}

	// Manipulate the stack according to the signature.
	// Note that the following algorithm assumes that
	// the unknown type is unique in the signature,
	// and is determined by the actual type on the stack.
	// The determined type is stored in this typeParam.
	var  unsignedType
	var  bool
	for  := range .in {
		// Inputs are declared left-to-right but popped right-to-left.
		 := .in[len(.in)-1-]
		 := .stackPop()
		if  == unsignedTypeUnknown &&  {
			// The unknown type was already resolved; substitute it.
			 = 
		} else if  == unsignedTypeUnknown {
			// First occurrence of the unknown type: bind it to whatever is
			// actually on the stack.
			 = 
			 = 
			 = true
		}
		if  !=  {
			return 0, fmt.Errorf("input signature mismatch: want %s but have %s", , )
		}
	}

	for ,  := range .out {
		if  == unsignedTypeUnknown && ! {
			// An unknown result with no input to infer it from is an error.
			return 0, fmt.Errorf("cannot determine type of unknown result")
		} else if  == unsignedTypeUnknown {
			// Push the type resolved from the inputs above.
			.stackPush()
		} else {
			.stackPush()
		}
	}

	return , nil
}

// Returns the type at the top of the compiler's type stack without popping it.
func ( *compiler) () ( unsignedType) {
	 = .stack[len(.stack)-1]
	return
}

// Truncates the type stack (and its uint64-slot length counter) back to the
// heights recorded when the given control frame was entered, i.e. discards
// everything produced inside the frame, excluding the frame's params.
func ( *compiler) ( *controlFrame) {
	.stack = .stack[:.originalStackLenWithoutParam]
	.stackLenInUint64 = .originalStackLenWithoutParamUint64
}

// Pops and returns the top type from the compiler's type stack, keeping the
// uint64-slot length counter in sync (a v128 value occupies two slots).
func ( *compiler) () ( unsignedType) {
	// No need to check stack bound
	// as we can assume that all the operations
	// are valid thanks to validateFunction
	// at module validation phase.
	 = .stack[len(.stack)-1]
	.stack = .stack[:len(.stack)-1]
	// The bit trick `unsignedTypeV128 & t >> 2` evaluates to 1 only when the
	// popped type is v128, so v128 decrements the counter by 2, others by 1.
	.stackLenInUint64 -= 1 + int(unsignedTypeV128&>>2)
	return
}

// Pushes the given type onto the compiler's type stack, keeping the
// uint64-slot length counter in sync (a v128 value occupies two slots;
// the bit trick adds 1 extra only for unsignedTypeV128).
func ( *compiler) ( unsignedType) {
	.stack = append(.stack, )
	.stackLenInUint64 += 1 + int(unsignedTypeV128&>>2)
}

// emit appends the given operation to the compiled result, unless the
// compiler is currently in the unreachable (dead-code) state, in which case
// the operation is silently discarded. Redundant drop operations (range -1)
// are also elided. When source-offset tracking is enabled, the operation's
// originating wasm-binary offset is recorded in parallel.
func ( *compiler) ( unionOperation) {
	if !.unreachableState.on {
		switch .Kind {
		case operationKindDrop:
			// If the drop range is nil,
			// we could remove such operations.
			// That happens when drop operation is unnecessary.
			// i.e. when there's no need to adjust stack before jmp.
			if int64(.U1) == -1 {
				return
			}
		}
		.result.Operations = append(.result.Operations, )
		if .needSourceOffset {
			// Offset is relative to the start of the code section, so add the
			// function body's offset within it to the current opcode pc.
			.result.IROperationSourceOffsetsInWasmBinary = append(.result.IROperationSourceOffsetsInWasmBinary,
				.currentOpPC+.bodyOffsetInCodeSection)
		}
	}
}

// Emit const expression with default (zero) values of the given wasm value
// type, pushing the corresponding type onto the compiler's type stack.
// Reference types (externref/funcref) are represented as i64 with 0 meaning
// null. Unlisted types fall through without emitting anything.
func ( *compiler) ( wasm.ValueType) {
	switch  {
	case wasm.ValueTypeI32:
		.stackPush(unsignedTypeI32)
		.emit(newOperationConstI32(0))
	case wasm.ValueTypeI64, wasm.ValueTypeExternref, wasm.ValueTypeFuncref:
		.stackPush(unsignedTypeI64)
		.emit(newOperationConstI64(0))
	case wasm.ValueTypeF32:
		.stackPush(unsignedTypeF32)
		.emit(newOperationConstF32(0))
	case wasm.ValueTypeF64:
		.stackPush(unsignedTypeF64)
		.emit(newOperationConstF64(0))
	case wasm.ValueTypeV128:
		// v128 zero value is the pair of zero uint64 lanes.
		.stackPush(unsignedTypeV128)
		.emit(newOperationV128Const(0, 0))
	}
}

// Returns the "depth" (starting from top of the uint64-indexed stack)
// of the n-th local, using the precomputed local-index → stack-height map.
func ( *compiler) ( wasm.Index) int {
	 := .localIndexToStackHeightInUint64[]
	return .stackLenInUint64 - 1 - 
}

// Returns the wasm value type of the local at the given index: indices below
// the parameter count refer to function parameters; higher indices refer to
// the function's declared locals (offset by the parameter count).
func ( *compiler) ( wasm.Index) ( wasm.ValueType) {
	if  := uint32(len(.sig.Params));  <  {
		 = .sig.Params[]
	} else {
		 = .localTypes[-]
	}
	return
}

// getFrameDropRange returns the range (starting from top of the stack) that spans across the (uint64) stack. The range is
// supposed to be dropped from the stack when the given frame exits or when branching into it.
//
// * frame is the control frame which the call-site is trying to branch into or exit.
// * isEnd is true if the call-site is handling wasm.OpcodeEnd.
//
// The range starts just above the values the target must receive (loop params
// when branching into a loop, block results otherwise) and ends at the slot
// just above the frame's original stack height; if nothing needs dropping,
// the no-op range is returned.
// NOTE(review): identifiers are stripped; the Start/End pairing below is
// inferred from the surrounding arithmetic — confirm upstream.
func ( *compiler) ( *controlFrame,  bool) inclusiveRange {
	var  int
	if ! && .kind == controlFrameKindLoop {
		// If this is not End and the call-site is trying to branch into the Loop control frame,
		// we have to Start executing from the beginning of the loop block.
		// Therefore, we have to pass the inputs to the frame.
		 = .blockType.ParamNumInUint64
	} else {
		 = .blockType.ResultNumInUint64
	}
	 := .stackLenInUint64 - 1 - .originalStackLenWithoutParamUint64
	if  <=  {
		return inclusiveRange{Start: int32(), End: int32()}
	} else {
		return nopinclusiveRange
	}
}

// Decodes a wasm memarg immediate (alignment followed by offset, both
// LEB128-encoded uint32s) starting after the current opcode, advancing pc
// past each field. Marks the compiled result as using memory. The string
// argument is the instruction name, used only for error messages.
func ( *compiler) ( string) (memoryArg, error) {
	.result.UsesMemory = true
	// Alignment field comes first per the wasm binary format.
	, ,  := leb128.LoadUint32(.body[.pc+1:])
	if  != nil {
		return memoryArg{}, fmt.Errorf("reading alignment for %s: %w", , )
	}
	.pc += 
	// Offset field follows the alignment.
	, ,  := leb128.LoadUint32(.body[.pc+1:])
	if  != nil {
		return memoryArg{}, fmt.Errorf("reading offset for %s: %w", , )
	}
	.pc += 
	return memoryArg{Offset: , Alignment: }, nil
}