package amd64

import (
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
)
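
// calleeSavedVRegs are the registers that the trampolines below save into the
// execution context before exiting to Go and restore afterwards.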
var calleeSavedVRegs = []regalloc.VReg{
	rdxVReg, r12VReg, r13VReg, r14VReg, r15VReg,
	xmm8VReg, xmm9VReg, xmm10VReg, xmm11VReg, xmm12VReg, xmm13VReg, xmm14VReg, xmm15VReg,
}
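
// CompileGoFunctionTrampoline compiles a trampoline for calling a Go function
// from Wasm: it copies the arguments into a stack-allocated slice read by the
// Go side, exits the machine code with the given exitCode, and copies the
// results back once execution resumes.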
func (m *machine) CompileGoFunctionTrampoline(exitCode wazevoapi.ExitCode, sig *ssa.Signature, needModuleContextPtr bool) []byte {
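	// The first argument is the execution context pointer, so the Wasm-level
	// arguments begin at index 1 (or 2 when a module context pointer is also
	// passed).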
	argBegin := 1
	if needModuleContextPtr {
		argBegin++
	}

	abi := &backend.FunctionABI{}
	abi.Init(sig, intArgResultRegs, floatArgResultRegs)
	m.currentABI = abi

	cur := m.allocateNop()
	m.rootInstr = cur
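
	// rax holds the pointer to the execution context on entry, and it keeps
	// that role throughout the trampoline.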
	execCtrPtr := raxVReg

	cur = m.setupRBPRSP(cur)
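
	// Ensure the native stack can fit the Go-call slice plus the 8-byte size
	// push emitted below, growing the stack via the Go runtime if necessary.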
	goSliceSizeAligned, goSliceSizeAlignedUnaligned := backend.GoFunctionCallRequiredStackSize(sig, argBegin)
	cur = m.insertStackBoundsCheck(goSliceSizeAligned+8, cur)

	cur = m.saveRegistersInExecutionContext(cur, execCtrPtr, calleeSavedVRegs)
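
	// If requested, save the module context pointer (passed in rbx) into the
	// execution context so that the Go side can retrieve it.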
	if needModuleContextPtr {
		moduleCtrPtr := rbxVReg
		mem := m.newAmodeImmReg(
			wazevoapi.ExecutionContextOffsetGoFunctionCallCalleeModuleContextOpaque.U32(),
			execCtrPtr)
		store := m.allocateInstr().asMovRM(moduleCtrPtr, newOperandMem(mem), 8)
		cur = linkInstr(cur, store)
	}
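
	// Reserve stack space for the slice used to exchange arguments and results
	// with the Go function.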
	cur = m.addRSP(-int32(goSliceSizeAligned), cur)

	var offsetInGoSlice int32
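	// Copy the arguments into the Go-call slice: register arguments are stored
	// directly, while stack arguments are first loaded into a scratch register
	// (r15 for integers, xmm15 for floats and vectors).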
	for i := range abi.Args[argBegin:] {
		arg := &abi.Args[argBegin+i]
		var v regalloc.VReg
		if arg.Kind == backend.ABIArgKindReg {
			v = arg.Reg
		} else {
			if arg.Type.IsInt() {
				v = r15VReg
			} else {
				v = xmm15VReg
			}
			mem := newOperandMem(m.newAmodeImmReg(uint32(arg.Offset+16), rbpVReg))
			load := m.allocateInstr()
			switch arg.Type {
			case ssa.TypeI32:
				load.asMovzxRmR(extModeLQ, mem, v)
			case ssa.TypeI64:
				load.asMov64MR(mem, v)
			case ssa.TypeF32:
				load.asXmmUnaryRmR(sseOpcodeMovss, mem, v)
			case ssa.TypeF64:
				load.asXmmUnaryRmR(sseOpcodeMovsd, mem, v)
			case ssa.TypeV128:
				load.asXmmUnaryRmR(sseOpcodeMovdqu, mem, v)
			default:
				panic("BUG")
			}
			cur = linkInstr(cur, load)
		}

		store := m.allocateInstr()
		mem := newOperandMem(m.newAmodeImmReg(uint32(offsetInGoSlice), rspVReg))
		switch arg.Type {
		case ssa.TypeI32:
			store.asMovRM(v, mem, 4)
			offsetInGoSlice += 8
		case ssa.TypeI64:
			store.asMovRM(v, mem, 8)
			offsetInGoSlice += 8
		case ssa.TypeF32:
			store.asXmmMovRM(sseOpcodeMovss, v, mem)
			offsetInGoSlice += 8
		case ssa.TypeF64:
			store.asXmmMovRM(sseOpcodeMovsd, v, mem)
			offsetInGoSlice += 8
		case ssa.TypeV128:
			store.asXmmMovRM(sseOpcodeMovdqu, v, mem)
			offsetInGoSlice += 16
		default:
			panic("BUG")
		}
		cur = linkInstr(cur, store)
	}
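
	// Push the unaligned slice size so the Go side can tell how many bytes of
	// arguments and results are on the stack, then record the exit code and the
	// current rsp/rbp in the execution context.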
	cur = linkInstr(cur, m.allocateInstr().asPush64(newOperandImm32(uint32(goSliceSizeAlignedUnaligned))))

	exitCodeReg := r12VReg
	cur = linkInstr(cur, m.allocateInstr().asImm(exitCodeReg, uint64(exitCode), false))

	saveRsp, saveRbp, setExitCode := m.allocateExitInstructions(execCtrPtr, exitCodeReg)
	cur = linkInstr(cur, setExitCode)
	cur = linkInstr(cur, saveRsp)
	cur = linkInstr(cur, saveRbp)
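
	// Save the return address and exit to Go; execution resumes right after the
	// exit sequence once the Go call completes.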
	cur = m.storeReturnAddressAndExit(cur, execCtrPtr)
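
	// Back from Go: drop the size value pushed before exiting.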
	cur = m.addRSP(8, cur)
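
	// Copy the results out of the Go-call slice. A register result that is
	// assigned to the same real register as execCtrPtr (rax) cannot be loaded
	// yet, so only its offset is recorded and the load is deferred.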
	offsetInGoSlice = 0
	var argOverlapWithExecCtxOffset int32 = -1
	for i := range abi.Rets {
		r := &abi.Rets[i]
		var v regalloc.VReg
		isRegResult := r.Kind == backend.ABIArgKindReg
		if isRegResult {
			v = r.Reg
			if v.RealReg() == execCtrPtr.RealReg() {
				argOverlapWithExecCtxOffset = offsetInGoSlice
				offsetInGoSlice += 8
				continue
			}
		} else {
			if r.Type.IsInt() {
				v = r15VReg
			} else {
				v = xmm15VReg
			}
		}

		load := m.allocateInstr()
		mem := newOperandMem(m.newAmodeImmReg(uint32(offsetInGoSlice), rspVReg))
		switch r.Type {
		case ssa.TypeI32:
			load.asMovzxRmR(extModeLQ, mem, v)
			offsetInGoSlice += 8
		case ssa.TypeI64:
			load.asMov64MR(mem, v)
			offsetInGoSlice += 8
		case ssa.TypeF32:
			load.asXmmUnaryRmR(sseOpcodeMovss, mem, v)
			offsetInGoSlice += 8
		case ssa.TypeF64:
			load.asXmmUnaryRmR(sseOpcodeMovsd, mem, v)
			offsetInGoSlice += 8
		case ssa.TypeV128:
			load.asXmmUnaryRmR(sseOpcodeMovdqu, mem, v)
			offsetInGoSlice += 16
		default:
			panic("BUG")
		}
		cur = linkInstr(cur, load)

		if !isRegResult {
			store := m.allocateInstr()
			mem := newOperandMem(m.newAmodeImmReg(uint32(abi.ArgStackSize+r.Offset+16), rbpVReg))
			switch r.Type {
			case ssa.TypeI32:
				store.asMovRM(v, mem, 4)
			case ssa.TypeI64:
				store.asMovRM(v, mem, 8)
			case ssa.TypeF32:
				store.asXmmMovRM(sseOpcodeMovss, v, mem)
			case ssa.TypeF64:
				store.asXmmMovRM(sseOpcodeMovsd, v, mem)
			case ssa.TypeV128:
				store.asXmmMovRM(sseOpcodeMovdqu, v, mem)
			default:
				panic("BUG")
			}
			cur = linkInstr(cur, store)
		}
	}
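
	// Restore the callee-saved registers, and only then load the result that
	// overlapped with execCtrPtr, since rax is needed to address the execution
	// context during the restore.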
	cur = m.restoreRegistersInExecutionContext(cur, execCtrPtr, calleeSavedVRegs)

	if argOverlapWithExecCtxOffset >= 0 {
		mem := newOperandMem(m.newAmodeImmReg(uint32(argOverlapWithExecCtxOffset), rspVReg))
		load := m.allocateInstr().asMov64MR(mem, execCtrPtr)
		cur = linkInstr(cur, load)
	}

	cur = m.revertRBPRSP(cur)
	linkInstr(cur, m.allocateInstr().asRet())
	m.encodeWithoutSSA(m.rootInstr)
	return m.c.Buf()
}
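
// saveRegistersInExecutionContext stores each register in regs into the saved
// registers area of the execution context, using a 16-byte stride so that both
// integer and vector registers fit.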
func (m *machine) saveRegistersInExecutionContext(cur *instruction, execCtx regalloc.VReg, regs []regalloc.VReg) *instruction {
	offset := wazevoapi.ExecutionContextOffsetSavedRegistersBegin.I64()
	for _, v := range regs {
		store := m.allocateInstr()
		mem := newOperandMem(m.newAmodeImmReg(uint32(offset), execCtx))
		switch v.RegType() {
		case regalloc.RegTypeInt:
			store.asMovRM(v, mem, 8)
		case regalloc.RegTypeFloat:
			store.asXmmMovRM(sseOpcodeMovdqu, v, mem)
		default:
			panic("BUG")
		}
		cur = linkInstr(cur, store)
		offset += 16
	}
	return cur
}
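
// restoreRegistersInExecutionContext reloads the registers saved by
// saveRegistersInExecutionContext from the execution context.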
func (m *machine) restoreRegistersInExecutionContext(cur *instruction, execCtx regalloc.VReg, regs []regalloc.VReg) *instruction {
	offset := wazevoapi.ExecutionContextOffsetSavedRegistersBegin.I64()
	for _, v := range regs {
		load := m.allocateInstr()
		mem := newOperandMem(m.newAmodeImmReg(uint32(offset), execCtx))
		switch v.RegType() {
		case regalloc.RegTypeInt:
			load.asMov64MR(mem, v)
		case regalloc.RegTypeFloat:
			load.asXmmUnaryRmR(sseOpcodeMovdqu, mem, v)
		default:
			panic("BUG")
		}
		cur = linkInstr(cur, load)
		offset += 16
	}
	return cur
}
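
// storeReturnAddressAndExit saves the address of the instruction that follows
// the exit sequence into the execution context, so that the Go runtime can
// resume execution there, and then emits the exit sequence itself. The address
// is materialized by a LEA on a label placed right after the exit.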
func (m *machine) storeReturnAddressAndExit(cur *instruction, execCtx regalloc.VReg) *instruction {
	readRip := m.allocateInstr()
	cur = linkInstr(cur, readRip)

	ripReg := r12VReg
	saveRip := m.allocateInstr().asMovRM(
		ripReg,
		newOperandMem(m.newAmodeImmReg(wazevoapi.ExecutionContextOffsetGoCallReturnAddress.U32(), execCtx)),
		8,
	)
	cur = linkInstr(cur, saveRip)

	exit := m.allocateExitSeq(execCtx)
	cur = linkInstr(cur, exit)

	nop, l := m.allocateBrTarget()
	cur = linkInstr(cur, nop)
	readRip.asLEA(newOperandLabel(l), ripReg)
	return cur
}
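
// stackGrowSaveVRegs are the registers preserved around the stack-grow exit to
// Go; unlike calleeSavedVRegs, this also covers the argument/result registers,
// which may be live at the point where the stack is grown.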
var stackGrowSaveVRegs = []regalloc.VReg{
	rdxVReg, r12VReg, r13VReg, r14VReg, r15VReg,
	rcxVReg, rbxVReg, rsiVReg, rdiVReg, r8VReg, r9VReg, r10VReg, r11VReg,
	xmm8VReg, xmm9VReg, xmm10VReg, xmm11VReg, xmm12VReg, xmm13VReg, xmm14VReg, xmm15VReg,
	xmm0VReg, xmm1VReg, xmm2VReg, xmm3VReg, xmm4VReg, xmm5VReg, xmm6VReg, xmm7VReg,
}
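
// CompileStackGrowCallSequence compiles the sequence that exits to the Go
// runtime with ExitCodeGrowStack in order to grow the stack, preserving all of
// stackGrowSaveVRegs across the call.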
func (m *machine) CompileStackGrowCallSequence() []byte {
	cur := m.allocateNop()
	m.rootInstr = cur

	cur = m.setupRBPRSP(cur)

	execCtrPtr := raxVReg

	cur = m.saveRegistersInExecutionContext(cur, execCtrPtr, stackGrowSaveVRegs)

	exitCodeReg := r12VReg
	cur = linkInstr(cur, m.allocateInstr().asImm(exitCodeReg, uint64(wazevoapi.ExitCodeGrowStack), false))

	saveRsp, saveRbp, setExitCode := m.allocateExitInstructions(execCtrPtr, exitCodeReg)
	cur = linkInstr(cur, setExitCode)
	cur = linkInstr(cur, saveRsp)
	cur = linkInstr(cur, saveRbp)

	cur = m.storeReturnAddressAndExit(cur, execCtrPtr)

	cur = m.restoreRegistersInExecutionContext(cur, execCtrPtr, stackGrowSaveVRegs)

	cur = m.revertRBPRSP(cur)
	linkInstr(cur, m.allocateInstr().asRet())
	m.encodeWithoutSSA(m.rootInstr)
	return m.c.Buf()
}
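
// insertStackBoundsCheck emits code that tentatively reserves requiredStackSize
// bytes and compares the resulting rsp against the stack bottom pointer held in
// the execution context (addressed via rax). If rsp is still above the bottom,
// the reservation is undone and execution continues; otherwise the required
// size is written into the execution context (preserving r15, which is used as
// a scratch register) and the stack-grow trampoline is called indirectly.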
func (m *machine) insertStackBoundsCheck(requiredStackSize int64, cur *instruction) *instruction {
	cur = m.addRSP(-int32(requiredStackSize), cur)
	cur = linkInstr(cur, m.allocateInstr().asCmpRmiR(true,
		newOperandMem(m.newAmodeImmReg(wazevoapi.ExecutionContextOffsetStackBottomPtr.U32(), raxVReg)),
		rspVReg, true))

	ja := m.allocateInstr()
	cur = linkInstr(cur, ja)

	cur = m.addRSP(int32(requiredStackSize), cur)

	cur = linkInstr(cur, m.allocateInstr().asPush64(newOperandReg(r15VReg)))
	cur = linkInstr(cur, m.allocateInstr().asImm(r15VReg, uint64(requiredStackSize), true))
	cur = linkInstr(cur, m.allocateInstr().asMovRM(r15VReg,
		newOperandMem(m.newAmodeImmReg(wazevoapi.ExecutionContextOffsetStackGrowRequiredSize.U32(), raxVReg)), 8))
	cur = linkInstr(cur, m.allocateInstr().asPop64(r15VReg))

	cur = linkInstr(cur, m.allocateInstr().asCallIndirect(newOperandMem(m.newAmodeImmReg(
		wazevoapi.ExecutionContextOffsetStackGrowCallTrampolineAddress.U32(), raxVReg)), nil))

	jmpToCont := m.allocateInstr()
	cur = linkInstr(cur, jmpToCont)

	okInstr, ok := m.allocateBrTarget()
	cur = linkInstr(cur, okInstr)
	ja.asJmpIf(condNBE, newOperandLabel(ok))
	cur = m.addRSP(int32(requiredStackSize), cur)

	contInstr, cont := m.allocateBrTarget()
	cur = linkInstr(cur, contInstr)
	jmpToCont.asJmp(newOperandLabel(cont))

	return cur
}