package amd64

import (
	"fmt"

	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc"
	"github.com/tetratelabs/wazero/internal/engine/wazevo/ssa"
)
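
// addendsMatchOpcodes is the set of opcodes that lowerToAddressMode and
// lowerAddend attempt to fold directly into an addressing mode.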
var addendsMatchOpcodes = [...]ssa.Opcode{ssa.OpcodeUExtend, ssa.OpcodeSExtend, ssa.OpcodeIadd, ssa.OpcodeIconst, ssa.OpcodeIshl}
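
// addend is a single term of an address computation: a constant offset (off)
// when r is regalloc.VRegInvalid, or a register r optionally pre-shifted left
// by shift bits.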
type addend struct {
	r     regalloc.VReg
	off   int64
	shift byte
}

func (a addend) String() string {
	return fmt.Sprintf("addend{r=%s, off=%d, shift=%d}", a.r, a.off, a.shift)
}
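
// lowerToAddressMode lowers the given pointer value plus the constant
// offsetBase into an x86-64 addressing mode, folding additions, constants,
// 32-bit extensions, and small left shifts into base/index/scale/displacement
// where possible.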
func (m *machine) lowerToAddressMode(ptr ssa.Value, offsetBase uint32) (am *amode) {
	def := m.c.ValueDefinition(ptr)

	if offsetBase&0x80000000 != 0 {
		// The offset base does not fit in a sign-extended 32-bit displacement,
		// so materialize it (plus any constant addend) into a register first.
		a := m.lowerAddend(def)
		off64 := a.off + int64(offsetBase)
		offsetBaseReg := m.c.AllocateVReg(ssa.TypeI64)
		m.lowerIconst(offsetBaseReg, uint64(off64), true)
		if a.r != regalloc.VRegInvalid {
			return m.newAmodeRegRegShift(0, offsetBaseReg, a.r, a.shift)
		} else {
			return m.newAmodeImmReg(0, offsetBaseReg)
		}
	}

	if op := m.c.MatchInstrOneOf(def, addendsMatchOpcodes[:]); op == ssa.OpcodeIadd {
		// Both operands of the Iadd can contribute to the addressing mode.
		add := def.Instr
		x, y := add.Arg2()
		xDef, yDef := m.c.ValueDefinition(x), m.c.ValueDefinition(y)
		ax := m.lowerAddend(xDef)
		ay := m.lowerAddend(yDef)
		add.MarkLowered()
		return m.lowerAddendsToAmode(ax, ay, offsetBase)
	} else {
		a := m.lowerAddend(def)
		if a.r != regalloc.VRegInvalid {
			if a.shift != 0 {
				// A shifted register can only be the index, so pair it with a zero base register.
				tmpReg := m.c.AllocateVReg(ssa.TypeI64)
				m.lowerIconst(tmpReg, 0, true)
				return m.newAmodeRegRegShift(offsetBase, tmpReg, a.r, a.shift)
			}
			return m.newAmodeImmReg(offsetBase, a.r)
		} else {
			off64 := a.off + int64(offsetBase)
			tmpReg := m.c.AllocateVReg(ssa.TypeI64)
			m.lowerIconst(tmpReg, uint64(off64), true)
			return m.newAmodeImmReg(0, tmpReg)
		}
	}
}
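
// lowerAddendsToAmode combines the two addends x and y with the constant
// offBase into a single addressing mode. Each addend must carry either a
// register or a constant offset, never both.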
func (m *machine) lowerAddendsToAmode(x, y addend, offBase uint32) *amode {
	if x.r != regalloc.VRegInvalid && x.off != 0 || y.r != regalloc.VRegInvalid && y.off != 0 {
		panic("invalid input")
	}

	u64 := uint64(x.off+y.off) + uint64(offBase)
	if u64 != 0 {
		if _, ok := asImm32(u64, false); !ok {
			// The displacement does not fit in 32 bits: materialize it and
			// use it as the missing base or index register.
			tmpReg := m.c.AllocateVReg(ssa.TypeI64)
			m.lowerIconst(tmpReg, u64, true)
			u64 = 0
			if x.r == regalloc.VRegInvalid {
				x.r = tmpReg
			} else if y.r == regalloc.VRegInvalid {
				y.r = tmpReg
			} else {
				panic("BUG")
			}
		}
	}

	u32 := uint32(u64)
	switch {
	case x.r != regalloc.VRegInvalid && y.r != regalloc.VRegInvalid:
		switch {
		case x.shift != 0 && y.shift != 0:
			// Only one scale can be encoded; shift x explicitly and use y as the scaled index.
			shifted := m.allocateInstr()
			shifted.asShiftR(shiftROpShiftLeft, newOperandImm32(uint32(x.shift)), x.r, true)
			m.insert(shifted)
			return m.newAmodeRegRegShift(u32, x.r, y.r, y.shift)
		case x.shift != 0 && y.shift == 0:
			// Swap so that the shifted register becomes the index.
			x, y = y, x
			fallthrough
		default:
			return m.newAmodeRegRegShift(u32, x.r, y.r, y.shift)
		}
	case x.r == regalloc.VRegInvalid && y.r != regalloc.VRegInvalid:
		x, y = y, x
		fallthrough
	case x.r != regalloc.VRegInvalid && y.r == regalloc.VRegInvalid:
		if x.shift != 0 {
			// A shifted register can only be the index, so pair it with a zero base register.
			zero := m.c.AllocateVReg(ssa.TypeI64)
			m.lowerIconst(zero, 0, true)
			return m.newAmodeRegRegShift(u32, zero, x.r, x.shift)
		}
		return m.newAmodeImmReg(u32, x.r)
	default:
		// Both addends are constant; load the combined displacement into a register.
		tmpReg := m.c.AllocateVReg(ssa.TypeI64)
		m.lowerIconst(tmpReg, u64, true)
		return m.newAmodeImmReg(0, tmpReg)
	}
}
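
// lowerAddend converts a value definition into an addend: a constant,
// a (possibly shifted) register, or a plain register, depending on whether
// the defining instruction is one of addendsMatchOpcodes.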
func (m *machine) lowerAddend(x backend.SSAValueDefinition) addend {
	if !x.IsFromInstr() {
		return addend{m.c.VRegOf(x.V), 0, 0}
	}
	op := m.c.MatchInstrOneOf(x, addendsMatchOpcodes[:])
	if op != ssa.OpcodeInvalid && op != ssa.OpcodeIadd {
		return m.lowerAddendFromInstr(x.Instr)
	}
	p := m.getOperand_Reg(x)
	return addend{p.reg(), 0, 0}
}
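
// lowerAddendFromInstr marks instr as lowered and turns it into an addend:
// Iconst becomes a constant offset, UExtend/SExtend of a 32-bit value becomes
// either a constant or a register, and Ishl by a constant of at most 3
// becomes a register with a scale.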
func (m *machine) lowerAddendFromInstr(instr *ssa.Instruction) addend {
	instr.MarkLowered()
	switch op := instr.Opcode(); op {
	case ssa.OpcodeIconst:
		u64 := instr.ConstantVal()
		if instr.Return().Type().Bits() == 32 {
			// 32-bit constants are sign-extended to 64 bits.
			return addend{regalloc.VRegInvalid, int64(int32(u64)), 0}
		} else {
			return addend{regalloc.VRegInvalid, int64(u64), 0}
		}
	case ssa.OpcodeUExtend, ssa.OpcodeSExtend:
		input := instr.Arg()
		inputDef := m.c.ValueDefinition(input)
		if input.Type().Bits() != 32 {
			panic("BUG: invalid input type " + input.Type().String())
		}
		constInst := inputDef.IsFromInstr() && inputDef.Instr.Constant()
		switch {
		case constInst && op == ssa.OpcodeSExtend:
			return addend{regalloc.VRegInvalid, int64(uint32(inputDef.Instr.ConstantVal())), 0}
		case constInst && op == ssa.OpcodeUExtend:
			return addend{regalloc.VRegInvalid, int64(int32(inputDef.Instr.ConstantVal())), 0}
		default:
			r := m.getOperand_Reg(inputDef)
			return addend{r.reg(), 0, 0}
		}
	case ssa.OpcodeIshl:
		x, amount := instr.Arg2()
		amountDef := m.c.ValueDefinition(amount)
		if amountDef.IsFromInstr() && amountDef.Instr.Constant() && amountDef.Instr.ConstantVal() <= 3 {
			// Shift amounts of at most 3 can be encoded as the amode scale.
			r := m.getOperand_Reg(m.c.ValueDefinition(x))
			return addend{r.reg(), 0, uint8(amountDef.Instr.ConstantVal())}
		}
		r := m.getOperand_Reg(m.c.ValueDefinition(x))
		return addend{r.reg(), 0, 0}
	}
	panic("BUG: invalid opcode")
}