[Liftoff] Implement indirect calls

This CL adds support for indirect calls (call_indirect) to Liftoff, implemented for ia32 and x64; the other platforms keep UNIMPLEMENTED stubs. The compiler pops the table index, bounds-checks it against the table size, loads the function table, checks the callee's canonical signature, loads the code target, and emits the call. Supporting changes: LoadConstant (and the ia32 Immediate constructors) take a RelocInfo mode so patchable constants can be emitted, the i32 shift emitters take a pinned register list, emit_ptrsize_compare and kUnsignedGreaterEqual are added, AddOutOfLineTrap takes the builtin to call, and PrepareCall learns to keep a call target register alive across the argument shuffle.
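
The emitted sequence for call_indirect is roughly the following (a pseudocode
summary of LiftoffCompiler::CallIndirect below, not literal emitted code):

  index = pop()                             // table index from the wasm value stack
  if (index >= table_size) trap(FuncInvalid)
  table = *function_table_handle            // FixedArray: [sig1, code1, sig2, code2, ...]
  offset = index << (kPointerSizeLog2 + 1)  // byte offset of the entry's signature slot
  if (load(table + header + offset) != canonical_sig) trap(FuncSigMismatch)
  target = load(table + header + offset + kPointerSize)
  call target                               // via LiftoffAssembler::CallIndirect
                                            // (plus a Foreign deref under --wasm-jit-to-native)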

R=titzer@chromium.org

Bug: v8:6600
Change-Id: Ia29b87fa1f7be873cd722f934b8007c38794dceb
Reviewed-on: https://chromium-review.googlesource.com/877884
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50756}
Author: Clemens Hammacher <clemensh@chromium.org>
Date: 2018-01-22 13:30:37 +01:00
Committed by: Commit Bot
Commit: e40a968dd2 (parent: f30a86c8d3)
13 changed files with 392 additions and 100 deletions

@ -233,30 +233,19 @@ enum RoundingMode {
class Immediate BASE_EMBEDDED {
public:
inline explicit Immediate(int x) {
inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NONE32) {
value_.immediate = x;
rmode_ = RelocInfo::NONE32;
}
inline explicit Immediate(const ExternalReference& ext) {
value_.immediate = reinterpret_cast<int32_t>(ext.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
inline explicit Immediate(Handle<HeapObject> handle) {
value_.immediate = reinterpret_cast<intptr_t>(handle.address());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
inline explicit Immediate(Smi* value) {
value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
inline explicit Immediate(Address addr) {
value_.immediate = reinterpret_cast<int32_t>(addr);
rmode_ = RelocInfo::NONE32;
}
inline explicit Immediate(Address x, RelocInfo::Mode rmode) {
value_.immediate = reinterpret_cast<int32_t>(x);
rmode_ = rmode;
}
inline explicit Immediate(const ExternalReference& ext)
: Immediate(ext.address(), RelocInfo::EXTERNAL_REFERENCE) {}
inline explicit Immediate(Handle<HeapObject> handle)
: Immediate(handle.address(), RelocInfo::EMBEDDED_OBJECT) {}
inline explicit Immediate(Smi* value)
: Immediate(reinterpret_cast<intptr_t>(value)) {}
inline explicit Immediate(Address addr,
RelocInfo::Mode rmode = RelocInfo::NONE32)
: Immediate(reinterpret_cast<int32_t>(addr), rmode) {}
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedCode(CodeStub* code);
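
With the constructors now delegating to a single (value, rmode) form, call sites
can attach a relocation mode to a plain integer. A minimal usage sketch (variable
names hypothetical):

  // Patchable constant: the relocation entry lets later patching code find
  // and update this immediate (this is how LoadConstant uses it on ia32 below).
  Immediate sized(table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
  // Existing call sites are unchanged; the mode defaults to NONE32.
  Immediate plain(42);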

@ -13,7 +13,8 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
UNIMPLEMENTED();
}
@ -83,6 +84,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
DoubleRegister rhs) { \
UNIMPLEMENTED(); \
}
#define UNIMPLEMENTED_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
LiftoffRegList pinned) { \
UNIMPLEMENTED(); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
UNIMPLEMENTED_GP_BINOP(i32_sub)
@ -90,9 +96,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
UNIMPLEMENTED_GP_BINOP(i32_shl)
UNIMPLEMENTED_GP_BINOP(i32_sar)
UNIMPLEMENTED_GP_BINOP(i32_shr)
UNIMPLEMENTED_SHIFTOP(i32_shl)
UNIMPLEMENTED_SHIFTOP(i32_sar)
UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_eqz)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
@ -105,6 +111,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_SHIFTOP
void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
@ -112,6 +119,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -168,6 +179,12 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
UNIMPLEMENTED();
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
UNIMPLEMENTED();
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
UNIMPLEMENTED();
}

@ -13,7 +13,8 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
UNIMPLEMENTED();
}
@ -83,6 +84,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
DoubleRegister rhs) { \
UNIMPLEMENTED(); \
}
#define UNIMPLEMENTED_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
LiftoffRegList pinned) { \
UNIMPLEMENTED(); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
UNIMPLEMENTED_GP_BINOP(i32_sub)
@ -90,9 +96,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
UNIMPLEMENTED_GP_BINOP(i32_shl)
UNIMPLEMENTED_GP_BINOP(i32_sar)
UNIMPLEMENTED_GP_BINOP(i32_shr)
UNIMPLEMENTED_SHIFTOP(i32_shl)
UNIMPLEMENTED_SHIFTOP(i32_sar)
UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_eqz)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
@ -105,6 +111,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_SHIFTOP
void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
@ -112,6 +119,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -168,6 +179,12 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
UNIMPLEMENTED();
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
UNIMPLEMENTED();
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
UNIMPLEMENTED();
}

@ -46,14 +46,13 @@ void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
sub(esp, Immediate(bytes));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
xor_(reg.gp(), reg.gp());
} else {
mov(reg.gp(), Immediate(value.to_i32()));
}
TurboAssembler::Move(
reg.gp(),
Immediate(reinterpret_cast<Address>(value.to_i32()), rmode));
break;
case kWasmF32: {
Register tmp = GetUnusedRegister(kGpReg).gp();
@ -286,8 +285,11 @@ COMMUTATIVE_I32_BINOP(xor, xor_)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
void (Assembler::*emit_shift)(Register)) {
LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, lhs, rhs);
void (Assembler::*emit_shift)(Register),
LiftoffRegList pinned) {
pinned.set(dst);
pinned.set(lhs);
pinned.set(rhs);
// If dst is ecx, compute into a tmp register first, then move to ecx.
if (dst == ecx) {
Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
@ -302,7 +304,8 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// first. If lhs is ecx, lhs is now the tmp register.
Register tmp_reg = no_reg;
if (rhs != ecx) {
if (lhs == ecx || assm->cache_state()->is_used(LiftoffRegister(ecx))) {
if (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
pinned.has(LiftoffRegister(ecx))) {
tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
assm->mov(tmp_reg, ecx);
if (lhs == ecx) lhs = tmp_reg;
@ -319,16 +322,19 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
}
} // namespace liftoff
void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl);
void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl, pinned);
}
void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl);
void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl, pinned);
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl);
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl, pinned);
}
bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
@ -438,6 +444,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
cmp(lhs, rhs);
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
emit_i32_compare(lhs, rhs);
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -571,6 +581,18 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
PrepareCall(sig, call_desc, &target);
if (target == no_reg) {
add(esp, Immediate(kPointerSize));
call(Operand(esp, -4));
} else {
call(target);
}
}
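
A note on the spilled-target path above (an inference from the code, not spelled
out in the CL): when PrepareCall returns no_reg, the target was pushed last and
sits on top of the stack. ia32 reserves no scratch register, so the code frees the
slot by bumping esp and then calls through the word just below esp; the call
instruction reads its memory operand before pushing the return address, so the
slot is still intact. With a scratch register the equivalent would be
(hypothetical, cf. the x64 version below):

  pop(scratch);
  call(scratch);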
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
sub(esp, Immediate(size));
mov(addr, esp);

@ -50,10 +50,12 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
constexpr Condition kEqual = equal;
constexpr Condition kUnequal = not_equal;
constexpr Condition kUnsignedGreaterEqual = above_equal;
#else
// On unimplemented platforms, just make this compile.
constexpr Condition kEqual = static_cast<Condition>(0);
constexpr Condition kUnequal = static_cast<Condition>(0);
constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
#endif
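
The new kUnsignedGreaterEqual condition backs the table bounds check in
LiftoffCompiler::CallIndirect further down:

  __ emit_i32_compare(index.gp(), tmp_const.gp());       // tmp_const = table_size
  __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);  // index >= size: trap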

@ -379,7 +379,8 @@ void LiftoffAssembler::SpillAllRegisters() {
}
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc) {
compiler::CallDescriptor* call_desc,
Register* target) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
// Parameter 0 is the wasm context.
constexpr size_t kFirstActualParameter = 1;
@ -403,6 +404,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
// Now move all parameter values into the right slot for the call.
// Process parameters backward, such that we can just pop values from the
// stack.
LiftoffRegList param_regs;
for (uint32_t i = num_params; i > 0; --i) {
uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
@ -415,6 +417,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister();
LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
param_regs.set(reg);
stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
} else {
DCHECK(loc.IsCallerFrameSlot());
@ -423,6 +426,27 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
cache_state_.stack_state.pop_back();
}
compiler::LinkageLocation context_loc =
call_desc->GetInputLocation(kInputShift);
DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
Register context_reg = Register::from_code(context_loc.AsRegister());
param_regs.set(LiftoffRegister(context_reg));
// If the target register overlaps with a parameter register, then move the
// target to another free register, or spill to the stack.
if (target && param_regs.has(LiftoffRegister(*target))) {
// Try to find another free register.
LiftoffRegList free_regs = kGpCacheRegList.MaskOut(param_regs);
if (!free_regs.is_empty()) {
LiftoffRegister new_target = free_regs.GetFirstRegSet();
stack_transfers.MoveRegister(new_target, LiftoffRegister(*target));
*target = new_target.gp();
} else {
PushCallerFrameSlot(LiftoffRegister(*target));
*target = no_reg;
}
}
// Execute the stack transfers before filling the context register.
stack_transfers.Execute();
@ -430,12 +454,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
cache_state_.reset_used_registers();
// Fill the wasm context into the right register.
compiler::LinkageLocation context_loc =
call_desc->GetInputLocation(kInputShift);
DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
int context_reg_code = context_loc.AsRegister();
LiftoffRegister context_reg(Register::from_code(context_reg_code));
FillContextInto(context_reg.gp());
FillContextInto(context_reg);
}
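
A worked example of the new target handling (register assignment hypothetical):
suppose the target of an indirect call sits in eax, and the call descriptor also
assigns eax to a parameter. PrepareCall detects the clash via param_regs and
either moves the target to a free cache register through the stack-transfer
recorder, or, if every GP cache register carries a parameter, pushes it as a
caller frame slot:

  Register target = eax;  // hypothetical: clashes with a parameter register
  __ PrepareCall(sig, call_desc, &target);
  // target is now some free register, or no_reg if it was spilled; the
  // platform CallIndirect handles the no_reg case (see ia32/x64).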
void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,

@ -269,7 +269,10 @@ class LiftoffAssembler : public TurboAssembler {
void SpillAllRegisters();
// Load parameters into the right registers / stack slots for the call.
void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*);
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
@ -279,7 +282,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void ReserveStackSpace(uint32_t bytes);
inline void LoadConstant(LiftoffRegister, WasmValue);
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE32);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
inline void FillContextInto(Register dst);
@ -307,9 +311,12 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
inline void emit_i32_shl(Register dst, Register lhs, Register rhs);
inline void emit_i32_sar(Register dst, Register lhs, Register rhs);
inline void emit_i32_shr(Register dst, Register lhs, Register rhs);
inline void emit_i32_shl(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned = {});
inline void emit_i32_sar(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned = {});
inline void emit_i32_shr(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned = {});
// i32 unops.
inline bool emit_i32_eqz(Register dst, Register src);
@ -328,6 +335,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_test(Register);
inline void emit_i32_compare(Register, Register);
inline void emit_ptrsize_compare(Register, Register);
inline void emit_jump(Label*);
inline void emit_cond_jump(Condition, Label*);
@ -356,8 +364,10 @@ class LiftoffAssembler : public TurboAssembler {
inline void CallC(ExternalReference ext_ref, uint32_t num_params);
inline void CallNativeWasmCode(Address addr);
inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
inline void CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target);
// Reserve space in the current frame, store address to space in {addr}.
inline void AllocateStackSlot(Register addr, uint32_t size);
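
The pinned list on the shift emitters exists because ia32/x64 shifts take their
count in a fixed register (ecx/rcx): if the emitter has to shuffle that register,
it must know which registers the caller still holds outside the regular cache
state. CallIndirect uses it like this (from the compiler change below):

  __ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
  __ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);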

@ -60,6 +60,11 @@ class MovableLabel {
};
#endif
wasm::WasmValue WasmPtrValue(void* ptr) {
using int_t = std::conditional<kPointerSize == 8, uint64_t, uint32_t>::type;
return wasm::WasmValue(reinterpret_cast<int_t>(ptr));
}
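
WasmPtrValue materializes a native pointer through the regular LoadConstant
path, picking i64 or i32 to match the target word size. CallIndirect uses it to
load the function table handle and the expected signature Smi:

  __ LoadConstant(table, WasmPtrValue(function_table_handle_address),
                  RelocInfo::WASM_GLOBAL_HANDLE);
  __ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));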
class LiftoffCompiler {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
@ -508,6 +513,16 @@ class LiftoffCompiler {
__ PushRegister(kWasmI32, dst_reg);
}
void I32ShiftOp(void (LiftoffAssembler::*emit_fn)(Register, Register,
Register, LiftoffRegList)) {
LiftoffRegList pinned;
LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
(asm_->*emit_fn)(dst_reg.gp(), lhs_reg.gp(), rhs_reg.gp(), {});
__ PushRegister(kWasmI32, dst_reg);
}
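
Note that I32ShiftOp passes an empty pinned list to the emitter: its operand
registers are still tracked as used in the cache state, which the shift helpers
already consult; the explicit pinned parameter is for callers such as
CallIndirect, whose temporaries live outside the value stack.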
void I32CCallBinOp(ExternalReference ext_ref) {
LiftoffRegList pinned;
LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
@ -535,6 +550,9 @@ class LiftoffCompiler {
#define CASE_BINOP(opcode, type, fn) \
case WasmOpcode::kExpr##opcode: \
return type##BinOp(&LiftoffAssembler::emit_##fn);
#define CASE_SHIFTOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return I32ShiftOp(&LiftoffAssembler::emit_##fn);
#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
case WasmOpcode::kExpr##opcode: \
type##CCallBinOp(ExternalReference::ext_ref_fn(asm_->isolate())); \
@ -546,9 +564,9 @@ class LiftoffCompiler {
CASE_BINOP(I32And, I32, i32_and)
CASE_BINOP(I32Ior, I32, i32_or)
CASE_BINOP(I32Xor, I32, i32_xor)
CASE_BINOP(I32Shl, I32, i32_shl)
CASE_BINOP(I32ShrS, I32, i32_sar)
CASE_BINOP(I32ShrU, I32, i32_shr)
CASE_SHIFTOP(I32Shl, i32_shl)
CASE_SHIFTOP(I32ShrS, i32_sar)
CASE_SHIFTOP(I32ShrU, i32_shr)
CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
CASE_BINOP(F32Add, F32, f32_add)
@ -558,6 +576,7 @@ class LiftoffCompiler {
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_BINOP
#undef CASE_SHIFTOP
#undef CASE_CCALL_BINOP
}
@ -754,13 +773,15 @@ class LiftoffCompiler {
__ cache_state()->Steal(if_block->else_state->state);
}
Label* AddOutOfLineTrap(wasm::WasmCodePosition position, uint32_t pc = 0) {
Label* AddOutOfLineTrap(wasm::WasmCodePosition position,
Builtins::Name builtin, uint32_t pc = 0) {
DCHECK(!FLAG_wasm_no_bounds_checks);
// The pc is needed exactly if trap handlers are enabled.
DCHECK_EQ(pc != 0, env_->use_trap_handler);
// The pc is needed for memory OOB trap with trap handler enabled. Other
// callers should not even compute it.
DCHECK_EQ(pc != 0, builtin == Builtins::kThrowWasmTrapMemOutOfBounds &&
env_->use_trap_handler);
out_of_line_code_.push_back(OutOfLineCode::Trap(
Builtins::kThrowWasmTrapMemOutOfBounds, position, pc));
out_of_line_code_.push_back(OutOfLineCode::Trap(builtin, position, pc));
return out_of_line_code_.back().label.get();
}
@ -769,7 +790,8 @@ class LiftoffCompiler {
DCHECK(!env_->use_trap_handler);
if (FLAG_wasm_no_bounds_checks) return;
Label* trap_label = AddOutOfLineTrap(position);
Label* trap_label =
AddOutOfLineTrap(position, Builtins::kThrowWasmTrapMemOutOfBounds);
if (access_size > max_size_ || offset > max_size_ - access_size) {
// The access will be out of bounds, even for the largest memory.
@ -884,7 +906,9 @@ class LiftoffCompiler {
__ Load(value, addr, index, operand.offset, type, pinned,
&protected_load_pc);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(), protected_load_pc);
AddOutOfLineTrap(decoder->position(),
Builtins::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
__ PushRegister(value_type, value);
CheckStackSizeLimit(decoder);
@ -916,7 +940,9 @@ class LiftoffCompiler {
__ Store(addr, index, operand.offset, value, type, pinned,
&protected_store_pc);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(), protected_store_pc);
AddOutOfLineTrap(decoder->position(),
Builtins::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(true, type.mem_rep(), index, operand.offset,
@ -962,11 +988,105 @@ class LiftoffCompiler {
__ FinishCall(operand.sig, call_desc);
}
void CallIndirect(Decoder* decoder, const Value& index,
void CallIndirect(Decoder* decoder, const Value& index_val,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
unsupported(decoder, "call_indirect");
if (operand.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
// Assume only one table for now.
uint32_t table_index = 0;
// Pop the index.
LiftoffRegister index = __ PopToRegister(kGpReg);
// If that register is still being used after popping, we move it to another
// register, because we want to modify that register.
if (__ cache_state()->is_used(index)) {
LiftoffRegister new_index =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index));
__ Move(new_index, index);
index = new_index;
}
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
// Get three temporary registers.
LiftoffRegister table = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister tmp_const =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Bounds check against the table size.
{
uint32_t table_size =
env_->module->function_tables[table_index].initial_size;
Label* trap_label = AddOutOfLineTrap(decoder->position(),
Builtins::kThrowWasmTrapFuncInvalid);
__ LoadConstant(tmp_const, WasmValue(table_size),
RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
__ emit_i32_compare(index.gp(), tmp_const.gp());
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
}
wasm::GlobalHandleAddress function_table_handle_address =
env_->function_tables[table_index];
__ LoadConstant(table, WasmPtrValue(function_table_handle_address),
RelocInfo::WASM_GLOBAL_HANDLE);
static constexpr LoadType kPointerLoadType =
kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
__ Load(table, table.gp(), no_reg, 0, kPointerLoadType, pinned);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
// [sig1, code1, sig2, code2, sig3, code3, ...]
static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
constexpr int kFixedArrayOffset = FixedArray::kHeaderSize - kHeapObjectTag;
__ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
// Shift index such that it's the offset of the signature in the FixedArray.
__ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);
// Load the signature.
__ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset,
kPointerLoadType, pinned);
uint32_t canonical_sig_num = env_->module->signature_ids[operand.sig_index];
DCHECK_GE(canonical_sig_num, 0);
DCHECK_GE(kMaxInt, canonical_sig_num);
__ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));
__ emit_ptrsize_compare(scratch.gp(), tmp_const.gp());
Label* trap_label = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, trap_label);
// Load code object.
__ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset + kPointerSize,
kPointerLoadType, pinned);
if (FLAG_wasm_jit_to_native) {
// The table holds a Foreign pointing to the instruction start.
__ Load(scratch, scratch.gp(), no_reg,
Foreign::kForeignAddressOffset - kHeapObjectTag, kPointerLoadType,
pinned);
}
source_position_table_builder_->AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
compiler::CallDescriptor* call_desc =
compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
__ CallIndirect(operand.sig, call_desc, scratch.gp());
safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
__ FinishCall(operand.sig, call_desc);
}
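
The shift amount kPointerSizeLog2 + 1 folds two multiplications into one: each
table entry is a (signature, code) pair, so the entry index is doubled (one extra
shift bit), and each element is pointer sized (kPointerSizeLog2 bits). On x64,
kPointerSizeLog2 is 3, so index 5 becomes 5 << 4 = 80 bytes into the FixedArray
payload, i.e. element 10: the signature slot of entry 5; the matching code slot
follows at +kPointerSize.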
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");

@ -13,7 +13,8 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
UNIMPLEMENTED();
}
@ -83,6 +84,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
DoubleRegister rhs) { \
UNIMPLEMENTED(); \
}
#define UNIMPLEMENTED_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
LiftoffRegList pinned) { \
UNIMPLEMENTED(); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
UNIMPLEMENTED_GP_BINOP(i32_sub)
@ -90,9 +96,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
UNIMPLEMENTED_GP_BINOP(i32_shl)
UNIMPLEMENTED_GP_BINOP(i32_sar)
UNIMPLEMENTED_GP_BINOP(i32_shr)
UNIMPLEMENTED_SHIFTOP(i32_shl)
UNIMPLEMENTED_SHIFTOP(i32_sar)
UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_eqz)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
@ -105,6 +111,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_SHIFTOP
void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
@ -112,6 +119,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -168,6 +179,12 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
UNIMPLEMENTED();
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
UNIMPLEMENTED();
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
UNIMPLEMENTED();
}

@ -13,7 +13,8 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
UNIMPLEMENTED();
}
@ -83,6 +84,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
DoubleRegister rhs) { \
UNIMPLEMENTED(); \
}
#define UNIMPLEMENTED_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
LiftoffRegList pinned) { \
UNIMPLEMENTED(); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
UNIMPLEMENTED_GP_BINOP(i32_sub)
@ -90,9 +96,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
UNIMPLEMENTED_GP_BINOP(i32_shl)
UNIMPLEMENTED_GP_BINOP(i32_sar)
UNIMPLEMENTED_GP_BINOP(i32_shr)
UNIMPLEMENTED_SHIFTOP(i32_shl)
UNIMPLEMENTED_SHIFTOP(i32_sar)
UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_eqz)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
@ -105,6 +111,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_SHIFTOP
void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
@ -112,6 +119,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -168,6 +179,12 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
UNIMPLEMENTED();
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
UNIMPLEMENTED();
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
UNIMPLEMENTED();
}

@ -13,7 +13,8 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
UNIMPLEMENTED();
}
@ -83,6 +84,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
DoubleRegister rhs) { \
UNIMPLEMENTED(); \
}
#define UNIMPLEMENTED_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
LiftoffRegList pinned) { \
UNIMPLEMENTED(); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
UNIMPLEMENTED_GP_BINOP(i32_sub)
@ -90,9 +96,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
UNIMPLEMENTED_GP_BINOP(i32_shl)
UNIMPLEMENTED_GP_BINOP(i32_sar)
UNIMPLEMENTED_GP_BINOP(i32_shr)
UNIMPLEMENTED_SHIFTOP(i32_shl)
UNIMPLEMENTED_SHIFTOP(i32_sar)
UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_eqz)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
@ -105,6 +111,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_SHIFTOP
void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
@ -112,6 +119,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -168,6 +179,12 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
UNIMPLEMENTED();
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
UNIMPLEMENTED();
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
UNIMPLEMENTED();
}

@ -13,7 +13,8 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
UNIMPLEMENTED();
}
@ -83,6 +84,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
DoubleRegister rhs) { \
UNIMPLEMENTED(); \
}
#define UNIMPLEMENTED_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
LiftoffRegList pinned) { \
UNIMPLEMENTED(); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
UNIMPLEMENTED_GP_BINOP(i32_sub)
@ -90,9 +96,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
UNIMPLEMENTED_GP_BINOP(i32_shl)
UNIMPLEMENTED_GP_BINOP(i32_sar)
UNIMPLEMENTED_GP_BINOP(i32_shr)
UNIMPLEMENTED_SHIFTOP(i32_shl)
UNIMPLEMENTED_SHIFTOP(i32_sar)
UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_eqz)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
@ -105,6 +111,7 @@ UNIMPLEMENTED_FP_BINOP(f32_mul)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_SHIFTOP
void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
@ -112,6 +119,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -168,6 +179,12 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
UNIMPLEMENTED();
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
UNIMPLEMENTED();
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
UNIMPLEMENTED();
}

@ -38,13 +38,21 @@ void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
subp(rsp, Immediate(bytes));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
movl(reg.gp(), Immediate(value.to_i32()));
movl(reg.gp(), Immediate(value.to_i32(), rmode));
}
break;
case kWasmI64:
if (value.to_i64() == 0 && RelocInfo::IsNone(rmode)) {
xorq(reg.gp(), reg.gp());
} else {
movq(reg.gp(), value.to_i64(), rmode);
}
break;
case kWasmF32:
@ -279,7 +287,8 @@ COMMUTATIVE_I32_BINOP(xor, xor)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
void (Assembler::*emit_shift)(Register)) {
void (Assembler::*emit_shift)(Register),
LiftoffRegList pinned) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
assm->movl(kScratchRegister, lhs);
@ -293,9 +302,10 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// register. If lhs is rcx, lhs is now the scratch register.
bool use_scratch = false;
if (rhs != rcx) {
use_scratch =
lhs == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movl(kScratchRegister, rcx);
use_scratch = lhs == rcx ||
assm->cache_state()->is_used(LiftoffRegister(rcx)) ||
pinned.has(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
if (lhs == rcx) lhs = kScratchRegister;
assm->movl(rcx, rhs);
}
@ -305,20 +315,23 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
(assm->*emit_shift)(dst);
// Restore rcx if needed.
if (use_scratch) assm->movl(rcx, kScratchRegister);
if (use_scratch) assm->movq(rcx, kScratchRegister);
}
} // namespace liftoff
void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl);
void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl, pinned);
}
void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl);
void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl, pinned);
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl);
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
LiftoffRegList pinned) {
liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl, pinned);
}
bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
@ -425,6 +438,10 @@ void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
cmpl(lhs, rhs);
}
void LiftoffAssembler::emit_ptrsize_compare(Register lhs, Register rhs) {
cmpp(lhs, rhs);
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
@ -552,6 +569,17 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc,
Register target) {
PrepareCall(sig, call_desc, &target);
if (target == no_reg) {
popq(kScratchRegister);
target = kScratchRegister;
}
call(target);
}
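
Unlike ia32, x64 simply pops a spilled target into kScratchRegister before the
call; that register is reserved on x64, whereas ia32 has no scratch register and
resorts to the esp trick shown earlier.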
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subp(rsp, Immediate(size));
movp(addr, rsp);