[Liftoff] Implement direct calls

This adds support for direct calls in Liftoff.

Drive-by: Fix and extend two call-related tests that were helpful while
developing this CL.

R=ahaas@chromium.org

Bug: v8:6600
Change-Id: I20a98d9dd330da9a020c8c9b5c10b04e94af684d
Reviewed-on: https://chromium-review.googlesource.com/847579
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50427}
Authored by Clemens Hammacher on 2018-01-09 11:04:54 +01:00, committed by Commit Bot
parent 30fabc4cdf
commit 566b3bf4a7
15 changed files with 274 additions and 26 deletions
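
For context (illustration only, not part of this CL): with this change, a cctest in the style of the AddCall test touched below could exercise a single direct call roughly like the following sketch. The WASM_EXEC_TEST, WasmRunner, BUILD and WASM_* helpers are the existing cctest macros from test/cctest/wasm; the test name and exact body here are hypothetical.

// Hypothetical sketch of a direct-call test, modeled on test/cctest/wasm/test-run-wasm.cc.
WASM_EXEC_TEST(DirectCallSketch) {
  WasmRunner<int32_t, int32_t> r(execution_mode);
  // Callee: returns its argument plus one.
  WasmFunctionCompiler& inc = r.NewFunction<int32_t, int32_t>();
  BUILD(inc, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I32V_1(1)));
  // Caller: a single direct call, which Liftoff now compiles instead of
  // bailing out with "call" unsupported.
  BUILD(r, WASM_CALL_FUNCTION(inc.function_index(), WASM_GET_LOCAL(0)));
  CHECK_EQ(12, r.Call(11));
}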

View File

@ -5220,8 +5220,13 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
// TODO(wasm): Use proper log files, here and elsewhere.
OFStream os(stdout);
os << "--- Wasm liftoff code ---\n";
- EmbeddedVector<char, 32> func_name;
- func_name.Truncate(SNPrintF(func_name, "wasm#%d-liftoff", func_index_));
EmbeddedVector<char, 64> func_name;
if (func_name_.start() != nullptr) {
SNPrintF(func_name, "#%d:%.*s", func_index(), func_name_.length(),
func_name_.start());
} else {
SNPrintF(func_name, "wasm#%d", func_index());
}
code->Disassemble(func_name.start(), os);
os << "--- End code ---\n";
}

View File

@ -24,6 +24,8 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -117,6 +119,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

View File

@ -24,6 +24,8 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -117,6 +119,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

View File

@ -38,6 +38,7 @@ static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
static constexpr DoubleRegister kScratchDoubleReg = xmm7;
void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
DCHECK_LE(bytes, kMaxInt);
sub(esp, Immediate(bytes));
}
@ -50,9 +51,12 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
mov(reg.gp(), Immediate(value.to_i32()));
}
break;
- case kWasmF32:
- TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
case kWasmF32: {
Register tmp = GetUnusedRegister(kGpReg).gp();
mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
movd(reg.fp(), tmp);
break;
}
default:
UNREACHABLE();
}
@ -70,6 +74,10 @@ void LiftoffAssembler::SpillContext(Register context) {
mov(liftoff::GetContextOperand(), context);
}
void LiftoffAssembler::FillContextInto(Register dst) {
mov(dst, liftoff::GetContextOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -269,7 +277,7 @@ COMMUTATIVE_I32_BINOP(or, or_)
COMMUTATIVE_I32_BINOP(xor, xor_)
// clang-format on
- #undef DEFAULT_I32_BINOP
#undef COMMUTATIVE_I32_BINOP
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
@ -341,6 +349,36 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
TurboAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
switch (src.loc()) {
case VarState::kStack:
DCHECK_NE(kWasmF64, src.type()); // TODO(clemensh): Implement this.
push(liftoff::GetStackSlot(src_index));
break;
case VarState::kRegister:
switch (src.type()) {
case kWasmI32:
push(src.reg().gp());
break;
case kWasmF32:
sub(esp, Immediate(sizeof(float)));
movss(Operand(esp, 0), src.reg().fp());
break;
case kWasmF64:
sub(esp, Immediate(sizeof(double)));
movsd(Operand(esp, 0), src.reg().fp());
break;
default:
UNREACHABLE();
}
break;
case VarState::kConstant:
push(Immediate(src.i32_const()));
break;
}
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) {

View File

@ -88,6 +88,7 @@ class StackTransferRecipe {
}
if (executed_moves == 0) {
// There is a cycle. Spill one register, then continue.
// TODO(clemensh): Use an unused register if available.
LiftoffRegister spill_reg = register_moves.back().src;
asm_->Spill(next_spill_slot, spill_reg);
// Remember to reload into the destination register later.
@ -141,14 +142,6 @@ class StackTransferRecipe {
}
}
- private:
- // TODO(clemensh): Avoid unconditionally allocating on the heap.
- std::vector<RegisterMove> register_moves;
- std::vector<RegisterLoad> register_loads;
- LiftoffRegList move_dst_regs;
- LiftoffRegList move_src_regs;
- LiftoffAssembler* const asm_;
void LoadIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src,
uint32_t src_index) {
@ -181,6 +174,14 @@ class StackTransferRecipe {
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index) {
register_loads.emplace_back(dst, stack_index);
}
private:
// TODO(clemensh): Avoid unconditionally allocating on the heap.
std::vector<RegisterMove> register_moves;
std::vector<RegisterLoad> register_loads;
LiftoffRegList move_dst_regs;
LiftoffRegList move_src_regs;
LiftoffAssembler* const asm_;
};
} // namespace
@ -358,22 +359,104 @@ void LiftoffAssembler::SpillLocals() {
}
}
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
// Parameter 0 is the wasm context.
constexpr size_t kFirstActualParameter = 1;
DCHECK_EQ(kFirstActualParameter + num_params, call_desc->ParameterCount());
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
StackTransferRecipe stack_transfers(this);
// Spill all cache slots which are not being used as parameters.
// Don't update any register use counters, they will be reset later anyway.
for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue;
Spill(idx, slot.reg());
slot.MakeStack();
}
// Now move all parameter values into the right slot for the call.
// Process parameters backward, such that we can just pop values from the
// stack.
for (uint32_t i = num_params; i > 0; --i) {
uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
RegClass rc = reg_class_for(type);
compiler::LinkageLocation loc = call_desc->GetInputLocation(
param + kFirstActualParameter + kInputShift);
const VarState& slot = cache_state_.stack_state.back();
uint32_t stack_idx = cache_state_.stack_height() - 1;
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister();
LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
} else {
DCHECK(loc.IsCallerFrameSlot());
PushCallerFrameSlot(slot, stack_idx);
}
cache_state_.stack_state.pop_back();
}
// Reset register use counters.
cache_state_.used_registers = {};
memset(cache_state_.register_use_count, 0,
sizeof(cache_state_.register_use_count));
// Execute the stack transfers before filling the context register.
stack_transfers.Execute();
// Fill the wasm context into the right register.
compiler::LinkageLocation context_loc =
call_desc->GetInputLocation(kInputShift);
DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
int context_reg_code = context_loc.AsRegister();
LiftoffRegister context_reg(Register::from_code(context_reg_code));
FillContextInto(context_reg.gp());
}
void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_desc) {
size_t return_count = call_desc->ReturnCount();
DCHECK_EQ(return_count, sig->return_count());
if (return_count != 0) {
DCHECK_EQ(1, return_count);
compiler::LinkageLocation return_loc = call_desc->GetReturnLocation(0);
int return_reg_code = return_loc.AsRegister();
ValueType return_type = sig->GetReturn(0);
LiftoffRegister return_reg =
LiftoffRegister::from_code(reg_class_for(return_type), return_reg_code);
DCHECK(!cache_state_.is_used(return_reg));
PushRegister(return_type, return_reg);
}
}
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) {
// Spill one cached value to free a register.
LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
- int remaining_uses = cache_state_.get_use_count(spill_reg);
SpillRegister(spill_reg);
return spill_reg;
}
void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
int remaining_uses = cache_state_.get_use_count(reg);
DCHECK_LT(0, remaining_uses);
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
auto* slot = &cache_state_.stack_state[idx];
- if (!slot->is_reg() || slot->reg() != spill_reg) continue;
- Spill(idx, spill_reg);
if (!slot->is_reg() || slot->reg() != reg) continue;
Spill(idx, reg);
slot->MakeStack();
if (--remaining_uses == 0) break;
}
- cache_state_.clear_used(spill_reg);
- return spill_reg;
cache_state_.clear_used(reg);
}
void LiftoffAssembler::set_num_locals(uint32_t num_locals) {

View File

@ -224,16 +224,20 @@ class LiftoffAssembler : public TurboAssembler {
cache_state_.stack_state.emplace_back(type, reg);
}
void SpillRegister(LiftoffRegister);
uint32_t GetNumUses(LiftoffRegister reg) {
return cache_state_.get_use_count(reg);
}
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return GetUnusedRegister(candidates, pinned);
}
// Get an unused register of {candidates}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
LiftoffRegList pinned = {}) {
if (cache_state_.has_unused_register(candidates, pinned)) {
@ -258,6 +262,11 @@ class LiftoffAssembler : public TurboAssembler {
void Spill(uint32_t index);
void SpillLocals();
// Load parameters into the right registers / stack slots for the call.
void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
@ -267,6 +276,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
inline void FillContextInto(Register dst);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr);
@ -311,6 +321,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void AssertUnreachable(AbortReason reason);
// Push a value to the stack (will become a caller frame slot).
inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index);
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);

View File

@ -486,6 +486,7 @@ class LiftoffCompiler {
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<validate>& operand) {
TraceCacheState(decoder);
auto& slot = __ cache_state()->stack_state[operand.index];
DCHECK_EQ(slot.type(), operand.type);
switch (slot.loc()) {
@ -735,11 +736,38 @@ class LiftoffCompiler {
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
unsupported(decoder, "grow_memory");
}
void CallDirect(Decoder* decoder,
const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
unsupported(decoder, "call");
if (operand.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
TraceCacheState(decoder);
compiler::CallDescriptor* call_desc =
compiler::GetWasmCallDescriptor(&compilation_zone_, operand.sig);
__ PrepareCall(operand.sig, call_desc);
source_position_table_builder_->AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
if (FLAG_wasm_jit_to_native) {
return unsupported(decoder, "call with jit-to-native");
} else {
Handle<Code> target = operand.index < env_->function_code.size()
? env_->function_code[operand.index]
: env_->default_function_code;
__ Call(target, RelocInfo::CODE_TARGET);
}
safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
__ FinishCall(operand.sig, call_desc);
}
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {

View File

@ -70,6 +70,17 @@ class LiftoffRegister {
return LiftoffRegister(code);
}
static LiftoffRegister from_code(RegClass rc, int code) {
switch (rc) {
case kGpReg:
return LiftoffRegister(Register::from_code(code));
case kFpReg:
return LiftoffRegister(DoubleRegister::from_code(code));
default:
UNREACHABLE();
}
}
constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
constexpr bool is_fp() const {
return code_ >= kAfterMaxLiftoffGpRegCode &&

View File

@ -24,6 +24,8 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -117,6 +119,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

View File

@ -24,6 +24,8 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -117,6 +119,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

View File

@ -24,6 +24,8 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -117,6 +119,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

View File

@ -24,6 +24,8 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -117,6 +119,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
UNIMPLEMENTED();
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

View File

@ -67,6 +67,10 @@ void LiftoffAssembler::SpillContext(Register context) {
movp(liftoff::GetContextOperand(), context);
}
void LiftoffAssembler::FillContextInto(Register dst) {
movp(dst, liftoff::GetContextOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@ -341,6 +345,26 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
TurboAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
switch (src.loc()) {
case VarState::kStack:
pushq(liftoff::GetStackSlot(src_index));
break;
case VarState::kRegister:
if (src.reg().is_gp()) {
pushq(src.reg().gp());
} else {
subp(rsp, Immediate(kStackSlotSize));
movsd(Operand(rsp, 0), src.reg().fp());
}
break;
case VarState::kConstant:
pushq(Immediate(src.i32_const()));
break;
}
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) {

View File

@ -2400,10 +2400,11 @@ WASM_EXEC_TEST(AddCall) {
byte local = r.AllocateLocal(kWasmI32);
BUILD(r, WASM_SET_LOCAL(local, WASM_I32V_2(99)),
- WASM_I32_ADD(WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)),
- WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(local))));
WASM_I32_ADD(
WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(0),
WASM_GET_LOCAL(0)),
WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(local),
WASM_GET_LOCAL(local))));
CHECK_EQ(198, r.Call(0));
CHECK_EQ(200, r.Call(1));

View File

@ -12,10 +12,13 @@ let type_const = [wasmI32Const, wasmF32Const, wasmF64Const];
function f(values, shift, num_const_params, ...args) {
assertEquals(
values.length + num_const_params, args.length, 'number of arguments');
const expected = idx =>
idx < values.length ? values[(idx + shift) % values.length] : idx;
const msg = 'shifted by ' + shift + ': ' +
'expected [' + args.map((_, i) => expected(i)).join(', ') + '], got [' +
args.join(', ') + ']';
args.forEach((arg_val, idx) => {
- const expected =
- idx < values.length ? values[(idx + shift) % values.length] : idx;
- assertEquals(expected, arg_val, 'arg #' + idx + ', shifted by ' + shift);
assertEquals(expected(idx), arg_val, 'arg #' + idx + ', ' + msg);
});
}