[Liftoff] Optimize code for returns
Since all other values are not used any more if we return, we don't need to spill anything (as might happen during {PopToRegister}). Instead, just load the top stack value into the return register(s). R=titzer@chromium.org Bug: v8:6600, v8:8423 Change-Id: Ibfd02d20191459c7b136ab9a48f0cf1a53b3385d Reviewed-on: https://chromium-review.googlesource.com/c/1358391 Commit-Queue: Clemens Hammacher <clemensh@chromium.org> Reviewed-by: Ben Titzer <titzer@chromium.org> Cr-Commit-Position: refs/heads/master@{#57995}
This commit is contained in:
parent
0ed8017e8a
commit
558c950454
@ -11,6 +11,7 @@
|
||||
#include "src/compiler/wasm-compiler.h"
|
||||
#include "src/macro-assembler-inl.h"
|
||||
#include "src/wasm/function-body-decoder-impl.h"
|
||||
#include "src/wasm/wasm-linkage.h"
|
||||
#include "src/wasm/wasm-opcodes.h"
|
||||
|
||||
namespace v8 {
|
||||
@ -620,6 +621,22 @@ void LiftoffAssembler::ParallelRegisterMove(
|
||||
}
|
||||
}
|
||||
|
||||
// Moves the value on top of the Liftoff value stack into the ABI return
// register(s) for the function's (single) return type, without spilling any
// other cached values.
void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
  // Multi-value returns are not supported yet, so exactly one value is moved.
  DCHECK_EQ(1, sig->return_count());
  ValueType return_type = sig->GetReturn(0);

  // Select the target register(s) dictated by the calling convention:
  // a GP register pair for types that need one (e.g. i64 on 32-bit targets),
  // otherwise a single GP or FP return register depending on the reg class.
  const LiftoffRegister return_reg = [&] {
    if (needs_reg_pair(return_type)) {
      return LiftoffRegister::ForPair(kGpReturnRegisters[0],
                                      kGpReturnRegisters[1]);
    }
    if (reg_class_for(return_type) == kGpReg) {
      return LiftoffRegister(kGpReturnRegisters[0]);
    }
    return LiftoffRegister(kFpReturnRegisters[0]);
  }();

  // Let the stack-transfer machinery load the topmost stack slot into the
  // chosen register(s); it handles whatever moves or fills are required.
  StackTransferRecipe stack_transfers(this);
  stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(),
                                   cache_state_.stack_height() - 1);
}
|
||||
|
||||
#ifdef ENABLE_SLOW_DCHECKS
|
||||
bool LiftoffAssembler::ValidateCacheState() const {
|
||||
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
|
||||
|
@ -338,6 +338,8 @@ class LiftoffAssembler : public TurboAssembler {
|
||||
};
|
||||
void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
|
||||
|
||||
void MoveToReturnRegisters(FunctionSig*);
|
||||
|
||||
#ifdef ENABLE_SLOW_DCHECKS
|
||||
// Validate that the register use counts reflect the state of the cache.
|
||||
bool ValidateCacheState() const;
|
||||
|
@ -1075,25 +1075,17 @@ class LiftoffCompiler {
|
||||
__ cache_state()->stack_state.pop_back();
|
||||
}
|
||||
|
||||
void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
|
||||
void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/, bool implicit) {
|
||||
if (implicit) {
|
||||
DCHECK_EQ(1, decoder->control_depth());
|
||||
Control* func_block = decoder->control_at(0);
|
||||
__ bind(func_block->label.get());
|
||||
__ cache_state()->Steal(func_block->label_state);
|
||||
TraceCacheState(decoder);
|
||||
}
|
||||
if (!values.is_empty()) {
|
||||
if (values.size() > 1) return unsupported(decoder, "multi-return");
|
||||
LiftoffRegister reg = __ PopToRegister();
|
||||
LiftoffRegister return_reg =
|
||||
kNeedI64RegPair && values[0].type == kWasmI64
|
||||
? LiftoffRegister::ForPair(kGpReturnRegisters[0],
|
||||
kGpReturnRegisters[1])
|
||||
: reg_class_for(values[0].type) == kGpReg
|
||||
? LiftoffRegister(kGpReturnRegisters[0])
|
||||
: LiftoffRegister(kFpReturnRegisters[0]);
|
||||
if (reg != return_reg) __ Move(return_reg, reg, values[0].type);
|
||||
}
|
||||
size_t num_returns = decoder->sig_->return_count();
|
||||
if (num_returns > 1) return unsupported(decoder, "multi-return");
|
||||
if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
|
||||
__ LeaveFrame(StackFrame::WASM_COMPILED);
|
||||
__ DropStackSlotsAndRet(
|
||||
static_cast<uint32_t>(descriptor_->StackParameterCount()));
|
||||
|
Loading…
Reference in New Issue
Block a user