[Deopt] Remove jump table in prologue of deopt entries.

Remove the use of a jump table in the prologue of the deopt entries
and instead pass the bailout id explicitly in a register when calling
the deopt entry routine from optimized code. This unifies the logic
with the way the Arm64 code works. It saves the following amount of
memory in code stubs:

 - arm:  384KB
 - ia32: 480KB
 - x64:  240KB

This could be offset by a slight increase in the size of optimized code
for loading the immediate, however this impact should be minimal and
will scale with the maximum number of bailout ids (e.g., the size of
code will increase by one instruction per bailout id on Arm, therefore
~98,000 bailouts will be needed before the overhead is greater than
the current fixed table size).

Change-Id: I838604b48fa04cbd45320c7b9dac0de08fd8eb25
Reviewed-on: https://chromium-review.googlesource.com/c/1398224
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58636}
This commit is contained in:
Ross McIlroy 2019-01-08 13:31:38 +00:00 committed by Commit Bot
parent 9cd84510be
commit 4ab96a9a81
24 changed files with 106 additions and 281 deletions

View File

@ -338,11 +338,6 @@ bool RelocInfo::IsInConstantPool() {
return Assembler::is_constant_pool_load(pc_);
}
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
DCHECK(IsRuntimeEntry(rmode_));
return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(

View File

@ -12,17 +12,14 @@
namespace v8 {
namespace internal {
const int Deoptimizer::table_entry_size_ = 8;
#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate, int count,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
GenerateDeoptimizationEntriesPrologue(masm, count);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@ -70,15 +67,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the bailout id is passed as r10 by the caller.
__ mov(r2, r10);
// Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
__ mov(r3, lr);
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ add(r4, sp, Operand(kSavedRegistersAreaSize));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
@ -138,8 +134,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ str(r2, MemOperand(r1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Remove the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
@ -237,59 +233,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ stop("Unreachable.");
}
void Deoptimizer::GenerateDeoptimizationEntriesPrologue(MacroAssembler* masm,
int count) {
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
// We need to be able to generate immediates up to kMaxNumberOfEntries. On
// ARMv7, we can use movw (with a maximum immediate of 0xFFFF). On ARMv6, we
// need two instructions.
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xFFFF);
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(masm, ARMv7);
Label done;
for (int i = 0; i < count; i++) {
int start = masm->pc_offset();
USE(start);
__ movw(scratch, i);
__ b(&done);
DCHECK_EQ(table_entry_size_, masm->pc_offset() - start);
}
__ bind(&done);
} else {
// We want to keep table_entry_size_ == 8 (since this is the common case),
// but we need two instructions to load most immediates over 0xFF. To handle
// this, we set the low byte in the main table, and then set the high byte
// in a separate table if necessary.
Label high_fixes[256];
int high_fix_max = (count - 1) >> 8;
DCHECK_GT(arraysize(high_fixes), static_cast<size_t>(high_fix_max));
for (int i = 0; i < count; i++) {
int start = masm->pc_offset();
USE(start);
__ mov(scratch, Operand(i & 0xFF)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm->pc_offset() - start);
}
// Generate the secondary table, to set the high byte.
for (int high = 1; high <= high_fix_max; high++) {
__ bind(&high_fixes[high]);
__ orr(scratch, scratch, Operand(high << 8));
// If this isn't the last entry, emit a branch to the end of the table.
// The last entry can just fall through.
if (high < high_fix_max) __ b(&high_fixes[0]);
}
// Bind high_fixes[0] last, for indices like 0x00**. This case requires no
// fix-up, so for (common) small tables we can jump here, then just fall
// through with no additional branch.
__ bind(&high_fixes[0]);
}
__ push(scratch);
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {

View File

@ -2506,6 +2506,26 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
NoRootArrayScope no_root_array(this);
// Save the deopt id in r10 (we don't need the roots array from now on).
DCHECK_LE(deopt_id, 0xFFFF);
if (CpuFeatures::IsSupported(ARMv7)) {
// On ARMv7, we can use movw (with a maximum immediate of 0xFFFF)
movw(r10, deopt_id);
} else {
// On ARMv6, we might need two instructions.
mov(r10, Operand(deopt_id & 0xFF)); // Set the low byte.
if (deopt_id >= 0xFF) {
orr(r10, r10, Operand(deopt_id & 0xFF00)); // Set the high byte.
}
}
Call(target, RelocInfo::RUNTIME_ENTRY);
CheckConstPool(false, false);
}
} // namespace internal
} // namespace v8

View File

@ -309,12 +309,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This should only be used when assembling a deoptimizer call because of
// the CheckConstPool invocation, which is only needed for deoptimization.
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode) {
USE(deopt_id);
Call(target, rmode);
CheckConstPool(false, false);
}
void CallForDeoptimization(Address target, int deopt_id);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.

View File

@ -190,17 +190,6 @@ bool RelocInfo::IsInConstantPool() {
return instr->IsLdrLiteralX();
}
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
DCHECK(IsRuntimeEntry(rmode_));
Instruction* movz_instr = reinterpret_cast<Instruction*>(pc_)->preceding();
DCHECK(movz_instr->IsMovz());
uint64_t imm = static_cast<uint64_t>(movz_instr->ImmMoveWide())
<< (16 * movz_instr->ShiftMoveWide());
DCHECK_LE(imm, INT_MAX);
return static_cast<int>(imm);
}
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
Instruction* instr = reinterpret_cast<Instruction*>(pc_);

View File

@ -100,10 +100,9 @@ void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
} // namespace
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate, int count,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
GenerateDeoptimizationEntriesPrologue(masm, count);
// TODO(all): This code needs to be revisited. We probably only need to save
// caller-saved registers here. Callee-saved registers can be stored directly
@ -144,18 +143,17 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kDoubleRegistersOffset =
kFloatRegistersOffset + saved_float_registers.Count() * kSRegSize;
// Get the bailout id from the stack.
// The bailout id was passed by the caller in x26.
Register bailout_id = x2;
__ Peek(bailout_id, kSavedRegistersAreaSize);
__ Mov(bailout_id, x26);
Register code_object = x3;
Register fp_to_sp = x4;
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, adding two words for alignment padding and
// bailout id.
__ Add(fp_to_sp, sp, kSavedRegistersAreaSize + (2 * kPointerSize));
// Compute the fp-to-sp delta.
__ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
@ -203,9 +201,9 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
CopyRegListToFrame(masm, x1, FrameDescription::float_registers_offset(),
saved_float_registers, w2, w3, kFloatRegistersOffset);
// Remove the padding, bailout id and the saved registers from the stack.
// Remove the saved registers from the stack.
DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
__ Drop(2 + (kSavedRegistersAreaSize / kXRegSize));
__ Drop(kSavedRegistersAreaSize / kXRegSize);
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
@ -289,28 +287,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ Br(continuation);
}
// Size of an entry of the second level deopt table. Since we do not generate
// a table for ARM64, the size is zero.
const int Deoptimizer::table_entry_size_ = 0 * kInstrSize;
void Deoptimizer::GenerateDeoptimizationEntriesPrologue(MacroAssembler* masm,
int count) {
UseScratchRegisterScope temps(masm);
// The MacroAssembler will have put the deoptimization id in x16, the first
// temp register allocated. We can't assert that the id is in there, but we
// can check that x16 the first allocated temp and that the value it contains
// is in the expected range.
Register entry_id = temps.AcquireX();
DCHECK(entry_id.Is(x16));
__ Push(padreg, entry_id);
if (__ emit_debug_code()) {
// Ensure the entry_id looks sensible, ie. 0 <= entry_id < count().
__ Cmp(entry_id, count);
__ Check(lo, AbortReason::kOffsetOutOfRange);
}
}
bool Deoptimizer::PadTopOfStackRegister() { return true; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {

View File

@ -2118,23 +2118,18 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode) {
DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
BlockPoolsScope scope(this);
NoRootArrayScope no_root_array(this);
#ifdef DEBUG
Label start;
Bind(&start);
#endif
// The deoptimizer requires the deoptimization id to be in x16.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
DCHECK(temp.Is(x16));
// Make sure that the deopt id can be encoded in 16 bits, so can be encoded
// in a single movz instruction with a zero shift.
DCHECK(is_uint16(deopt_id));
movz(temp, deopt_id);
movz(x26, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstrSize, 0);

View File

@ -857,8 +857,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode);
void CallForDeoptimization(Address target, int deopt_id);
// Calls a C function.
// The called function is not allowed to trigger a

View File

@ -112,17 +112,19 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, SourcePosition pos) {
if (deoptimization_id > Deoptimizer::kMaxNumberOfEntries) {
return kTooManyDeoptimizationBailouts;
}
DeoptimizeKind deopt_kind = GetDeoptimizationKind(deoptimization_id);
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
tasm()->isolate(), deoptimization_id, deopt_kind);
if (deopt_entry == kNullAddress) return kTooManyDeoptimizationBailouts;
Address deopt_entry =
Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
if (info()->is_source_positions_enabled()) {
tasm()->RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
tasm()->CallForDeoptimization(deopt_entry, deoptimization_id,
RelocInfo::RUNTIME_ENTRY);
tasm()->CallForDeoptimization(deopt_entry, deoptimization_id);
return kSuccess;
}

View File

@ -943,10 +943,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
// Make sure that we have generated the maximal number of deopt entries.
// This is in order to avoid triggering the generation of deopt entries later
// during code assembly.
Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
// Make sure that we have generated the deopt entries code. This is in order
// to avoid triggering the generation of deopt entries later during code
// assembly.
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
return SUCCEEDED;
}
@ -2299,7 +2299,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();

View File

@ -567,52 +567,34 @@ void Deoptimizer::DeleteFrameDescriptions() {
#endif // DEBUG
}
Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, int id,
Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind) {
CHECK_GE(id, 0);
if (id >= kMaxNumberOfEntries) return kNullAddress;
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
CHECK(!data->deopt_entry_code(kind).is_null());
Code code = data->deopt_entry_code(kind);
return code->raw_instruction_start() + (id * table_entry_size_);
return data->deopt_entry_code(kind)->raw_instruction_start();
}
int Deoptimizer::GetDeoptimizationId(Isolate* isolate, Address addr,
DeoptimizeKind kind) {
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
DCHECK(IsInDeoptimizationTable(isolate, addr, kind));
Code code = data->deopt_entry_code(kind);
Address start = code->raw_instruction_start();
DCHECK_EQ(0,
static_cast<int>(addr - start) % table_entry_size_);
return static_cast<int>(addr - start) / table_entry_size_;
}
bool Deoptimizer::IsInDeoptimizationTable(Isolate* isolate, Address addr,
DeoptimizeKind type) {
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind type) {
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
Code code = data->deopt_entry_code(type);
if (code.is_null()) return false;
Address start = code->raw_instruction_start();
return ((table_entry_size_ == 0 && addr == start) ||
(addr >= start &&
addr < start + (kMaxNumberOfEntries * table_entry_size_)));
return addr == code->raw_instruction_start();
}
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type) {
if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kEager)) {
if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) {
*type = DeoptimizeKind::kEager;
return true;
}
if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kSoft)) {
if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) {
*type = DeoptimizeKind::kSoft;
return true;
}
if (IsInDeoptimizationTable(isolate, addr, DeoptimizeKind::kLazy)) {
if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) {
*type = DeoptimizeKind::kLazy;
return true;
}
@ -1846,8 +1828,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
MacroAssembler masm(isolate, nullptr, 16 * KB, CodeObjectRequired::kYes);
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, masm.isolate(), kMaxNumberOfEntries,
kind);
GenerateDeoptimizationEntries(&masm, masm.isolate(), kind);
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
@ -1863,7 +1844,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
data->set_deopt_entry_code(kind, *code);
}
void Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate) {
void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) {
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager);
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft);

View File

@ -492,10 +492,7 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
static Address GetDeoptimizationEntry(Isolate* isolate, int id,
DeoptimizeKind kind);
static int GetDeoptimizationId(Isolate* isolate, Address addr,
DeoptimizeKind kind);
static Address GetDeoptimizationEntry(Isolate* isolate, DeoptimizeKind kind);
// Returns true if {addr} is a deoptimization entry and stores its type in
// {type}. Returns false if {addr} is not a deoptimization entry.
@ -519,17 +516,17 @@ class Deoptimizer : public Malloced {
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind);
static void EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate);
static void EnsureCodeForDeoptimizationEntries(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
static const int kMaxNumberOfEntries = 16384;
private:
friend class FrameWriter;
void QueueValueForMaterialization(Address output_address, Object obj,
const TranslatedFrame::iterator& iterator);
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
unsigned bailout_id, Address from, int fp_to_sp_delta);
@ -537,8 +534,8 @@ class Deoptimizer : public Malloced {
void PrintFunctionName();
void DeleteFrameDescriptions();
static bool IsInDeoptimizationTable(Isolate* isolate, Address addr,
DeoptimizeKind type);
static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind type);
void DoComputeOutputFrames();
void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
@ -573,10 +570,8 @@ class Deoptimizer : public Malloced {
static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);
static void GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate, int count,
Isolate* isolate,
DeoptimizeKind kind);
static void GenerateDeoptimizationEntriesPrologue(MacroAssembler* masm,
int count);
// Marks all the code in the given context for deoptimization.
static void MarkAllCodeForContext(Context native_context);

View File

@ -250,9 +250,8 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
Address addr = relocinfo->target_address();
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, addr, &type)) {
int id = relocinfo->GetDeoptimizationId(isolate, type);
out->AddFormatted(" ;; %s deoptimization bailout %d",
Deoptimizer::MessageFor(type), id);
out->AddFormatted(" ;; %s deoptimization bailout",
Deoptimizer::MessageFor(type));
} else {
out->AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
}
@ -370,7 +369,8 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
if (pcs.empty() && !code.is_null()) {
RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc), RelocInfo::NONE,
RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc),
RelocInfo::NONE,
0, Code());
if (dummy_rinfo.IsInConstantPool()) {
Address constant_pool_entry_address =

View File

@ -201,16 +201,10 @@ bool RelocInfo::IsCodedSpecially() {
return RelocInfo::ModeMask(rmode_) & kApplyMask;
}
bool RelocInfo::IsInConstantPool() {
return false;
}
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
DCHECK(IsRuntimeEntry(rmode_));
return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return Memory<uint32_t>(pc_);

View File

@ -13,15 +13,12 @@
namespace v8 {
namespace internal {
const int Deoptimizer::table_entry_size_ = 10;
#define __ masm->
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate, int count,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
GenerateDeoptimizationEntriesPrologue(masm, count);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@ -55,13 +52,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kPointerSize + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
__ mov(esi, Operand(esp, kSavedRegistersAreaSize));
// The bailout id is passed in ebx by the caller.
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ sub(edx, ebp);
__ neg(edx);
@ -77,7 +73,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize),
Immediate(static_cast<int>(deopt_kind)));
__ mov(Operand(esp, 2 * kPointerSize), esi); // Bailout id.
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
@ -119,8 +115,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Remove the bailout id, return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
// Remove the return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 1 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
@ -206,20 +202,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ ret(0);
}
void Deoptimizer::GenerateDeoptimizationEntriesPrologue(MacroAssembler* masm,
int count) {
// Create a sequence of deoptimization entries.
Label done;
for (int i = 0; i < count; i++) {
int start = masm->pc_offset();
USE(start);
__ push_imm32(i);
__ jmp(&done);
DCHECK(masm->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {

View File

@ -2035,6 +2035,13 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
NoRootArrayScope no_root_array(this);
// Save the deopt id in ebx (we don't need the roots array from now on).
mov(ebx, deopt_id);
call(target, RelocInfo::RUNTIME_ENTRY);
}
} // namespace internal
} // namespace v8

View File

@ -107,11 +107,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode) {
USE(deopt_id);
call(target, rmode);
}
void CallForDeoptimization(Address target, int deopt_id);
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.

View File

@ -179,7 +179,7 @@ class AllowExternalCallThatCantCauseGC: public FrameScope {
// scope object.
class NoRootArrayScope {
public:
explicit NoRootArrayScope(MacroAssembler* masm)
explicit NoRootArrayScope(TurboAssembler* masm)
: masm_(masm), old_value_(masm->root_array_available()) {
masm->set_root_array_available(false);
}
@ -187,7 +187,7 @@ class NoRootArrayScope {
~NoRootArrayScope() { masm_->set_root_array_available(old_value_); }
private:
MacroAssembler* masm_;
TurboAssembler* masm_;
bool old_value_;
};

View File

@ -474,9 +474,8 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
int id = GetDeoptimizationId(isolate, type);
os << " (" << Deoptimizer::MessageFor(type) << " deoptimization bailout "
<< id << ")";
os << " (" << Deoptimizer::MessageFor(type)
<< " deoptimization bailout)";
}
} else if (IsConstPool(rmode_)) {
os << " (size " << static_cast<int>(data_) << ")";

View File

@ -204,11 +204,6 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
// Returns the deoptimization id for the entry associated with the reloc info
// where {kind} is the deoptimization kind.
// This is only used for printing RUNTIME_ENTRY relocation info.
int GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind);
Address wasm_call_address() const;
Address wasm_stub_call_address() const;

View File

@ -4993,16 +4993,10 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
bool RelocInfo::IsInConstantPool() {
return false;
}
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
DCHECK(IsRuntimeEntry(rmode_));
return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
} // namespace internal
} // namespace v8

View File

@ -13,18 +13,12 @@
namespace v8 {
namespace internal {
const int Deoptimizer::table_entry_size_ = 5;
#define __ masm->
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate, int count,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
Label deopt_table_entry;
__ bind(&deopt_table_entry);
GenerateDeoptimizationEntriesPrologue(masm, count);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@ -69,29 +63,13 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// this on linux), since it is another parameter passing register on windows.
Register arg5 = r11;
// Get the bailout id from the stack.
__ movp(rax, Operand(rsp, kSavedRegistersAreaSize));
// address of deoptimization table
__ leap(rdx, Operand(&deopt_table_entry));
// rax = deopt_entry - deopt_table_entry - 5
__ subp(rax, rdx);
__ subl(rax, Immediate(5));
// rax /= 5
__ movl(rbx, Immediate(0xcccccccd));
__ imulq(rax, rbx);
__ shrq(rax, Immediate(0x22));
// bailout id
__ movl(arg_reg_3, rax);
// The bailout id is passed in r13 by the caller.
__ movp(arg_reg_3, r13);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
__ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
kPCOnStackSize));
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
__ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ subp(arg5, rbp);
__ negp(arg5);
@ -150,8 +128,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ popq(Operand(rbx, dst_offset));
}
// Remove the bailout id and return address from the stack.
__ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Remove the return address from the stack.
__ addp(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
@ -243,19 +221,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ ret(0);
}
void Deoptimizer::GenerateDeoptimizationEntriesPrologue(MacroAssembler* masm,
int count) {
// Create a sequence of deoptimization entries.
Label done;
for (int i = 0; i < count; i++) {
int start = masm->pc_offset();
USE(start);
__ call(&done);
DCHECK(masm->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {

View File

@ -2819,6 +2819,13 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Set(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
NoRootArrayScope no_root_array(this);
// Save the deopt id in r13 (we don't need the roots array from now on).
movp(r13, Immediate(deopt_id));
call(target, RelocInfo::RUNTIME_ENTRY);
}
} // namespace internal
} // namespace v8

View File

@ -375,11 +375,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode) {
USE(deopt_id);
call(target, rmode);
}
void CallForDeoptimization(Address target, int deopt_id);
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);