Revert "[deoptimizer] Change deopt entries into builtins"

This reverts commit 7f58ced72e.

Reason for revert: Segfaults on Atom_x64 https://ci.chromium.org/p/v8-internal/builders/ci/v8_linux64_atom_perf/5686?

Original change's description:
> [deoptimizer] Change deopt entries into builtins
>
> While the overall goal of this commit is to change deoptimization
> entries into builtins, there are multiple related things happening:
>
> - Deoptimization entries, formerly stubs (i.e. Code objects generated
>   at runtime, guaranteed to be immovable), have been converted into
>   builtins. The major restriction is that we now need to preserve the
>   kRootRegister, which was formerly used on most architectures to pass
>   the deoptimization id. The solution differs based on platform.
> - Renamed DEOPT_ENTRIES_OR_FOR_TESTING code kind to FOR_TESTING.
> - Removed heap/ support for immovable Code generation.
> - Removed the DeserializerData class (no longer needed).
> - arm64: to preserve 4-byte deopt exits, introduced a new optimization
>   in which the final jump to the deoptimization entry is generated
>   once per Code object, and deopt exits can continue to emit a
>   near-call.
> - arm,ia32,x64: change to fixed-size deopt exits. This reduces exit
>   sizes by 4/8, 5, and 5 bytes, respectively.
>
> On arm the deopt exit size is reduced from 12 (or 16) bytes to 8 bytes
> by using the same strategy as on arm64 (recalc deopt id from return
> address). Before:
>
>  e300a002       movw r10, <id>
>  e59fc024       ldr ip, [pc, <entry offset>]
>  e12fff3c       blx ip
>
> After:
>
>  e59acb35       ldr ip, [r10, <entry offset>]
>  e12fff3c       blx ip
>
> On arm64 the deopt exit size remains 4 bytes (or 8 bytes in some cases
> with CFI). Additionally, up to 4 builtin jumps are emitted per Code
> object (max 32 bytes added overhead per Code object). Before:
>
>  9401cdae       bl <entry offset>
>
> After:
>
>  # eager deoptimization entry jump.
>  f95b1f50       ldr x16, [x26, <eager entry offset>]
>  d61f0200       br x16
>  # lazy deoptimization entry jump.
>  f95b2b50       ldr x16, [x26, <lazy entry offset>]
>  d61f0200       br x16
>  # the deopt exit.
>  97fffffc       bl <eager deoptimization entry jump offset>
>
> On ia32 the deopt exit size is reduced from 10 to 5 bytes. Before:
>
>  bb00000000     mov ebx,<id>
>  e825f5372b     call <entry>
>
> After:
>
>  e8ea2256ba     call <entry>
>
> On x64 the deopt exit size is reduced from 12 to 7 bytes. Before:
>
>  49c7c511000000 REX.W movq r13,<id>
>  e8ea2f0700     call <entry>
>
> After:
>
>  41ff9560360000 call [r13+<entry offset>]
>
> Bug: v8:8661,v8:8768
> Change-Id: I13e30aedc360474dc818fecc528ce87c3bfeed42
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2465834
> Commit-Queue: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#70597}

TBR=ulan@chromium.org,rmcilroy@chromium.org,jgruber@chromium.org,tebbi@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:8661,v8:8768,chromium:1140165
Change-Id: I3df02ab42f6e02233d9f6fb80e8bb18f76870d91
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2485504
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70649}
Jakob Gruber 2020-10-20 11:00:29 +02:00 committed by Commit Bot
parent 45e49775f5
commit 8bc9a7941c
77 changed files with 2353 additions and 1814 deletions
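
The reverted CL's fixed-size deopt exits are what let it drop the explicit deopt id from each exit sequence: when every exit occupies the same number of bytes, the entry can recover the bailout id from the return address alone. The standalone sketch below is an editor's illustration of that arithmetic only; the names (DeoptIdFromExitAddress, kDeoptExitSize, kExitSectionStart) and the normalization of the raw return address to the exit's start are assumptions, not code from this CL.

#include <cassert>
#include <cstdint>

// With fixed-size exits, the exit index (== bailout id) follows from plain
// pointer arithmetic. The real deoptimizer would first normalize the raw
// return address, which points into or just past the exit, to the start of
// the exit section; that adjustment is omitted here.
int DeoptIdFromExitAddress(uintptr_t exit_address, uintptr_t exit_section_start,
                           int deopt_exit_size) {
  uintptr_t offset = exit_address - exit_section_start;
  assert(offset % deopt_exit_size == 0);
  return static_cast<int>(offset / deopt_exit_size);
}

int main() {
  constexpr int kDeoptExitSize = 8;                // e.g. the 8-byte arm exit
  constexpr uintptr_t kExitSectionStart = 0x1000;  // made-up address
  uintptr_t third_exit = kExitSectionStart + 3 * kDeoptExitSize;
  assert(DeoptIdFromExitAddress(third_exit, kExitSectionStart,
                                kDeoptExitSize) == 3);
  return 0;
}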


@@ -3166,250 +3166,6 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
__ Ret();
}
namespace {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Generate_DeoptimizationEntry(MacroAssembler* masm,
DeoptimizeKind deopt_kind) {
Isolate* isolate = masm->isolate();
static constexpr int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
{
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm, VFP32DREGS,
CpuFeatureScope::kDontCheckSupported);
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(scratch);
// Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
// Okay to not call AllocateStackSpace here because the size is a known
// small number and we need to use condition codes.
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d15);
}
// Save all general purpose registers before messing with them.
static constexpr int kNumberOfRegisters = Register::kNumRegisters;
STATIC_ASSERT(kNumberOfRegisters == 16);
// Everything but pc, lr and ip which will be saved but not restored.
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(v8:1588): Note that using pc with stm is deprecated, so we should
// perhaps handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ Move(scratch, ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate));
__ str(fp, MemOperand(scratch));
}
static constexpr int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
__ mov(r2, Operand(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
__ mov(r3, lr);
__ add(r4, sp, Operand(kSavedRegistersAreaSize));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
__ PrepareCallCFunction(6);
__ mov(r0, Operand(0));
Label context_check;
__ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(r1, Operand(static_cast<int>(deopt_kind)));
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
__ Move(r5, ExternalReference::isolate_address(isolate));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset));
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register zero = r4;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ mov(zero, Operand(0));
__ strb(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
__ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
__ add(r2, r2, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r0); // Preserve deoptimizer object across call.
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
__ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r4 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
__ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
__ add(r1, r4, Operand(r1, LSL, 2));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
__ ldr(r2, MemOperand(r4, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r6);
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
__ add(r4, r4, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ cmp(r4, r1);
__ b(lt, &outer_push_loop);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
DwVfpRegister reg = DwVfpRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ vldr(reg, r1, src_offset);
}
// Push pc and continuation from the last output frame.
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
__ push(r6);
// Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset));
__ push(r6);
}
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register one = r4;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ mov(one, Operand(1));
__ strb(one, MemOperand(is_iterable));
}
// Remove sp, lr and pc.
__ Drop(3);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(scratch);
}
__ stop();
}
} // namespace
void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
#undef __
} // namespace internal


@@ -3619,297 +3619,6 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ Ret();
}
namespace {
void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
int dst_offset, const CPURegList& reg_list,
const Register& temp0, const Register& temp1,
int src_offset = 0) {
DCHECK_EQ(reg_list.Count() % 2, 0);
UseScratchRegisterScope temps(masm);
CPURegList copy_to_input = reg_list;
int reg_size = reg_list.RegisterSizeInBytes();
DCHECK_EQ(temp0.SizeInBytes(), reg_size);
DCHECK_EQ(temp1.SizeInBytes(), reg_size);
// Compute some temporary addresses to avoid having the macro assembler set
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, sp, src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
for (int i = 0; i < reg_list.Count(); i += 2) {
masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
CPURegister reg0 = copy_to_input.PopLowestIndex();
CPURegister reg1 = copy_to_input.PopLowestIndex();
int offset0 = reg0.code() * reg_size;
int offset1 = reg1.code() * reg_size;
// Pair up adjacent stores, otherwise write them separately.
if (offset1 == offset0 + reg_size) {
masm->Stp(temp0, temp1, MemOperand(dst, offset0));
} else {
masm->Str(temp0, MemOperand(dst, offset0));
masm->Str(temp1, MemOperand(dst, offset1));
}
}
masm->Sub(dst, dst, dst_offset);
}
void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
const Register& src_base, int src_offset) {
DCHECK_EQ(reg_list.Count() % 2, 0);
UseScratchRegisterScope temps(masm);
CPURegList restore_list = reg_list;
int reg_size = restore_list.RegisterSizeInBytes();
// Compute a temporary addresses to avoid having the macro assembler set
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, src_base, src_offset);
// No need to restore padreg.
restore_list.Remove(padreg);
// Restore every register in restore_list from src.
while (!restore_list.IsEmpty()) {
CPURegister reg0 = restore_list.PopLowestIndex();
CPURegister reg1 = restore_list.PopLowestIndex();
int offset0 = reg0.code() * reg_size;
if (reg1 == NoCPUReg) {
masm->Ldr(reg0, MemOperand(src, offset0));
break;
}
int offset1 = reg1.code() * reg_size;
// Pair up adjacent loads, otherwise read them separately.
if (offset1 == offset0 + reg_size) {
masm->Ldp(reg0, reg1, MemOperand(src, offset0));
} else {
masm->Ldr(reg0, MemOperand(src, offset0));
masm->Ldr(reg1, MemOperand(src, offset1));
}
}
}
void Generate_DeoptimizationEntry(MacroAssembler* masm,
DeoptimizeKind deopt_kind) {
Isolate* isolate = masm->isolate();
// TODO(all): This code needs to be revisited. We probably only need to save
// caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame.
// Save all allocatable double registers.
CPURegList saved_double_registers(
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Default()->allocatable_double_codes_mask());
DCHECK_EQ(saved_double_registers.Count() % 2, 0);
__ PushCPURegList(saved_double_registers);
// We save all the registers except sp, lr, platform register (x18) and the
// masm scratches.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
saved_registers.Remove(ip0);
saved_registers.Remove(ip1);
saved_registers.Remove(x18);
saved_registers.Combine(fp);
saved_registers.Align();
DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
__ Mov(x3, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate)));
__ Str(fp, MemOperand(x3));
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) +
(saved_double_registers.Count() * kDRegSize);
// Floating point registers are saved on the stack above core registers.
const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
Register bailout_id = x2;
Register code_object = x3;
Register fp_to_sp = x4;
__ Mov(bailout_id, Deoptimizer::kFixedExitSizeMarker);
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta.
__ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
__ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
// Ensure we can safely load from below fp.
DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
__ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// If x1 is a smi, zero x0.
__ Tst(x1, kSmiTagMask);
__ CzeroX(x0, eq);
__ Mov(x1, static_cast<int>(deopt_kind));
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
// - x4: fp-to-sp delta
__ Mov(x5, ExternalReference::isolate_address(isolate));
{
// Call Deoptimizer::New().
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve "deoptimizer" object in register x0.
Register deoptimizer = x0;
// Get the input frame descriptor pointer.
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
saved_registers, x2, x3);
// Copy double registers to the input frame.
CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
saved_double_registers, x2, x3, kDoubleRegistersOffset);
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.AcquireX();
__ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ strb(xzr, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
__ Drop(kSavedRegistersAreaSize / kXRegSize);
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
__ SlotAddress(x1, 0);
__ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
__ Mov(x5, unwind_limit);
__ CopyDoubleWords(x3, x1, x5);
__ Drop(unwind_limit);
// Compute the output frame in the deoptimizer.
__ Push(padreg, x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
__ Mov(sp, scratch);
}
// Replace the current (input) frame with the output frames.
Label outer_push_loop, outer_loop_header;
__ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
__ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
__ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
__ B(&outer_loop_header);
__ Bind(&outer_push_loop);
Register current_frame = x2;
Register frame_size = x3;
__ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
__ Lsr(frame_size, x3, kSystemPointerSizeLog2);
__ Claim(frame_size);
__ Add(x7, current_frame, FrameDescription::frame_content_offset());
__ SlotAddress(x6, 0);
__ CopyDoubleWords(x6, x7, frame_size);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
RestoreRegList(masm, saved_double_registers, x1,
FrameDescription::double_registers_offset());
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.AcquireX();
Register one = x4;
__ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ Mov(one, Operand(1));
__ strb(one, MemOperand(is_iterable));
}
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
DCHECK(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
RestoreRegList(masm, saved_registers, last_output_frame,
FrameDescription::registers_offset());
UseScratchRegisterScope temps(masm);
temps.Exclude(x17);
Register continuation = x17;
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
__ Autibsp();
#endif
__ Br(continuation);
}
} // namespace
void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
#undef __
} // namespace internal


@@ -138,10 +138,6 @@ namespace internal {
TFC(CompileLazyDeoptimizedCode, JSTrampoline) \
TFC(InstantiateAsmJs, JSTrampoline) \
ASM(NotifyDeoptimized, Dummy) \
ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Bailout, DeoptimizationEntry) \
ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
\
/* Trampolines called when returning from a deoptimization that expects */ \
/* to continue in a JavaScript builtin to finish the functionality of a */ \


@@ -3781,205 +3781,6 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
MemMoveEmitPopAndReturn(masm);
}
namespace {
void Generate_DeoptimizationEntry(MacroAssembler* masm,
DeoptimizeKind deopt_kind) {
Isolate* isolate = masm->isolate();
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ AllocateStackSpace(kDoubleRegsSize);
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int offset = code * kDoubleSize;
__ movsd(Operand(esp, offset), xmm_reg);
}
__ pushad();
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
__ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
__ sub(edx, ebp);
__ neg(edx);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, eax);
__ mov(eax, Immediate(0));
Label context_check;
__ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(edi, &context_check);
__ mov(eax, Operand(ebp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kSystemPointerSize),
Immediate(static_cast<int>(deopt_kind)));
__ mov(Operand(esp, 2 * kSystemPointerSize),
Immediate(Deoptimizer::kFixedExitSizeMarker)); // Bailout id.
__ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
__ Move(Operand(esp, 5 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(masm->isolate())));
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
__ mov(esi, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ pop(Operand(esi, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Fill in the double input registers.
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize;
__ movsd(xmm0, Operand(esp, src_offset));
__ movsd(Operand(esi, dst_offset), xmm0);
}
// Clear FPU all exceptions.
// TODO(ulan): Find out why the TOP register is not zero here in some cases,
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
__ mov_b(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate), edx),
Immediate(0));
// Remove the return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
__ add(ecx, esp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, esi);
__ mov(Operand(esp, 0 * kSystemPointerSize), eax);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(eax);
__ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one
// past the last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: esi = current FrameDescription*, ecx = loop
// index.
__ mov(esi, Operand(eax, 0));
__ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the XMM registers.
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(esi, src_offset));
}
// Push pc and continuation from the last output frame.
__ push(Operand(esi, FrameDescription::pc_offset()));
__ push(Operand(esi, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ push(Operand(esi, offset));
}
__ mov_b(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate), edx),
Immediate(1));
// Restore the registers from the stack.
__ popad();
__ InitializeRootRegister();
// Return to the continuation point.
__ ret(0);
}
} // namespace
void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
#undef __
} // namespace internal


@@ -4101,223 +4101,6 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ int3(); // Unused on this architecture.
}
namespace {
void Generate_DeoptimizationEntry(MacroAssembler* masm,
DeoptimizeKind deopt_kind) {
Isolate* isolate = masm->isolate();
// Save all double registers, they will later be copied to the deoptimizer's
// FrameDescription.
static constexpr int kDoubleRegsSize =
kDoubleSize * XMMRegister::kNumRegisters;
__ AllocateStackSpace(kDoubleRegsSize);
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int offset = code * kDoubleSize;
__ Movsd(Operand(rsp, offset), xmm_reg);
}
// Save all general purpose registers, they will later be copied to the
// deoptimizer's FrameDescription.
static constexpr int kNumberOfRegisters = Register::kNumRegisters;
for (int i = 0; i < kNumberOfRegisters; i++) {
__ pushq(Register::from_code(i));
}
static constexpr int kSavedRegistersAreaSize =
kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
static constexpr int kCurrentOffsetToReturnAddress = kSavedRegistersAreaSize;
static constexpr int kCurrentOffsetToParentSP =
kCurrentOffsetToReturnAddress + kPCOnStackSize;
__ Store(
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
rbp);
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
// this on linux), since it is another parameter passing register on windows.
Register arg5 = r11;
__ movq(arg_reg_3, Immediate(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movq(arg_reg_4, Operand(rsp, kCurrentOffsetToReturnAddress));
// Load the fp-to-sp-delta.
__ leaq(arg5, Operand(rsp, kCurrentOffsetToParentSP));
__ subq(arg5, rbp);
__ negq(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
__ movq(rax, Immediate(0));
Label context_check;
__ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(rdi, &context_check);
__ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movq(arg_reg_1, rax);
__ Set(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef V8_TARGET_OS_WIN
__ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
__ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
#else
__ movq(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate));
#endif
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PopQuad(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ popq(Operand(rbx, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
__ movb(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate)),
Immediate(0));
// Remove the return address from the stack.
__ addq(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ Pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ pushq(rax);
__ PrepareCallCFunction(2);
__ movq(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
}
__ popq(rax);
__ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ Movsd(xmm_reg, Operand(rbx, src_offset));
}
// Push pc and continuation from the last output frame.
__ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
__ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PushQuad(Operand(rbx, offset));
}
// Restore the registers from the stack.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
Register r = Register::from_code(i);
// Do not restore rsp, simply pop the value into the next register
// and overwrite this afterwards.
if (r == rsp) {
DCHECK_GT(i, 0);
r = Register::from_code(i - 1);
}
__ popq(r);
}
__ movb(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate)),
Immediate(1));
// Return to the continuation point.
__ ret(0);
}
} // namespace
void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}
void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}
void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}
void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
#undef __
} // namespace internal


@@ -176,8 +176,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
// size s.t. pc-relative calls may be used.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
int offset = IsolateData::builtin_entry_slot_offset(
static_cast<Builtins::Name>(code->builtin_index()));
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
ldr(scratch, MemOperand(kRootRegister, offset));
Jump(scratch, cond);
return;
@@ -266,8 +266,8 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
// This branch is taken only for specific cctests, where we force isolate
// creation at runtime. At this point, Code space isn't restricted to a
// size s.t. pc-relative calls may be used.
int offset = IsolateData::builtin_entry_slot_offset(
static_cast<Builtins::Name>(code->builtin_index()));
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
ldr(ip, MemOperand(kRootRegister, offset));
Call(ip, cond);
return;
@@ -2485,18 +2485,26 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label*) {
BlockConstPoolScope block_const_pool(this);
ldr(ip, MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
Label* exit, DeoptimizeKind kind) {
USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r10 (we don't need the roots array from now on).
DCHECK_LE(deopt_id, 0xFFFF);
if (CpuFeatures::IsSupported(ARMv7)) {
// On ARMv7, we can use movw (with a maximum immediate of 0xFFFF)
movw(r10, deopt_id);
} else {
// On ARMv6, we might need two instructions.
mov(r10, Operand(deopt_id & 0xFF)); // Set the low byte.
if (deopt_id >= 0xFF) {
orr(r10, r10, Operand(deopt_id & 0xFF00)); // Set the high byte.
}
}
Call(target, RelocInfo::RUNTIME_ENTRY);
CheckConstPool(false, false);
}
void TurboAssembler::Trap() { stop(); }
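
A quick sanity check on the ARMv6 fallback restored above (two instructions to place the deopt id in r10): composing the low byte with the 0xFF00-masked high byte reconstructs every 16-bit id exactly, and both masks are encodable as ARM data-processing immediates. This is an editor's standalone illustration, not part of the CL or the revert.

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t id = 0; id <= 0xFFFF; ++id) {
    uint32_t r10 = id & 0xFF;              // mov r10, #(id & 0xFF)
    if (id >= 0xFF) {
      r10 |= id & 0xFF00;                  // orr r10, r10, #(id & 0xFF00)
    }
    assert(r10 == id);
  }
  return 0;
}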


@@ -320,9 +320,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
DeoptimizeKind kind,
Label* jump_deoptimization_entry_label);
// This should only be used when assembling a deoptimizer call because of
// the CheckConstPool invocation, which is only needed for deoptimization.
void CallForDeoptimization(Address target, int deopt_id, Label* exit,
DeoptimizeKind kind);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.


@@ -1878,13 +1878,6 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
}
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination) {
Ldr(destination,
MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(builtin_index)));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
@@ -2010,11 +2003,15 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
void TurboAssembler::CallForDeoptimization(
Builtins::Name target, int deopt_id, Label* exit, DeoptimizeKind kind,
Label* jump_deoptimization_entry_label) {
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
Label* exit, DeoptimizeKind kind) {
BlockPoolsScope scope(this);
bl(jump_deoptimization_entry_label);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstrSize, 0);
offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
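
The near_call restored above only works because the deoptimization entries sit within branch range of the code range: a bl immediate is a signed 26-bit instruction count, i.e. roughly +/-128 MB at 4 bytes per instruction. A standalone check of that bound follows; IsNearCallOffset is re-modelled here for illustration rather than taken from V8.

#include <cassert>
#include <cstdint>

constexpr int kInstrSize = 4;

// Signed 26-bit immediate, counted in instructions (the is_int26 check).
bool IsNearCallOffset(int64_t instr_offset) {
  return instr_offset >= -(int64_t{1} << 25) && instr_offset < (int64_t{1} << 25);
}

int main() {
  constexpr int64_t kMaxReachBytes = (int64_t{1} << 25) * kInstrSize;  // 128 MB
  assert(IsNearCallOffset((kMaxReachBytes - kInstrSize) / kInstrSize));
  assert(!IsNearCallOffset(kMaxReachBytes / kInstrSize));
  assert(IsNearCallOffset(-kMaxReachBytes / kInstrSize));
  return 0;
}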


@@ -968,8 +968,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
@@ -982,9 +980,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
DeoptimizeKind kind,
Label* jump_deoptimization_entry_label);
void CallForDeoptimization(Address target, int deopt_id, Label* exit,
DeoptimizeKind kind);
// Calls a C function.
// The called function is not allowed to trigger a


@@ -2074,15 +2074,13 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label*) {
CallBuiltin(target);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
Label* exit, DeoptimizeKind kind) {
USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in ebx (we don't need the roots array from now on).
mov(ebx, deopt_id);
call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { int3(); }


@@ -130,9 +130,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trap() override;
void DebugBreak() override;
void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
DeoptimizeKind kind,
Label* jump_deoptimization_entry_label);
void CallForDeoptimization(Address target, int deopt_id, Label* exit,
DeoptimizeKind kind);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,


@@ -593,12 +593,6 @@ using DummyDescriptor = VoidDescriptor;
// Dummy descriptor that marks builtins with C calling convention.
using CCallDescriptor = VoidDescriptor;
// Marks deoptimization entry builtins. Precise calling conventions currently
// differ based on the platform.
// TODO(jgruber): Once this is unified, we could create a better description
// here.
using DeoptimizationEntryDescriptor = VoidDescriptor;
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)


@@ -99,7 +99,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_turbo_splitting) set_splitting();
break;
case CodeKind::BUILTIN:
case CodeKind::FOR_TESTING:
case CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING:
if (FLAG_turbo_splitting) set_splitting();
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
set_source_positions();
@@ -161,7 +161,7 @@ std::unique_ptr<char[]> OptimizedCompilationInfo::GetDebugName() const {
StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
switch (code_kind()) {
case CodeKind::FOR_TESTING:
case CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING:
case CodeKind::BYTECODE_HANDLER:
case CodeKind::BUILTIN:
return StackFrame::STUB;


@@ -150,6 +150,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return code_kind() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
bool IsStub() const {
return code_kind() == CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING;
}
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {


@@ -476,7 +476,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << " " << Builtins::name(code.builtin_index());
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_)) {
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {


@@ -1589,13 +1589,6 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
Builtins::Name builtin_index) {
DCHECK(root_array_available());
return Operand(kRootRegister,
IsolateData::builtin_entry_slot_offset(builtin_index));
}
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
if (SmiValuesAre32Bits()) {
// The builtin_index register contains the builtin index as a Smi.
@@ -2832,15 +2825,13 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Set(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label*) {
Call(EntryFromBuiltinIndexAsOperand(target));
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
: Deoptimizer::kNonLazyDeoptExitSize);
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
Label* exit, DeoptimizeKind kind) {
USE(exit, kind);
NoRootArrayScope no_root_array(this);
// Save the deopt id in r13 (we don't need the roots array from now on).
movq(r13, Immediate(deopt_id));
call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { int3(); }


@@ -492,7 +492,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
@@ -512,9 +511,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
DeoptimizeKind kind,
Label* jump_deoptimization_entry_label);
void CallForDeoptimization(Address target, int deopt_id, Label* exit,
DeoptimizeKind kind);
void Trap() override;
void DebugBreak() override;


@@ -471,11 +471,8 @@ enum class DeoptimizeKind : uint8_t {
kSoft,
kBailout,
kLazy,
kLastDeoptimizeKind = kLazy
};
constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
inline size_t hash_value(DeoptimizeKind kind) {
return static_cast<size_t>(kind);
}
@@ -490,6 +487,7 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
case DeoptimizeKind::kBailout:
return os << "Bailout";
}
UNREACHABLE();
}
// Indicates whether the lookup is related to sloppy-mode block-scoped


@@ -962,7 +962,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
__ b(exit->label());
CodeGenResult result = AssembleDeoptimizerCall(exit);
if (result != kSuccess) return result;
unwinding_info_writer_.MarkBlockWillExit();
break;
}
case kArchRet:
@@ -3815,10 +3817,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {
__ CheckConstPool(true, false);
}
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {


@@ -3135,35 +3135,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {
__ ForceConstantPoolEmissionWithoutJump();
// We are conservative here, assuming all deopts are lazy deopts.
DCHECK_GE(Deoptimizer::kLazyDeoptExitSize,
Deoptimizer::kNonLazyDeoptExitSize);
__ CheckVeneerPool(
false, false,
static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
// Check which deopt kinds exist in this Code object, to avoid emitting jumps
// to unused entries.
bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
for (auto exit : *exits) {
saw_deopt_kind[static_cast<int>(exit->kind())] = true;
}
// Emit the jumps to deoptimization entries.
UseScratchRegisterScope scope(tasm());
Register scratch = scope.AcquireX();
STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
for (int i = 0; i < kDeoptimizeKindCount; i++) {
if (!saw_deopt_kind[i]) continue;
__ bind(&jump_deoptimization_entry_labels_[i]);
__ LoadEntryFromBuiltinIndex(Deoptimizer::GetDeoptimizationEntry(
isolate(), static_cast<DeoptimizeKind>(i)),
scratch);
__ Jump(scratch);
}
__ CheckVeneerPool(false, false,
deopt_count * Deoptimizer::kLazyDeoptExitSize);
}
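
The block removed above is where the original CL bounded the per-Code cost of the builtin-based scheme: at most one ldr/br jump is emitted per deopt kind that actually occurs, which is where the "max 32 bytes added overhead per Code object" figure in the description comes from. Below is an editor's standalone model of that bookkeeping; the names and the 8-byte jump size are assumptions taken from the description, not V8 code.

#include <cassert>
#include <vector>

enum class DeoptKind { kEager, kSoft, kBailout, kLazy };
constexpr int kDeoptKindCount = 4;
constexpr int kJumpSizeBytes = 8;  // ldr x16, [x26, #offset]; br x16

// One trampoline per kind that is actually used, regardless of exit count.
int TrampolineOverheadBytes(const std::vector<DeoptKind>& exits) {
  bool seen[kDeoptKindCount] = {false};
  for (DeoptKind kind : exits) seen[static_cast<int>(kind)] = true;
  int bytes = 0;
  for (bool used : seen) {
    if (used) bytes += kJumpSizeBytes;
  }
  return bytes;
}

int main() {
  std::vector<DeoptKind> exits(100, DeoptKind::kEager);
  exits.insert(exits.end(), 3, DeoptKind::kLazy);
  // 103 exits, but only two kinds occur, so only two 8-byte jumps are needed.
  assert(TrampolineOverheadBytes(exits) == 2 * kJumpSizeBytes);
  assert(TrampolineOverheadBytes(exits) <= kDeoptKindCount * kJumpSizeBytes);
  return 0;
}
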
void CodeGenerator::AssembleMove(InstructionOperand* source,


@@ -162,10 +162,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizeKind deopt_kind = exit->kind();
DeoptimizeReason deoptimization_reason = exit->reason();
Builtins::Name deopt_entry =
Address deopt_entry =
Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
Label* jump_deoptimization_entry_label =
&jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
deoptimization_id);
@@ -179,7 +177,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
}
tasm()->CallForDeoptimization(deopt_entry, deoptimization_id, exit->label(),
deopt_kind, jump_deoptimization_entry_label);
deopt_kind);
exit->set_emitted();
return kSuccess;
}
@@ -326,7 +324,7 @@ void CodeGenerator::AssembleCode() {
// For some targets, we must make sure that constant and veneer pools are
// emitted before emitting the deoptimization exits.
PrepareForDeoptimizationExits(&deoptimization_exits_);
PrepareForDeoptimizationExits(static_cast<int>(deoptimization_exits_.size()));
if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
deopt_exit_start_offset_ = tasm()->pc_offset();
@@ -340,7 +338,7 @@ void CodeGenerator::AssembleCode() {
// Deoptimizer::kSupportsFixedDeoptExitSizes is true, lazy deopts
// might need additional instructions.
auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
static_assert(DeoptimizeKind::kLazy == kLastDeoptimizeKind,
static_assert(DeoptimizeKind::kLazy == DeoptimizeKind::kLastDeoptimizeKind,
"lazy deopts are expected to be emitted last");
if (a->kind() != b->kind()) {
return a->kind() < b->kind();
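
The comparator shown above only guarantees "lazy deopts last" because kLazy has the highest DeoptimizeKind value, so ordering exits by kind pushes every lazy exit to the end of the exit section. A standalone illustration of that property follows (editor's sketch; the tie-break by id exists only to make the example deterministic, the real comparator continues past the lines captured here).

#include <algorithm>
#include <cassert>
#include <vector>

enum class DeoptKind { kEager, kSoft, kBailout, kLazy };
struct Exit {
  DeoptKind kind;
  int id;
};

int main() {
  std::vector<Exit> exits = {{DeoptKind::kLazy, 0},
                             {DeoptKind::kEager, 1},
                             {DeoptKind::kSoft, 2},
                             {DeoptKind::kLazy, 3}};
  std::sort(exits.begin(), exits.end(), [](const Exit& a, const Exit& b) {
    if (a.kind != b.kind) return a.kind < b.kind;
    return a.id < b.id;
  });
  // Lazy exits end up last because kLazy has the highest enum value.
  assert(exits[2].kind == DeoptKind::kLazy);
  assert(exits[3].kind == DeoptKind::kLazy);
  return 0;
}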


@@ -406,7 +406,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
InstructionOperand* op, MachineType type);
void MarkLazyDeoptSite();
void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
void PrepareForDeoptimizationExits(int deopt_count);
DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
size_t frame_state_offset);
@@ -446,14 +446,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
int handler_table_offset_ = 0;
int last_lazy_deopt_pc_ = 0;
// Deoptimization exits must be as small as possible, since their count grows
// with function size. {jump_deoptimization_entry_labels_} is an optimization
// to that effect, which extracts the (potentially large) instruction
// sequence for the final jump to the deoptimization entry into a single spot
// per Code object. All deopt exits can then near-call to this label. Note:
// not used on all architectures.
Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
// The maximal combined height of all frames produced upon deoptimization, and
// the maximal number of pushed arguments for function calls. Applied as an
// offset to the first stack check of an optimized function.


@@ -927,7 +927,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
__ jmp(exit->label());
CodeGenResult result = AssembleDeoptimizerCall(exit);
if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -4843,8 +4844,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {


@@ -1099,7 +1099,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
__ jmp(exit->label());
CodeGenResult result = AssembleDeoptimizerCall(exit);
if (result != kSuccess) return result;
unwinding_info_writer_.MarkBlockWillExit();
break;
}
case kArchRet:
@@ -4577,8 +4579,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
void CodeGenerator::IncrementStackAccessCounter(
InstructionOperand* source, InstructionOperand* destination) {


@@ -1163,6 +1163,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
// Make sure that we have generated the deopt entries code. This is in order
// to avoid triggering the generation of deopt entries later during code
// assembly.
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
pipeline_.Serialize();
if (!data_.broker()->is_concurrent_inlining()) {
@@ -3005,6 +3010,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
{
CompilationHandleScope compilation_scope(isolate, info);
@ -3229,7 +3235,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
bool use_mid_tier_register_allocator,
bool run_verifier) {
OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
data.InitializeFrameData(nullptr);

View File

@ -2,14 +2,246 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
const int Deoptimizer::kNonLazyDeoptExitSize = 0;
const int Deoptimizer::kLazyDeoptExitSize = 0;
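// When kSupportsFixedDeoptExitSizes is false the exit sizes above are unused;
// each deopt exit instead passes its deoptimization id explicitly (on arm it
// arrives in r10, see GenerateDeoptimizationEntries below).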
#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
// Everything but pc, lr and ip which will be saved but not restored.
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
{
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm, VFP32DREGS,
CpuFeatureScope::kDontCheckSupported);
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(scratch);
// Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
// Okay to not call AllocateStackSpace here because the size is a known
// small number and we need to use condition codes.
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d15);
}
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
// handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate)));
__ str(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
// The bailout id is passed in r10 by the caller.
__ mov(r2, r10);
// Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
__ mov(r3, lr);
__ add(r4, sp, Operand(kSavedRegistersAreaSize));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
__ PrepareCallCFunction(6);
__ mov(r0, Operand(0));
Label context_check;
__ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(r1, Operand(static_cast<int>(deopt_kind)));
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
__ mov(r5, Operand(ExternalReference::isolate_address(isolate)));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
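// The six arguments set up above correspond to Deoptimizer::New's parameters:
// function, deopt kind, bailout id, code address (the return address for lazy
// deopts), fp-to-sp delta and isolate.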
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset));
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register zero = r4;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ mov(zero, Operand(0));
__ strb(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
__ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
__ add(r2, r2, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r0); // Preserve deoptimizer object across call.
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
__ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r4 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
__ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
__ add(r1, r4, Operand(r1, LSL, 2));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
__ ldr(r2, MemOperand(r4, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r6);
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
__ add(r4, r4, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ cmp(r4, r1);
__ b(lt, &outer_push_loop);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
DwVfpRegister reg = DwVfpRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ vldr(reg, r1, src_offset);
}
// Push pc and continuation from the last output frame.
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
__ push(r6);
// Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset));
__ push(r6);
}
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.Acquire();
Register one = r4;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ mov(one, Operand(1));
__ strb(one, MemOperand(is_iterable));
}
// Remove sp, lr and pc.
__ Drop(3);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(scratch);
}
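// Control has been transferred to the continuation and never returns here;
// the stop() below only marks the spot as unreachable.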
__ stop();
}
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
const int kShift = n % 2 == 0 ? 0 : 32;
@ -33,5 +265,7 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
#undef __
} // namespace internal
} // namespace v8

View File

@ -3,7 +3,12 @@
// found in the LICENSE file.
#include "src/api/api.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
namespace v8 {
@ -17,6 +22,286 @@ const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 1 * kInstrSize;
#endif
#define __ masm->
namespace {
void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
int dst_offset, const CPURegList& reg_list,
const Register& temp0, const Register& temp1,
int src_offset = 0) {
DCHECK_EQ(reg_list.Count() % 2, 0);
UseScratchRegisterScope temps(masm);
CPURegList copy_to_input = reg_list;
int reg_size = reg_list.RegisterSizeInBytes();
DCHECK_EQ(temp0.SizeInBytes(), reg_size);
DCHECK_EQ(temp1.SizeInBytes(), reg_size);
// Compute some temporary addresses to avoid having the macro assembler set
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, sp, src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
for (int i = 0; i < reg_list.Count(); i += 2) {
masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
CPURegister reg0 = copy_to_input.PopLowestIndex();
CPURegister reg1 = copy_to_input.PopLowestIndex();
int offset0 = reg0.code() * reg_size;
int offset1 = reg1.code() * reg_size;
// Pair up adjacent stores, otherwise write them separately.
if (offset1 == offset0 + reg_size) {
masm->Stp(temp0, temp1, MemOperand(dst, offset0));
} else {
masm->Str(temp0, MemOperand(dst, offset0));
masm->Str(temp1, MemOperand(dst, offset1));
}
}
masm->Sub(dst, dst, dst_offset);
}
void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
const Register& src_base, int src_offset) {
DCHECK_EQ(reg_list.Count() % 2, 0);
UseScratchRegisterScope temps(masm);
CPURegList restore_list = reg_list;
int reg_size = restore_list.RegisterSizeInBytes();
// Compute a temporary address to avoid having the macro assembler set
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, src_base, src_offset);
// No need to restore padreg.
restore_list.Remove(padreg);
// Restore every register in restore_list from src.
while (!restore_list.IsEmpty()) {
CPURegister reg0 = restore_list.PopLowestIndex();
CPURegister reg1 = restore_list.PopLowestIndex();
int offset0 = reg0.code() * reg_size;
if (reg1 == NoCPUReg) {
masm->Ldr(reg0, MemOperand(src, offset0));
break;
}
int offset1 = reg1.code() * reg_size;
// Pair up adjacent loads, otherwise read them separately.
if (offset1 == offset0 + reg_size) {
masm->Ldp(reg0, reg1, MemOperand(src, offset0));
} else {
masm->Ldr(reg0, MemOperand(src, offset0));
masm->Ldr(reg1, MemOperand(src, offset1));
}
}
}
} // namespace
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
// TODO(all): This code needs to be revisited. We probably only need to save
// caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame.
// Save all allocatable double registers.
CPURegList saved_double_registers(
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Default()->allocatable_double_codes_mask());
DCHECK_EQ(saved_double_registers.Count() % 2, 0);
__ PushCPURegList(saved_double_registers);
// We save all the registers except sp, lr, platform register (x18) and the
// masm scratches.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
saved_registers.Remove(ip0);
saved_registers.Remove(ip1);
saved_registers.Remove(x18);
saved_registers.Combine(fp);
saved_registers.Align();
DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
__ Mov(x3, Operand(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate)));
__ Str(fp, MemOperand(x3));
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) +
(saved_double_registers.Count() * kDRegSize);
// Floating point registers are saved on the stack above core registers.
const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
// We don't use a bailout id for arm64, because we can compute the id from the
// address. Pass kMaxUInt32 instead to signify this.
Register bailout_id = x2;
__ Mov(bailout_id, kMaxUInt32);
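// With fixed-size deopt exits the real id is recomputed from the return
// address in the Deoptimizer constructor (see kSupportsFixedDeoptExitSizes),
// so only this marker value is passed here.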
Register code_object = x3;
Register fp_to_sp = x4;
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta.
__ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
__ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
// Ensure we can safely load from below fp.
DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
__ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// If x1 is a smi, zero x0.
__ Tst(x1, kSmiTagMask);
__ CzeroX(x0, eq);
__ Mov(x1, static_cast<int>(deopt_kind));
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
// - x4: fp-to-sp delta
__ Mov(x5, ExternalReference::isolate_address(isolate));
{
// Call Deoptimizer::New().
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve "deoptimizer" object in register x0.
Register deoptimizer = x0;
// Get the input frame descriptor pointer.
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
saved_registers, x2, x3);
// Copy double registers to the input frame.
CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
saved_double_registers, x2, x3, kDoubleRegistersOffset);
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.AcquireX();
__ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ strb(xzr, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
__ Drop(kSavedRegistersAreaSize / kXRegSize);
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
__ SlotAddress(x1, 0);
__ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
__ Mov(x5, unwind_limit);
__ CopyDoubleWords(x3, x1, x5);
__ Drop(unwind_limit);
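// At this point the whole input frame has been copied into the
// FrameDescription and popped off the stack.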
// Compute the output frame in the deoptimizer.
__ Push(padreg, x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
__ Mov(sp, scratch);
}
// Replace the current (input) frame with the output frames.
Label outer_push_loop, outer_loop_header;
__ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
__ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
__ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
__ B(&outer_loop_header);
__ Bind(&outer_push_loop);
Register current_frame = x2;
Register frame_size = x3;
__ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
__ Lsr(frame_size, x3, kSystemPointerSizeLog2);
__ Claim(frame_size);
__ Add(x7, current_frame, FrameDescription::frame_content_offset());
__ SlotAddress(x6, 0);
__ CopyDoubleWords(x6, x7, frame_size);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
RestoreRegList(masm, saved_double_registers, x1,
FrameDescription::double_registers_offset());
{
UseScratchRegisterScope temps(masm);
Register is_iterable = temps.AcquireX();
Register one = x4;
__ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ Mov(one, Operand(1));
__ strb(one, MemOperand(is_iterable));
}
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
DCHECK(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
RestoreRegList(masm, saved_registers, last_output_frame,
FrameDescription::registers_offset());
UseScratchRegisterScope temps(masm);
temps.Exclude(x17);
Register continuation = x17;
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
__ Autibsp();
#endif
__ Br(continuation);
}
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
@ -46,5 +331,7 @@ void FrameDescription::SetPc(intptr_t pc) {
pc_ = pc;
}
#undef __
} // namespace internal
} // namespace v8

View File

@ -26,7 +26,6 @@
#include "src/objects/debug-objects-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/smi.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/tracing/trace-event.h"
#include "torque-generated/exported-class-definitions.h"
@ -174,6 +173,25 @@ class FrameWriter {
unsigned top_offset_;
};
DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
Code* start = &deopt_entry_code_[0];
Code* end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1];
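// Register the entry code array as strong roots so that the runtime-generated
// deopt entry Code objects stay alive and remain visible to the GC.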
strong_roots_entry_ =
heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end));
}
DeoptimizerData::~DeoptimizerData() {
heap_->UnregisterStrongRoots(strong_roots_entry_);
}
Code DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) {
return deopt_entry_code_[static_cast<int>(kind)];
}
void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) {
deopt_entry_code_[static_cast<int>(kind)] = code;
}
Code Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_.IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
@ -190,7 +208,7 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
return Code();
}
// We rely on this function not causing a GC. It is called from generated code
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
unsigned bailout_id, Address from,
@ -198,13 +216,16 @@ Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
JSFunction function = JSFunction::cast(Object(raw_function));
Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
bailout_id, from, fp_to_sp_delta);
isolate->set_current_deoptimizer(deoptimizer);
CHECK_NULL(isolate->deoptimizer_data()->current_);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
}
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
Deoptimizer* result = isolate->GetAndClearCurrentDeoptimizer();
Deoptimizer* result = isolate->deoptimizer_data()->current_;
CHECK_NOT_NULL(result);
result->DeleteFrameDescriptions();
isolate->deoptimizer_data()->current_ = nullptr;
return result;
}
@ -471,6 +492,8 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind, bool reuse_code) {
case DeoptimizeKind::kBailout:
return "bailout";
}
FATAL("Unsupported deopt kind");
return nullptr;
}
namespace {
@ -513,9 +536,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
deoptimizing_throw_ = true;
}
DCHECK(bailout_id_ == kFixedExitSizeMarker ||
bailout_id_ < kMaxNumberOfEntries);
DCHECK_NE(from, kNullAddress);
compiled_code_ = FindOptimizedCode();
DCHECK(!compiled_code_.is_null());
@ -544,7 +564,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
input_ = new (size) FrameDescription(size, parameter_count);
if (kSupportsFixedDeoptExitSizes) {
DCHECK_EQ(bailout_id_, kFixedExitSizeMarker);
DCHECK_EQ(bailout_id_, kMaxUInt32);
// Calculate bailout id from return address.
DCHECK_GT(kNonLazyDeoptExitSize, 0);
DCHECK_GT(kLazyDeoptExitSize, 0);
@ -556,7 +576,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
Address lazy_deopt_start =
deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
// The deoptimization exits are sorted so that lazy deopt exits appear last.
static_assert(DeoptimizeKind::kLazy == kLastDeoptimizeKind,
static_assert(DeoptimizeKind::kLazy == DeoptimizeKind::kLastDeoptimizeKind,
"lazy deopts are expected to be emitted last");
// from_ is the value of the link register after the call to the
// deoptimizer, so for the last lazy deopt, from_ points to the first
@ -615,44 +635,42 @@ void Deoptimizer::DeleteFrameDescriptions() {
#endif // DEBUG
}
Builtins::Name Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
return Builtins::kDeoptimizationEntry_Eager;
case DeoptimizeKind::kSoft:
return Builtins::kDeoptimizationEntry_Soft;
case DeoptimizeKind::kBailout:
return Builtins::kDeoptimizationEntry_Bailout;
case DeoptimizeKind::kLazy:
return Builtins::kDeoptimizationEntry_Lazy;
}
Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind) {
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
CHECK(!data->deopt_entry_code(kind).is_null());
return data->deopt_entry_code(kind).raw_instruction_start();
}
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type_out) {
Code maybe_code = InstructionStream::TryLookupCode(isolate, addr);
if (maybe_code.is_null()) return false;
DeoptimizeKind type) {
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
Code code = data->deopt_entry_code(type);
if (code.is_null()) return false;
return addr == code.raw_instruction_start();
}
Code code = maybe_code;
switch (code.builtin_index()) {
case Builtins::kDeoptimizationEntry_Eager:
*type_out = DeoptimizeKind::kEager;
return true;
case Builtins::kDeoptimizationEntry_Soft:
*type_out = DeoptimizeKind::kSoft;
return true;
case Builtins::kDeoptimizationEntry_Bailout:
*type_out = DeoptimizeKind::kBailout;
return true;
case Builtins::kDeoptimizationEntry_Lazy:
*type_out = DeoptimizeKind::kLazy;
return true;
default:
return false;
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type) {
if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) {
*type = DeoptimizeKind::kEager;
return true;
}
UNREACHABLE();
if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) {
*type = DeoptimizeKind::kSoft;
return true;
}
if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) {
*type = DeoptimizeKind::kLazy;
return true;
}
if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kBailout)) {
*type = DeoptimizeKind::kBailout;
return true;
}
return false;
}
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
@ -800,8 +818,8 @@ void Deoptimizer::TraceDeoptMarked(Isolate* isolate) {
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
// When we call this function, the return address of the previous frame has
// been removed from the stack by the DeoptimizationEntry builtin, so the
// stack is not iterable by the SafeStackFrameIterator.
// been removed from the stack by GenerateDeoptimizationEntries() so the stack
// is not iterable by the SafeStackFrameIterator.
#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
#endif
@ -863,7 +881,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
// If we are supposed to go to the catch handler, find the catching frame
// for the catch and make sure we only deoptimize up to that frame.
// for the catch and make sure we only deoptimize upto that frame.
if (deoptimizing_throw_) {
size_t catch_handler_frame_index = count;
for (size_t i = count; i-- > 0;) {
@ -1194,7 +1212,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
// authenticated at the end of the DeoptimizationEntry builtin.
// authenticated at the end of GenerateDeoptimizationEntries.
const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
static_cast<intptr_t>(dispatch_builtin.InstructionStart()),
frame_writer.frame()->GetTop());
@ -1521,7 +1539,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
// authenticated at the end of the DeoptimizationEntry builtin.
// authenticated at the end of GenerateDeoptimizationEntries.
output_frame->SetPc(PointerAuthentication::SignAndCheckPC(
pc_value, frame_writer.frame()->GetTop()));
} else {
@ -1922,7 +1940,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
mode, frame_info.frame_has_result_stack_slot()));
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
// authenticated at the end of the DeoptimizationEntry builtin.
// authenticated at the end of GenerateDeoptimizationEntries.
const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
static_cast<intptr_t>(continue_to_builtin.InstructionStart()),
frame_writer.frame()->GetTop());
@ -2019,6 +2037,40 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
return parameter_slots * kSystemPointerSize;
}
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind) {
CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft ||
kind == DeoptimizeKind::kLazy || kind == DeoptimizeKind::kBailout);
DeoptimizerData* data = isolate->deoptimizer_data();
if (!data->deopt_entry_code(kind).is_null()) return;
MacroAssembler masm(isolate, CodeObjectRequired::kYes,
NewAssemblerBuffer(16 * KB));
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, masm.isolate(), kind);
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
// Allocate the code as immovable since the entry addresses will be used
// directly and there is no support for relocating them.
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.set_immovable()
.Build();
CHECK(isolate->heap()->IsImmovable(*code));
CHECK(data->deopt_entry_code(kind).is_null());
data->set_deopt_entry_code(kind, *code);
}
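// Eagerly creates the entry code for every deopt kind. Called ahead of time,
// e.g. from the compiler pipeline (see PipelineCompilationJob::PrepareJobImpl),
// so that no entry code needs to be generated later during code assembly.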
void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) {
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager);
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft);
EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kBailout);
}
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
: frame_size_(frame_size),
parameter_count_(parameter_count),

View File

@ -500,13 +500,12 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
static Builtins::Name GetDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind);
static Address GetDeoptimizationEntry(Isolate* isolate, DeoptimizeKind kind);
// Returns true if {addr} is a deoptimization entry and stores its type in
// {type_out}. Returns false if {addr} is not a deoptimization entry.
// {type}. Returns false if {addr} is not a deoptimization entry.
static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type_out);
DeoptimizeKind* type);
// Code generation support.
static int input_offset() { return offsetof(Deoptimizer, input_); }
@ -521,20 +520,19 @@ class Deoptimizer : public Malloced {
V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate);
static const int kNotDeoptimizationEntry = -1;
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
DeoptimizeKind kind);
static void EnsureCodeForDeoptimizationEntries(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
static constexpr int kMaxNumberOfEntries = 16384;
// This marker is passed to Deoptimizer::New as {bailout_id} on platforms
// that have fixed deopt sizes (see also kSupportsFixedDeoptExitSizes). The
// actual deoptimization id is then calculated from the return address.
static constexpr unsigned kFixedExitSizeMarker = kMaxUInt32;
static const int kMaxNumberOfEntries = 16384;
// Set to true when the architecture supports deoptimization exit sequences
// of a fixed size, that can be sorted so that the deoptimization index is
// deduced from the address of the deoptimization exit.
// TODO(jgruber): Remove this, and support for variable deopt exit sizes,
// once all architectures use fixed exit sizes.
static const bool kSupportsFixedDeoptExitSizes;
// Size of deoptimization exit sequence. This is only meaningful when
@ -557,6 +555,9 @@ class Deoptimizer : public Malloced {
Code FindOptimizedCode();
void DeleteFrameDescriptions();
static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind type);
void DoComputeOutputFrames();
void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index, bool goto_catch_handler);
@ -578,6 +579,10 @@ class Deoptimizer : public Malloced {
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);
static void GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate,
DeoptimizeKind kind);
static void MarkAllCodeForContext(NativeContext native_context);
static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
// Searches the list of known deoptimizing code for a Code object
@ -822,6 +827,36 @@ class FrameDescription {
}
};
class DeoptimizerData {
public:
explicit DeoptimizerData(Heap* heap);
~DeoptimizerData();
#ifdef DEBUG
bool IsDeoptEntryCode(Code code) const {
for (int i = 0; i < kLastDeoptimizeKind + 1; i++) {
if (code == deopt_entry_code_[i]) return true;
}
return false;
}
#endif // DEBUG
private:
Heap* heap_;
static const int kLastDeoptimizeKind =
static_cast<int>(DeoptimizeKind::kLastDeoptimizeKind);
Code deopt_entry_code_[kLastDeoptimizeKind + 1];
Code deopt_entry_code(DeoptimizeKind kind);
void set_deopt_entry_code(DeoptimizeKind kind, Code code);
Deoptimizer* current_;
StrongRootsEntry* strong_roots_entry_;
friend class Deoptimizer;
DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
};
class TranslationBuffer {
public:
explicit TranslationBuffer(Zone* zone) : contents_(zone) {}

View File

@ -4,14 +4,201 @@
#if V8_TARGET_ARCH_IA32
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 5;
const int Deoptimizer::kLazyDeoptExitSize = 5;
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
const int Deoptimizer::kNonLazyDeoptExitSize = 0;
const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ AllocateStackSpace(kDoubleRegsSize);
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int offset = code * kDoubleSize;
__ movsd(Operand(esp, offset), xmm_reg);
}
__ pushad();
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
__ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
// The bailout id is passed in ebx by the caller.
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
__ sub(edx, ebp);
__ neg(edx);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6, eax);
__ mov(eax, Immediate(0));
Label context_check;
__ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(edi, &context_check);
__ mov(eax, Operand(ebp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kSystemPointerSize),
Immediate(static_cast<int>(deopt_kind)));
__ mov(Operand(esp, 2 * kSystemPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate)));
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
__ mov(esi, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ pop(Operand(esi, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Fill in the double input registers.
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize;
__ movsd(xmm0, Operand(esp, src_offset));
__ movsd(Operand(esi, dst_offset), xmm0);
}
// Clear all FPU exceptions.
// TODO(ulan): Find out why the TOP register is not zero here in some cases,
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
__ mov_b(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate), edx),
Immediate(0));
// Remove the return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
__ add(ecx, esp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
__ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, esi);
__ mov(Operand(esp, 0 * kSystemPointerSize), eax);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(eax);
__ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one
// past the last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: esi = current FrameDescription*, ecx = loop
// index.
__ mov(esi, Operand(eax, 0));
__ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the XMM registers.
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(esi, src_offset));
}
// Push pc and continuation from the last output frame.
__ push(Operand(esi, FrameDescription::pc_offset()));
__ push(Operand(esi, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ push(Operand(esi, offset));
}
__ mov_b(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate), edx),
Immediate(1));
// Restore the registers from the stack.
__ popad();
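// popad also reloads the root register from the output frame, so re-establish
// it below before returning to generated code.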
__ InitializeRootRegister();
// Return to the continuation point.
__ ret(0);
}
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@ -33,6 +220,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
#undef __
} // namespace internal
} // namespace v8

View File

@ -4,14 +4,217 @@
#if V8_TARGET_ARCH_X64
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 7;
const int Deoptimizer::kLazyDeoptExitSize = 7;
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
const int Deoptimizer::kNonLazyDeoptExitSize = 0;
const int Deoptimizer::kLazyDeoptExitSize = 0;
#define __ masm->
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Isolate* isolate,
DeoptimizeKind deopt_kind) {
NoRootArrayScope no_root_array(masm);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ AllocateStackSpace(kDoubleRegsSize);
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int offset = code * kDoubleSize;
__ Movsd(Operand(rsp, offset), xmm_reg);
}
// We push all registers onto the stack, even though we do not need to
// restore all of them later.
for (int i = 0; i < kNumberOfRegisters; i++) {
Register r = Register::from_code(i);
__ pushq(r);
}
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
__ Store(
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
rbp);
// We use the register below to keep the value of the fifth argument
// temporarily. Unfortunately we can't store it directly in r8 (used for
// passing it on Linux), since it is another parameter passing register on
// Windows.
Register arg5 = r11;
// The bailout id is passed in r13 by the caller.
__ movq(arg_reg_3, r13);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
__ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ subq(arg5, rbp);
__ negq(arg5);
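// arg5 now holds fp minus the sp the optimized frame had at the deopt exit;
// adding kSavedRegistersAreaSize and kPCOnStackSize above undoes the pushes
// performed by this code and by the exit's call instruction.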
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
__ movq(rax, Immediate(0));
Label context_check;
__ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(rdi, &context_check);
__ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movq(arg_reg_1, rax);
__ Set(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
// On Windows, put the arguments on the stack (PrepareCallCFunction has
// created space for this). On Linux, pass the arguments in r8 and r9.
#ifdef V8_TARGET_OS_WIN
__ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
__ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
#else
__ movq(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate));
#endif
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PopQuad(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ popq(Operand(rbx, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
// walk the stack without the return address.
__ movb(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate)),
Immediate(0));
// Remove the return address from the stack.
__ addq(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ Pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ pushq(rax);
__ PrepareCallCFunction(2);
__ movq(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
}
__ popq(rax);
__ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ Movsd(xmm_reg, Operand(rbx, src_offset));
}
// Push pc and continuation from the last output frame.
__ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
__ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PushQuad(Operand(rbx, offset));
}
// Restore the registers from the stack.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
Register r = Register::from_code(i);
// Do not restore rsp, simply pop the value into the next register
// and overwrite this afterwards.
if (r == rsp) {
DCHECK_GT(i, 0);
r = Register::from_code(i - 1);
}
__ popq(r);
}
__ movb(__ ExternalReferenceAsOperand(
ExternalReference::stack_is_iterable_address(isolate)),
Immediate(1));
// Return to the continuation point.
__ ret(0);
}
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@ -33,6 +236,8 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
#undef __
} // namespace internal
} // namespace v8

View File

@ -253,7 +253,8 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
host.as_wasm_code()->native_module()->GetRuntimeStubId(
relocinfo->wasm_stub_call_address()));
out->AddFormatted(" ;; wasm stub: %s", runtime_stub_name);
} else if (RelocInfo::IsRuntimeEntry(rmode) && isolate != nullptr) {
} else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
isolate->deoptimizer_data() != nullptr) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo->target_address();
DeoptimizeKind type;

View File

@ -1296,8 +1296,9 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(code());
if (code().kind() == CodeKind::FOR_TESTING) {
os << "\n - FOR_TESTING";
if (code().kind() == CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING) {
os << "\n - FunctionTester function";
} else if (ActiveTierIsIgnition()) {
os << "\n - interpreted";
if (shared().HasBytecodeArray()) {

View File

@ -57,10 +57,6 @@ class IsolateData final {
static constexpr int builtin_entry_table_offset() {
return kBuiltinEntryTableOffset - kIsolateRootBias;
}
static constexpr int builtin_entry_slot_offset(Builtins::Name builtin_index) {
CONSTEXPR_DCHECK(Builtins::IsBuiltinId(builtin_index));
return builtin_entry_table_offset() + builtin_index * kSystemPointerSize;
}
// Root-register-relative offset of the builtins table.
static constexpr int builtins_table_offset() {

View File

@ -3076,6 +3076,8 @@ void Isolate::Deinit() {
ReleaseSharedPtrs();
delete deoptimizer_data_;
deoptimizer_data_ = nullptr;
string_table_.reset();
builtins_.TearDown();
bootstrapper_->TearDown();
@ -3535,6 +3537,8 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
}
DCHECK_NOT_NULL(wasm_engine_);
deoptimizer_data_ = new DeoptimizerData(heap());
if (setup_delegate_ == nullptr) {
setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
}

View File

@ -77,7 +77,7 @@ class CompilationStatistics;
class CompilerDispatcher;
class Counters;
class Debug;
class Deoptimizer;
class DeoptimizerData;
class DescriptorLookupCache;
class EmbeddedFileWriterInterface;
class EternalHandles;
@ -1024,17 +1024,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
Deoptimizer* GetAndClearCurrentDeoptimizer() {
Deoptimizer* result = current_deoptimizer_;
CHECK_NOT_NULL(result);
current_deoptimizer_ = nullptr;
return result;
}
void set_current_deoptimizer(Deoptimizer* deoptimizer) {
DCHECK_NULL(current_deoptimizer_);
DCHECK_NOT_NULL(deoptimizer);
current_deoptimizer_ = deoptimizer;
}
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
void set_deoptimizer_lazy_throw(bool value) {
deoptimizer_lazy_throw_ = value;
@ -1740,7 +1730,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
Logger* logger_ = nullptr;
StubCache* load_stub_cache_ = nullptr;
StubCache* store_stub_cache_ = nullptr;
Deoptimizer* current_deoptimizer_ = nullptr;
DeoptimizerData* deoptimizer_data_ = nullptr;
bool deoptimizer_lazy_throw_ = false;
MaterializedObjectStore* materialized_object_store_ = nullptr;
bool capture_stack_trace_for_uncaught_exceptions_ = false;

View File

@ -150,6 +150,10 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
if (result.is_null()) return MaybeHandle<Code>();
}
if (!is_movable_) {
result = heap->EnsureImmovableCode(result, object_size);
}
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowHeapAllocation no_gc;

View File

@ -837,6 +837,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
CodeBuilder& set_immovable() {
is_movable_ = false;
return *this;
}
CodeBuilder& set_is_turbofanned() {
is_turbofanned_ = true;
return *this;
@ -883,6 +888,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
bool is_movable_ = true;
bool is_turbofanned_ = false;
int stack_slots_ = 0;
};

View File

@ -4981,6 +4981,33 @@ void Heap::DisableInlineAllocation() {
}
}
HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
// Code objects which should stay at a fixed address are either allocated in
// the first page of code space or in large object space, or (during snapshot
// creation) have their containing page marked as immovable.
DCHECK(!heap_object.is_null());
#ifndef V8_ENABLE_THIRD_PARTY_HEAP
DCHECK(code_space_->Contains(heap_object));
#endif
DCHECK_GE(object_size, 0);
if (!Heap::IsImmovable(heap_object)) {
if (isolate()->serializer_enabled() ||
code_space_->first_page()->Contains(heap_object.address())) {
BasicMemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
// be moved.
CreateFillerObjectAt(heap_object.address(), object_size,
ClearRecordedSlots::kNo);
heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
UnprotectAndRegisterMemoryChunk(heap_object);
ZapCodeObject(heap_object.address(), object_size);
OnAllocationEvent(heap_object, object_size);
}
}
return heap_object;
}
HeapObject Heap::AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment) {
@ -5033,6 +5060,40 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
return HeapObject();
}
// TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
// parameter and just do what's necessary.
HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
AllocationResult alloc = code_lo_space()->AllocateRaw(size);
HeapObject result;
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
// Two GCs before panicking.
for (int i = 0; i < 2; i++) {
CollectGarbage(alloc.RetrySpace(),
GarbageCollectionReason::kAllocationFailure);
alloc = code_lo_space()->AllocateRaw(size);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
}
isolate()->counters()->gc_last_resort_from_handles()->Increment();
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
AlwaysAllocateScope scope(this);
alloc = code_lo_space()->AllocateRaw(size);
}
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
}
// TODO(1181417): Fix this.
FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
return HeapObject();
}
void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();

View File

@ -1977,10 +1977,17 @@ class Heap {
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
V8_WARN_UNUSED_RESULT HeapObject AllocateRawCodeInLargeObjectSpace(int size);
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
AllocationType allocation);
// Takes a code object and checks whether it is in memory that is not subject
// to compaction. This method returns a new code object at an immovable memory
// location if the original code object was movable.
HeapObject EnsureImmovableCode(HeapObject heap_object, int object_size);
// Allocates a partial map for bootstrapping.
V8_WARN_UNUSED_RESULT AllocationResult
AllocatePartialMap(InstanceType instance_type, int instance_size);

View File

@ -2175,7 +2175,7 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
return; // We log this later using LogCompiledFunctions.
case CodeKind::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
case CodeKind::FOR_TESTING:
case CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING:
description = "STUB code";
tag = CodeEventListener::STUB_TAG;
break;


@ -11,20 +11,22 @@
namespace v8 {
namespace internal {
#define CODE_KIND_LIST(V) \
V(TURBOFAN) \
V(BYTECODE_HANDLER) \
V(FOR_TESTING) \
V(BUILTIN) \
V(REGEXP) \
V(WASM_FUNCTION) \
V(WASM_TO_CAPI_FUNCTION) \
V(WASM_TO_JS_FUNCTION) \
V(JS_TO_WASM_FUNCTION) \
V(JS_TO_JS_FUNCTION) \
V(C_WASM_ENTRY) \
V(INTERPRETED_FUNCTION) \
V(NATIVE_CONTEXT_INDEPENDENT) \
// TODO(jgruber): Convert deopt entries to builtins and rename
// DEOPT_ENTRIES_OR_FOR_TESTING to FOR_TESTING.
#define CODE_KIND_LIST(V) \
V(TURBOFAN) \
V(BYTECODE_HANDLER) \
V(DEOPT_ENTRIES_OR_FOR_TESTING) \
V(BUILTIN) \
V(REGEXP) \
V(WASM_FUNCTION) \
V(WASM_TO_CAPI_FUNCTION) \
V(WASM_TO_JS_FUNCTION) \
V(JS_TO_WASM_FUNCTION) \
V(JS_TO_JS_FUNCTION) \
V(C_WASM_ENTRY) \
V(INTERPRETED_FUNCTION) \
V(NATIVE_CONTEXT_INDEPENDENT) \
V(TURBOPROP)
enum class CodeKind {
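For context, CODE_KIND_LIST is a standard X-macro: each V(name) entry becomes one CodeKind enumerator, so restoring the DEOPT_ENTRIES_OR_FOR_TESTING entry here restores the corresponding enum value (and FOR_TESTING goes away again). A minimal sketch of the usual expansion, with an illustrative helper-macro name rather than the exact one used in code-kind.h:

  #define DEFINE_CODE_KIND_ENUM(name) name,
  enum class CodeKind {
    CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)  // TURBOFAN, BYTECODE_HANDLER, ...
  };
  #undef DEFINE_CODE_KIND_ENUM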


@ -89,6 +89,13 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
if (!obj.IsCode()) return false;
Code code = Code::cast(obj);
// TODO(v8:8768): Deopt entry code should not be serialized.
if (code.kind() == CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING &&
isolate->deoptimizer_data() != nullptr) {
if (isolate->deoptimizer_data()->IsDeoptEntryCode(code)) return false;
}
if (code.kind() == CodeKind::REGEXP) return false;
if (!code.is_builtin()) return true;
if (code.is_off_heap_trampoline()) return false;


@ -21,8 +21,9 @@ Handle<Code> AssembleCodeImpl(std::function<void(MacroAssembler&)> assemble) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
if (FLAG_print_code) {
code->Print();
}


@ -23,7 +23,8 @@ class CodeAssemblerTester {
const char* name = "test")
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
state_(isolate, &zone_, descriptor, CodeKind::FOR_TESTING, name,
state_(isolate, &zone_, descriptor,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, name,
PoisoningMitigationLevel::kDontPoison, Builtins::kNoBuiltinId) {}
// Test generating code for a stub. Assumes VoidDescriptor call interface.
@ -47,7 +48,8 @@ class CodeAssemblerTester {
const char* name = "test")
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
state_(isolate, &zone_, call_descriptor, CodeKind::FOR_TESTING, name,
state_(isolate, &zone_, call_descriptor,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, name,
PoisoningMitigationLevel::kDontPoison, Builtins::kNoBuiltinId) {}
CodeAssemblerState* state() { return &state_; }


@ -91,7 +91,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
}
private:
CodeKind kind_ = CodeKind::FOR_TESTING;
CodeKind kind_ = CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING;
MaybeHandle<Code> code_;
};


@ -965,10 +965,10 @@ class CodeGeneratorTester {
int extra_stack_space = 0)
: zone_(environment->main_zone()),
info_(ArrayVector("test"), environment->main_zone(),
CodeKind::FOR_TESTING),
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING),
linkage_(environment->test_descriptor()),
frame_(environment->test_descriptor()->CalculateFixedFrameSize(
CodeKind::FOR_TESTING)) {
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)) {
// Pick half of the stack parameters at random and move them into spill
// slots, separated by `extra_stack_space` bytes.
// When testing a move with stack slots using CheckAssembleMove or


@ -109,7 +109,7 @@ TEST(TestLinkageStubCall) {
Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable = Builtins::CallableFor(isolate, Builtins::kToNumber);
OptimizedCompilationInfo info(ArrayVector("test"), &zone,
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
auto call_descriptor = Linkage::GetStubCallDescriptor(
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
@ -130,7 +130,7 @@ TEST(TestFPLinkageStubCall) {
Callable callable =
Builtins::CallableFor(isolate, Builtins::kWasmFloat64ToNumber);
OptimizedCompilationInfo info(ArrayVector("test"), &zone,
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
auto call_descriptor = Linkage::GetStubCallDescriptor(
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);


@ -246,7 +246,7 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
OptimizedCompilationInfo info(ArrayVector("testing"), graph->zone(),
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
Handle<Code> code = Pipeline::GenerateCodeForTesting(
&info, isolate, call_descriptor, graph,
AssemblerOptions::Default(isolate), schedule)


@ -48,6 +48,7 @@
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress5831) \
V(Regress10560) \
V(Regress538257) \
V(Regress587004) \


@ -395,7 +395,9 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
CodeDesc desc;
masm.GetCode(i_isolate, &desc);
Handle<Code> code_handle =
Factory::CodeBuilder(i_isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(i_isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
heap::AbandonCurrentlyFreeMemory(heap->old_space());
Handle<HeapNumber> value_handle(
i_isolate->factory()->NewHeapNumber<AllocationType::kOld>(1.1));


@ -206,8 +206,9 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
Handle<Code> copy;
{
@ -230,8 +231,9 @@ static void CheckFindCodeObject(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
CHECK(code->IsCode());
HeapObject obj = HeapObject::cast(*code);
@ -242,8 +244,9 @@ static void CheckFindCodeObject(Isolate* isolate) {
CHECK_EQ(*code, found);
}
Handle<Code> copy =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> copy = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
HeapObject obj_copy = HeapObject::cast(*copy);
Object not_right =
isolate->FindCodeObject(obj_copy.address() + obj_copy.Size() / 2);
@ -6517,6 +6520,68 @@ HEAP_TEST(Regress670675) {
DCHECK(marking->IsStopped());
}
namespace {
Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
Assembler assm(AssemblerOptions{});
const int kNumberOfNops = 1 << 10;
for (int i = 0; i < kNumberOfNops; i++) {
assm.nop(); // supported on all architectures
}
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.set_immovable()
.Build();
CHECK(code->IsCode());
return code;
}
} // namespace
HEAP_TEST(Regress5831) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
// Used to ensure that the generated code is not collected.
const int kInitialSize = 32;
Handle<FixedArray> array = isolate->factory()->NewFixedArray(kInitialSize);
// Ensure that all immovable code space pages are full and we overflow into
// LO_SPACE.
const int kMaxIterations = 1 << 16;
bool overflowed_into_lospace = false;
for (int i = 0; i < kMaxIterations; i++) {
Handle<Code> code = GenerateDummyImmovableCode(isolate);
array = FixedArray::SetAndGrow(isolate, array, i, code);
CHECK(heap->code_space()->Contains(*code) ||
heap->code_lo_space()->Contains(*code));
if (heap->code_lo_space()->Contains(*code)) {
overflowed_into_lospace = true;
break;
}
}
CHECK(overflowed_into_lospace);
// Fake a serializer run.
isolate->serializer_enabled_ = true;
// Generate the code.
Handle<Code> code = GenerateDummyImmovableCode(isolate);
CHECK_GE(MemoryChunkLayout::MaxRegularCodeObjectSize(), code->Size());
CHECK(!heap->code_space()->first_page()->Contains(code->address()));
// Ensure it's not in large object space.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*code);
CHECK(chunk->owner_identity() != LO_SPACE);
CHECK(chunk->NeverEvacuate());
}
HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
if (!FLAG_incremental_marking) return;
ManualGCScope manual_gc_scope;
@ -7258,8 +7323,9 @@ TEST(Regress10900) {
masm.Push(ReadOnlyRoots(heap).undefined_value_handle());
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
{
// Generate multiple code pages.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());


@ -51,7 +51,9 @@ TEST(WeakReferencesBasic) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
CHECK(code->IsCode());
lh->set_data1(HeapObjectReference::Weak(*code));


@ -203,7 +203,8 @@ TEST(TryProbeStubCache) {
// Generate some number of handlers.
for (int i = 0; i < 30; i++) {
handlers.push_back(CreateCodeOfKind(CodeKind::FOR_TESTING));
handlers.push_back(
CreateCodeOfKind(CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING));
}
// Ensure that GC does happen because from now on we are going to fill our


@ -60,8 +60,9 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -96,8 +97,9 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -141,8 +143,9 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -187,8 +190,9 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -318,7 +322,9 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -380,7 +386,9 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -410,8 +418,9 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -477,8 +486,9 @@ static void TestRoundingMode(VCVTTypes types,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -659,8 +669,9 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -768,8 +779,9 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -873,8 +885,9 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -967,8 +980,9 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -1093,7 +1107,9 @@ TEST(13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -1164,8 +1180,9 @@ TEST(14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -2045,7 +2062,9 @@ TEST(15) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -2320,8 +2339,9 @@ TEST(16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -2399,7 +2419,9 @@ TEST(sdiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -2459,7 +2481,9 @@ TEST(udiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -2487,8 +2511,9 @@ TEST(smmla) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2512,8 +2537,9 @@ TEST(smmul) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2537,8 +2563,9 @@ TEST(sxtb) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2562,8 +2589,9 @@ TEST(sxtab) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2587,8 +2615,9 @@ TEST(sxth) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2612,8 +2641,9 @@ TEST(sxtah) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2637,8 +2667,9 @@ TEST(uxtb) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2662,8 +2693,9 @@ TEST(uxtab) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2687,8 +2719,9 @@ TEST(uxth) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2712,8 +2745,9 @@ TEST(uxtah) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2754,7 +2788,9 @@ TEST(rbit) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
@ -2834,7 +2870,8 @@ TEST(code_relative_offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.set_self_reference(code_object)
.Build();
auto f = GeneratedCode<F_iiiii>::FromCode(*code);
@ -2874,8 +2911,9 @@ TEST(msr_mrs) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -2972,7 +3010,9 @@ TEST(ARMv8_float32_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3073,7 +3113,9 @@ TEST(ARMv8_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3210,7 +3252,9 @@ TEST(ARMv8_vsel) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3301,7 +3345,9 @@ TEST(ARMv8_vminmax_f64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3381,7 +3427,9 @@ TEST(ARMv8_vminmax_f32) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3514,7 +3562,9 @@ static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(
CodeDesc desc;
assm.GetCode(assm.isolate(), &desc);
Handle<Code> code =
Factory::CodeBuilder(assm.isolate(), desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(assm.isolate(), desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3676,8 +3726,9 @@ TEST(unaligned_loads) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3719,8 +3770,9 @@ TEST(unaligned_stores) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -3819,8 +3871,9 @@ TEST(vswp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -4032,7 +4085,9 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -4052,7 +4107,9 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -4075,7 +4132,9 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);


@ -158,16 +158,18 @@ static void InitializeVM() {
#define RUN() simulator.RunFrom(reinterpret_cast<Instruction*>(code->entry()))
#define END() \
__ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
core.Dump(&masm); \
__ PopCalleeSavedRegisters(); \
__ Ret(); \
{ \
CodeDesc desc; \
__ GetCode(masm.isolate(), &desc); \
code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); \
if (FLAG_print_code) code->Print(); \
#define END() \
__ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
core.Dump(&masm); \
__ PopCalleeSavedRegisters(); \
__ Ret(); \
{ \
CodeDesc desc; \
__ GetCode(masm.isolate(), &desc); \
code = Factory::CodeBuilder(isolate, desc, \
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING) \
.Build(); \
if (FLAG_print_code) code->Print(); \
}
#else // ifdef USE_SIMULATOR.
@ -204,15 +206,17 @@ static void InitializeVM() {
f.Call(); \
}
#define END() \
core.Dump(&masm); \
__ PopCalleeSavedRegisters(); \
__ Ret(); \
{ \
CodeDesc desc; \
__ GetCode(masm.isolate(), &desc); \
code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); \
if (FLAG_print_code) code->Print(); \
#define END() \
core.Dump(&masm); \
__ PopCalleeSavedRegisters(); \
__ Ret(); \
{ \
CodeDesc desc; \
__ GetCode(masm.isolate(), &desc); \
code = Factory::CodeBuilder(isolate, desc, \
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING) \
.Build(); \
if (FLAG_print_code) code->Print(); \
}
#endif // ifdef USE_SIMULATOR.
@ -14883,7 +14887,8 @@ TEST(pool_size) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
code = Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.set_self_reference(masm.CodeObject())
.Build();


@ -62,8 +62,9 @@ TEST(AssemblerIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -100,8 +101,9 @@ TEST(AssemblerIa321) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -142,8 +144,9 @@ TEST(AssemblerIa322) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -171,8 +174,9 @@ TEST(AssemblerIa323) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -200,8 +204,9 @@ TEST(AssemblerIa324) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -228,8 +233,9 @@ TEST(AssemblerIa325) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
@ -261,8 +267,9 @@ TEST(AssemblerIa326) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -293,8 +300,9 @@ TEST(AssemblerIa328) {
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -376,8 +384,9 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@ -427,8 +436,9 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
@ -492,8 +502,9 @@ TEST(AssemblerIa32Extractps) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -531,8 +542,9 @@ TEST(AssemblerIa32SSE) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -564,8 +576,9 @@ TEST(AssemblerIa32SSE3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -792,8 +805,9 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1020,8 +1034,9 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1128,8 +1143,9 @@ TEST(AssemblerIa32BMI1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1176,8 +1192,9 @@ TEST(AssemblerIa32LZCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1224,8 +1241,9 @@ TEST(AssemblerIa32POPCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1370,8 +1388,9 @@ TEST(AssemblerIa32BMI2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1414,8 +1433,9 @@ TEST(AssemblerIa32JumpTables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1462,8 +1482,9 @@ TEST(AssemblerIa32JumpTables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1505,8 +1526,9 @@ TEST(Regress621926) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;


@ -64,8 +64,9 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0xABC), res);
@ -99,8 +100,9 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F1>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275, res);
@ -236,8 +238,9 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0x31415926), res);
@ -336,8 +339,9 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.a = 1.5e14;
@ -438,8 +442,9 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e22;
t.b = 2.75e11;
@ -499,8 +504,9 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e4;
t.b = 2.75e8;
@ -567,8 +573,9 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@ -659,8 +666,9 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e14;
t.b = 2.75e11;
@ -756,7 +764,9 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.input = 0x12345678;
f.Call(&t, 0x0, 0, 0, 0);
@ -800,8 +810,9 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
USE(code);
}
@ -851,8 +862,9 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
t.b_word = 0x0FF00FF0; // 0x0FF00FF0 -> 0x as double.
@ -978,8 +990,9 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
@ -1103,8 +1116,9 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.x = 1;
t.y = 2;
@ -1156,8 +1170,9 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.cvt_big_in = 0xFFFFFFFF;
@ -1276,8 +1291,9 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.round_up_in = 123.51;
@ -1381,7 +1397,9 @@ TEST(seleqz_selnez) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
(f.Call(&test, 0, 0, 0, 0));
@ -1495,7 +1513,9 @@ TEST(min_max) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@ -1605,7 +1625,9 @@ TEST(rint_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@ -1652,7 +1674,9 @@ TEST(sel) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const int test_size = 3;
@ -1784,7 +1808,9 @@ TEST(rint_s) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@ -1827,8 +1853,9 @@ TEST(Cvt_d_uw) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.input = inputs[i];
@ -1909,7 +1936,9 @@ TEST(mina_maxa) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@ -1989,7 +2018,9 @@ TEST(trunc_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -2069,7 +2100,9 @@ TEST(movz_movn) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -2170,7 +2203,9 @@ TEST(movt_movd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
(f.Call(&test, 0, 0, 0, 0));
@ -2254,8 +2289,9 @@ TEST(cvt_w_d) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@ -2321,8 +2357,9 @@ TEST(trunc_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -2390,8 +2427,9 @@ TEST(round_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -2462,7 +2500,9 @@ TEST(round_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -2534,8 +2574,9 @@ TEST(sub) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@ -2613,8 +2654,9 @@ TEST(sqrt_rsqrt_recip) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
@ -2693,8 +2735,9 @@ TEST(neg) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
@ -2750,8 +2793,9 @@ TEST(mul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@ -2806,8 +2850,9 @@ TEST(mov) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -2873,8 +2918,9 @@ TEST(floor_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -2945,7 +2991,9 @@ TEST(floor_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -3016,8 +3064,9 @@ TEST(ceil_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -3088,7 +3137,9 @@ TEST(ceil_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@ -3155,8 +3206,9 @@ TEST(jump_tables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -3220,8 +3272,9 @@ TEST(jump_tables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -3292,8 +3345,9 @@ TEST(jump_tables3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -3344,7 +3398,9 @@ TEST(BITSWAP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x781A15C3;
t.r2 = 0x8B71FCDE;
@ -3478,7 +3534,9 @@ TEST(class_fmt) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
@ -3568,8 +3626,9 @@ TEST(ABS) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = -2.0;
test.b = -2.0;
@ -3661,8 +3720,9 @@ TEST(ADD_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = 2.0;
test.b = 3.0;
@ -3816,7 +3876,9 @@ TEST(C_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
@ -4016,7 +4078,9 @@ TEST(CMP_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
@ -4201,8 +4265,9 @@ TEST(CVT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.cvt_d_s_in = -0.51;
@ -4413,8 +4478,9 @@ TEST(DIV_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
@ -4505,8 +4571,9 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -4560,8 +4627,9 @@ uint32_t run_aluipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@ -4613,8 +4681,9 @@ uint32_t run_auipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@ -4688,8 +4757,9 @@ uint32_t run_lwpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -4768,8 +4838,9 @@ uint32_t run_jic(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -4839,8 +4910,9 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -4945,8 +5017,9 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -5115,8 +5188,9 @@ uint32_t run_jialc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -5163,8 +5237,9 @@ static uint32_t run_addiupc(int32_t imm19) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@ -5245,8 +5320,9 @@ int32_t run_bc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -5326,8 +5402,9 @@ int32_t run_balc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -5350,8 +5427,9 @@ uint32_t run_aui(uint32_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -5439,8 +5517,9 @@ uint32_t run_bal(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@ -5491,8 +5570,9 @@ TEST(Trampoline) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int32_t res = reinterpret_cast<int32_t>(f.Call(42, 42, 0, 0, 0));
@ -5618,8 +5698,9 @@ void helper_madd_msub_maddf_msubf(F func) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
@ -5703,8 +5784,9 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@ -5807,8 +5889,9 @@ TEST(MSA_fill_copy) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -5875,8 +5958,9 @@ TEST(MSA_fill_copy_2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -5932,8 +6016,9 @@ TEST(MSA_fill_copy_3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -5977,8 +6062,9 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -6078,7 +6164,9 @@ TEST(MSA_move_v) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -6123,7 +6211,9 @@ void run_msa_sldi(OperFunc GenerateOperation,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -6207,8 +6297,9 @@ void run_msa_ctc_cfc(uint32_t value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -6317,8 +6408,9 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -6495,8 +6587,9 @@ uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@ -6545,8 +6638,9 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@ -6607,8 +6701,9 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -7027,8 +7122,9 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -8077,8 +8173,9 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -8165,8 +8262,9 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -8638,8 +8736,9 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -8716,8 +8815,9 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -8795,8 +8895,9 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -9801,8 +9902,9 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif

File diff suppressed because it is too large


@ -60,8 +60,9 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -96,8 +97,9 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -144,8 +146,9 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -213,8 +216,9 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -327,7 +331,7 @@ TEST(4) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@ -387,7 +391,7 @@ TEST(5) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@ -422,7 +426,7 @@ TEST(6) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@ -497,7 +501,7 @@ static void TestRoundingMode(VCVTTypes types,
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@ -684,7 +688,7 @@ TEST(8) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@ -799,7 +803,7 @@ TEST(9) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@ -910,7 +914,7 @@ TEST(10) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@ -1007,7 +1011,7 @@ TEST(11) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
CodeKind::FOR_TESTING,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG


@ -63,8 +63,9 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -102,8 +103,9 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -153,8 +155,9 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -208,8 +211,9 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -250,7 +254,7 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, CodeKind::FOR_TESTING, Handle<Code>());
desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@ -278,7 +282,7 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, CodeKind::FOR_TESTING, Handle<Code>());
desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@ -312,7 +316,7 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, CodeKind::FOR_TESTING, Handle<Code>());
desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@ -344,7 +348,7 @@ TEST(7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, CodeKind::FOR_TESTING, Handle<Code>());
desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@ -375,7 +379,7 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, CodeKind::FOR_TESTING, Handle<Code>());
desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@ -402,7 +406,7 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, CodeKind::FOR_TESTING, Handle<Code>());
desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@ -486,8 +490,9 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -540,8 +545,9 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -594,8 +600,9 @@ TEST(12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -658,8 +665,9 @@ TEST(13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -749,8 +757,9 @@ TEST(14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -839,8 +848,9 @@ TEST(15) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -886,8 +896,9 @@ TEST(16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -960,8 +971,9 @@ TEST(17) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif
@ -1052,8 +1064,9 @@ TEST(18) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
code->Print();
#endif


@ -743,8 +743,9 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F0>::FromCode(*code);
int res = f.Call();
@ -800,8 +801,9 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F0>::FromCode(*code);
int res = f.Call();
@ -865,8 +867,9 @@ TEST(AssemblerX64Extractps) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -902,8 +905,9 @@ TEST(AssemblerX64SSE) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -933,8 +937,9 @@ TEST(AssemblerX64SSE3) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1158,8 +1163,9 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1384,8 +1390,9 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1460,8 +1467,9 @@ TEST(AssemblerX64SSE_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1546,8 +1554,9 @@ TEST(AssemblerX64AVX_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1786,8 +1795,9 @@ TEST(AssemblerX64AVX_sd) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -1978,8 +1988,9 @@ TEST(AssemblerX64BMI1) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -2038,8 +2049,9 @@ TEST(AssemblerX64LZCNT) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -2098,8 +2110,9 @@ TEST(AssemblerX64POPCNT) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -2361,8 +2374,9 @@ TEST(AssemblerX64BMI2) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@ -2405,8 +2419,9 @@ TEST(AssemblerX64JumpTables1) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2453,8 +2468,9 @@ TEST(AssemblerX64JumpTables2) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -2510,8 +2526,9 @@ TEST(AssemblerX64vmovups) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);


@ -39,9 +39,10 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
code_desc.unwinding_info_size = 0;
code_desc.origin = nullptr;
Handle<Code> code = Factory::CodeBuilder(CcTest::i_isolate(), code_desc,
CodeKind::FOR_TESTING)
.Build();
Handle<Code> code =
Factory::CodeBuilder(CcTest::i_isolate(), code_desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
CHECK(!code->has_unwinding_info());
CHECK_EQ(code->raw_instruction_size(), buffer_size);
@ -86,9 +87,10 @@ TEST(CodeLayoutWithUnwindingInfo) {
code_desc.unwinding_info_size = unwinding_info_size;
code_desc.origin = nullptr;
Handle<Code> code = Factory::CodeBuilder(CcTest::i_isolate(), code_desc,
CodeKind::FOR_TESTING)
.Build();
Handle<Code> code =
Factory::CodeBuilder(CcTest::i_isolate(), code_desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
CHECK(code->has_unwinding_info());
CHECK_EQ(code->raw_instruction_size(), buffer_size + unwinding_info_size);


@ -131,7 +131,7 @@ Handle<JSFunction> CreateCsaDescriptorArrayLookup(Isolate* isolate) {
compiler::CodeAssemblerTester asm_tester(
isolate, kNumParams + 1, // +1 to include receiver.
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
{
CodeStubAssembler m(asm_tester.state());
@ -176,7 +176,7 @@ Handle<JSFunction> CreateCsaTransitionArrayLookup(Isolate* isolate) {
const int kNumParams = 2;
compiler::CodeAssemblerTester asm_tester(
isolate, kNumParams + 1, // +1 to include receiver.
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
{
CodeStubAssembler m(asm_tester.state());


@ -988,8 +988,9 @@ TEST(DisasmIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
USE(code);
#ifdef OBJECT_PRINT
StdoutStream os;


@ -993,8 +993,9 @@ TEST(DisasmX64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
USE(code);
#ifdef OBJECT_PRINT
StdoutStream os;


@ -4068,7 +4068,8 @@ TEST(WeakReference) {
i::CodeDesc desc;
assm.GetCode(i_isolate, &desc);
i::Handle<i::Code> code =
i::Factory::CodeBuilder(i_isolate, desc, i::CodeKind::FOR_TESTING)
i::Factory::CodeBuilder(i_isolate, desc,
i::CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
CHECK(code->IsCode());


@ -146,8 +146,9 @@ TEST(ExtractLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@ -277,8 +278,9 @@ TEST(ReplaceLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);


@ -65,8 +65,9 @@ TEST(EmbeddedObj) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);


@ -88,8 +88,9 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (size_t i = 0; i < arraysize(test_values); i++) {
@ -198,8 +199,9 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -261,8 +263,9 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -349,8 +352,9 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -374,8 +378,9 @@ static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F1>::FromCode(*code);
@ -502,8 +507,9 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@ -615,7 +621,9 @@ TEST(OverflowInstructions) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.lhs = ii;
t.rhs = jj;
@ -737,8 +745,9 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@ -772,8 +781,9 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@ -1019,8 +1029,9 @@ bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
int32_t res = reinterpret_cast<int32_t>(f.Call(rs, rd, 0, 0, 0));
@ -1114,7 +1125,8 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
Factory::CodeBuilder(masm->isolate(), desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
@ -1256,7 +1268,8 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
Factory::CodeBuilder(masm->isolate(), desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;


@ -108,8 +108,9 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (size_t i = 0; i < arraysize(test_values); i++) {
@ -163,8 +164,9 @@ TEST(LoadConstants) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<FV>::FromCode(*code);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
@ -206,8 +208,9 @@ TEST(LoadAddress) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<FV>::FromCode(*code);
(void)f.Call(0, 0, 0, 0, 0);
@ -263,8 +266,9 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -333,8 +337,9 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -422,8 +427,9 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@ -447,8 +453,9 @@ static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F1>::FromCode(*code);
@ -527,8 +534,9 @@ static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<FV>::FromCode(*code);
@ -677,8 +685,9 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@ -853,7 +862,9 @@ TEST(OverflowInstructions) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate, desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.lhs = ii;
t.rhs = jj;
@ -975,8 +986,9 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@ -1010,8 +1022,9 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@ -1374,8 +1387,9 @@ bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(rs, rd, 0, 0, 0));
@ -1469,7 +1483,8 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
Factory::CodeBuilder(masm->isolate(), desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;
@ -1611,7 +1626,8 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
Factory::CodeBuilder(masm->isolate(), desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef DEBUG
StdoutStream os;


@ -449,8 +449,9 @@ TEST(EmbeddedObj) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);


@ -204,8 +204,9 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
TestData t = initial_data;
Simulator::current(isolate)->Call<void>(code->entry(), &t);
@ -276,8 +277,9 @@ int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> code = Factory::CodeBuilder(
isolate, desc, CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
Simulator::current(isolate)->Call<void>(code->entry(), test_data);
return Simulator::current(isolate)->wreg(0);
}


@ -240,7 +240,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
callee.Return(static_cast<int>(desc->ReturnCount()), returns.get());
OptimizedCompilationInfo info(ArrayVector("testing"), &zone,
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, i_isolate, desc, callee.graph(),
AssemblerOptions::Default(i_isolate),
@ -286,7 +286,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// Call the wrapper.
OptimizedCompilationInfo wrapper_info(ArrayVector("wrapper"), &zone,
CodeKind::FOR_TESTING);
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING);
Handle<Code> wrapper_code =
Pipeline::GenerateCodeForTesting(
&wrapper_info, i_isolate, wrapper_desc, caller.graph(),


@ -155,7 +155,9 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
tasm.GetCode(nullptr, &desc);
if (FLAG_print_code) {
Handle<Code> code =
Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate(), desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
StdoutStream os;
code->Print(os);
}


@ -161,7 +161,9 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
tasm.GetCode(nullptr, &desc);
if (FLAG_print_code) {
Handle<Code> code =
Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build();
Factory::CodeBuilder(isolate(), desc,
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING)
.Build();
StdoutStream os;
code->Print(os);
}


@ -24,7 +24,7 @@ CodeStubAssemblerTestState::CodeStubAssemblerTestState(
CodeStubAssemblerTest* test)
: compiler::CodeAssemblerState(
test->isolate(), test->zone(), VoidDescriptor{},
CodeKind::FOR_TESTING, "test",
CodeKind::DEOPT_ENTRIES_OR_FOR_TESTING, "test",
PoisoningMitigationLevel::kPoisonCriticalOnly) {}
TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {