Revert "[Compiler] Remove code aging support."

This reverts commit a205117c6e.

Reason for revert: breaks Arm64

Original change's description:
> [Compiler] Remove code aging support.
> 
> Code aging is no longer supported by any remaining compilers now
> that full codegen has been removed. This CL removes all vestiges of
> code aging.
> 
> BUG=v8:6409
> 
> Change-Id: I945ebcc20c7c55120550c8ee36188bfa042ea65e
> Reviewed-on: https://chromium-review.googlesource.com/619153
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Reviewed-by: Yang Guo <yangguo@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Marja Hölttä <marja@chromium.org>
> Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#47501}

TBR=ulan@chromium.org,rmcilroy@chromium.org,marja@chromium.org,yangguo@chromium.org,mstarzinger@chromium.org,rodolph.perfetta@arm.com

Change-Id: I9d8b2985e2d472697908270d93a35eb7ef9c88a8
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:6409
Reviewed-on: https://chromium-review.googlesource.com/625998
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47506}
Author: Ross McIlroy, 2017-08-22 12:21:41 +00:00 (committed by Commit Bot)
Parent: 208cdfd933
Commit: 42d3d36bc3
86 changed files with 2483 additions and 104 deletions


@ -152,6 +152,29 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on Arm.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Memory::Address_at(pc_ +
(kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Memory::Address_at(pc_ +
(kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
stub->instruction_start();
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@ -174,6 +197,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
@ -191,6 +216,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
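
For readers unfamiliar with the mechanism being restored here: the stub address is not a relocated pointer but a word of data embedded at a fixed offset inside the patched instruction sequence, so reading and updating it is plain memory access. A minimal standalone sketch of that idiom (the sequence length is an assumption for illustration; this is not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

namespace {
constexpr int kInstrSize = 4;                 // ARM instruction width
constexpr int kNoCodeAgeSequenceLength = 12;  // assumed 3-instruction sequence

// Analog of RelocInfo::set_code_age_stub: overwrite the embedded address.
void SetStubAddress(uint8_t* sequence, uintptr_t stub) {
  std::memcpy(sequence + kNoCodeAgeSequenceLength - kInstrSize, &stub,
              sizeof(stub));
}

// Analog of RelocInfo::code_age_stub: read the embedded address back.
uintptr_t GetStubAddress(const uint8_t* sequence) {
  uintptr_t stub;
  std::memcpy(&stub, sequence + kNoCodeAgeSequenceLength - kInstrSize,
              sizeof(stub));
  return stub;
}
}  // namespace

int main() {
  uint8_t sequence[kNoCodeAgeSequenceLength + sizeof(uintptr_t)] = {};
  SetStubAddress(sequence, 0xdeadbeef);
  assert(GetStubAddress(sequence) == 0xdeadbeef);
}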


@ -398,6 +398,67 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushStandardFrame(r1);
patcher->masm()->nop(ip.code());
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
Address target_address = Memory::Address_at(
sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
PatchingAssembler patcher(Assembler::IsolateData(isolate), sequence,
young_length / Assembler::kInstrSize);
patcher.add(r0, pc, Operand(-8));
patcher.ldr(pc, MemOperand(pc, -4));
patcher.emit_code_stub_address(stub);
patcher.FlushICache(isolate);
}
}
} // namespace internal
} // namespace v8
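
The young/old distinction above hinges on the first word of the sequence: an aged ARM function always begins with the patched "add r0, pc, Operand(-8)" instruction, 0xe24f0008 per the debug constant. A standalone sketch of that one-word classification (little-endian host assumed; not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

constexpr uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;

// Analog of CodeAgingHelper::IsOld: compare only the first instruction word.
bool IsOldSequence(const uint8_t* candidate) {
  uint32_t first;
  std::memcpy(&first, candidate, sizeof(first));
  return first == kCodeAgePatchFirstInstruction;
}

int main() {
  uint8_t young[12] = {};  // would hold the PushStandardFrame + nop encoding
  uint8_t old_seq[12] = {0x08, 0x00, 0x4f, 0xe2};  // little-endian 0xe24f0008
  assert(!IsOldSequence(young));
  assert(IsOldSequence(old_seq));
}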


@ -34,9 +34,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
{
PatchingAssembler patcher(Assembler::IsolateData(isolate),
code_start_address, 1);
PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
patcher.bkpt(0);
patcher.FlushICache(isolate);
}


@ -1153,7 +1153,23 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
PushCommonFrame(scratch);
}
void TurboAssembler::Prologue() { PushStandardFrame(r1); }
void TurboAssembler::Prologue(bool code_pre_aging) {
{ PredictableCodeSizeScope predictable_code_size_scope(
this, kNoCodeAgeSequenceLength);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
if (code_pre_aging) {
// Pre-age the code.
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
add(r0, pc, Operand(-8));
ldr(pc, MemOperand(pc, -4));
emit_code_stub_address(stub);
} else {
PushStandardFrame(r1);
nop(ip.code());
}
}
}
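
The PredictableCodeSizeScope in this prologue exists because the aged and young sequences must occupy exactly the same number of bytes for in-place patching to work. A minimal sketch of that size-assertion idea (FakeAssembler is a hypothetical stand-in; not V8 code):

#include <cassert>
#include <cstdint>
#include <vector>

struct FakeAssembler {
  std::vector<uint8_t> buffer;
  int pc_offset() const { return static_cast<int>(buffer.size()); }
  void emit32(uint32_t) { buffer.resize(buffer.size() + 4); }
};

class PredictableSizeScope {
 public:
  PredictableSizeScope(FakeAssembler* assm, int expected)
      : assm_(assm), start_(assm->pc_offset()), expected_(expected) {}
  ~PredictableSizeScope() {
    // The patch site only works if every alternative has the same size.
    assert(assm_->pc_offset() - start_ == expected_);
  }

 private:
  FakeAssembler* assm_;
  int start_;
  int expected_;
};

int main() {
  FakeAssembler assm;
  {
    PredictableSizeScope scope(&assm, 12);  // assumed 3 x 4-byte instructions
    assm.emit32(0);  // stand-ins for the three prologue instructions
    assm.emit32(0);
    assm.emit32(0);
  }
}
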
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {


@ -117,7 +117,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue();
void Prologue(bool code_pre_aging);
// Push a standard frame, consisting of lr, fp, context and JS function
void PushStandardFrame(Register function_reg);


@ -810,6 +810,34 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
}
}
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on ARM64.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
// Read the stub entry point from the code age sequence.
Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
// Overwrite the stub entry point in the code age sequence. This is loaded as
// a literal so there is no need to call FlushICache here.
Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
Memory::Address_at(stub_entry_address) = stub->instruction_start();
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||


@ -329,6 +329,7 @@ bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::CODE_AGE_SEQUENCE &&
mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
mode != RelocInfo::DEOPT_INLINING_ID &&
mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);


@ -24,6 +24,59 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
// -------------------------------------------------------------------------
// Code generators
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
PatchingAssembler patcher(isolate, young_sequence_.start(),
young_sequence_.length() / kInstructionSize);
// The young sequence is the frame setup code for FUNCTION code types. It is
// generated by FullCodeGenerator::Generate.
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
PatchingAssembler patcher_old(isolate, old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return MacroAssembler::IsYoungSequence(isolate, sequence);
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}
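
The CodeAgingHelper restored above pre-renders reference young and old byte sequences once per isolate and classifies candidate code with memcmp (the old comparison stops at kCodeAgeStubEntryOffset, before the embedded stub address, so it is stub-independent). A standalone sketch of that pattern, with arbitrary stand-in byte patterns and an assumed sequence length:

#include <cassert>
#include <cstdint>
#include <cstring>

constexpr int kSequenceLength = 16;  // assumed; stands in for the real length

struct AgingHelper {
  uint8_t young[kSequenceLength];
  uint8_t old_prefix[kSequenceLength];

  AgingHelper() {
    // Stand-ins for EmitFrameSetupForCodeAgePatching and
    // EmitCodeAgeSequence(nullptr): any two distinct byte patterns.
    std::memset(young, 0xAA, sizeof(young));
    std::memset(old_prefix, 0xBB, sizeof(old_prefix));
  }
  bool IsYoung(const uint8_t* candidate) const {
    return std::memcmp(candidate, young, kSequenceLength) == 0;
  }
  bool IsOld(const uint8_t* candidate) const {
    return std::memcmp(candidate, old_prefix, kSequenceLength) == 0;
  }
};

int main() {
  AgingHelper helper;
  uint8_t code[kSequenceLength];
  std::memset(code, 0xAA, sizeof(code));
  assert(helper.IsYoung(code) && !helper.IsOld(code));
  std::memset(code, 0xBB, sizeof(code));  // "patch" the prologue old
  assert(helper.IsOld(code) && !helper.IsYoung(code));
}
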
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,


@ -36,9 +36,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
{
PatchingAssembler patcher(Assembler::IsolateData(isolate),
code_start_address, 1);
PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
patcher.brk(0);
}


@ -23,6 +23,9 @@
namespace v8 {
namespace internal {
// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __
MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
unsigned buffer_size,
CodeObjectRequired create_code_object)
@ -1154,11 +1157,11 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
Register temp = temps.AcquireX();
Label loop;
Mov(temp, count / 2);
Bind(&loop);
__ Mov(temp, count / 2);
__ Bind(&loop);
PushHelper(2, size, src, src, NoReg, NoReg);
Subs(temp, temp, 1);
B(ne, &loop);
__ Subs(temp, temp, 1);
__ B(ne, &loop);
count %= 2;
}
@ -2085,9 +2088,9 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch0;
add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
add(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
__ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
__ add(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
@ -2100,14 +2103,14 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
if (FLAG_debug_code) {
Cmp(src_reg, dst_reg);
Check(lo, kStackAccessBelowStackPointer);
__ Cmp(src_reg, dst_reg);
__ Check(lo, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
@ -2116,18 +2119,18 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch1;
Label loop, entry;
B(&entry);
bind(&loop);
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
Cmp(jssp, src_reg);
B(ne, &loop);
__ B(&entry);
__ bind(&loop);
__ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
__ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
__ bind(&entry);
__ Cmp(jssp, src_reg);
__ B(ne, &loop);
// Leave current frame.
Mov(jssp, dst_reg);
SetStackPointer(jssp);
AssertStackConsistency();
__ Mov(jssp, dst_reg);
__ SetStackPointer(jssp);
__ AssertStackConsistency();
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@ -2333,7 +2336,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
InvokeFlag flag) {
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
LoadObject(x1, function);
__ LoadObject(x1, function);
InvokeFunction(x1, expected, actual, flag);
}
@ -2404,9 +2407,13 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
Uxtw(result.W(), result.W());
}
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
void TurboAssembler::Prologue(bool code_pre_aging) {
if (code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
__ EmitCodeAgeSequence(stub);
} else {
__ EmitFrameSetupForCodeAgePatching();
}
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@ -2881,7 +2888,7 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
__ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
DecodeField<Map::ElementsKindBits>(result);
}
@ -3144,7 +3151,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
Register dst,
LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode) {
ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
ASM_LOCATION("MacroAssembler::RecordWrite");
DCHECK(!AreAliased(object, map));
if (emit_debug_code()) {
@ -3223,7 +3230,7 @@ void MacroAssembler::RecordWrite(
RememberedSetAction remembered_set_action,
SmiCheck smi_check,
PointersToHereCheck pointers_to_here_check_for_value) {
ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
ASM_LOCATION("MacroAssembler::RecordWrite");
DCHECK(!AreAliased(object, value));
if (emit_debug_code()) {
@ -3510,7 +3517,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
DCHECK(!kCallerSaved.IncludesAliasOf(StackPointer()));
DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@ -3739,6 +3746,77 @@ void MacroAssembler::Printf(const char * format,
FPTmpList()->set_list(old_fp_tmp_list);
}
void TurboAssembler::EmitFrameSetupForCodeAgePatching() {
// TODO(jbramley): Other architectures use the internal memcpy to copy the
// sequence. If this is a performance bottleneck, we should consider caching
// the sequence and copying it in the same way.
InstructionAccurateScope scope(this,
kNoCodeAgeSequenceLength / kInstructionSize);
DCHECK(jssp.Is(StackPointer()));
EmitFrameSetupForCodeAgePatching(this);
}
void TurboAssembler::EmitCodeAgeSequence(Code* stub) {
InstructionAccurateScope scope(this,
kNoCodeAgeSequenceLength / kInstructionSize);
DCHECK(jssp.Is(StackPointer()));
EmitCodeAgeSequence(this, stub);
}
#undef __
#define __ assm->
void TurboAssembler::EmitFrameSetupForCodeAgePatching(Assembler* assm) {
Label start;
__ bind(&start);
// We can do this sequence using four instructions, but the code ageing
// sequence that patches it needs five, so we use the extra space to try to
// simplify some addressing modes and remove some dependencies (compared to
// using two stp instructions with write-back).
__ sub(jssp, jssp, 4 * kXRegSize);
__ sub(csp, csp, 4 * kXRegSize);
__ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
__ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
__ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
__ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}
void TurboAssembler::EmitCodeAgeSequence(Assembler* assm, Code* stub) {
Label start;
__ bind(&start);
// When the stub is called, the sequence is replaced with the young sequence
// (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
// stub jumps to &start, stored in x0. The young sequence does not call the
// stub so there is no infinite loop here.
//
// A branch (br) is used rather than a call (blr) because this code replaces
// the frame setup code that would normally preserve lr.
__ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
__ adr(x0, &start);
__ br(ip0);
// IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
// until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
__ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
if (stub) {
__ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
__ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}
}
bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(is_young ||
isolate->code_aging_helper()->IsOld(sequence));
return is_young;
}
#undef __
UseScratchRegisterScope::~UseScratchRegisterScope() {
available_->set_list(old_available_);
availablefp_->set_list(old_availablefp_);
@ -3775,6 +3853,7 @@ MemOperand NativeContextMemOperand() {
#define __ masm->
void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
const Label* smi_check) {
Assembler::BlockPoolsScope scope(masm);


@ -23,11 +23,8 @@
FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_UNIMPLEMENTED(message)
@ -1161,7 +1158,41 @@ class TurboAssembler : public Assembler {
inline void Mrs(const Register& rt, SystemRegister sysreg);
// Generates function prologue code.
void Prologue();
void Prologue(bool code_pre_aging);
// Code ageing support functions.
Code ageing on ARM64 works much as it does on ARM. When V8 wants to mark a
// function as old, it replaces some of the function prologue (generated by
// FullCodeGenerator::Generate) with a call to a special stub (ultimately
// generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
// function prologue to its initial young state (indicating that it has been
// recently run) and continues. A young function is therefore one which has a
// normal frame setup sequence, and an old function has a code age sequence
// which calls a code ageing stub.
// Set up a basic stack frame for young code (or code exempt from ageing) with
// type FUNCTION. It may be patched later for code ageing support. This is
done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
//
// This function takes an Assembler so it can be called from either a
// MacroAssembler or a PatchingAssembler context.
static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
// Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
void EmitFrameSetupForCodeAgePatching();
// Emit a code age sequence that calls the relevant code age stub. The code
// generated by this sequence is expected to replace the code generated by
// EmitFrameSetupForCodeAgePatching, and represents an old function.
//
// If stub is NULL, this function generates the code age sequence but omits
// the stub address that is normally embedded in the instruction stream. This
// can be used by debug code to verify code age sequences.
static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
// Call EmitCodeAgeSequence from a MacroAssembler context.
void EmitCodeAgeSequence(Code* stub);
void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions());
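
As a data-level summary of the model described in the header comment above, here is a small sketch of the age buckets named in CODE_AGE_LIST; the saturating one-step-older transition is an assumption for illustration, not V8's exact policy:

#include <cassert>

enum Age {
  kNoAge,           // young: normal frame-setup prologue in place
  kQuadragenarian,  // old: prologue replaced by a code age sequence
  kQuinquagenarian,
  kSexagenarian,
  kSeptuagenarian,
  kOctogenarian,
  kLastAge = kOctogenarian
};

// Assumed policy: each aging pass moves code one bucket older, saturating.
Age MakeOlder(Age age) {
  return age == kLastAge ? age : static_cast<Age>(age + 1);
}

bool IsYoung(Age age) { return age == kNoAge; }

int main() {
  Age age = kNoAge;
  assert(IsYoung(age));
  age = MakeOlder(age);  // first aging pass patches the prologue
  assert(!IsYoung(age));
  for (int i = 0; i < 10; ++i) age = MakeOlder(age);
  assert(age == kOctogenarian);  // saturates at the oldest bucket
}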


@ -189,12 +189,14 @@ void AssemblerBase::Print(Isolate* isolate) {
v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_, nullptr);
}
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
: PredictableCodeSizeScope(assembler, -1) {}
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
@ -204,6 +206,7 @@ PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
assembler_->set_predictable_code_size(true);
}
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
// TODO(svenpanne) Remove the 'if' when everything works.
if (expected_size_ >= 0) {
@ -212,6 +215,7 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() {
assembler_->set_predictable_code_size(old_value_);
}
// -----------------------------------------------------------------------------
// Implementation of CpuFeatureScope
@ -229,6 +233,7 @@ CpuFeatureScope::~CpuFeatureScope() {
}
#endif
bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
@ -400,6 +405,7 @@ uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
return pc_delta & kSmallPCDeltaMask;
}
void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
// Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
pc_delta = WriteLongPCJump(pc_delta);
@ -410,11 +416,13 @@ void RelocInfoWriter::WriteShortData(intptr_t data_delta) {
*--pos_ = static_cast<byte>(data_delta);
}
void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
*--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
}
void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
// Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
pc_delta = WriteLongPCJump(pc_delta);
@ -422,6 +430,7 @@ void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
*--pos_ = pc_delta;
}
void RelocInfoWriter::WriteIntData(int number) {
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(number);
@ -430,6 +439,7 @@ void RelocInfoWriter::WriteIntData(int number) {
}
}
void RelocInfoWriter::WriteData(intptr_t data_delta) {
for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
@ -438,6 +448,7 @@ void RelocInfoWriter::WriteData(intptr_t data_delta) {
}
}
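
The writer methods above pack each reloc entry's pc-delta and tag into as few bytes as possible, spilling oversized deltas into preceding long pc-jump records. A simplified forward-writing sketch of that scheme (the real writer emits backwards via *--pos_, and the field widths here are assumptions):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int kTagBits = 2;
constexpr uint32_t kSmallDeltaMax = (1u << (8 - kTagBits)) - 1;  // 63
constexpr uint8_t kJumpTag = 0x3;  // reserved tag marking a long pc jump

// Encode: emit jump bytes until the remainder fits next to the tag.
void Write(std::vector<uint8_t>* out, uint32_t pc_delta, uint8_t tag) {
  while (pc_delta > kSmallDeltaMax) {
    out->push_back(
        static_cast<uint8_t>((kSmallDeltaMax << kTagBits) | kJumpTag));
    pc_delta -= kSmallDeltaMax;
  }
  out->push_back(static_cast<uint8_t>((pc_delta << kTagBits) | tag));
}

// Decode: fold jump bytes into the delta until a real tag appears.
uint32_t Read(const std::vector<uint8_t>& in, std::size_t* pos, uint8_t* tag) {
  uint32_t delta = 0;
  for (;;) {
    uint8_t b = in[(*pos)++];
    delta += b >> kTagBits;
    if ((b & ((1 << kTagBits) - 1)) != kJumpTag) {
      *tag = b & ((1 << kTagBits) - 1);
      return delta;
    }
  }
}

int main() {
  std::vector<uint8_t> stream;
  Write(&stream, 200, 1);  // delta too big for 6 bits: needs jump bytes
  std::size_t pos = 0;
  uint8_t tag = 0;
  assert(Read(stream, &pos, &tag) == 200 && tag == 1);
}
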
void RelocInfoWriter::Write(const RelocInfo* rinfo) {
RelocInfo::Mode rmode = rinfo->rmode();
#ifdef DEBUG
@ -476,23 +487,28 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#endif
}
inline int RelocIterator::AdvanceGetTag() {
return *--pos_ & kTagMask;
}
inline RelocInfo::Mode RelocIterator::GetMode() {
return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
((1 << kLongTagBits) - 1));
}
inline void RelocIterator::ReadShortTaggedPC() {
rinfo_.pc_ += *pos_ >> kTagBits;
}
inline void RelocIterator::AdvanceReadPC() {
rinfo_.pc_ += *--pos_;
}
void RelocIterator::AdvanceReadInt() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
@ -501,6 +517,7 @@ void RelocIterator::AdvanceReadInt() {
rinfo_.data_ = x;
}
void RelocIterator::AdvanceReadData() {
intptr_t x = 0;
for (int i = 0; i < kIntptrSize; i++) {
@ -509,6 +526,7 @@ void RelocIterator::AdvanceReadData() {
rinfo_.data_ = x;
}
void RelocIterator::AdvanceReadLongPCJump() {
// Read the 32-kSmallPCDeltaBits most significant bits of the
// pc jump in kChunkBits bit chunks and shift them into place.
@ -529,6 +547,7 @@ inline void RelocIterator::ReadShortData() {
rinfo_.data_ = unsigned_b;
}
void RelocIterator::next() {
DCHECK(!done());
// Basically, do the opposite of RelocInfoWriter::Write.
@ -580,6 +599,15 @@ void RelocIterator::next() {
}
}
}
if (code_age_sequence_ != NULL) {
byte* old_code_age_sequence = code_age_sequence_;
code_age_sequence_ = NULL;
if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
rinfo_.data_ = 0;
rinfo_.pc_ = old_code_age_sequence;
return;
}
}
done_ = true;
}
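
Note how next() surfaces the code age sequence: it is never recorded in the reloc stream, so the iterator synthesizes one extra CODE_AGE_SEQUENCE entry after the stream is exhausted. A standalone sketch of that one-synthetic-trailing-entry pattern (simplified types; not V8 code):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

struct Entry { int pc; int mode; };
constexpr int kCodeAgeMode = 99;  // assumed mode id for the synthetic entry

class Iter {
 public:
  Iter(std::vector<Entry> stream, int age_pc)
      : stream_(std::move(stream)), age_pc_(age_pc) {}
  bool Next(Entry* out) {
    if (index_ < stream_.size()) {
      *out = stream_[index_++];
      return true;
    }
    if (age_pc_ >= 0) {  // emit the synthetic entry exactly once
      *out = {age_pc_, kCodeAgeMode};
      age_pc_ = -1;
      return true;
    }
    return false;
  }

 private:
  std::vector<Entry> stream_;
  std::size_t index_ = 0;
  int age_pc_;  // -1 when absent or already emitted
};

int main() {
  Iter it({{0, 1}, {8, 2}}, 32);
  Entry e;
  int count = 0;
  while (it.Next(&e)) ++count;
  assert(count == 3 && e.mode == kCodeAgeMode && e.pc == 32);
}
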
@ -592,6 +620,15 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
byte* sequence = code->FindCodeAgeSequence();
// We get the isolate from the map, because at serialization time
// the code pointer has been cloned and isn't really in heap space.
Isolate* isolate = code->map()->GetIsolate();
if (sequence != NULL && !Code::IsYoungSequence(isolate, sequence)) {
code_age_sequence_ = sequence;
} else {
code_age_sequence_ = NULL;
}
if (mode_mask_ == 0) pos_ = end_;
next();
}
@ -604,10 +641,12 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
end_ = pos_ - desc.reloc_size;
done_ = false;
mode_mask_ = mode_mask;
code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@ -624,6 +663,7 @@ bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
}
#endif
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
@ -657,6 +697,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "constant pool";
case VENEER_POOL:
return "veneer pool";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case WASM_MEMORY_REFERENCE:
return "wasm memory reference";
case WASM_MEMORY_SIZE_REFERENCE:
@ -676,6 +718,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "unknown relocation type";
}
void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << static_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (IsComment(rmode_)) {
@ -713,6 +756,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
}
#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void RelocInfo::Verify(Isolate* isolate) {
switch (rmode_) {
@ -762,10 +806,14 @@ void RelocInfo::Verify(Isolate* isolate) {
case PC_JUMP:
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
DCHECK(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
break;
}
}
#endif // VERIFY_HEAP
// Implementation of ExternalReference
static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
@ -780,6 +828,7 @@ static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
UNREACHABLE();
}
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
@ -799,14 +848,17 @@ ExternalReference::ExternalReference(
Isolate* isolate = NULL)
: address_(Redirect(isolate, fun->address(), type)) {}
ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
: ExternalReference(Runtime::FunctionForId(id), isolate) {}
ExternalReference::ExternalReference(const Runtime::Function* f,
Isolate* isolate)
: address_(Redirect(isolate, f->entry,
BuiltinCallTypeForResultSize(f->result_size))) {}
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
return ExternalReference(isolate);
}
@ -831,6 +883,7 @@ ExternalReference::ExternalReference(IsolateAddressId id, Isolate* isolate)
ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
ExternalReference ExternalReference::
incremental_marking_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
@ -845,6 +898,7 @@ ExternalReference ExternalReference::store_buffer_overflow_function(
FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@ -852,11 +906,27 @@ ExternalReference ExternalReference::delete_handle_scope_extensions(
FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}
ExternalReference ExternalReference::get_date_field_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
}
ExternalReference ExternalReference::get_make_code_young_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}
ExternalReference ExternalReference::get_mark_code_as_executed_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
}
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@ -873,12 +943,14 @@ ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
return ExternalReference(isolate->stress_deopt_count_address());
}
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}
ExternalReference ExternalReference::compute_output_frames_function(
Isolate* isolate) {
return ExternalReference(
@ -1047,6 +1119,7 @@ ExternalReference ExternalReference::log_enter_external_function(
Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}
ExternalReference ExternalReference::log_leave_external_function(
Isolate* isolate) {
return ExternalReference(
@ -1057,20 +1130,24 @@ ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_array_start());
}
ExternalReference ExternalReference::allocation_sites_list_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->allocation_sites_list_address());
}
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
ExternalReference ExternalReference::address_of_real_stack_limit(
Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}
ExternalReference ExternalReference::address_of_regexp_stack_limit(
Isolate* isolate) {
return ExternalReference(isolate->regexp_stack()->limit_address());
@ -1095,94 +1172,114 @@ ExternalReference ExternalReference::new_space_allocation_top_address(
return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::new_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::old_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->OldSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::old_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->OldSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
}
ExternalReference ExternalReference::handle_scope_next_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_next_address(isolate));
}
ExternalReference ExternalReference::handle_scope_limit_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_limit_address(isolate));
}
ExternalReference ExternalReference::scheduled_exception_address(
Isolate* isolate) {
return ExternalReference(isolate->scheduled_exception_address());
}
ExternalReference ExternalReference::address_of_pending_message_obj(
Isolate* isolate) {
return ExternalReference(isolate->pending_message_obj_address());
}
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}
ExternalReference ExternalReference::address_of_minus_one_half() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_one_half));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.negative_infinity));
}
ExternalReference ExternalReference::address_of_the_hole_nan() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.the_hole_nan));
}
ExternalReference ExternalReference::address_of_uint32_bias() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.uint32_bias));
}
ExternalReference ExternalReference::address_of_float_abs_constant() {
return ExternalReference(reinterpret_cast<void*>(&float_absolute_constant));
}
ExternalReference ExternalReference::address_of_float_neg_constant() {
return ExternalReference(reinterpret_cast<void*>(&float_negate_constant));
}
ExternalReference ExternalReference::address_of_double_abs_constant() {
return ExternalReference(reinterpret_cast<void*>(&double_absolute_constant));
}
ExternalReference ExternalReference::address_of_double_neg_constant() {
return ExternalReference(reinterpret_cast<void*>(&double_negate_constant));
}
ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
return ExternalReference(isolate->is_profiling_address());
}
ExternalReference ExternalReference::invoke_function_callback(
Isolate* isolate) {
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
@ -1191,6 +1288,7 @@ ExternalReference ExternalReference::invoke_function_callback(
return ExternalReference(&thunk_fun, thunk_type, isolate);
}
ExternalReference ExternalReference::invoke_accessor_getter_callback(
Isolate* isolate) {
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
@ -1200,6 +1298,7 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback(
return ExternalReference(&thunk_fun, thunk_type, isolate);
}
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(
@ -1227,6 +1326,7 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
return ExternalReference(Redirect(isolate, function));
}
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
@ -1239,6 +1339,7 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
ExternalReference ExternalReference::re_word_character_map() {
return ExternalReference(
NativeRegExpMacroAssembler::word_character_map_address());
@ -1447,10 +1548,12 @@ ExternalReference ExternalReference::page_flags(Page* page) {
MemoryChunk::kFlagsOffset);
}
ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
return ExternalReference(entry);
}
ExternalReference ExternalReference::cpu_features() {
DCHECK(CpuFeatures::initialized_);
return ExternalReference(&CpuFeatures::supported_);
@ -1477,6 +1580,7 @@ ExternalReference ExternalReference::runtime_function_table_address(
const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
double power_helper(Isolate* isolate, double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
@ -1495,6 +1599,7 @@ double power_helper(Isolate* isolate, double x, double y) {
return power_double_double(x, y);
}
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
@ -1513,6 +1618,7 @@ double power_double_int(double x, int y) {
return p;
}
double power_double_double(double x, double y) {
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
@ -1522,6 +1628,7 @@ double power_double_double(double x, double y) {
return Pow(x, y);
}
ExternalReference ExternalReference::power_double_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@ -1529,6 +1636,7 @@ ExternalReference ExternalReference::power_double_double_function(
BUILTIN_FP_FP_CALL));
}
ExternalReference ExternalReference::mod_two_doubles_operation(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@ -1556,18 +1664,22 @@ ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
}
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
bool operator!=(ExternalReference lhs, ExternalReference rhs) {
return !(lhs == rhs);
}
size_t hash_value(ExternalReference reference) {
return base::hash<Address>()(reference.address());
}
std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
os << static_cast<const void*>(reference.address());
const Runtime::Function* fn = Runtime::FunctionForEntry(reference.address());
@ -1575,6 +1687,7 @@ std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
return os;
}
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
int double_reach_bits) {
info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
@ -1582,6 +1695,7 @@ ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
}
ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
ConstantPoolEntry::Type type) const {
const PerTypeEntryInfo& info = info_[type];
@ -1611,6 +1725,7 @@ ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
return ConstantPoolEntry::REGULAR;
}
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
DCHECK(!emitted_label_.is_bound());
@ -1659,6 +1774,7 @@ ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
return access;
}
void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
ConstantPoolEntry::Type type) {
PerTypeEntryInfo& info = info_[type];
@ -1684,6 +1800,7 @@ void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
}
}
void ConstantPoolBuilder::EmitGroup(Assembler* assm,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@ -1742,6 +1859,7 @@ void ConstantPoolBuilder::EmitGroup(Assembler* assm,
}
}
// Emit and return position of pool. Zero implies no constant pool.
int ConstantPoolBuilder::Emit(Assembler* assm) {
bool emitted = emitted_label_.is_bound();
@ -1794,6 +1912,7 @@ void Assembler::RecordDeoptReason(DeoptimizeReason reason,
RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
void Assembler::RecordComment(const char* msg) {
if (FLAG_code_comments) {
EnsureSpace ensure_space(this);
@ -1801,6 +1920,7 @@ void Assembler::RecordComment(const char* msg) {
}
}
void Assembler::DataAlign(int m) {
DCHECK(m >= 2 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {


@ -368,6 +368,8 @@ class RelocInfo {
NUMBER_OF_MODES,
NONE32, // never recorded 32-bit value
NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
@ -429,6 +431,9 @@ class RelocInfo {
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
static inline bool IsWasmMemoryReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE;
}
@ -527,6 +532,10 @@ class RelocInfo {
INLINE(void set_target_cell(
Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Handle<Code> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(
Code* stub, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
@ -708,6 +717,7 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@ -819,6 +829,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);


@ -2584,6 +2584,15 @@ class FunctionLiteral final : public Expression {
bool ShouldEagerCompile() const;
void SetShouldEagerCompile();
// A hint that we expect this function to be called (exactly) once,
// i.e. we suspect it's an initialization function.
bool should_be_used_once_hint() const {
return ShouldNotBeUsedOnceHintField::decode(bit_field_);
}
void set_should_be_used_once_hint() {
bit_field_ = ShouldNotBeUsedOnceHintField::update(bit_field_, true);
}
FunctionType function_type() const {
return FunctionTypeBits::decode(bit_field_);
}
@ -2653,6 +2662,7 @@ class FunctionLiteral final : public Expression {
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
ShouldNotBeUsedOnceHintField::encode(false) |
DontOptimizeReasonField::encode(kNoReason);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
DCHECK_EQ(body == nullptr, expected_property_count < 0);
@ -2662,8 +2672,11 @@ class FunctionLiteral final : public Expression {
: public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class ShouldNotBeUsedOnceHintField
: public BitField<bool, HasDuplicateParameters::kNext, 1> {};
class DontOptimizeReasonField
: public BitField<BailoutReason, HasDuplicateParameters::kNext, 8> {};
: public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
};
int expected_property_count_;
int parameter_count_;
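
The hint is stored by splicing a new one-bit field into FunctionLiteral's packed bit_field_, which is why DontOptimizeReasonField's base moves from HasDuplicateParameters::kNext to ShouldNotBeUsedOnceHintField::kNext. A simplified sketch of that chained BitField layout (the template is pared down and the base offset is assumed to be zero, unlike the real chain starting at Expression::kNextBitFieldIndex):

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr int kNext = kShift + kSize;
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static uint32_t update(uint32_t bits, T value) {
    return (bits & ~kMask) | encode(value);
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> kShift);
  }
};

// Mirror of the chained layout in the diff above.
using HasDuplicateParameters = BitField<bool, 0, 1>;
using ShouldNotBeUsedOnceHintField =
    BitField<bool, HasDuplicateParameters::kNext, 1>;
using DontOptimizeReasonField =
    BitField<int, ShouldNotBeUsedOnceHintField::kNext, 8>;

int main() {
  uint32_t bits = HasDuplicateParameters::encode(true) |
                  ShouldNotBeUsedOnceHintField::encode(false) |
                  DontOptimizeReasonField::encode(7);
  bits = ShouldNotBeUsedOnceHintField::update(bits, true);  // set the hint
  assert(ShouldNotBeUsedOnceHintField::decode(bits));
  assert(DontOptimizeReasonField::decode(bits) == 7);  // neighbors untouched
}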


@ -1585,6 +1585,71 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(r4);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - isolate
// r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(2, 0);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - isolate
// r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(2, 0);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
__ PushStandardFrame(r1);
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
__ mov(pc, r0);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
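
The control flow of these builtins can be pictured with a plain C++ analogy (function pointers standing in for patched instructions; not V8 code): invoking old code first runs a stub that rewrites the entry point back to the young version and then resumes, so the patching cost is paid only on the first call.

#include <cassert>

static int g_runs = 0;

void YoungPrologue() { ++g_runs; }  // the normal frame-setup path

void (*entry)() = nullptr;

void MakeYoungAgainStub() {
  entry = &YoungPrologue;  // "patch" the code back to its young form
  entry();                 // resume at the start of the young sequence
}

int main() {
  entry = &MakeYoungAgainStub;  // code was marked old
  entry();  // first call goes through the stub, which unpatches and re-runs
  entry();  // later calls hit the young path directly
  assert(g_runs == 2);
}
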
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);


@ -1602,6 +1602,82 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(x4);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code fast, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// The following caller-saved registers must be saved and restored when
// calling through to the runtime:
// x0 - The address from which to resume execution.
// x1 - isolate
// x3 - new target
// lr - The return address for the JSFunction itself. It has not yet been
// preserved on the stack because the frame setup code was replaced
// with a call to this stub, to handle code ageing.
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Push(x0, x1, x3, fp, lr);
__ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ Pop(lr, fp, x3, x1, x0);
}
// The calling function has been made young again, so return to execute the
// real frame set-up code.
__ Br(x0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
// The following caller-saved registers must be saved and restored when
// calling through to the runtime:
// x0 - The address from which to resume execution.
// x1 - isolate
// x3 - new target
// lr - The return address for the JSFunction itself. It has not yet been
// preserved on the stack because the frame setup code was replaced
// with a call to this stub, to handle code ageing.
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Push(x0, x1, x3, fp, lr);
__ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
__ Pop(lr, fp, x3, x1, x0);
// Perform prologue operations usually performed by the young code stub.
__ EmitFrameSetupForCodeAgePatching(masm);
}
// Jump to point after the code-age stub.
__ Add(x0, x0, kNoCodeAgeSequenceLength);
__ Br(x0);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);


@ -8,6 +8,26 @@
namespace v8 {
namespace internal {
#define CODE_AGE_LIST_WITH_ARG(V, A) \
V(Quadragenarian, A) \
V(Quinquagenarian, A) \
V(Sexagenarian, A) \
V(Septuagenarian, A) \
V(Octogenarian, A)
#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
#define CODE_AGE_LIST(V) CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define CODE_AGE_LIST_COMPLETE(V) \
V(ToBeExecutedOnce) \
V(NotExecuted) \
V(ExecutedOnce) \
V(NoAge) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define DECLARE_CODE_AGE_BUILTIN(C, V) V(Make##C##CodeYoungAgain)
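
These are classic X-macros: CODE_AGE_LIST applies a caller-supplied macro to each age name, so a single list stamps out every Make*CodeYoungAgain builtin. A runnable sketch of the expansion (the printf bodies are illustrative stand-ins for the real generators):

#include <cstdio>

#define CODE_AGE_LIST_WITH_ARG(V, A) \
  V(Quadragenarian, A)               \
  V(Quinquagenarian, A)              \
  V(Sexagenarian, A)                 \
  V(Septuagenarian, A)               \
  V(Octogenarian, A)

#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
#define CODE_AGE_LIST(V) CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)

// Expands to one function definition per age, like the builtin generators.
#define DEFINE_GENERATOR(C)                               \
  void Generate_Make##C##CodeYoungAgain() {               \
    std::printf("Generate_Make" #C "CodeYoungAgain\n");   \
  }
CODE_AGE_LIST(DEFINE_GENERATOR)
#undef DEFINE_GENERATOR

int main() {
  Generate_MakeQuadragenarianCodeYoungAgain();
  Generate_MakeOctogenarianCodeYoungAgain();
}
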
// CPP: Builtin in C++. Entered via BUILTIN_EXIT frame.
// Args: name
// API: Builtin in C++ for API callbacks. Entered via EXIT frame.
@ -24,6 +44,9 @@ namespace internal {
// Args: name
#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
/* Code aging */ \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
\
/* GC write barrier */ \
TFC(RecordWrite, RecordWrite, 1) \
\
@ -121,6 +144,9 @@ namespace internal {
ASM(CompileLazy) \
ASM(CheckOptimizationMarker) \
ASM(InstantiateAsmJs) \
ASM(MarkCodeAsToBeExecutedOnce) \
ASM(MarkCodeAsExecutedOnce) \
ASM(MarkCodeAsExecutedTwice) \
ASM(NotifyDeoptimized) \
ASM(NotifySoftDeoptimized) \
ASM(NotifyLazyDeoptimized) \


@ -1371,6 +1371,78 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ jmp(ecx);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Re-execute the code that was patched back to the young age when
// the stub returns.
__ sub(Operand(esp, 0), Immediate(5));
__ pushad();
__ mov(eax, Operand(esp, 8 * kPointerSize));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2, ebx);
__ mov(Operand(esp, 1 * kPointerSize),
Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ popad();
__ ret(0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
__ pushad();
__ mov(eax, Operand(esp, 8 * kPointerSize));
__ sub(eax, Immediate(Assembler::kCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2, ebx);
__ mov(Operand(esp, 1 * kPointerSize),
Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
}
__ popad();
// Perform prologue operations usually performed by the young code stub.
__ pop(eax); // Pop return address into scratch register.
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS Function.
__ push(eax); // Push return address after frame prologue.
// Jump to point after the code-age stub.
__ ret(0);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Enter an internal frame.
{


@ -1561,6 +1561,80 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
// a3 - new target
RegList saved_regs =
(a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(saved_regs);
__ Jump(a0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
// a3 - new target
RegList saved_regs =
(a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
__ MultiPop(saved_regs);
// Perform prologue operations usually performed by the young code stub.
__ PushStandardFrame(a1);
// Jump to point after the code-age stub.
__ Jump(a0, kNoCodeAgeSequenceLength);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);

@@ -1567,6 +1567,81 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Jump(t0);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Dsubu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
// a3 - new target
RegList saved_regs =
(a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(saved_regs);
__ Jump(a0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Dsubu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
// a3 - new target
RegList saved_regs =
(a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
__ MultiPop(saved_regs);
// Perform prologue operations usually performed by the young code stub.
__ PushStandardFrame(a1);
// Jump to point after the code-age stub.
__ Daddu(a0, a0, Operand(kNoCodeAgeSequenceLength));
__ Jump(a0);
}
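
On MIPS64 the stub resumes the caller by adding the sequence length to the patch-sequence address handed to it in a0. A small sketch of that arithmetic, assuming the 9-instruction, 4-bytes-per-instruction sequence declared in the assembler-mips64 hunk further down:

#include <cstdint>
#include <cstdio>

int main() {
  const int kInstrSize = 4;                             // fixed-width MIPS encoding
  const int kNoCodeAgeSequenceLength = 9 * kInstrSize;  // 36 bytes
  uintptr_t a0 = 0x40001000;                            // hypothetical sequence start
  uintptr_t resume = a0 + kNoCodeAgeSequenceLength;     // __ Daddu + __ Jump
  std::printf("resume at +%d bytes\n", static_cast<int>(resume - a0));
  return 0;
}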
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);

@@ -1612,6 +1612,86 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Point r3 at the start of the PlatformCodeAge sequence.
__ mr(r3, ip);
// The following registers must be saved and restored when calling through to
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
// r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
__ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
__ Jump(ip);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Point r3 at the start of the PlatformCodeAge sequence.
__ mr(r3, ip);
// The following registers must be saved and restored when calling through to
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
// r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
__ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
__ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
// Perform prologue operations usually performed by the young code stub.
__ PushStandardFrame(r4);
// Jump to point after the code-age stub.
__ addi(r3, ip, Operand(kNoCodeAgeSequenceLength));
__ Jump(r3);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

@@ -1607,6 +1607,90 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Point r2 at the start of the PlatformCodeAge sequence.
__ CleanseP(r14);
__ SubP(r14, Operand(kCodeAgingSequenceLength));
__ LoadRR(r2, r14);
__ pop(r14);
// The following registers must be saved and restored when calling through to
// the runtime:
// r2 - contains return address (beginning of patch sequence)
// r3 - isolate
// r5 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r4);
__ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ LoadRR(ip, r2);
__ Jump(ip);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Point r2 at the start of the PlatformCodeAge sequence.
__ CleanseP(r14);
__ SubP(r14, Operand(kCodeAgingSequenceLength));
__ LoadRR(r2, r14);
__ pop(r14);
// The following registers must be saved and restored when calling through to
// the runtime:
// r2 - contains return address (beginning of patch sequence)
// r3 - isolate
// r5 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r4);
__ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
__ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ LoadRR(ip, r2);
// Perform prologue operations usually performed by the young code stub.
__ PushStandardFrame(r3);
// Jump to point after the code-age stub.
__ AddP(r2, ip, Operand(kNoCodeAgeSequenceLength));
__ Jump(r2);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

@@ -1348,6 +1348,74 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ jmp(rcx);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Re-execute the code that was patched back to the young age when
// the stub returns.
__ subp(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2);
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ Popad();
__ ret(0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
__ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2);
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
}
__ Popad();
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
__ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
__ Push(rsi); // Callee's context.
__ Push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
// Jump to point after the code-age stub.
__ ret(0);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Enter an internal frame.
{

@@ -66,6 +66,7 @@
namespace v8 {
namespace internal {
class CompilationInfo;
class EhFrameWriter;
class ParseInfo;
@@ -89,6 +90,7 @@ class CodeGenerator {
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
@@ -102,6 +104,33 @@ V8_EXPORT_PRIVATE double modulo(double x, double y);
double fast_sqrt(double input, Isolate* isolate);
void lazily_initialize_fast_sqrt(Isolate* isolate);
class CodeAgingHelper {
public:
explicit CodeAgingHelper(Isolate* isolate);
uint32_t young_sequence_length() const { return young_sequence_.length(); }
bool IsYoung(byte* candidate) const {
return memcmp(candidate,
young_sequence_.start(),
young_sequence_.length()) == 0;
}
void CopyYoungSequenceTo(byte* new_buffer) const {
CopyBytes(new_buffer, young_sequence_.start(), young_sequence_.length());
}
#ifdef DEBUG
bool IsOld(byte* candidate) const;
#endif
protected:
const EmbeddedVector<byte, kNoCodeAgeSequenceLength> young_sequence_;
#ifdef DEBUG
#ifdef V8_TARGET_ARCH_ARM64
const EmbeddedVector<byte, kNoCodeAgeSequenceLength> old_sequence_;
#endif
#endif
};
} // namespace internal
} // namespace v8
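
CodeAgingHelper reduces the young/old test to a byte comparison against one canonical sequence. A self-contained model of the IsYoung/CopyYoungSequenceTo pair (the prologue bytes here are illustrative ia32 encodings, not quoted from this change):

#include <cassert>
#include <cstring>

int main() {
  const unsigned char young[5] = {0x55, 0x8B, 0xEC, 0x56, 0x57};  // push/mov/push/push
  unsigned char code[5];
  std::memcpy(code, young, sizeof(young));               // freshly generated prologue
  assert(std::memcmp(code, young, sizeof(young)) == 0);  // IsYoung
  code[0] = 0xE8;                                        // aged: starts with a call
  assert(std::memcmp(code, young, sizeof(young)) != 0);  // IsOld-style check
  std::memcpy(code, young, sizeof(young));               // CopyYoungSequenceTo
  assert(std::memcmp(code, young, sizeof(young)) == 0);
  return 0;
}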

@@ -151,6 +151,14 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// Code getters and setters.
bool GeneratePreagedPrologue() const {
// Generate a pre-aged prologue if we are optimizing for size, which
// will make code old more aggressive. Only apply to Code::FUNCTION,
// since only functions are aged in the compilation cache.
return FLAG_optimize_for_size && FLAG_age_code &&
output_code_kind() == Code::FUNCTION;
}
void SetCode(Handle<Code> code) { code_ = code; }
void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {

@@ -278,6 +278,13 @@ void InstallUnoptimizedCode(CompilationInfo* compilation_info) {
// Ensure feedback metadata is installed.
EnsureFeedbackMetadata(compilation_info);
// Mark code to be executed once before being aged if necessary.
// TODO(6409): Remove when full-codegen dies.
DCHECK(!compilation_info->code().is_null());
if (compilation_info->literal()->should_be_used_once_hint()) {
compilation_info->code()->MarkToBeExecutedOnce(compilation_info->isolate());
}
// Update the shared function info with the scope info.
Handle<ScopeInfo> scope_info = compilation_info->scope()->scope_info();
shared->set_scope_info(*scope_info);

@@ -2728,7 +2728,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(lr, fp);
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue();
__ Prologue(this->info()->GeneratePreagedPrologue());
if (descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
@@ -2736,7 +2736,9 @@
__ StubPrologue(info()->GetOutputStackFrameType());
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
if (!info()->GeneratePreagedPrologue()) {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
}
int shrink_slots =

@@ -2415,12 +2415,14 @@ void CodeGenerator::AssembleConstructFrame() {
// Link the frame
if (descriptor->IsJSFunctionCall()) {
DCHECK(!descriptor->UseNativeStack());
__ Prologue();
__ Prologue(this->info()->GeneratePreagedPrologue());
} else {
__ Push(lr, fp);
__ Mov(fp, __ StackPointer());
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
if (!info()->GeneratePreagedPrologue()) {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
// Create OSR entry if applicable
if (info()->is_osr()) {

@@ -2659,7 +2659,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ push(ebp);
__ mov(ebp, esp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue();
__ Prologue(this->info()->GeneratePreagedPrologue());
if (descriptor->PushArgumentCount()) {
__ push(kJavaScriptCallArgCountRegister);
}

@@ -3189,7 +3189,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(ra, fp);
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue();
__ Prologue(this->info()->GeneratePreagedPrologue());
if (descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}

@@ -3485,7 +3485,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(ra, fp);
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue();
__ Prologue(this->info()->GeneratePreagedPrologue());
if (descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}

@@ -2248,7 +2248,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ mr(fp, sp);
}
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
__ Prologue(this->info()->GeneratePreagedPrologue(), ip);
if (descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}

@@ -2617,7 +2617,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(r14, fp);
__ LoadRR(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
__ Prologue(this->info()->GeneratePreagedPrologue(), ip);
if (descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}

@@ -2980,7 +2980,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ pushq(rbp);
__ movq(rbp, rsp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue();
__ Prologue(this->info()->GeneratePreagedPrologue());
if (descriptor->PushArgumentCount()) {
__ pushq(kJavaScriptCallArgCountRegister);
}
@@ -2988,7 +2988,7 @@
__ StubPrologue(info()->GetOutputStackFrameType());
}
if (!descriptor->IsJSFunctionCall()) {
if (!descriptor->IsJSFunctionCall() || !info()->GeneratePreagedPrologue()) {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
}

@@ -245,6 +245,13 @@ Counters::Counters(Isolate* isolate)
{&Counters::size_of_FIXED_ARRAY_##name##_, \
"c:" "V8.SizeOf_FIXED_ARRAY-" #name},
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
#define SC(name) \
{&Counters::count_of_CODE_AGE_##name##_, \
"c:" "V8.CountOf_CODE_AGE-" #name}, \
{&Counters::size_of_CODE_AGE_##name##_, \
"c:" "V8.SizeOf_CODE_AGE-" #name},
CODE_AGE_LIST_COMPLETE(SC)
#undef SC
};
// clang-format on
@@ -282,6 +289,12 @@ void Counters::ResetCounterFunction(CounterLookupCallback f) {
size_of_FIXED_ARRAY_##name##_.Reset();
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
#define SC(name) \
count_of_CODE_AGE_##name##_.Reset(); \
size_of_CODE_AGE_##name##_.Reset();
CODE_AGE_LIST_COMPLETE(SC)
#undef SC
}
void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {

@@ -1346,6 +1346,14 @@ class Counters : public std::enable_shared_from_this<Counters> {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
#define SC(name) \
StatsCounter* count_of_CODE_AGE_##name() \
{ return &count_of_CODE_AGE_##name##_; } \
StatsCounter* size_of_CODE_AGE_##name() \
{ return &size_of_CODE_AGE_##name##_; }
CODE_AGE_LIST_COMPLETE(SC)
#undef SC
// clang-format off
enum Id {
#define RATE_ID(name, caption, max, res) k_##name,
@@ -1377,6 +1385,10 @@
#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \
kSizeOfFIXED_ARRAY__##name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
#undef COUNTER_ID
#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
kSizeOfCODE_AGE__##name,
CODE_AGE_LIST_COMPLETE(COUNTER_ID)
#undef COUNTER_ID
stats_counter_count
};
@@ -1468,6 +1480,12 @@
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
#define SC(name) \
StatsCounter size_of_CODE_AGE_##name##_; \
StatsCounter count_of_CODE_AGE_##name##_;
CODE_AGE_LIST_COMPLETE(SC)
#undef SC
RuntimeCallStats runtime_call_stats_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);

@@ -161,6 +161,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"date_cache_stamp");
Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
"address_of_pending_message_obj");
Add(ExternalReference::get_make_code_young_function(isolate).address(),
"Code::MakeCodeYoung");
Add(ExternalReference::cpu_features().address(), "cpu_features");
Add(ExternalReference::old_space_allocation_top_address(isolate).address(),
"Heap::OldSpaceAllocationTopAddress");
@@ -169,6 +171,8 @@
Add(ExternalReference::allocation_sites_list_address(isolate).address(),
"Heap::allocation_sites_list_address()");
Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
"Code::MarkCodeAsExecuted");
Add(ExternalReference::is_profiling_address(isolate).address(),
"Isolate::is_profiling");
Add(ExternalReference::scheduled_exception_address(isolate).address(),

@@ -587,6 +587,7 @@ DEFINE_BOOL(trace_fragmentation_verbose, false,
DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
DEFINE_BOOL(trace_mutator_utilization, false,
"print mutator utilization, allocation speed, gc speed")
DEFINE_BOOL(age_code, true, "track un-executed functions to age code")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
@@ -756,6 +757,7 @@ DEFINE_BOOL(trace_deopt, false, "trace optimize function deoptimization")
DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
DEFINE_BOOL(serialize_eager, false, "compile eagerly when caching scripts")
DEFINE_BOOL(serialize_age_code, false, "pre age code in the code cache")
DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
#ifdef DEBUG
DEFINE_BOOL(external_reference_stats, false,

@@ -5143,6 +5143,16 @@ class SlotVerifyingVisitor : public ObjectVisitor {
}
}
void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override {
Object* target = rinfo->code_age_stub();
if (ShouldHaveBeenRecorded(host, target)) {
CHECK(
InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
}
}
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
Object* target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, target)) {
@@ -6798,6 +6808,14 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
*object_sub_type = #name; \
return true;
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name) \
case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
Code::kFirstCodeAge: \
*object_type = "CODE_TYPE"; \
*object_sub_type = "CODE_AGE/" #name; \
return true;
CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
}
return false;

@@ -1487,6 +1487,14 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
collector_->RecordRelocSlot(host, rinfo, object);
}
inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Code* stub = rinfo->code_age_stub();
USE(stub);
DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
}
// Entries that are skipped for recording.
inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
inline void VisitExternalReference(Foreign* host, Address* p) final {}
@@ -1573,6 +1581,9 @@ class YoungGenerationRecordMigratedSlotVisitor final
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
UNREACHABLE();
}
void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final {
UNREACHABLE();
}
private:
// Only record slots for host objects that are considered as live by the full

@@ -104,14 +104,20 @@ void ObjectStats::PrintJSON(const char* key) {
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
FIRST_FIXED_ARRAY_SUB_TYPE + name);
#define CODE_AGE_WRAPPER(name) \
PrintInstanceTypeJSON( \
key, gc_count, "*CODE_AGE_" #name, \
FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
CODE_KIND_LIST(CODE_KIND_WRAPPER)
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER)
CODE_AGE_LIST_COMPLETE(CODE_AGE_WRAPPER)
#undef INSTANCE_TYPE_WRAPPER
#undef CODE_KIND_WRAPPER
#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef CODE_AGE_WRAPPER
#undef PRINT_INSTANCE_TYPE_DATA
#undef PRINT_KEY_AND_ID
}
@@ -155,14 +161,21 @@ void ObjectStats::Dump(std::stringstream& stream) {
DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
FIRST_FIXED_ARRAY_SUB_TYPE + name);
#define CODE_AGE_WRAPPER(name) \
DumpInstanceTypeData( \
stream, "*CODE_AGE_" #name, \
FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
CODE_KIND_LIST(CODE_KIND_WRAPPER);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
CODE_AGE_LIST_COMPLETE(CODE_AGE_WRAPPER);
stream << "\"END\":{}}}";
#undef INSTANCE_TYPE_WRAPPER
#undef CODE_KIND_WRAPPER
#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef CODE_AGE_WRAPPER
#undef PRINT_INSTANCE_TYPE_DATA
}
@@ -205,6 +218,19 @@ void ObjectStats::CheckpointObjectStats() {
static_cast<int>(object_sizes_last_time_[index]));
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
index = \
FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
counters->count_of_CODE_AGE_##name()->Increment( \
static_cast<int>(object_counts_[index])); \
counters->count_of_CODE_AGE_##name()->Decrement( \
static_cast<int>(object_counts_last_time_[index])); \
counters->size_of_CODE_AGE_##name()->Increment( \
static_cast<int>(object_sizes_[index])); \
counters->size_of_CODE_AGE_##name()->Decrement( \
static_cast<int>(object_sizes_last_time_[index]));
CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
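
The Increment/Decrement pairing in that macro publishes a per-GC delta, so the external counter converges on the latest absolute value rather than accumulating a running sum. A standalone rendition of the pattern (plain longs stand in for StatsCounter):

#include <cassert>

int main() {
  long counter = 0, last = 0;
  long current = 42;            // object_counts_[index] after this GC
  counter += current;           // Increment(current)
  counter -= last;              // Decrement(last)
  last = current;               // MemCopy into *_last_time_
  assert(counter == 42);
  current = 30;                 // fewer objects on the next GC
  counter += current; counter -= last; last = current;
  assert(counter == 30);        // tracks the live value, not the sum
  return 0;
}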
@@ -492,7 +518,7 @@ void ObjectStatsCollector::RecordBytecodeArrayDetails(BytecodeArray* obj) {
}
void ObjectStatsCollector::RecordCodeDetails(Code* code) {
stats_->RecordCodeSubTypeStats(code->kind(), code->Size());
stats_->RecordCodeSubTypeStats(code->kind(), code->GetAge(), code->Size());
RecordFixedArrayHelper(code, code->deoptimization_data(),
DEOPTIMIZATION_DATA_SUB_TYPE, 0);
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {

@@ -27,8 +27,9 @@ class ObjectStats {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
OBJECT_STATS_COUNT =
FIRST_CODE_AGE_SUB_TYPE =
FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
};
void ClearObjectStats(bool clear_last_time_stats = false);
@@ -44,14 +45,21 @@
size_histogram_[type][HistogramIndexFromSize(size)]++;
}
void RecordCodeSubTypeStats(int code_sub_type, size_t size) {
void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
int code_age_index =
FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
code_sub_type_index < FIRST_FIXED_ARRAY_SUB_TYPE);
code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
code_age_index < OBJECT_STATS_COUNT);
object_counts_[code_sub_type_index]++;
object_sizes_[code_sub_type_index] += size;
object_counts_[code_age_index]++;
object_sizes_[code_age_index] += size;
const int idx = HistogramIndexFromSize(size);
size_histogram_[code_sub_type_index][idx]++;
size_histogram_[code_age_index][idx]++;
}
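
RecordCodeSubTypeStats files each Code object into two buckets of one flat stats array: one offset by kind, one by age. A worked example of the index arithmetic with invented base offsets (only kFirstCodeAge = -3 matches the Code::Age enum reinstated later in this change):

#include <cassert>

int main() {
  const int FIRST_CODE_KIND_SUB_TYPE = 100;  // assumed base, illustration only
  const int FIRST_CODE_AGE_SUB_TYPE = 140;   // assumed base, illustration only
  const int kFirstCodeAge = -3;              // kToBeExecutedOnceCodeAge
  int code_sub_type = 3;                     // some Code::Kind
  int code_age = 0;                          // kNoAgeCodeAge
  int kind_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
  int age_index = FIRST_CODE_AGE_SUB_TYPE + code_age - kFirstCodeAge;
  assert(kind_index == 103 && age_index == 143);  // negative ages stay in range
  return 0;
}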
bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,

@@ -287,6 +287,9 @@ int MarkingVisitor<ConcreteVisitor>::VisitBytecodeArray(Map* map,
template <typename ConcreteVisitor>
int MarkingVisitor<ConcreteVisitor>::VisitCode(Map* map, Code* code) {
if (FLAG_age_code && !heap_->isolate()->serializer_enabled()) {
code->MakeOlder();
}
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
int size = Code::BodyDescriptor::SizeOf(map, code);
Code::BodyDescriptor::IterateBody(code, size, visitor);
@@ -391,6 +394,17 @@ void MarkingVisitor<ConcreteVisitor>::VisitCodeTarget(Code* host,
visitor->MarkObject(host, target);
}
template <typename ConcreteVisitor>
void MarkingVisitor<ConcreteVisitor>::VisitCodeAgeSequence(Code* host,
RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Code* target = rinfo->code_age_stub();
DCHECK_NOT_NULL(target);
collector_->RecordRelocSlot(host, rinfo, target);
visitor->MarkObject(host, target);
}
} // namespace internal
} // namespace v8
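
The net effect of the VisitCode hook is that every full-GC marking pass ages live code once, unless the serializer is running. A toy model of that control flow (the struct and the flush threshold are invented; only the branch mirrors the visitor):

#include <cstdio>

struct FakeCode { int age = 0; };

void VisitCode(FakeCode& code, bool age_code, bool serializer_enabled) {
  if (age_code && !serializer_enabled) code.age++;  // stands in for MakeOlder()
}

int main() {
  FakeCode code;
  const int kIsOldAge = 3;  // assumed flush threshold
  for (int gc = 0; gc < 5; ++gc) VisitCode(code, /*age_code=*/true, false);
  std::printf("old after 5 GCs: %s\n", code.age >= kIsOldAge ? "yes" : "no");
  return 0;
}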

@@ -129,6 +129,7 @@ class MarkingVisitor : public HeapVisitor<int, ConcreteVisitor> {
// ObjectVisitor implementation.
V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final;
// Skip weak next code link.
V8_INLINE void VisitNextCodeLink(Code* host, Object** p) final {}

@@ -59,6 +59,11 @@ void RelocInfo::apply(intptr_t delta) {
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
} else if (IsCodeAgeSequence(rmode_)) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
}
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
@@ -147,6 +152,32 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
}
}
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(*pc_ == kCallOpcode);
return Handle<Code>::cast(Memory::Object_Handle_at(pc_ + 1));
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(stub->GetIsolate(), pc_ + 1, host_,
stub->instruction_start(),
icache_flush_mode);
}
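
The pc_ + 1 offsets above skip the one-byte E8 opcode of the patched-in call, whose 32-bit displacement is relative to the next instruction. A standalone sketch of encoding and decoding such a call (addresses are made up):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  unsigned char seq[5] = {0xE8, 0, 0, 0, 0};             // call rel32
  uintptr_t pc = 0x1000, stub = 0x2345;                  // hypothetical addresses
  int32_t disp = static_cast<int32_t>(stub - (pc + 5));  // relative to next insn
  std::memcpy(seq + 1, &disp, sizeof(disp));             // set_target_address idea
  int32_t read_back;
  std::memcpy(&read_back, seq + 1, sizeof(read_back));
  assert(pc + 5 + read_back == stub);                    // target_address_at idea
  return 0;
}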
void RelocInfo::WipeOut(Isolate* isolate) {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
@@ -172,6 +203,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
@@ -190,6 +223,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
@@ -253,7 +288,7 @@ void Assembler::emit(Handle<HeapObject> handle) {
}
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) {
if (!RelocInfo::IsNone(rmode) && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);

@@ -184,9 +184,9 @@ void Displacement::init(Label* L, Type type) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE;
const int RelocInfo::kApplyMask =
RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::CODE_AGE_SEQUENCE;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -1596,7 +1596,8 @@ int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK(RelocInfo::IsCodeTarget(rmode)
|| rmode == RelocInfo::CODE_AGE_SEQUENCE);
EMIT(0xE8);
emit(code, rmode);
}

@@ -579,6 +579,56 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
CodePatcher patcher(isolate, young_sequence_.start(),
young_sequence_.length());
patcher.masm()->push(ebp);
patcher.masm()->mov(ebp, esp);
patcher.masm()->push(esi);
patcher.masm()->push(edi);
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return *candidate == kCallOpcode;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
sequence++; // Skip the kCallOpcode byte
Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
Assembler::kCallTargetAddressOffset;
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
}
} // namespace internal
} // namespace v8

@@ -94,7 +94,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Fail hard and early if we enter this code object again.
CodePatcher patcher(isolate, code_start_address, 1);
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =

@@ -636,11 +636,20 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::Prologue() {
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(esi); // Callee's context.
push(edi); // Callee's JS function.
void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictable_code_size_scope(this,
kNoCodeAgeSequenceLength);
if (code_pre_aging) {
// Pre-age the code.
call(BUILTIN_CODE(isolate(), MarkCodeAsExecutedOnce),
RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
} else {
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(esi); // Callee's context.
push(edi); // Callee's JS function.
}
}
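
The PredictableCodeSizeScope and the trailing Nop() exist to keep one invariant: the pre-aged form (a call plus padding) and the young form must occupy the same number of bytes, so either can later be patched over the other in place. With standard ia32 encodings both happen to be five bytes, as this sketch checks (byte counts assumed, not quoted from this change):

#include <cassert>

int main() {
  // push ebp (1) + mov ebp,esp (2) + push esi (1) + push edi (1)
  const int kYoungSequenceLength = 1 + 2 + 1 + 1;
  const int kCallInstructionLength = 5;  // E8 + 4-byte displacement
  int pad = kYoungSequenceLength - kCallInstructionLength;
  assert(pad == 0);  // Nop(...) would emit this many filler bytes
  return 0;
}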
void TurboAssembler::EnterFrame(StackFrame::Type type) {

@@ -189,7 +189,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue();
void Prologue(bool code_pre_aging);
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, const Operand& src);

@@ -2306,6 +2306,7 @@ Isolate::Isolate(bool enable_serializer)
logger_(NULL),
load_stub_cache_(NULL),
store_stub_cache_(NULL),
code_aging_helper_(NULL),
deoptimizer_data_(NULL),
deoptimizer_lazy_throw_(false),
materialized_object_store_(NULL),
@@ -2558,6 +2559,8 @@ Isolate::~Isolate() {
load_stub_cache_ = NULL;
delete store_stub_cache_;
store_stub_cache_ = NULL;
delete code_aging_helper_;
code_aging_helper_ = NULL;
delete materialized_object_store_;
materialized_object_store_ = NULL;
@@ -2751,6 +2754,8 @@ bool Isolate::Init(StartupDeserializer* des) {
return false;
}
code_aging_helper_ = new CodeAgingHelper(this);
// Initialize the interface descriptors ahead of time.
#define INTERFACE_DESCRIPTOR(Name, ...) \
{ Name##Descriptor(this); }

@@ -51,6 +51,7 @@ class BasicBlockProfiler;
class Bootstrapper;
class CallInterfaceDescriptorData;
class CancelableTaskManager;
class CodeAgingHelper;
class CodeEventDispatcher;
class CodeGenerator;
class CodeRange;
@@ -901,6 +902,7 @@ class Isolate {
Heap* heap() { return &heap_; }
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
void set_deoptimizer_lazy_throw(bool value) {
@@ -1455,6 +1457,7 @@ class Isolate {
StackGuard stack_guard_;
StubCache* load_stub_cache_;
StubCache* store_stub_cache_;
CodeAgingHelper* code_aging_helper_;
DeoptimizerData* deoptimizer_data_;
bool deoptimizer_lazy_throw_;
MaterializedObjectStore* materialized_object_store_;

@@ -278,6 +278,31 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
static const int kNoCodeAgeSequenceLength = 7 * Assembler::kInstrSize;
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE();  // This should never be reached on MIPS.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(stub->GetIsolate(),
pc_ + Assembler::kInstrSize, host_,
stub->instruction_start());
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -303,6 +328,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
@@ -321,6 +348,8 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}

@@ -682,6 +682,77 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the MIPS simulator ICache is set up.
std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushStandardFrame(a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
Address target_address =
Assembler::target_address_at(sequence + Assembler::kInstrSize);
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
// GetCodeAge() extracts the stub address from this instruction.
patcher.masm()->li(
t9,
Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
CONSTANT_SIZE);
patcher.masm()->nop(); // Prevent jalr to jal optimization.
patcher.masm()->jalr(t9, a0);
patcher.masm()->nop(); // Branch delay slot nop.
patcher.masm()->nop(); // Pad the empty space.
}
}
#undef __
} // namespace internal

@@ -30,7 +30,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
CodePatcher patcher(isolate, code_start_address, 1);
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->break_(0xCC);
DeoptimizationInputData* data =

@@ -4808,7 +4808,29 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
PushCommonFrame(scratch);
}
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictable_code_size_scope(
this, kNoCodeAgeSequenceLength);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
if (code_pre_aging) {
// Pre-age the code.
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
// GetCodeAge() extracts the stub address from this instruction.
li(t9,
Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
CONSTANT_SIZE);
nop(); // Prevent jalr to jal optimization.
jalr(t9, a0);
nop(); // Branch delay slot nop.
nop(); // Pad the empty space.
} else {
PushStandardFrame(a1);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
int stack_offset, fp_offset;

@@ -158,7 +158,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue();
void Prologue(bool code_pre_aging);
void InitializeRootRegister() {
ExternalReference roots_array_start =

@@ -253,6 +253,31 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
static const int kNoCodeAgeSequenceLength = 9 * Assembler::kInstrSize;
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE();  // This should never be reached on MIPS64.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(stub->GetIsolate(),
pc_ + Assembler::kInstrSize, host_,
stub->instruction_start());
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -278,6 +303,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
@@ -296,6 +323,8 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}

@@ -684,6 +684,79 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the MIPS simulator ICache is set up.
std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushStandardFrame(a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
Address target_address =
Assembler::target_address_at(sequence + Assembler::kInstrSize);
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
// GetCodeAge() extracts the stub address from this instruction.
patcher.masm()->li(
t9,
Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
ADDRESS_LOAD);
patcher.masm()->nop(); // Prevent jalr to jal optimization.
patcher.masm()->jalr(t9, a0);
patcher.masm()->nop(); // Branch delay slot nop.
patcher.masm()->nop(); // Pad the empty space.
}
}
#undef __
} // namespace internal

@@ -30,7 +30,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
CodePatcher patcher(isolate, code_start_address, 1);
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->break_(0xCC);
DeoptimizationInputData* data =

@@ -5056,7 +5056,31 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
PushCommonFrame(scratch);
}
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictable_code_size_scope(
this, kNoCodeAgeSequenceLength);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
if (code_pre_aging) {
// Pre-age the code.
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
// GetCodeAge() extracts the stub address from this instruction.
li(t9,
Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
ADDRESS_LOAD);
nop(); // Prevent jalr to jal optimization.
jalr(t9, a0);
nop(); // Branch delay slot nop.
nop(); // Pad the empty space.
} else {
PushStandardFrame(a1);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
int stack_offset, fp_offset;

@@ -187,7 +187,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue();
void Prologue(bool code_pre_aging);
void InitializeRootRegister() {
ExternalReference roots_array_start =

@@ -13930,6 +13930,16 @@ void ObjectVisitor::VisitCodeTarget(Code* host, RelocInfo* rinfo) {
DCHECK_EQ(old_pointer, new_pointer);
}
void ObjectVisitor::VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Object* old_pointer = rinfo->code_age_stub();
Object* new_pointer = old_pointer;
if (old_pointer != nullptr) {
VisitPointer(host, &new_pointer);
DCHECK_EQ(old_pointer, new_pointer);
}
}
void ObjectVisitor::VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
Object* old_pointer = rinfo->target_object();
@@ -14014,6 +14024,10 @@ void Code::CopyFrom(const CodeDesc& desc) {
Address p = it.rinfo()->target_runtime_entry(origin);
it.rinfo()->set_target_runtime_entry(
GetIsolate(), p, UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_code_age_stub(code, SKIP_ICACHE_FLUSH);
} else {
intptr_t delta = instruction_start() - desc.buffer;
it.rinfo()->apply(delta);
@@ -14178,6 +14192,136 @@ void JSFunction::ClearTypeFeedbackInfo() {
}
}
void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge);
}
void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge);
}
// NextAge defines the Code::Age state transitions during a GC cycle.
static Code::Age NextAge(Code::Age age) {
switch (age) {
case Code::kNotExecutedCodeAge: // Keep, until we've been executed.
case Code::kToBeExecutedOnceCodeAge: // Keep, until we've been executed.
case Code::kLastCodeAge: // Clamp at last Code::Age value.
return age;
case Code::kExecutedOnceCodeAge:
// Pre-age code that has only been executed once.
return static_cast<Code::Age>(Code::kPreAgedCodeAge + 1);
default:
return static_cast<Code::Age>(age + 1); // Default case: Increase age.
}
}
// IsOldAge defines the collection criteria for a Code object.
static bool IsOldAge(Code::Age age) {
return age >= Code::kIsOldCodeAge || age == Code::kNotExecutedCodeAge;
}
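
NextAge and IsOldAge together form a small state machine: ordinary code ratchets one step per full GC until it counts as old, once-executed code jumps straight to the old side, and the two not-yet-executed marker states hold steady. A self-contained restatement that can be traced outside V8 (enum values mirror the Code::Age declaration later in this diff):

#include <cstdio>

enum Age {
  kToBeExecutedOnce = -3, kNotExecuted = -2, kExecutedOnce = -1, kNoAge = 0,
  kQuadragenarian, kQuinquagenarian, kSexagenarian, kSeptuagenarian,
  kOctogenarian, kAfterLast,
  kLast = kAfterLast - 1, kIsOld = kSexagenarian, kPreAged = kIsOld - 1
};

Age NextAge(Age age) {
  switch (age) {
    case kNotExecuted:
    case kToBeExecutedOnce:
    case kLast: return age;                                     // hold in place
    case kExecutedOnce: return static_cast<Age>(kPreAged + 1);  // jump to old
    default: return static_cast<Age>(age + 1);                  // ratchet one step
  }
}

bool IsOldAge(Age age) { return age >= kIsOld || age == kNotExecuted; }

int main() {
  Age a = kNoAge;  // freshly compiled code that has been entered
  int gcs = 0;
  while (!IsOldAge(a)) { a = NextAge(a); ++gcs; }
  std::printf("flushable after %d full GCs\n", gcs);  // 3 with these values
  return 0;
}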
void Code::MakeYoung(Isolate* isolate) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) MakeCodeAgeSequenceYoung(sequence, isolate);
}
void Code::PreAge(Isolate* isolate) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
PatchPlatformCodeAge(isolate, sequence, kPreAgedCodeAge);
}
}
void Code::MarkToBeExecutedOnce(Isolate* isolate) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
PatchPlatformCodeAge(isolate, sequence, kToBeExecutedOnceCodeAge);
}
}
void Code::MakeOlder() {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
Isolate* isolate = GetIsolate();
Age age = GetCodeAge(isolate, sequence);
Age next_age = NextAge(age);
if (age != next_age) {
PatchPlatformCodeAge(isolate, sequence, next_age);
}
}
}
bool Code::IsOld() {
return IsOldAge(GetAge());
}
byte* Code::FindCodeAgeSequence() {
return FLAG_age_code && prologue_offset() != Code::kPrologueOffsetNotSet &&
kind() == OPTIMIZED_FUNCTION
? instruction_start() + prologue_offset()
: NULL;
}
Code::Age Code::GetAge() {
byte* sequence = FindCodeAgeSequence();
if (sequence == NULL) {
return kNoAgeCodeAge;
}
return GetCodeAge(GetIsolate(), sequence);
}
Code::Age Code::GetAgeOfCodeAgeStub(Code* code) {
Isolate* isolate = code->GetIsolate();
#define HANDLE_CODE_AGE(AGE) \
if (code == *BUILTIN_CODE(isolate, Make##AGE##CodeYoungAgain)) { \
return k##AGE##CodeAge; \
}
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
if (code == *BUILTIN_CODE(isolate, MarkCodeAsExecutedOnce)) {
return kNotExecutedCodeAge;
}
if (code == *BUILTIN_CODE(isolate, MarkCodeAsExecutedTwice)) {
return kExecutedOnceCodeAge;
}
if (code == *BUILTIN_CODE(isolate, MarkCodeAsToBeExecutedOnce)) {
return kToBeExecutedOnceCodeAge;
}
UNREACHABLE();
}
Code* Code::GetCodeAgeStub(Isolate* isolate, Age age) {
switch (age) {
#define HANDLE_CODE_AGE(AGE) \
case k##AGE##CodeAge: { \
return *BUILTIN_CODE(isolate, Make##AGE##CodeYoungAgain); \
}
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
case kNotExecutedCodeAge: {
return *BUILTIN_CODE(isolate, MarkCodeAsExecutedOnce);
}
case kExecutedOnceCodeAge: {
return *BUILTIN_CODE(isolate, MarkCodeAsExecutedTwice);
}
case kToBeExecutedOnceCodeAge: {
return *BUILTIN_CODE(isolate, MarkCodeAsToBeExecutedOnce);
}
default:
UNREACHABLE();
break;
}
return NULL;
}
void Code::PrintDeoptLocation(FILE* out, Address pc) {
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
class SourcePosition pos = info.position;
@@ -17485,7 +17629,11 @@ void CompilationCacheTable::Age() {
}
} else if (get(entry_index)->IsFixedArray()) {
SharedFunctionInfo* info = SharedFunctionInfo::cast(get(value_index));
if (info->IsInterpreted() && info->bytecode_array()->IsOld()) {
bool is_old =
info->IsInterpreted()
? info->bytecode_array()->IsOld()
: info->code()->kind() != Code::FUNCTION || info->code()->IsOld();
if (is_old) {
for (int i = 0; i < kEntrySize; i++) {
NoWriteBarrierSet(this, entry_index + i, the_hole_value);
}

@@ -3260,19 +3260,16 @@ class PodArray : public ByteArray {
// BytecodeArray represents a sequence of interpreter bytecodes.
class BytecodeArray : public FixedArrayBase {
public:
#define DECL_BYTECODE_AGE_ENUM(X) k##X##BytecodeAge,
enum Age {
kNoAgeBytecodeAge = 0,
kQuadragenarianBytecodeAge,
kQuinquagenarianBytecodeAge,
kSexagenarianBytecodeAge,
kSeptuagenarianBytecodeAge,
kOctogenarianBytecodeAge,
kAfterLastBytecodeAge,
CODE_AGE_LIST(DECL_BYTECODE_AGE_ENUM) kAfterLastBytecodeAge,
kFirstBytecodeAge = kNoAgeBytecodeAge,
kLastBytecodeAge = kAfterLastBytecodeAge - 1,
kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
kIsOldBytecodeAge = kSexagenarianBytecodeAge
};
#undef DECL_BYTECODE_AGE_ENUM
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
@ -4012,6 +4009,37 @@ class Code: public HeapObject {
void ClearInlineCaches();
#define DECL_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
kToBeExecutedOnceCodeAge = -3,
kNotExecutedCodeAge = -2,
kExecutedOnceCodeAge = -1,
kNoAgeCodeAge = 0,
CODE_AGE_LIST(DECL_CODE_AGE_ENUM) kAfterLastCodeAge,
kFirstCodeAge = kToBeExecutedOnceCodeAge,
kLastCodeAge = kAfterLastCodeAge - 1,
kCodeAgeCount = kAfterLastCodeAge - kFirstCodeAge - 1,
kIsOldCodeAge = kSexagenarianCodeAge,
kPreAgedCodeAge = kIsOldCodeAge - 1
};
#undef DECL_CODE_AGE_ENUM
// Code aging. Indicates how many full GCs this code has survived without
// being entered through the prologue. Used to determine when to flush code
// held in the compilation cache.
static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
void MakeYoung(Isolate* isolate);
void PreAge(Isolate* isolate);
void MarkToBeExecutedOnce(Isolate* isolate);
void MakeOlder();
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
bool IsOld();
Age GetAge();
static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
return GetCodeAgeStub(isolate, kNotExecutedCodeAge);
}
void PrintDeoptLocation(FILE* out, Address pc);
bool CanDeoptAt(Address pc);
@ -4149,6 +4177,16 @@ class Code: public HeapObject {
private:
friend class RelocIterator;
friend class Deoptimizer; // For FindCodeAgeSequence.
// Code aging
byte* FindCodeAgeSequence();
static Age GetCodeAge(Isolate* isolate, byte* sequence);
static Age GetAgeOfCodeAgeStub(Code* code);
static Code* GetCodeAgeStub(Isolate* isolate, Age age);
// Code aging -- platform-specific
static void PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Age age);
bool is_promise_rejection() const;
bool is_exception_caught() const;
@ -7372,6 +7410,10 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) {}
// Visits the byte sequence in a function's prologue that contains information
// about the code's age.
virtual void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo);
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo);
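The Age enums restored in the objects.h hunks above are generated with an X-macro: CODE_AGE_LIST(V) invokes V once per age name, and DECL_CODE_AGE_ENUM / DECL_BYTECODE_AGE_ENUM expand each name into an enumerator. A self-contained sketch of the pattern (the entries match the names visible in the BytecodeArray hunk, but AGE_LIST here is illustrative, not V8's macro):

// Sketch of the X-macro pattern behind the restored Age enums.
#include <cstdio>

#define AGE_LIST(V)   \
  V(Quadragenarian)   \
  V(Quinquagenarian)  \
  V(Sexagenarian)     \
  V(Septuagenarian)   \
  V(Octogenarian)

#define DECL_AGE_ENUM(X) k##X##Age,
enum Age { kNoAge = 0, AGE_LIST(DECL_AGE_ENUM) kAfterLastAge };
#undef DECL_AGE_ENUM

#define AGE_NAME(X) #X,
static const char* kAgeNames[] = {"NoAge", AGE_LIST(AGE_NAME)};
#undef AGE_NAME

int main() {
  for (int age = kNoAge; age < kAfterLastAge; ++age) {
    std::printf("%d -> %s\n", age, kAgeNames[age]);
  }
  return 0;
}

One list definition thus keeps the enum, the per-age stub builtins (Make##AGE##CodeYoungAgain above), and any debug names in lockstep.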


@ -4245,6 +4245,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// handling in Scope::ResolveVariable needs to change.
bool is_lazy_top_level_function =
can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
bool should_be_used_once_hint = false;
bool has_braces = true;
ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr;
{
@ -4339,6 +4340,9 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
if (should_be_used_once_hint) {
function_literal->set_should_be_used_once_hint();
}
impl()->AddFunctionForNameInference(function_literal);


@ -2585,6 +2585,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
ZoneList<Statement*>* body = nullptr;
int expected_property_count = -1;
bool should_be_used_once_hint = false;
int num_parameters = -1;
int function_length = -1;
bool has_duplicate_parameters = false;
@ -2638,8 +2639,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
DCHECK(is_lazy_top_level_function);
bookmark.Apply();
// This is probably an initialization function. Inform the compiler it
// should also eager-compile this function.
// should also eager-compile this function, and that we expect it to be
// used once.
eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
should_be_used_once_hint = true;
scope->ResetAfterPreparsing(ast_value_factory(), true);
zone_scope.Reset();
// Trigger eager (re-)parsing, just below this block.
@ -2705,6 +2708,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_length, duplicate_parameters, function_type, eager_compile_hint,
pos, true, function_literal_id, produced_preparsed_scope_data);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
if (should_infer_name) {
DCHECK_NOT_NULL(fni_);


@ -341,6 +341,7 @@ class PreParserExpression {
// More dummy implementations of things PreParser doesn't need to track:
void SetShouldEagerCompile() {}
void set_should_be_used_once_hint() {}
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
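The parser hunks above restore the plumbing for the used-once hint: when a lazy top-level parse bails out of preparsing, the function is both eagerly compiled and flagged as probably used once, which is what later lets the compiler pre-age its code. A minimal sketch of that flow (FunctionLiteral here is a hypothetical stand-in, not V8's AST node):

// Sketch of the used-once hint flow; stand-in types, not V8 source.
#include <cstdio>

struct FunctionLiteral {
  bool used_once_hint = false;
  void set_should_be_used_once_hint() { used_once_hint = true; }
};

int main() {
  bool is_lazy_top_level_function = true;
  bool preparse_bailed_out = true;  // "probably an initialization function"
  bool should_be_used_once_hint =
      is_lazy_top_level_function && preparse_bailed_out;

  FunctionLiteral literal;
  if (should_be_used_once_hint) literal.set_should_be_used_once_hint();
  std::printf("used-once hint set: %d\n", literal.used_once_hint);
  return 0;
}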


@ -220,6 +220,44 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
static const int kNoCodeAgeInstructions =
FLAG_enable_embedded_constant_pool ? 7 : 6;
static const int kCodeAgingInstructions =
Assembler::kMovInstructionsNoConstantPool + 3;
static const int kNoCodeAgeSequenceInstructions =
((kNoCodeAgeInstructions >= kCodeAgingInstructions)
? kNoCodeAgeInstructions
: kCodeAgingInstructions);
static const int kNoCodeAgeSequenceNops =
(kNoCodeAgeSequenceInstructions - kNoCodeAgeInstructions);
static const int kCodeAgingSequenceNops =
(kNoCodeAgeSequenceInstructions - kCodeAgingInstructions);
static const int kCodeAgingTargetDelta = 1 * Assembler::kInstrSize;
static const int kNoCodeAgeSequenceLength =
(kNoCodeAgeSequenceInstructions * Assembler::kInstrSize);
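These PPC constants encode one invariant: the young (frame-building) prologue and the aged (jump-to-stub) sequence must occupy the same number of instructions, so whichever is shorter is nop-padded and either can be patched over the other in place. The S390 hunk below expresses the same idea in byte lengths. A sketch of the arithmetic, with kMovInstructionsNoConstantPool assumed to be 2 purely for the example (the real PPC value is not shown in this diff):

// Sketch of the sequence-length padding arithmetic; the mov count is assumed.
#include <algorithm>
#include <cstdio>

int main() {
  const int kInstrSize = 4;
  const int kMovInstructionsNoConstantPool = 2;  // assumption for this sketch
  const bool embedded_constant_pool = false;

  const int no_age_instructions = embedded_constant_pool ? 7 : 6;
  const int aging_instructions = kMovInstructionsNoConstantPool + 3;
  const int sequence_instructions =
      std::max(no_age_instructions, aging_instructions);

  std::printf("sequence: %d instructions (%d bytes); young pads %d nop(s), "
              "aged pads %d nop(s)\n",
              sequence_instructions, sequence_instructions * kInstrSize,
              sequence_instructions - no_age_instructions,
              sequence_instructions - aging_instructions);
  return 0;
}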
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on PPC.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(
stub->GetIsolate(), pc_ + kCodeAgingTargetDelta, host_,
stub->instruction_start(), icache_flush_mode);
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@ -249,6 +287,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
@ -267,6 +307,8 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}


@ -152,6 +152,71 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the ARM simulator ICache is set up.
std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushStandardFrame(r4);
for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
patcher->masm()->nop();
}
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
Code* code = NULL;
Address target_address =
Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// Don't use Call -- we need to preserve ip and lr.
// GenerateMakeCodeYoungAgainCommon for the stub code.
patcher.masm()->nop(); // marker to detect sequence (see IsOld)
patcher.masm()->mov(r3, Operand(target));
patcher.masm()->Jump(r3);
for (int i = 0; i < kCodeAgingSequenceNops; i++) {
patcher.masm()->nop();
}
}
}
} // namespace internal
} // namespace v8


@ -37,7 +37,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
CodePatcher patcher(isolate, code_start_address, 1);
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =

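The deoptimizer hunk above (the same pattern recurs for S390 and x64 below) places the trap past the code-age sequence when one exists, so deopt patching never clobbers the patchable aging prologue. A sketch of just the patch-point selection:

// Sketch of the deopt patch-point selection restored above.
#include <cstddef>
#include <cstdio>

typedef unsigned char byte;

static byte* PatchPoint(byte* instruction_start, byte* age_sequence,
                        size_t age_sequence_length) {
  return age_sequence != nullptr ? age_sequence + age_sequence_length
                                 : instruction_start;
}

int main() {
  byte code[64] = {0};
  const size_t kSequenceLength = 24;  // e.g. 6 instructions * 4 bytes on PPC
  std::printf("no age sequence: patch at +%td\n",
              PatchPoint(code, nullptr, kSequenceLength) - code);
  std::printf("age sequence at start: patch at +%td\n",
              PatchPoint(code, code, kSequenceLength) - code);
  return 0;
}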

@ -876,9 +876,35 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
void TurboAssembler::Prologue(Register base, int prologue_offset) {
void TurboAssembler::Prologue(bool code_pre_aging, Register base,
int prologue_offset) {
DCHECK(!base.is(no_reg));
PushStandardFrame(r4);
{
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
// The following instructions must remain together and unmodified
// for code aging to work properly.
if (code_pre_aging) {
// Pre-age the code.
// This matches the code found in PatchPlatformCodeAge()
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// Don't use Call -- we need to preserve ip and lr
nop(); // marker to detect sequence (see IsOld)
mov(r3, Operand(target));
Jump(r3);
for (int i = 0; i < kCodeAgingSequenceNops; i++) {
nop();
}
} else {
// This matches the code found in GetNoCodeAgeSequence()
PushStandardFrame(r4);
for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
nop();
}
}
}
if (FLAG_enable_embedded_constant_pool) {
// base contains prologue address
LoadConstantPoolPointerRegister(base, -prologue_offset);


@ -177,7 +177,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type, Register base = no_reg,
int prologue_offset = 0);
void Prologue(Register base, int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function


@ -199,6 +199,49 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
#if V8_TARGET_ARCH_S390X
// NOP(2byte) + PUSH + MOV + BASR =
// NOP + LAY + STG + IIHF + IILF + BASR
static const int kCodeAgingSequenceLength = 28;
static const int kCodeAgingTargetDelta = 14; // Jump past NOP + PUSH to IIHF
// LAY + 4 * STG + LA
static const int kNoCodeAgeSequenceLength = 34;
#else
#if (V8_HOST_ARCH_S390)
// NOP + NILH + LAY + ST + IILF + BASR
static const int kCodeAgingSequenceLength = 24;
static const int kCodeAgingTargetDelta = 16; // Jump past NOP to IILF
// NILH + LAY + 4 * ST + LA
static const int kNoCodeAgeSequenceLength = 30;
#else
// NOP + LAY + ST + IILF + BASR
static const int kCodeAgingSequenceLength = 20;
static const int kCodeAgingTargetDelta = 12; // Jump past NOP to IILF
// LAY + 4 * ST + LA
static const int kNoCodeAgeSequenceLength = 26;
#endif
#endif
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on S390.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(
stub->GetIsolate(), pc_ + kCodeAgingTargetDelta, host_,
stub->instruction_start(), icache_flush_mode);
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@ -227,6 +270,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
@ -243,6 +288,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}


@ -153,6 +153,73 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the ARM simulator ICache is set up.
std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length(), CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushStandardFrame(r3);
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
Code* code = NULL;
Address target_address =
Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// We need to push lr on stack so that GenerateMakeCodeYoungAgainCommon
// knows where to pick up the return address
//
// Since we can no longer guarantee ip will hold the branch address
// because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
// can calculate the branch address offset
patcher.masm()->nop(); // marker to detect sequence (see IsOld)
patcher.masm()->CleanseP(r14);
patcher.masm()->Push(r14);
patcher.masm()->mov(r2, Operand(target));
patcher.masm()->Call(r2);
for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
i += 2) {
// TODO(joransiu): Create nop function to pad
// (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
patcher.masm()->nop(); // 2-byte nops().
}
}
}
} // namespace internal
} // namespace v8


@ -35,7 +35,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
CodePatcher patcher(isolate, code_start_address, 2);
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 2);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =


@ -906,9 +906,35 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
void TurboAssembler::Prologue(Register base, int prologue_offset) {
void TurboAssembler::Prologue(bool code_pre_aging, Register base,
int prologue_offset) {
DCHECK(!base.is(no_reg));
PushStandardFrame(r3);
{
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
// The following instructions must remain together and unmodified
// for code aging to work properly.
if (code_pre_aging) {
// Pre-age the code.
// This matches the code found in PatchPlatformCodeAge()
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
nop();
CleanseP(r14);
Push(r14);
mov(r2, Operand(target));
Call(r2);
for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
i += 2) {
// TODO(joransiu): Create nop function to pad
// (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
nop(); // 2-byte nops().
}
} else {
// This matches the code found in GetNoCodeAgeSequence()
PushStandardFrame(r3);
}
}
}
void TurboAssembler::EnterFrame(StackFrame::Type type,


@ -716,7 +716,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type, Register base = no_reg,
int prologue_offset = 0);
void Prologue(Register base, int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();


@ -91,6 +91,8 @@ void ObjectDeserializer::
for (Code* code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
isolate()->heap()->RecordWritesIntoCode(code);
if (FLAG_serialize_age_code) code->PreAge(isolate());
Assembler::FlushICache(isolate(), code->instruction_start(),
code->instruction_size());
}


@ -837,6 +837,8 @@ Address Serializer::ObjectSerializer::PrepareCode() {
// relocations, because some of these fields are needed for the latter.
code->WipeOutHeader();
}
// Code age headers are not serializable.
code->MakeYoung(serializer_->isolate());
return code->address();
}


@ -54,7 +54,8 @@ void Assembler::emitw(uint16_t x) {
}
void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK(RelocInfo::IsCodeTarget(rmode) ||
rmode == RelocInfo::CODE_AGE_SEQUENCE);
RecordRelocInfo(rmode);
int current = code_targets_.length();
if (current > 0 && !target.is_null() &&
@ -321,6 +322,11 @@ Address Assembler::runtime_entry_at(Address pc) {
void RelocInfo::apply(intptr_t delta) {
if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
} else if (IsCodeAgeSequence(rmode_)) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= static_cast<int32_t>(delta); // Relocate entry.
}
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += delta;
@ -429,6 +435,31 @@ void RelocInfo::WipeOut(Isolate* isolate) {
}
}
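On x64 the code-age sequence is a pc-relative call, so the CODE_AGE_SEQUENCE branch of RelocInfo::apply above compensates when a code object moves: the absolute stub address stays fixed, so the stored rel32 must shrink by the move delta. A standalone sketch of that fixup:

// Sketch of pc-relative call fixup after a code move; plain buffers only.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const unsigned char kCallOpcode = 0xE8;
  unsigned char code[8] = {kCallOpcode};
  int32_t rel32 = 0x1000;  // target minus address-after-call, before the move
  std::memcpy(code + 1, &rel32, 4);

  const int32_t delta = 0x40;  // the code object moved forward by delta
  if (code[0] == kCallOpcode) {
    int32_t value;
    std::memcpy(&value, code + 1, 4);
    value -= delta;  // the pc moved forward, so the displacement shrinks
    std::memcpy(code + 1, &value, 4);
  }
  std::memcpy(&rel32, code + 1, 4);
  std::printf("rel32 after move: 0x%X\n", static_cast<unsigned>(rel32));
  return 0;
}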
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(*pc_ == kCallOpcode);
return origin->code_target_object_handle_at(pc_ + 1);
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(stub->GetIsolate(), pc_ + 1, host_,
stub->instruction_start(),
icache_flush_mode);
}
template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
@ -441,6 +472,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
@ -459,6 +492,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}


@ -4777,14 +4777,20 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
return;
} else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
// Don't record pseudo relocation info for code age sequence mode.
return;
}
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE;
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE |
1 << RelocInfo::CODE_AGE_SEQUENCE;
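kApplyMask is a bitset keyed by RelocInfo::Mode, and adding CODE_AGE_SEQUENCE to it is what makes apply() run for age sequences when a code object moves. A sketch of the membership test (the mode numbers here are made up; only the bitset idiom matters):

// Sketch of the kApplyMask membership idiom; illustrative mode numbers.
#include <cstdio>

enum Mode {
  CODE_TARGET = 0,
  RUNTIME_ENTRY = 1,
  INTERNAL_REFERENCE = 2,
  CODE_AGE_SEQUENCE = 3,
  EXTERNAL_REFERENCE = 4
};

const int kApplyMask = 1 << CODE_TARGET | 1 << RUNTIME_ENTRY |
                       1 << INTERNAL_REFERENCE | 1 << CODE_AGE_SEQUENCE;

static bool NeedsApply(Mode mode) { return (kApplyMask & (1 << mode)) != 0; }

int main() {
  std::printf("CODE_AGE_SEQUENCE: %d, EXTERNAL_REFERENCE: %d\n",
              NeedsApply(CODE_AGE_SEQUENCE), NeedsApply(EXTERNAL_REFERENCE));
  return 0;
}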
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being


@ -157,6 +157,61 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
CodePatcher patcher(isolate, young_sequence_.start(),
young_sequence_.length());
patcher.masm()->pushq(rbp);
patcher.masm()->movp(rbp, rsp);
patcher.masm()->Push(rsi);
patcher.masm()->Push(rdi);
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return *candidate == kCallOpcode;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
sequence++; // Skip the kCallOpcode byte
Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
Assembler::kCallTargetAddressOffset;
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start());
patcher.masm()->Nop(
kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
}
}
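GetCodeAge above recovers the stub from the patched call by decoding its displacement: the target is the address just past the rel32 plus the rel32 itself. A standalone sketch of that decoding against a plain buffer (the offsets are assumptions for the sketch, not V8 constants):

// Sketch of decoding an absolute target back out of a patched call.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  unsigned char code[16] = {0};
  unsigned char* stub = code + 11;  // pretend stub entry point
  code[0] = 0xE8;                   // call opcode written by the patcher
  int32_t rel32 = static_cast<int32_t>(stub - (code + 5));
  std::memcpy(code + 1, &rel32, 4);

  // Decode: skip the opcode byte, then target = end-of-displacement + rel32.
  unsigned char* sequence = code + 1;
  int32_t displacement;
  std::memcpy(&displacement, sequence, 4);
  unsigned char* target = sequence + 4 + displacement;
  std::printf("decoded target at offset %td (expected 11)\n", target - code);
  return 0;
}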
Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
DCHECK(index >= 0);
int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;


@ -35,7 +35,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
CodePatcher patcher(isolate, instruction_start, 1);
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =


@ -2159,7 +2159,8 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK(RelocInfo::IsCodeTarget(rmode) ||
rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode);
#ifdef DEBUG
CHECK_EQ(end_position, pc_offset());
@ -2926,11 +2927,20 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
Push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::Prologue() {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
Push(rsi); // Callee's context.
Push(rdi); // Callee's JS function.
void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(this,
kNoCodeAgeSequenceLength);
if (code_pre_aging) {
// Pre-age the code.
Call(BUILTIN_CODE(isolate(), MarkCodeAsExecutedOnce),
RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
Push(rsi); // Callee's context.
Push(rdi); // Callee's JS function.
}
}
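The restored x64 Prologue writes one of two same-sized byte patterns into the same region: the young frame-building sequence, or a call to the age stub padded with a nop, which is what lets PatchPlatformCodeAge swap them in place later. An illustrative sketch (the 6-byte length and the encodings are assumptions for the example, not taken from this diff):

// Sketch of the fixed-size patchable prologue region; encodings assumed.
#include <cstdio>
#include <cstring>

const int kSequenceLength = 6;  // assumed kNoCodeAgeSequenceLength

static void EmitYoung(unsigned char* p) {
  // pushq rbp; movp rbp, rsp; push rsi; push rdi
  const unsigned char young[kSequenceLength] = {0x55, 0x48, 0x89,
                                                0xE5, 0x56, 0x57};
  std::memcpy(p, young, kSequenceLength);
}

static void EmitAged(unsigned char* p, int rel32) {
  p[0] = 0xE8;  // kCallOpcode, the byte code_age_stub() checks for above
  std::memcpy(p + 1, &rel32, 4);
  p[5] = 0x90;  // nop padding up to the young sequence length
}

int main() {
  unsigned char buffer[kSequenceLength];
  EmitYoung(buffer);
  EmitAged(buffer, 0x1234);  // same region, same size: in-place swap works
  std::printf("first byte now 0x%02X (the call opcode)\n", buffer[0]);
  return 0;
}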
void TurboAssembler::EnterFrame(StackFrame::Type type) {


@ -350,7 +350,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue();
void Prologue(bool code_pre_aging);
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.


@ -1284,8 +1284,9 @@ TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
}
TEST(CompilationCacheCachingBehavior) {
// If we do not have the compilation cache turned off, this test is invalid.
if (!FLAG_compilation_cache) {
// If we do not age code, or have the compilation cache turned off, this
// test is invalid.
if (!FLAG_age_code || !FLAG_compilation_cache) {
return;
}
CcTest::InitializeVM();
@ -1330,8 +1331,10 @@ TEST(CompilationCacheCachingBehavior) {
// Progress code age until it's old and ready for GC.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
CHECK(pair.shared()->HasBytecodeArray());
pair.shared()->bytecode_array()->MakeOlder();
pair.shared()->code()->MakeOlder();
if (pair.shared()->HasBytecodeArray()) {
pair.shared()->bytecode_array()->MakeOlder();
}
}
CcTest::CollectAllGarbage();
@ -3407,6 +3410,114 @@ UNINITIALIZED_TEST(ReleaseStackTraceData) {
isolate->Dispose();
}
TEST(Regress159140) {
if (!FLAG_incremental_marking) return;
FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
LocalContext env;
Heap* heap = isolate->heap();
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
CcTest::CollectAllGarbage();
// Prepare several closures that are all eligible for code flushing
// because all reachable ones are not optimized. Make sure that the
// optimized code object is directly reachable through a handle so
// that it is marked black during incremental marking.
Handle<Code> code;
{
HandleScope inner_scope(isolate);
CompileRun("function h(x) {}"
"function mkClosure() {"
" return function(x) { return x + 1; };"
"}"
"var f = mkClosure();"
"var g = mkClosure();"
"f(1); f(2);"
"g(1); g(2);"
"h(1); h(2);"
"%OptimizeFunctionOnNextCall(f); f(3);"
"%OptimizeFunctionOnNextCall(h); h(3);");
Handle<JSFunction> f = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(env.local(), v8_str("f")).ToLocalChecked())));
CHECK(f->is_compiled());
CompileRun("f = null;");
Handle<JSFunction> g = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(env.local(), v8_str("g")).ToLocalChecked())));
CHECK(g->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
g->code()->MakeOlder();
}
code = inner_scope.CloseAndEscape(Handle<Code>(f->code()));
}
// Simulate incremental marking so that the functions are enqueued as
// code flushing candidates. Then optimize one function. Finally
// finish the GC to complete code flushing.
heap::SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
CcTest::CollectAllGarbage();
// Unoptimized code is missing and the deoptimizer will go ballistic.
CompileRun("g('bozo');");
}
TEST(Regress165495) {
if (!FLAG_incremental_marking) return;
FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
CcTest::CollectAllGarbage();
// Prepare an optimized closure so that the optimized code map gets
// populated. Then age the unoptimized code to trigger code flushing,
// but make sure the optimized code is unreachable.
{
HandleScope inner_scope(isolate);
LocalContext env;
CompileRun("function mkClosure() {"
" return function(x) { return x + 1; };"
"}"
"var f = mkClosure();"
"f(1); f(2);"
"%OptimizeFunctionOnNextCall(f); f(3);");
Handle<JSFunction> f = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(env.local(), v8_str("f")).ToLocalChecked())));
CHECK(f->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
f->shared()->code()->MakeOlder();
}
CompileRun("f = null;");
}
// Simulate incremental marking so that unoptimized code is flushed
// even though it still is cached in the optimized code map.
heap::SimulateIncrementalMarking(heap);
CcTest::CollectAllGarbage();
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
CompileRun("var g = mkClosure(); g('bozo');");
}
TEST(Regress169928) {
FLAG_allow_natives_syntax = true;
FLAG_opt = false;
@ -3487,6 +3598,67 @@ TEST(Regress169928) {
.ToLocalChecked();
}
TEST(Regress513496) {
FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
CcTest::CollectAllGarbage();
// Prepare an optimized closure containing an inlined function. Then age
// the inlined unoptimized code to trigger code flushing but make sure the
// outer optimized code is kept in the optimized code map.
Handle<Code> optimized_code;
{
LocalContext context;
HandleScope inner_scope(isolate);
CompileRun(
"function g(x) { return x + 1 }"
"function mkClosure() {"
" return function(x) { return g(x); };"
"}"
"var f = mkClosure();"
"f(1); f(2);"
"%OptimizeFunctionOnNextCall(f); f(3);");
Handle<JSFunction> g = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("g"))
.ToLocalChecked())));
CHECK(g->shared()->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
g->shared()->code()->MakeOlder();
}
Handle<JSFunction> f = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CcTest::global()
->Get(context.local(), v8_str("f"))
.ToLocalChecked())));
CHECK(f->is_compiled());
// Look up the optimized code and keep it alive.
Code* result = f->feedback_vector()->optimized_code();
optimized_code = inner_scope.CloseAndEscape(handle(result, isolate));
CompileRun("f = null");
}
// Finish a full GC cycle so that the unoptimized code of 'g' is flushed even
// though the optimized code for 'f' is reachable via the optimized code map.
CcTest::CollectAllGarbage();
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
CompileRun("var h = mkClosure(); h('bozo');");
}
TEST(LargeObjectSlotRecording) {
if (!FLAG_incremental_marking) return;
if (FLAG_never_compact) return;