Revert of [turbofan] Discard the shared code entry in the optimized code map. (patchset #3 id:40001 of https://codereview.chromium.org/2401653002/ )

Reason for revert:
Possible GCSTRESS failure, investigating.

Original issue's description:
> [turbofan] Discard the shared code entry in the optimized code map.
>
> At one time, we hoped to generate the same code for different
> native contexts. But in truth, much performance comes from optimizing
> on the native context. Now we abandon this pathway.
>
> BUG=
>
> Committed: https://crrev.com/55af3c44c99a6e4cd6d53df775023d760ad2b2c3
> Cr-Commit-Position: refs/heads/master@{#40079}

TBR=mstarzinger@chromium.org,ishell@chromium.org,bmeurer@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review-Url: https://codereview.chromium.org/2403453002
Cr-Commit-Position: refs/heads/master@{#40081}
mvstanton 2016-10-07 05:07:53 -07:00 committed by Commit bot
parent 7db0ecdec3
commit c59d2f09ec
16 changed files with 348 additions and 27 deletions
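The hunks below restore the extra slot at the front of the SharedFunctionInfo optimized code map that caches (native-)context-independent code. As a minimal standalone model of the restored layout, using the constants from the SharedFunctionInfo layout hunk further down (illustrative only, not the real FixedArray/WeakCell representation):

#include <vector>

// Model of the optimized code map after this revert: slot 0 holds a weak
// reference to context-independent code, and context-dependent entries of
// the form (context, code, literals) start at slot 1.
struct OptimizedCodeMapModel {
  static const int kSharedCodeIndex = 0;  // shared, context-free code
  static const int kEntriesStart = 1;     // first context-dependent entry
  // Offsets within one context-dependent entry.
  static const int kContextOffset = 0;
  static const int kCachedCodeOffset = 1;
  static const int kLiteralsOffset = 2;
  std::vector<const void*> slots;  // weak cells in the real heap object
};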

View File

@@ -1370,6 +1370,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1436,12 +1437,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, r5);
@@ -1476,8 +1480,20 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&try_shared);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?
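The Generate_CompileLazy hunks above and below (one per architecture) all restore the same lookup order. A minimal C++ sketch of that control flow (the types and function here are illustrative assumptions, not the generated assembly or a V8 API):

#include <vector>

// One context-dependent code map entry; nullptr stands in for a cleared
// weak cell.
struct EntryModel {
  const void* context;
  const void* code;
  const void* literals;
};

// Mirrors the order of labels in the builtin: walk the per-context entries,
// then check the shared slot (the new maybe_call_runtime path), and if both
// miss the caller falls through to try_shared / gotta_call_runtime.
const void* LookupCompiledCode(const std::vector<EntryModel>& entries,
                               const void* shared_slot_code,
                               const void* native_context) {
  for (const EntryModel& e : entries) {
    if (e.context == native_context && e.code != nullptr) return e.code;
  }
  if (shared_slot_code != nullptr) return shared_slot_code;
  return nullptr;  // fall back to the SharedFunctionInfo's code or the runtime
}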

View File

@@ -1381,6 +1381,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1438,10 +1439,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ Bind(&install_optimized_code_and_tailcall);
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
@@ -1472,6 +1476,18 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ B(&gotta_call_runtime);
__ Bind(&maybe_call_runtime);
// Last possibility. Check the context free optimized code map entry.
__ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ B(&install_optimized_code_and_tailcall);
__ Bind(&try_shared);
// Is the full code valid?
__ Ldr(entry,

View File

@@ -1055,6 +1055,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1117,12 +1118,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(entry, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, eax);
@@ -1156,8 +1160,20 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&try_shared);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?

View File

@@ -1371,6 +1371,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1434,12 +1435,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, t1);
@@ -1474,8 +1478,20 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&try_shared);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?

View File

@@ -1363,6 +1363,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1426,12 +1427,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
@@ -1466,8 +1470,20 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&try_shared);
__ bind(&maybe_call_runtime);
__ pop(closure);
// Last possibility. Check the context free optimized code map entry.
__ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
__ pop(new_target);
__ pop(argument_count);
// Is the full code valid?

View File

@@ -1031,6 +1031,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1084,10 +1085,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ movp(entry, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
__ JumpIfSmi(entry, &maybe_call_runtime);
// Found literals and code. Get them into the closure and return.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
Label install_optimized_code_and_tailcall;
__ bind(&install_optimized_code_and_tailcall);
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
@@ -1120,6 +1124,18 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
__ bind(&maybe_call_runtime);
// Last possibility. Check the context free optimized code map entry.
__ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
SharedFunctionInfo::kSharedCodeIndex));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Store code entry in the closure.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
__ jmp(&install_optimized_code_and_tailcall);
__ bind(&try_shared);
// Is the full code valid?
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));

View File

@@ -495,6 +495,18 @@ void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
literals, info->osr_ast_id());
// Do not cache (native) context-independent code compiled for OSR.
if (code->is_turbofanned() && info->is_osr()) return;
// Cache optimized (native) context-independent code.
if (FLAG_turbo_cache_shared_code && code->is_turbofanned() &&
!info->is_native_context_specializing()) {
DCHECK(!info->is_function_context_specializing());
DCHECK(info->osr_ast_id().IsNone());
Handle<SharedFunctionInfo> shared(function->shared());
SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(shared, code);
}
}
bool Renumber(ParseInfo* parse_info) {
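A condensed sketch of the caching policy restored in the InsertCodeIntoOptimizedCodeMap hunk above (an illustrative predicate, not the V8 CompilationInfo API): the context-dependent entry is always added, and a context-independent copy goes into the shared slot only under the conditions below.

// Assumed inputs mirror the checks shown in the hunk above.
struct CachePolicyInputs {
  bool flag_turbo_cache_shared_code;  // --turbo-cache-shared-code
  bool is_turbofanned;
  bool is_osr;
  bool is_native_context_specializing;
};

bool ShouldCacheSharedCode(const CachePolicyInputs& in) {
  // TurboFan code compiled for OSR is never cached context-independently.
  if (in.is_turbofanned && in.is_osr) return false;
  return in.flag_turbo_cache_shared_code && in.is_turbofanned &&
         !in.is_native_context_specializing;
}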

View File

@@ -5363,6 +5363,11 @@ class HObjectAccess final {
SharedFunctionInfo::kOptimizedCodeMapOffset);
}
static HObjectAccess ForOptimizedCodeMapSharedCode() {
return HObjectAccess(kInobject, FixedArray::OffsetOfElementAt(
SharedFunctionInfo::kSharedCodeIndex));
}
static HObjectAccess ForFunctionContextPointer() {
return HObjectAccess(kInobject, JSFunction::kContextOffset);
}

View File

@@ -2215,7 +2215,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
code = isolate()->builtins()->Illegal();
}
share->set_code(*code);
share->set_optimized_code_map(*empty_fixed_array());
share->set_optimized_code_map(*cleared_optimized_code_map());
share->set_scope_info(ScopeInfo::Empty(isolate()));
share->set_outer_scope_info(*the_hole_value());
Handle<Code> construct_stub =

View File

@@ -484,6 +484,8 @@ DEFINE_BOOL(turbo_loop_peeling, false, "Turbofan loop peeling")
DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")

View File

@@ -2825,6 +2825,14 @@ void Heap::CreateInitialObjects() {
Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
set_empty_weak_cell(*cell);
cell->clear();
Handle<FixedArray> cleared_optimized_code_map =
factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
*cell);
STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
SharedFunctionInfo::kSharedCodeIndex == 0);
set_cleared_optimized_code_map(*cleared_optimized_code_map);
}
set_detached_contexts(empty_fixed_array());
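The Heap::CreateInitialObjects hunk above reintroduces a dedicated "cleared" sentinel, since the map now always carries the shared slot. A small sketch under that assumption (not the Heap or SharedFunctionInfo API):

#include <array>

// A cleared optimized code map is a one-element array whose only slot,
// kSharedCodeIndex == 0, holds the already-cleared empty weak cell; a map of
// length kEntriesStart == 1 therefore carries no context-dependent entries.
struct WeakCellModel { const void* value = nullptr; };  // cleared by default

using ClearedCodeMapModel = std::array<WeakCellModel, 1>;

// "Is cleared" becomes pointer identity with the canonical sentinel,
// replacing the old comparison against the empty fixed array.
bool OptimizedCodeMapIsClearedModel(const void* map, const void* sentinel) {
  return map == sentinel;
}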

View File

@@ -63,6 +63,7 @@ using v8::MemoryPressureLevel;
V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
/* Entries beyond the first 32 */ \
/* The roots above this line should be boring from a GC point of view. */ \

View File

@@ -6416,7 +6416,7 @@ bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
return optimized_code_map() == GetHeap()->empty_fixed_array();
return optimized_code_map() == GetHeap()->cleared_optimized_code_map();
}

View File

@@ -12133,6 +12133,22 @@ Handle<LiteralsArray> SharedFunctionInfo::FindOrCreateLiterals(
return literals;
}
void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Code> code) {
Isolate* isolate = shared->GetIsolate();
if (isolate->serializer_enabled()) return;
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
// Empty code maps are unsupported.
if (!shared->OptimizedCodeMapIsCleared()) {
Handle<WeakCell> cell = isolate->factory()->NewWeakCell(code);
// A collection may have occurred and cleared the optimized code map in the
// allocation above.
if (!shared->OptimizedCodeMapIsCleared()) {
shared->optimized_code_map()->set(kSharedCodeIndex, *cell);
}
}
}
// static
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
@@ -12149,11 +12165,13 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
if (shared->OptimizedCodeMapIsCleared()) {
new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
new_code_map->set(kSharedCodeIndex, *isolate->factory()->empty_weak_cell(),
SKIP_WRITE_BARRIER);
entry = kEntriesStart;
} else {
Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
if (entry >= kEntriesStart) {
if (entry > kSharedCodeIndex) {
// Just set the code and literals of the entry.
if (!code.is_null()) {
Handle<WeakCell> code_cell =
@@ -12223,8 +12241,8 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
void SharedFunctionInfo::ClearOptimizedCodeMap() {
FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
set_optimized_code_map(empty_fixed_array, SKIP_WRITE_BARRIER);
FixedArray* cleared_map = GetHeap()->cleared_optimized_code_map();
set_optimized_code_map(cleared_map, SKIP_WRITE_BARRIER);
}
@@ -12274,11 +12292,23 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
dst += kEntryLength;
}
if (WeakCell::cast(code_map->get(kSharedCodeIndex))->value() ==
optimized_code) {
// Evict context-independent code as well.
code_map->set(kSharedCodeIndex, heap->empty_weak_cell(),
SKIP_WRITE_BARRIER);
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
PrintF(" (context-independent code)]\n");
}
}
if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
length - dst);
if (code_map->length() == kEntriesStart) {
if (code_map->length() == kEntriesStart &&
WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
ClearOptimizedCodeMap();
}
}
@@ -12292,7 +12322,8 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
shrink_by);
if (code_map->length() == kEntriesStart) {
if (code_map->length() == kEntriesStart &&
WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
ClearOptimizedCodeMap();
}
}
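The EvictFromOptimizedCodeMap and TrimOptimizedCodeMap hunks above only fall back to the cleared sentinel once both kinds of cached code are gone. A small sketch of that condition, assuming the same constants as before:

// The map is reset to the cleared sentinel only when no context-dependent
// entries remain (length == kEntriesStart) AND the shared weak cell at
// kSharedCodeIndex has been cleared.
bool ShouldResetToClearedMap(int map_length, bool shared_cell_cleared) {
  const int kEntriesStart = 1;
  return map_length == kEntriesStart && shared_cell_cleared;
}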
@@ -13805,6 +13836,11 @@ int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
return i;
}
}
Object* shared_code =
WeakCell::cast(optimized_code_map->get(kSharedCodeIndex))->value();
if (shared_code->IsCode() && osr_ast_id.IsNone()) {
return kSharedCodeIndex;
}
}
return -1;
}
@@ -13818,6 +13854,8 @@ void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
optimized_code_map->set(i + kCachedCodeOffset, empty_weak_cell,
SKIP_WRITE_BARRIER);
}
optimized_code_map->set(kSharedCodeIndex, empty_weak_cell,
SKIP_WRITE_BARRIER);
}
}
@@ -13827,14 +13865,24 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
if (entry != kNotFound) {
FixedArray* code_map = optimized_code_map();
DCHECK_LE(entry + kEntryLength, code_map->length());
WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
WeakCell* literals_cell =
WeakCell::cast(code_map->get(entry + kLiteralsOffset));
if (entry == kSharedCodeIndex) {
// We know the weak cell isn't cleared because we made sure of it in
// SearchOptimizedCodeMapEntry and performed no allocations since that
// call.
result = {
Code::cast(WeakCell::cast(code_map->get(kSharedCodeIndex))->value()),
nullptr};
} else {
DCHECK_LE(entry + kEntryLength, code_map->length());
WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
WeakCell* literals_cell =
WeakCell::cast(code_map->get(entry + kLiteralsOffset));
result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
literals_cell->cleared() ? nullptr : LiteralsArray::cast(
literals_cell->value())};
result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
literals_cell->cleared()
? nullptr
: LiteralsArray::cast(literals_cell->value())};
}
}
return result;
}

View File

@@ -7169,6 +7169,10 @@ class SharedFunctionInfo: public HeapObject {
static Handle<LiteralsArray> FindOrCreateLiterals(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
// Add or update entry in the optimized code map for context-independent code.
static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
// Add or update entry in the optimized code map for context-dependent code.
// If {code} is not given, then an existing entry's code won't be overwritten.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
@@ -7183,7 +7187,8 @@
Handle<Object> script_object);
// Layout description of the optimized code map.
static const int kEntriesStart = 0;
static const int kSharedCodeIndex = 0;
static const int kEntriesStart = 1;
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
@@ -7785,8 +7790,8 @@
private:
// Returns entry from optimized code map for specified context and OSR entry.
// The result is either kNotFound, or a start index of the context-dependent
// entry.
// The result is either kNotFound, kSharedCodeIndex for context-independent
// entry or a start index of the context-dependent entry.
int SearchOptimizedCodeMapEntry(Context* native_context,
BailoutId osr_ast_id);
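The comment above documents the three possible results of SearchOptimizedCodeMapEntry; as a standalone illustration (values mirror the constants in this header, the function itself is not part of V8):

#include <cassert>

const char* DescribeSearchResult(int entry) {
  const int kNotFound = -1;
  const int kSharedCodeIndex = 0;
  const int kEntriesStart = 1;
  if (entry == kNotFound) return "no cached optimized code";
  if (entry == kSharedCodeIndex) return "context-independent code only";
  assert(entry >= kEntriesStart);
  return "context-dependent (context, code, literals) entry";
}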

View File

@@ -406,6 +406,150 @@ TEST(OptimizedCodeSharing1) {
}
}
// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing2) {
if (FLAG_stress_compaction) return;
FLAG_allow_natives_syntax = true;
FLAG_native_context_specialization = false;
FLAG_turbo_cache_shared_code = true;
const char* flag = "--turbo-filter=*";
FlagList::SetFlagsFromString(flag, StrLength(flag));
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Script> script = v8_compile(
"function MakeClosure() {"
" return function() { return x; };"
"}");
Handle<Code> reference_code;
{
LocalContext env;
env->Global()
->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), 23))
.FromJust();
script->GetUnboundScript()
->BindToCurrentContext()
->Run(env.local())
.ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());");
Handle<JSFunction> fun0 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(), v8_str("closure0"))
.ToLocalChecked())));
CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
reference_code = handle(fun0->code());
}
for (int i = 0; i < 3; i++) {
LocalContext env;
env->Global()
->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), i))
.FromJust();
script->GetUnboundScript()
->BindToCurrentContext()
->Run(env.local())
.ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
"var closure1 = MakeClosure(); closure1();"
"var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(), v8_str("closure1"))
.ToLocalChecked())));
Handle<JSFunction> fun2 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(), v8_str("closure2"))
.ToLocalChecked())));
CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK_EQ(*reference_code, fun1->code());
CHECK_EQ(*reference_code, fun2->code());
}
}
// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing3) {
if (FLAG_stress_compaction) return;
FLAG_allow_natives_syntax = true;
FLAG_native_context_specialization = false;
FLAG_turbo_cache_shared_code = true;
const char* flag = "--turbo-filter=*";
FlagList::SetFlagsFromString(flag, StrLength(flag));
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Script> script = v8_compile(
"function MakeClosure() {"
" return function() { return x; };"
"}");
Handle<Code> reference_code;
{
LocalContext env;
env->Global()
->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), 23))
.FromJust();
script->GetUnboundScript()
->BindToCurrentContext()
->Run(env.local())
.ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());");
Handle<JSFunction> fun0 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(), v8_str("closure0"))
.ToLocalChecked())));
CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
reference_code = handle(fun0->code());
// Evict only the context-dependent entry from the optimized code map. This
// leaves it in a state where only the context-independent entry exists.
fun0->shared()->TrimOptimizedCodeMap(SharedFunctionInfo::kEntryLength);
}
for (int i = 0; i < 3; i++) {
LocalContext env;
env->Global()
->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), i))
.FromJust();
script->GetUnboundScript()
->BindToCurrentContext()
->Run(env.local())
.ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
"var closure1 = MakeClosure(); closure1();"
"var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(), v8_str("closure1"))
.ToLocalChecked())));
Handle<JSFunction> fun2 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
->Get(env.local(), v8_str("closure2"))
.ToLocalChecked())));
CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK_EQ(*reference_code, fun1->code());
CHECK_EQ(*reference_code, fun2->code());
}
}
TEST(CompileFunctionInContext) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());