diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 7878ecd0c7..d5a700cd9f 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -967,6 +967,14 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
 }
 
 
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // TODO(): implement this.
+  return false;
+}
+
+
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // Find the end of the inlined code for the store if there is an
   // inlined version of the store.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index ab33884e41..87a7fb941a 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -2219,11 +2219,11 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
   }
 
   __ mov(r0, r4);
-  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
diff --git a/src/frames.h b/src/frames.h
index 7a09bb4024..2d4f338ae0 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -67,7 +67,7 @@ class PcToCodeCache : AllStatic {
   static PcToCodeCacheEntry* GetCacheEntry(Address pc);
 
  private:
-  static const int kPcToCodeCacheSize = 256;
+  static const int kPcToCodeCacheSize = 1024;
 
   static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
 };
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 86f3877c7e..bde2f18449 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -9144,9 +9144,15 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
   DeferredReferenceGetNamedValue(Register dst,
                                  Register receiver,
-                                 Handle<String> name)
-      : dst_(dst), receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
+                                 Handle<String> name,
+                                 bool is_contextual)
+      : dst_(dst),
+        receiver_(receiver),
+        name_(name),
+        is_contextual_(is_contextual) {
+    set_comment(is_contextual
+                ? "[ DeferredReferenceGetNamedValue (contextual)"
+                : "[ DeferredReferenceGetNamedValue");
   }
 
   virtual void Generate();
@@ -9158,6 +9164,7 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
   Register dst_;
   Register receiver_;
   Handle<String> name_;
+  bool is_contextual_;
 };
 
 
@@ -9167,9 +9174,15 @@ void DeferredReferenceGetNamedValue::Generate() {
   }
   __ Set(ecx, Immediate(name_));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a test eax instruction to indicate
-  // that the inobject property case was inlined.
+  RelocInfo::Mode mode = is_contextual_
+      ? RelocInfo::CODE_TARGET_CONTEXT
+      : RelocInfo::CODE_TARGET;
+  __ call(ic, mode);
+  // The call must be followed by:
+  // - a test eax instruction to indicate that the inobject property
+  //   case was inlined.
+  // - a mov ecx instruction to indicate that the contextual property
+  //   load was inlined.
   //
   // Store the delta to the map check instruction here in the test
   // instruction. Use masm_-> instead of the __ macro since the
@@ -9177,8 +9190,13 @@
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
-  masm_->test(eax, Immediate(-delta_to_patch_site));
-  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+  if (is_contextual_) {
+    masm_->mov(ecx, -delta_to_patch_site);
+    __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  } else {
+    masm_->test(eax, Immediate(-delta_to_patch_site));
+    __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+  }
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
 
@@ -9349,12 +9367,17 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
 #ifdef DEBUG
   int original_height = frame()->height();
 #endif
+
+  bool contextual_load_in_builtin =
+      is_contextual &&
+      (Bootstrapper::IsActive() ||
+       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
+
   Result result;
-  // Do not inline the inobject property case for loads from the global
-  // object. Also do not inline for unoptimized code. This saves time in
-  // the code generator. Unoptimized code is toplevel code or code that is
-  // not in a loop.
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+  // Do not inline in global code or when not in a loop.
+  if (scope()->is_global_scope() ||
+      loop_nesting() == 0 ||
+      contextual_load_in_builtin) {
     Comment cmnt(masm(), "[ Load from named Property");
     frame()->Push(name);
@@ -9367,19 +9390,26 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
     // instruction here.
     __ nop();
   } else {
-    // Inline the inobject property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
+    // Inline the property load.
+    Comment cmnt(masm(), is_contextual
+                             ? "[ Inlined contextual property load"
+                             : "[ Inlined named property load");
     Result receiver = frame()->Pop();
     receiver.ToRegister();
 
     result = allocator()->Allocate();
     ASSERT(result.is_valid());
     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+        new DeferredReferenceGetNamedValue(result.reg(),
+                                           receiver.reg(),
+                                           name,
+                                           is_contextual);
 
-    // Check that the receiver is a heap object.
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(zero);
+    if (!is_contextual) {
+      // Check that the receiver is a heap object.
+      __ test(receiver.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(zero);
+    }
 
     __ bind(deferred->patch_site());
     // This is the map check instruction that will be patched (so we can't
@@ -9391,17 +9421,33 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
     // which allows the assert below to succeed and patching to work.
     deferred->Branch(not_equal);
 
-    // The delta from the patch label to the load offset must be statically
-    // known.
+    // The delta from the patch label to the actual load must be
+    // statically known.
     ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
            LoadIC::kOffsetToLoadInstruction);
 
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset. Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
-    __ IncrementCounter(&Counters::named_load_inline, 1);
+    if (is_contextual) {
+      // Load the (initially invalid) cell and get its value.
+ masm()->mov(result.reg(), Factory::null_value()); + if (FLAG_debug_code) { + __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset), + Factory::global_property_cell_map()); + __ Assert(equal, "Uninitialized inlined contextual load"); + } + __ mov(result.reg(), + FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset)); + __ cmp(result.reg(), Factory::the_hole_value()); + deferred->Branch(equal); + __ IncrementCounter(&Counters::named_load_global_inline, 1); + } else { + // The initial (invalid) offset has to be large enough to force a 32-bit + // instruction encoding to allow patching with an arbitrary offset. Use + // kMaxInt (minus kHeapObjectTag). + int offset = kMaxInt; + masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset)); + __ IncrementCounter(&Counters::named_load_inline, 1); + } + deferred->BindExit(); } ASSERT(frame()->height() == original_height - 1); diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc index 87af0d9b51..413c36e922 100644 --- a/src/ia32/ic-ia32.cc +++ b/src/ia32/ic-ia32.cc @@ -1661,6 +1661,38 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) { } +// One byte opcode for mov ecx,0xXXXXXXXX. +static const byte kMovEcxByte = 0xB9; + +bool LoadIC::PatchInlinedContextualLoad(Address address, + Object* map, + Object* cell) { + // The address of the instruction following the call. + Address mov_instruction_address = + address + Assembler::kCallTargetAddressOffset; + // If the instruction following the call is not a cmp eax, nothing + // was inlined. + if (*mov_instruction_address != kMovEcxByte) return false; + + Address delta_address = mov_instruction_address + 1; + // The delta to the start of the map check instruction. + int delta = *reinterpret_cast(delta_address); + + // The map address is the last 4 bytes of the 7-byte + // operand-immediate compare instruction, so we add 3 to get the + // offset to the last 4 bytes. + Address map_address = mov_instruction_address + delta + 3; + *(reinterpret_cast(map_address)) = map; + + // The cell is in the last 4 bytes of a five byte mov reg, imm32 + // instruction, so we add 1 to get the offset to the last 4 bytes. + Address offset_address = + mov_instruction_address + delta + kOffsetToLoadInstruction + 1; + *reinterpret_cast(offset_address) = cell; + return true; +} + + bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) { // The address of the instruction following the call. Address test_instruction_address = diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc index a71c33243d..e2af9d45aa 100644 --- a/src/ia32/stub-cache-ia32.cc +++ b/src/ia32/stub-cache-ia32.cc @@ -2480,12 +2480,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, __ Check(not_equal, "DontDelete cells can't contain the hole"); } - __ IncrementCounter(&Counters::named_load_global_inline, 1); + __ IncrementCounter(&Counters::named_load_global_stub, 1); __ mov(eax, ebx); __ ret(0); __ bind(&miss); - __ IncrementCounter(&Counters::named_load_global_inline_miss, 1); + __ IncrementCounter(&Counters::named_load_global_stub_miss, 1); GenerateLoadMiss(masm(), Code::LOAD_IC); // Return the generated code. diff --git a/src/ic.cc b/src/ic.cc index b4a333ec90..5b62a8a09c 100644 --- a/src/ic.cc +++ b/src/ic.cc @@ -299,6 +299,7 @@ void LoadIC::ClearInlinedVersion(Address address) { // present) to guarantee failure by holding an invalid map (the null // value). The offset can be patched to anything. 
   PatchInlinedLoad(address, Heap::null_value(), 0);
+  PatchInlinedContextualLoad(address, Heap::null_value(), Heap::null_value());
 }
 
 
@@ -720,6 +721,14 @@ Object* KeyedCallIC::LoadFunction(State state,
 }
 
 
+#ifdef DEBUG
+#define TRACE_IC_NAMED(msg, name) \
+  if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
+#else
+#define TRACE_IC_NAMED(msg, name)
+#endif
+
+
 Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.
@@ -797,15 +806,24 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
     LOG(SuspectReadEvent(*name, *object));
   }
 
-  bool can_be_inlined =
+  bool can_be_inlined_precheck =
       FLAG_use_ic &&
-      state == PREMONOMORPHIC &&
       lookup.IsProperty() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
-      lookup.type() == FIELD &&
       !object->IsAccessCheckNeeded();
 
+  bool can_be_inlined =
+      can_be_inlined_precheck &&
+      state == PREMONOMORPHIC &&
+      lookup.type() == FIELD;
+
+  bool can_be_inlined_contextual =
+      can_be_inlined_precheck &&
+      state == UNINITIALIZED &&
+      lookup.holder()->IsGlobalObject() &&
+      lookup.type() == NORMAL;
+
   if (can_be_inlined) {
     Map* map = lookup.holder()->map();
     // Property's index in the properties array. If negative we have
@@ -816,32 +834,29 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
       int offset = map->instance_size() + (index * kPointerSize);
       if (PatchInlinedLoad(address(), map, offset)) {
         set_target(megamorphic_stub());
-#ifdef DEBUG
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
-        }
-#endif
+        TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
         return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-#ifdef DEBUG
       } else {
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
-                 *name->ToCString());
-        }
+        TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
+                       name);
       }
     } else {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
-               *name->ToCString());
-      }
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
+    }
+  } else if (can_be_inlined_contextual) {
+    Map* map = lookup.holder()->map();
+    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+        lookup.holder()->property_dictionary()->ValueAt(
+            lookup.GetDictionaryEntry()));
+    if (PatchInlinedContextualLoad(address(), map, cell)) {
+      set_target(megamorphic_stub());
+      TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
+      ASSERT(cell->value() != Heap::the_hole_value());
+      return cell->value();
     }
   } else {
     if (FLAG_use_ic && state == PREMONOMORPHIC) {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
-               *name->ToCString());
-#endif
-      }
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
     }
   }
diff --git a/src/ic.h b/src/ic.h
index 17450cc356..a5fada09f4 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -298,6 +298,10 @@ class LoadIC: public IC {
   static bool PatchInlinedLoad(Address address, Object* map, int index);
 
+  static bool PatchInlinedContextualLoad(Address address,
+                                         Object* map,
+                                         Object* cell);
+
   friend class IC;
 };
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 8c948cc7b3..a8eb9d2a6b 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -161,6 +161,8 @@ namespace internal {
   SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                \
   SC(named_load_global_inline, V8.NamedLoadGlobalInline)            \
   SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss)   \
+  SC(named_load_global_stub, V8.NamedLoadGlobalStub)                \
+  SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss)       \
   SC(keyed_store_field, V8.KeyedStoreField)                         \
   SC(keyed_store_inline, V8.KeyedStoreInline)                       \
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)              \
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 98219ff6cb..62e769123e 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1726,6 +1726,14 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
 }
 
 
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // TODO(): implement this.
+  return false;
+}
+
+
 // The offset from the inlined patch site to the start of the inlined
 // store instruction.
 const int StoreIC::kOffsetToStoreInstruction = 20;
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index ab61390f0f..df745cc7fe 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1852,12 +1852,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
     __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
-  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1);
   __ movq(rax, rbx);
   __ ret(0);
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
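
Note on the patch-site layout (not part of the patch): the ia32 PatchInlinedContextualLoad above works purely by byte arithmetic, so a small self-contained sketch may help readers follow the offsets. The mov ecx, imm32 that the deferred code emits right after the IC call stores a (negative) delta back to the map-check cmp; the map lives in the last four bytes of that 7-byte cmp, and the cell pointer lives in the last four bytes of the mov reg, imm32 that sits kOffsetToLoadInstruction bytes after the cmp. Everything below is an illustrative approximation: the 32-bit Address32 stand-in for Object*, the value 13 for kOffsetToLoadInstruction, the buffer layout and the helper name are assumptions made for this example, not V8 code.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumed constants mirroring the ia32 patch above.
static const uint8_t kMovEcxByte = 0xB9;         // opcode of "mov ecx, imm32"
static const int kOffsetToLoadInstruction = 13;  // assumed cmp -> mov distance

typedef uint32_t Address32;  // stand-in for the Object* values V8 writes

// Mirrors the pointer arithmetic of LoadIC::PatchInlinedContextualLoad.
// 'after_call' points at the first byte following the IC call site.
static bool PatchInlinedContextualLoadSketch(uint8_t* after_call,
                                             Address32 new_map,
                                             Address32 new_cell) {
  // No "mov ecx, imm32" marker means no contextual load was inlined.
  if (after_call[0] != kMovEcxByte) return false;

  // The imm32 of that mov is the delta back to the map-check cmp.
  int32_t delta;
  std::memcpy(&delta, after_call + 1, sizeof(delta));

  // The cmp is 7 bytes; its trailing 4 bytes hold the map.
  std::memcpy(after_call + delta + 3, &new_map, sizeof(new_map));

  // The mov reg, imm32 is 5 bytes; its trailing 4 bytes hold the cell.
  std::memcpy(after_call + delta + kOffsetToLoadInstruction + 1,
              &new_cell, sizeof(new_cell));
  return true;
}

int main() {
  // Fake code buffer: the patch site (cmp, branch, mov) sits at the start,
  // and the byte after the IC call is placed 32 bytes further on.
  uint8_t code[64] = {0};
  uint8_t* patch_site = code;
  uint8_t* after_call = code + 32;

  // Emit the marker the deferred code would emit: mov ecx, -delta.
  after_call[0] = kMovEcxByte;
  int32_t delta = static_cast<int32_t>(patch_site - after_call);  // negative
  std::memcpy(after_call + 1, &delta, sizeof(delta));

  if (!PatchInlinedContextualLoadSketch(after_call, 0x11111111u, 0x22222222u)) {
    std::fprintf(stderr, "no inlined contextual load found\n");
    return 1;
  }

  // Read the patched immediates back out of the map-check cmp and cell mov.
  Address32 map, cell;
  std::memcpy(&map, patch_site + 3, sizeof(map));
  std::memcpy(&cell, patch_site + kOffsetToLoadInstruction + 1, sizeof(cell));
  std::printf("patched map=%08x cell=%08x\n",
              static_cast<unsigned>(map), static_cast<unsigned>(cell));
  return 0;
}

Running the sketch prints patched map=11111111 cell=22222222, which is the analogue of what the real patcher does to the inlined cmp/mov pair; LoadIC::ClearInlinedVersion then reuses the same path to reset the site to a null map and null cell.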