Fix secondary stub cache and add a test for the stub cache lookups.

Review URL: https://chromiumcodereview.appspot.com/9496010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10864 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2012-02-29 10:45:59 +00:00
parent abffcbdd3e
commit 9f375ea880
10 changed files with 322 additions and 84 deletions
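
For orientation before the per-file diffs: the change adds a third field (the receiver map) to every stub cache entry, checks it on both the primary and secondary probes, and makes the C++ hash computation in StubCache::Set match what the generated code computes, which is what had broken the secondary cache. A minimal C++ sketch of the resulting layout and two-level lookup follows; the types, names and hash functions here are simplified stand-ins for illustration, not the V8 definitions.

// Minimal sketch of the stub cache shape after this change. Entries carry
// the receiver map, and a probe only hits when name, map and code flags all
// match. Types and hash functions are simplified stand-ins, not V8 code.
#include <cstdint>

struct Name { uint32_t hash_field; };
struct Map  { uint32_t id; };
struct Code { uint32_t flags; };

struct Entry {      // previously {key, value}; this commit adds `map`
  Name* key;
  Code* value;
  Map*  map;
};

static const int kPrimaryTableSize = 2048;
static const int kSecondaryTableSize = 512;
static Entry primary_[kPrimaryTableSize];
static Entry secondary_[kSecondaryTableSize];

// Toy hashes with the same shape as PrimaryOffset/SecondaryOffset below.
static int PrimaryIndex(Name* name, uint32_t flags, Map* map) {
  return ((name->hash_field + map->id) ^ flags) & (kPrimaryTableSize - 1);
}
static int SecondaryIndex(Name* name, uint32_t flags, int seed) {
  uint32_t low = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
  return ((seed - low) + flags) & (kSecondaryTableSize - 1);
}

// Two-level lookup: primary first, then secondary seeded with the primary
// index; a miss falls through to the runtime.
static Code* Probe(Name* name, Map* map, uint32_t flags) {
  int seed = PrimaryIndex(name, flags, map);
  Entry* p = &primary_[seed];
  if (p->key == name && p->map == map && p->value->flags == flags) {
    return p->value;
  }
  Entry* s = &secondary_[SecondaryIndex(name, flags, seed)];
  if (s->key == name && s->map == map && s->value->flags == flags) {
    return s->value;
  }
  return nullptr;
}

int main() {
  Name n{0x1234}; Map m{7}; Code c{42};
  primary_[PrimaryIndex(&n, c.flags, &m)] = Entry{&n, &c, &m};
  return Probe(&n, &m, c.flags) == &c ? 0 : 1;
}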

View File

@@ -399,7 +399,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5);
masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -438,7 +438,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5);
masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
}
@@ -706,7 +706,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags =
Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5);
masm, flags, r0, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1516,7 +1516,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5);
masm, flags, r1, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);

View File

@@ -43,59 +43,83 @@ static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register receiver,
Register name,
// Number of the cache entry, not scaled.
Register offset,
int offset_shift_bits,
Register scratch,
Register scratch2) {
Register scratch2,
Register offset_scratch) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
// Check the relative positions of the address fields.
ASSERT(value_off_addr > key_off_addr);
ASSERT((value_off_addr - key_off_addr) % 4 == 0);
ASSERT((value_off_addr - key_off_addr) < (256 * 4));
ASSERT(map_off_addr > key_off_addr);
ASSERT((map_off_addr - key_off_addr) % 4 == 0);
ASSERT((map_off_addr - key_off_addr) < (256 * 4));
Label miss;
Register offsets_base_addr = scratch;
Register base_addr = scratch;
scratch = no_reg;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ add(offset_scratch, offset, Operand(offset, LSL, 1));
// Calculate the base address of the entry.
__ mov(base_addr, Operand(key_offset));
__ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
// Check that the key in the entry matches the name.
__ mov(offsets_base_addr, Operand(key_offset));
__ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
__ ldr(ip, MemOperand(base_addr, 0));
__ cmp(name, ip);
__ b(ne, &miss);
// Check the map matches.
__ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
__ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ cmp(ip, scratch2);
__ b(ne, &miss);
// Get the code entry from the cache.
__ add(offsets_base_addr, offsets_base_addr,
Operand(value_off_addr - key_off_addr));
__ ldr(scratch2,
MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
Register code = scratch2;
scratch2 = no_reg;
__ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
__ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
Register flags_reg = base_addr;
base_addr = no_reg;
__ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
// It's a nice optimization if this constant is encodable in the bic insn.
uint32_t mask = Code::kFlagsNotUsedInLookup;
ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
__ bic(scratch2, scratch2, Operand(mask));
__ bic(flags_reg, flags_reg, Operand(mask));
// Using cmn and the negative instead of cmp means we can use movw.
if (flags < 0) {
__ cmn(scratch2, Operand(-flags));
__ cmn(flags_reg, Operand(-flags));
} else {
__ cmp(scratch2, Operand(flags));
__ cmp(flags_reg, Operand(flags));
}
__ b(ne, &miss);
// Re-load code entry from cache.
__ ldr(offset,
MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(offset);
__ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
// Miss: fall through.
__ bind(&miss);
@@ -167,13 +191,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
Register extra2) {
Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The shifting code relies on the
// entry size being 8.
ASSERT(sizeof(Entry) == 8);
// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
ASSERT(sizeof(Entry) == 12);
// Make sure the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -193,6 +218,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(no_reg));
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
ASSERT(!extra3.is(no_reg));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
extra2, extra3);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
@@ -201,29 +231,32 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
uint32_t mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize;
uint32_t mask = kPrimaryTableSize - 1;
// We shift out the last two bits because they are not part of the hash and
// they are always 01 for maps.
__ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
// Mask down the eor argument to the minimum to keep the immediate
// ARM-encodable.
__ eor(scratch, scratch, Operand(flags & mask));
// Prefer and_ to ubfx here because ubfx takes 2 cycles.
__ and_(scratch, scratch, Operand(mask));
__ mov(scratch, Operand(scratch, LSR, 1));
// Probe the primary table.
ProbeTable(isolate,
masm,
flags,
kPrimary,
receiver,
name,
scratch,
1,
extra,
extra2);
extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name, LSR, 1));
uint32_t mask2 = (kSecondaryTableSize - 1) << (kHeapObjectTagSize - 1);
__ add(scratch, scratch, Operand((flags >> 1) & mask2));
__ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
uint32_t mask2 = (kSecondaryTableSize - 1);
__ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
__ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
@@ -231,15 +264,18 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
masm,
flags,
kSecondary,
receiver,
name,
scratch,
1,
extra,
extra2);
extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
extra2, extra3);
}
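
To make the new ARM scaling concrete: after GenerateProbe masks the hash with kPrimaryTableSize - 1, scratch holds the unscaled entry number, and ProbeTable reaches the 12-byte entry by multiplying by 3 (three pointer-sized fields) and then shifting left by kPointerSizeLog2. A small stand-alone check of that arithmetic, as plain C++ rather than MacroAssembler code; the constants are the 32-bit ARM values:

// Mirror of the ARM ProbeTable address computation after this change,
// written as ordinary C++ to check the arithmetic (a sketch only).
#include <cassert>
#include <cstdint>

int main() {
  const int kPointerSize = 4;               // 32-bit ARM
  const int kPointerSizeLog2 = 2;
  const int kEntrySize = 3 * kPointerSize;  // {key, value, map} == 12 bytes

  uint32_t entry_number = 1234;             // unscaled cache entry number
  uintptr_t key_table_base = 0x10000;       // stand-in for the key_offset address

  // add(offset_scratch, offset, Operand(offset, LSL, 1)): offset * 3.
  uint32_t offset_scratch = entry_number + (entry_number << 1);
  // add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)).
  uintptr_t base_addr = key_table_base + (offset_scratch << kPointerSizeLog2);

  // The entry address is base + entry_number * sizeof(Entry).
  assert(base_addr == key_table_base + entry_number * kEntrySize);
  return 0;
}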

View File

@@ -563,6 +563,13 @@ DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
// code-stubs.cc
DEFINE_bool(print_code_stubs, false, "print code stubs")
DEFINE_bool(test_secondary_stub_cache,
false,
"test secondary stub cache by disabling the primary one")
DEFINE_bool(test_primary_stub_cache,
false,
"test primary stub cache by disabling the secondary one")
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(print_code, false, "print generated code")

View File

@@ -44,19 +44,30 @@ static void ProbeTable(Isolate* isolate,
Code::Flags flags,
StubCache::Table table,
Register name,
Register receiver,
// Number of the cache entry, pointer-size scaled.
Register offset,
Register extra) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
if (extra.is_valid()) {
// Get the code entry from the cache.
__ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
__ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Check that the flags match what we're looking for.
@@ -65,6 +76,14 @@ static void ProbeTable(Isolate* isolate,
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(extra);
@@ -75,11 +94,19 @@ static void ProbeTable(Isolate* isolate,
__ push(offset);
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Restore offset register.
__ mov(offset, Operand(esp, 0));
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
@@ -87,9 +114,17 @@ static void ProbeTable(Isolate* isolate,
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Restore offset and re-load code entry from cache.
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Jump to the first instruction in the code stub.
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -159,12 +194,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
Register extra2) {
Register extra2,
Register extra3) {
Label miss;
// Assert that code is valid. The shifting code relies on the entry size
// being 8.
ASSERT(sizeof(Entry) == 8);
// Assert that code is valid. The multiplying code relies on the entry size
// being 12.
ASSERT(sizeof(Entry) == 12);
// Assert the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -176,37 +212,51 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
// Assert scratch and extra registers are valid, and extra2 is unused.
// Assert scratch and extra registers are valid, and extra2/3 are unused.
ASSERT(!scratch.is(no_reg));
ASSERT(extra2.is(no_reg));
ASSERT(extra3.is(no_reg));
Register offset = scratch;
scratch = no_reg;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ mov(offset, FieldOperand(name, String::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
// ProbeTable expects the offset to be pointer scaled, which it is, because
// the heap object tag size is 2 and the pointer size log 2 is also 2.
ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
// Probe the primary table.
ProbeTable(isolate(), masm, flags, kPrimary, name, scratch, extra);
ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ sub(scratch, name);
__ add(scratch, Immediate(flags));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
__ mov(offset, FieldOperand(name, String::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ sub(offset, name);
__ add(offset, Immediate(flags));
__ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
ProbeTable(isolate(), masm, flags, kSecondary, name, scratch, extra);
ProbeTable(
isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}
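
On ia32 the masked hash is already pointer scaled (it keeps the factor of 1 << kHeapObjectTagSize, which equals kPointerSize), so after the lea multiplies it by 3 the StaticArray operands can use times_1 and still land on 12-byte entries. A small stand-alone check of that arithmetic, as a sketch rather than the generated code:

// Check of the ia32 ProbeTable scaling: the masked hash is entry_number * 4
// because kHeapObjectTagSize == kPointerSizeLog2, the lea multiplies by 3,
// and times_1 operands then index 12-byte entries directly.
#include <cassert>
#include <cstdint>

int main() {
  const int kHeapObjectTagSize = 2;          // == kPointerSizeLog2 on ia32
  const int kEntrySize = 3 * 4;              // three pointers, 32-bit

  uint32_t entry_number = 555;
  uint32_t offset = entry_number << kHeapObjectTagSize;   // pointer scaled

  uint32_t after_lea = offset + offset * 2;  // lea(offset, offset, times_2, 0)
  uint32_t byte_offset = after_lea * 1;      // times_1 in the StaticArray operands

  assert(byte_offset == entry_number * kEntrySize);
  return 0;
}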

View File

@@ -273,14 +273,22 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
STUB_CACHE_TABLE,
2,
"StubCache::primary_->value");
Add(stub_cache->key_reference(StubCache::kSecondary).address(),
Add(stub_cache->map_reference(StubCache::kPrimary).address(),
STUB_CACHE_TABLE,
3,
"StubCache::primary_->map");
Add(stub_cache->key_reference(StubCache::kSecondary).address(),
STUB_CACHE_TABLE,
4,
"StubCache::secondary_->key");
Add(stub_cache->value_reference(StubCache::kSecondary).address(),
STUB_CACHE_TABLE,
4,
5,
"StubCache::secondary_->value");
Add(stub_cache->map_reference(StubCache::kSecondary).address(),
STUB_CACHE_TABLE,
6,
"StubCache::secondary_->map");
// Runtime entries
Add(ExternalReference::perform_gc_function(isolate).address(),

View File

@@ -77,14 +77,15 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, flags, map);
Entry* primary = entry(primary_, primary_offset);
Code* hit = primary->value;
Code* old_code = primary->value;
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
if (hit != isolate_->builtins()->builtin(Builtins::kIllegal)) {
Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
int secondary_offset =
SecondaryOffset(primary->key, primary_flags, primary_offset);
if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
Map* old_map = primary->map;
Code::Flags old_flags = Code::RemoveTypeFromFlags(old_code->flags());
int seed = PrimaryOffset(primary->key, old_flags, old_map);
int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
Entry* secondary = entry(secondary_, secondary_offset);
*secondary = *primary;
}
@@ -92,6 +93,8 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
// Update primary cache.
primary->key = name;
primary->value = code;
primary->map = map;
isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
return code;
}
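
The substance of the fix above is that the retired entry's secondary slot is now computed from that entry's own key, flags and map, matching the seed the generated secondary probe recomputes after a primary miss. Below is a simplified C++ sketch of this update path; all types, masks and hash functions are stand-ins, not the V8 definitions.

// Sketch of the fixed retirement path in StubCache::Set: the secondary slot
// for an evicted entry is derived from that entry's own key, flags and map
// (with non-lookup flag bits masked off), so it is the same slot the
// generated secondary probe computes on a primary miss.
#include <cstdint>

struct Name { uint32_t hash_field; };
struct Map  { uint32_t id; };
struct Code { uint32_t flags; };
struct Entry { Name* key; Code* value; Map* map; };

static const int kPrimarySize = 2048;
static const int kSecondarySize = 512;
static const uint32_t kFlagsNotUsedInLookup = 0x7;  // stand-in for the real mask

static Entry primary_[kPrimarySize];
static Entry secondary_[kSecondarySize];

static int PrimaryOffset(Name* n, uint32_t flags, Map* m) {
  return ((n->hash_field + m->id) ^ flags) & (kPrimarySize - 1);
}
static int SecondaryOffset(Name* n, uint32_t flags, int seed) {
  uint32_t low = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(n));
  uint32_t iflags = flags & ~kFlagsNotUsedInLookup;  // match the generated code
  return ((seed - low) + iflags) & (kSecondarySize - 1);
}

static Code* Set(Name* name, Map* map, Code* code, uint32_t flags) {
  Entry* primary = &primary_[PrimaryOffset(name, flags, map)];
  if (primary->value != nullptr) {  // slot holds useful data: retire it
    Map* old_map = primary->map;
    uint32_t old_flags = primary->value->flags;  // real code also strips the type
    // The fix: recompute the seed from the old entry instead of reusing the
    // primary offset of the entry that is about to be inserted.
    int seed = PrimaryOffset(primary->key, old_flags, old_map);
    secondary_[SecondaryOffset(primary->key, old_flags, seed)] = *primary;
  }
  primary->key = name;
  primary->value = code;
  primary->map = map;
  return code;
}

int main() {
  Name n1{0x10}, n2{0x08};
  Map  m1{0x00}, m2{0x08};
  Code c1{0x00}, c2{0x00};
  Set(&n1, &m1, &c1, c1.flags);
  Set(&n2, &m2, &c2, c2.flags);   // collides with n1 in the primary table
  // The n1 entry must sit in the slot the secondary probe would compute.
  int seed = PrimaryOffset(&n1, c1.flags, &m1);
  Entry* s = &secondary_[SecondaryOffset(&n1, c1.flags, seed)];
  return (s->key == &n1 && s->map == &m1 && s->value == &c1) ? 0 : 1;
}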

View File

@@ -69,6 +69,7 @@ class StubCache {
struct Entry {
String* key;
Code* value;
Map* map;
};
void Initialize();
@@ -252,7 +253,7 @@ class StubCache {
Handle<Context> global_context);
// Generate code for probing the stub cache table.
// Arguments extra and extra2 may be used to pass additional scratch
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
void GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
@@ -260,7 +261,8 @@ class StubCache {
Register name,
Register scratch,
Register extra,
Register extra2 = no_reg);
Register extra2 = no_reg,
Register extra3 = no_reg);
enum Table {
kPrimary,
@@ -274,6 +276,12 @@ class StubCache {
}
SCTableReference map_reference(StubCache::Table table) {
return SCTableReference(
reinterpret_cast<Address>(&first_entry(table)->map));
}
SCTableReference value_reference(StubCache::Table table) {
return SCTableReference(
reinterpret_cast<Address>(&first_entry(table)->value));
@@ -300,7 +308,16 @@ class StubCache {
RelocInfo::Mode mode,
Code::Kind kind);
// Computes the hashed offsets for primary and secondary caches.
// The stub cache has a primary and secondary level. The two levels have
// different hashing algorithms in order to avoid simultaneous collisions
// in both caches. Unlike a probing strategy (quadratic or otherwise) the
update strategy is fairly clear and simple: Any existing entry
// in the primary cache is moved to the secondary cache, and secondary cache
// entries are overwritten.
// Hash algorithm for the primary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kHeapObjectTagSize.
static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
// This works well because the heap object tag size and the hash
// shift are equal. Shifting down the length field to get the
@@ -324,23 +341,30 @@ class StubCache {
return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kHeapObjectTagSize.
static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
// Use the seed from the primary cache in the secondary cache.
uint32_t string_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
uint32_t key = seed - string_low32bits + flags;
// We always set the in_loop bit to zero when generating the lookup code
// so do it here too so the hash codes match.
uint32_t iflags =
(static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
uint32_t key = (seed - string_low32bits) + iflags;
return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}
// Compute the entry for a given offset in exactly the same way as
// we do in generated code. We generate an hash code that already
// ends in String::kHashShift 0s. Then we shift it so it is a multiple
// ends in String::kHashShift 0s. Then we multiply it so it is a multiple
// of sizeof(Entry). This makes it easier to avoid making mistakes
// in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
const int multiplier = sizeof(*table) >> String::kHashShift;
return reinterpret_cast<Entry*>(
reinterpret_cast<Address>(table) + (offset << shift_amount));
reinterpret_cast<Address>(table) + offset * multiplier);
}
static const int kPrimaryTableBits = 11;
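
A quick worked check of the new entry() arithmetic, assuming String::kHashShift equals kHeapObjectTagSize (2) and a 32-bit Entry of three pointers: the hashed offset is a multiple of 4, the multiplier is sizeof(Entry) >> kHashShift == 3, and offset * 3 is exactly entry_number * sizeof(Entry). A small sketch, not the V8 code:

// Worked check of the entry() multiplier (assumes kHashShift == 2, i.e. the
// heap object tag size, and a 12-byte Entry of three 32-bit pointers).
#include <cassert>
#include <cstdint>

int main() {
  const int kHashShift = 2;                 // assumed, matches the tag size
  const int kEntrySize = 3 * 4;             // {key, value, map} on 32-bit
  const int multiplier = kEntrySize >> kHashShift;    // 12 >> 2 == 3

  // PrimaryOffset/SecondaryOffset return indices scaled by 1 << kHashShift.
  int entry_number = 321;
  int offset = entry_number << kHashShift;  // what the hash functions return

  uintptr_t table = 0x8000;                 // stand-in table base address
  uintptr_t entry_addr = table + offset * multiplier;

  // Same address as indexing an array of 12-byte entries directly.
  assert(entry_addr == table + entry_number * kEntrySize);
  return 0;
}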

View File

@@ -198,6 +198,9 @@ namespace internal {
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(negative_lookups, V8.NegativeLookups) \
SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \

View File

@@ -43,32 +43,61 @@ static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register receiver,
Register name,
// The offset is scaled by 4, based on
// kHeapObjectTagSize, which is two bits
Register offset) {
ASSERT_EQ(8, kPointerSize);
ASSERT_EQ(16, sizeof(StubCache::Entry));
// We need to scale up the pointer by 2 because the offset is scaled by less
// than the pointer size.
ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
ScaleFactor scale_factor = times_2;
ASSERT_EQ(24, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
__ LoadAddress(kScratchRegister, key_offset);
// Check that the key in the entry matches the name.
// Multiply entry offset by 16 to get the entry address. Since the
// offset register already holds the entry offset times four, multiply
// by a further four.
__ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
__ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
__ j(not_equal, &miss);
// Get the code entry from the cache.
// Use key_offset + kPointerSize, rather than loading value_offset.
// Get the map entry from the cache.
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movq(kScratchRegister,
Operand(kScratchRegister, offset, times_4, kPointerSize));
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
__ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
__ LoadAddress(kScratchRegister, value_offset);
__ movq(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, 0));
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
@@ -134,14 +163,16 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
Register extra2) {
Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
USE(extra2); // The register extra2 is not used on the X64 platform.
// Make sure that code is valid. The shifting code relies on the
// entry size being 16.
ASSERT(sizeof(Entry) == 16);
USE(extra3); // The register extra3 is not used on the X64 platform.
// Make sure that code is valid. The multiplying code relies on the
// entry size being 24.
ASSERT(sizeof(Entry) == 24);
// Make sure the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -153,6 +184,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Check scratch register is valid, extra and extra2 are unused.
ASSERT(!scratch.is(no_reg));
ASSERT(extra2.is(no_reg));
ASSERT(extra3.is(no_reg));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
@@ -162,10 +197,12 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -177,11 +214,12 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}
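
On x64 the masked hash keeps the factor of 1 << kHeapObjectTagSize == 4 while entries are now 24 bytes, so ProbeTable multiplies by 3 with the lea and lets the times_2 scale factor supply the final factor of 2 (4 * 3 * 2 == 24). A small stand-alone check of that arithmetic, as a sketch rather than the generated code:

// Check of the x64 ProbeTable scaling: offset arrives as entry_number * 4,
// the lea multiplies it by 3, and the times_2 scale factor reaches
// entry_number * 24 == entry_number * sizeof(Entry) on 64-bit.
#include <cassert>
#include <cstdint>

int main() {
  const int kHeapObjectTagSize = 2;
  const int kEntrySize = 3 * 8;                        // three pointers, 64-bit

  uint32_t entry_number = 777;
  uint32_t offset = entry_number << kHeapObjectTagSize;    // times 4

  uint32_t after_lea = offset + offset * 2;            // lea: offset * 3
  const int scale_factor = 2;                          // times_2 in the operand
  uint32_t byte_offset = after_lea * scale_factor;     // offset * 6 == entry * 24

  assert(byte_offset == entry_number * kEntrySize);
  return 0;
}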

View File

@@ -16102,3 +16102,72 @@ TEST(CallCompletedCallbackTwoExceptions) {
v8::V8::AddCallCompletedCallback(CallCompletedCallbackException);
CompileRun("throw 'first exception';");
}
static int probes_counter = 0;
static int misses_counter = 0;
static int updates_counter = 0;
static int* LookupCounter(const char* name) {
if (strcmp(name, "c:V8.MegamorphicStubCacheProbes") == 0) {
return &probes_counter;
} else if (strcmp(name, "c:V8.MegamorphicStubCacheMisses") == 0) {
return &misses_counter;
} else if (strcmp(name, "c:V8.MegamorphicStubCacheUpdates") == 0) {
return &updates_counter;
}
return NULL;
}
static const char* kMegamorphicTestProgram =
"function ClassA() { };"
"function ClassB() { };"
"ClassA.prototype.foo = function() { };"
"ClassB.prototype.foo = function() { };"
"function fooify(obj) { obj.foo(); };"
"var a = new ClassA();"
"var b = new ClassB();"
"for (var i = 0; i < 10000; i++) {"
" fooify(a);"
" fooify(b);"
"}";
static void StubCacheHelper(bool primary) {
V8::SetCounterFunction(LookupCounter);
USE(kMegamorphicTestProgram);
#ifdef DEBUG
i::FLAG_native_code_counters = true;
if (primary) {
i::FLAG_test_primary_stub_cache = true;
} else {
i::FLAG_test_secondary_stub_cache = true;
}
i::FLAG_crankshaft = false;
v8::HandleScope scope;
LocalContext env;
int initial_probes = probes_counter;
int initial_misses = misses_counter;
int initial_updates = updates_counter;
CompileRun(kMegamorphicTestProgram);
int probes = probes_counter - initial_probes;
int misses = misses_counter - initial_misses;
int updates = updates_counter - initial_updates;
CHECK_LT(updates, 10);
CHECK_LT(misses, 10);
CHECK_GE(probes, 10000);
#endif
}
TEST(SecondaryStubCache) {
StubCacheHelper(true);
}
TEST(PrimaryStubCache) {
StubCacheHelper(false);
}