Further robustify the keyed lookup cache against unlucky hash
seeds.  This change is performance neutral on most snapshot
VM builds, but provides a big improvement on string-fasta
on around 5% of builds.

Review URL: https://chromiumcodereview.appspot.com/9193015

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10478 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2012-01-23 16:18:10 +00:00
parent 5da21118e8
commit 83b439a0f9
6 changed files with 163 additions and 123 deletions
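
In short, the cache goes from 2-way to 4-way buckets: Lookup now probes every entry in a bucket, and Update fills a free slot if one exists, otherwise shifts all entries down one and writes the new entry at position zero. A minimal, self-contained sketch of that policy (plain C++ with stand-in types and pointer-identity name comparison, not V8's actual declarations; hash is assumed non-negative):

struct Key { const void* map = nullptr; const void* name = nullptr; };

static const int kEntriesPerBucket = 4;
static const int kLength = 256;                   // kLength / kEntriesPerBucket buckets.
static const int kHashMask = -kEntriesPerBucket;  // Zeroes the low bits of the hash.
static const int kNotFound = -1;

static Key keys[kLength];
static int field_offsets[kLength];

int Lookup(const void* map, const void* name, int hash) {
  int index = hash & kHashMask;  // Index of the bucket's first entry.
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (keys[index + i].map == map && keys[index + i].name == name) {
      return field_offsets[index + i];
    }
  }
  return kNotFound;
}

void Update(const void* map, const void* name, int hash, int field_offset) {
  int index = hash & kHashMask;
  // Prefer a free slot (buckets have free slots after a GC clears the cache).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (keys[index + i].map == nullptr) {
      keys[index + i] = Key{map, name};
      field_offsets[index + i] = field_offset;
      return;
    }
  }
  // Bucket full: demote every entry by one slot and insert at the front.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    keys[index + i] = keys[index + i - 1];
    field_offsets[index + i] = field_offsets[index + i - 1];
  }
  keys[index] = Key{map, name};
  field_offsets[index] = field_offset;
}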

src/arm/ic-arm.cc

@@ -1036,21 +1036,29 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label try_second_entry, hit_on_first_entry, load_in_object_property;
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(isolate);
   __ mov(r4, Operand(cache_keys));
   __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-  // Move r4 to second entry.
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    // Load map and move r4 to next entry.
     __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
     __ cmp(r2, r5);
-    __ b(ne, &try_second_entry);
+    __ b(ne, &try_next_entry);
     __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load symbol
     __ cmp(r0, r5);
-    __ b(eq, &hit_on_first_entry);
+    __ b(eq, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
-  __ bind(&try_second_entry);
-  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));  // Move r4 to symbol.
+  // Last entry: Load map and move r4 to symbol.
+  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
   __ cmp(r2, r5);
   __ b(ne, &slow);
   __ ldr(r5, MemOperand(r4));
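
This loop unrolls the probe at code-generation time, and the same shape recurs in the ia32, MIPS and x64 ports below: entries 0 through kEntriesPerBucket - 2 fall through to the next entry on any mismatch, and only the last entry branches straight to the slow path. A rough C++ model of the emitted control flow (hypothetical Entry/ProbeBucket names; returns the hit entry, or -1 for the slow case):

struct Entry { const void* map; const void* name; };

int ProbeBucket(const Entry* bucket, const void* map, const void* name,
                int entries_per_bucket) {
  for (int i = 0; i < entries_per_bucket - 1; i++) {
    if (bucket[i].map != map) continue;    // b(ne, &try_next_entry)
    if (bucket[i].name == name) return i;  // b(eq, &hit_on_nth_entry[i])
  }
  // Last entry: any mismatch goes to the slow case.
  int last = entries_per_bucket - 1;
  if (bucket[last].map != map || bucket[last].name != name) return -1;  // &slow
  return last;
}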
@@ -1065,22 +1073,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-  // Hit on second entry.
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
     __ mov(r4, Operand(cache_field_offsets));
-  __ add(r3, r3, Operand(1));
+    if (i != 0) {
+      __ add(r3, r3, Operand(i));
+    }
     __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
     __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
     __ sub(r5, r5, r6, SetCC);
     __ b(ge, &property_array_property);
+    if (i != 0) {
       __ jmp(&load_in_object_property);
-  // Hit on first entry.
-  __ bind(&hit_on_first_entry);
-  __ mov(r4, Operand(cache_field_offsets));
-  __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
-  __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
-  __ sub(r5, r5, r6, SetCC);
-  __ b(ge, &property_array_property);
+    }
+  }
 
   // Load in-object property.
   __ bind(&load_in_object_property);

src/heap.cc

@@ -6523,15 +6523,11 @@ int KeyedLookupCache::Hash(Map* map, String* name) {
 int KeyedLookupCache::Lookup(Map* map, String* name) {
   int index = (Hash(map, name) & kHashMask);
-  Key& key = keys_[index];
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
     if ((key.map == map) && key.name->Equals(name)) {
-      return field_offsets_[index];
+      return field_offsets_[index + i];
     }
-  ASSERT(kEntriesPerBucket == 2);  // There are two entries to check.
-  // First entry in the bucket missed, check the second.
-  Key& key2 = keys_[index + 1];
-  if ((key2.map == map) && key2.name->Equals(name)) {
-    return field_offsets_[index + 1];
   }
   return kNotFound;
 }
@@ -6541,13 +6537,29 @@ void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
     int index = (Hash(map, symbol) & kHashMask);
+    // After a GC there will be free slots, so we use them in order (this may
+    // help to get the most frequently used one in position 0).
+    for (int i = 0; i < kEntriesPerBucket; i++) {
       Key& key = keys_[index];
-    Key& key2 = keys_[index + 1];  // Second entry in the bucket.
-    // Demote the first entry to the second in the bucket.
-    key2.map = key.map;
-    key2.name = key.name;
-    field_offsets_[index + 1] = field_offsets_[index];
+      Object* free_entry_indicator = NULL;
+      if (key.map == free_entry_indicator) {
+        key.map = map;
+        key.name = symbol;
+        field_offsets_[index + i] = field_offset;
+        return;
+      }
+    }
+    // No free entry found in this bucket, so we move them all down one and
+    // put the new entry at position zero.
+    for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+      Key& key = keys_[index + i];
+      Key& key2 = keys_[index + i - 1];
+      key = key2;
+      field_offsets_[index + i] = field_offsets_[index + i - 1];
+    }
+
     // Write the new first entry.
+    Key& key = keys_[index];
     key.map = map;
     key.name = symbol;
     field_offsets_[index] = field_offset;
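
Lookup and Update are meant to be paired on the inline-cache miss path; roughly (mocked types and a hypothetical SlowPropertyLookup, not V8's actual call sites):

struct Map;
struct String;

struct CacheLike {
  static const int kNotFound = -1;
  int Lookup(Map* map, String* name);                     // As above.
  void Update(Map* map, String* name, int field_offset);  // As above.
};

int SlowPropertyLookup(Map* map, String* name);  // Stand-in for the slow path.

int GetFieldOffset(CacheLike* cache, Map* map, String* name) {
  int offset = cache->Lookup(map, name);
  if (offset != CacheLike::kNotFound) return offset;  // Cache hit.
  offset = SlowPropertyLookup(map, name);             // Miss: do the full lookup...
  cache->Update(map, name, offset);                   // ...then cache it at the front.
  return offset;
}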

src/heap.h

@@ -2135,13 +2135,17 @@ class KeyedLookupCache {
   // Clear the cache.
   void Clear();
 
-  static const int kLength = 128;
+  static const int kLength = 256;
   static const int kCapacityMask = kLength - 1;
   static const int kMapHashShift = 5;
-  static const int kHashMask = -2;  // Zero the last bit.
-  static const int kEntriesPerBucket = 2;
+  static const int kHashMask = -4;  // Zero the last two bits.
+  static const int kEntriesPerBucket = 4;
   static const int kNotFound = -1;
 
+  // kEntriesPerBucket should be a power of 2.
+  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
+
  private:
   KeyedLookupCache() {
     for (int i = 0; i < kLength; ++i) {
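
The two STATIC_ASSERTs tie the constants together: kEntriesPerBucket must be a power of two, and kHashMask must be its negation, so masking a (non-negative) hash rounds it down to a bucket-aligned index. A quick standalone check of that arithmetic:

#include <cassert>

int main() {
  const int kEntriesPerBucket = 4;
  const int kHashMask = -kEntriesPerBucket;  // Two's complement ...11111100.
  for (int hash = 0; hash < 256; hash++) {
    int index = hash & kHashMask;
    assert(index % kEntriesPerBucket == 0);    // Bucket-aligned.
    assert(hash - index < kEntriesPerBucket);  // Entry offset fits in the bucket.
  }
}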

src/ia32/ic-ia32.cc

@@ -538,20 +538,30 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label try_second_entry, hit_on_first_entry, load_in_object_property;
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
     __ mov(edi, ecx);
     __ shl(edi, kPointerSizeLog2 + 1);
+    if (i != 0) {
+      __ add(edi, Immediate(kPointerSize * i * 2));
+    }
     __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(not_equal, &try_second_entry);
+    __ j(not_equal, &try_next_entry);
     __ add(edi, Immediate(kPointerSize));
     __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(equal, &hit_on_first_entry);
+    __ j(equal, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
 
-  __ bind(&try_second_entry);
   __ lea(edi, Operand(ecx, 1));
   __ shl(edi, kPointerSizeLog2 + 1);
+  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
   __ add(edi, Immediate(kPointerSize));
@@ -566,22 +576,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-  // Hit on second entry.
-  __ add(ecx, Immediate(1));
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    if (i != 0) {
+      __ add(ecx, Immediate(i));
+    }
     __ mov(edi,
            Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
     __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
     __ sub(edi, ecx);
     __ j(above_equal, &property_array_property);
+    if (i != 0) {
       __ jmp(&load_in_object_property);
-  // Hit on first entry.
-  __ bind(&hit_on_first_entry);
-  __ mov(edi,
-         Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
-  __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-  __ sub(edi, ecx);
-  __ j(above_equal, &property_array_property);
+    }
+  }
 
   // Load in-object property.
   __ bind(&load_in_object_property);

src/mips/ic-mips.cc

@@ -1038,21 +1038,27 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label try_second_entry, hit_on_first_entry, load_in_object_property;
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(isolate);
   __ li(t0, Operand(cache_keys));
   __ sll(at, a3, kPointerSizeLog2 + 1);
   __ addu(t0, t0, at);
-  __ lw(t1, MemOperand(t0));
-  __ Branch(&try_second_entry, ne, a2, Operand(t1));
-  __ lw(t1, MemOperand(t0, kPointerSize));
-  __ Branch(&hit_on_first_entry, eq, a0, Operand(t1));
-  __ bind(&try_second_entry);
-  __ lw(t1, MemOperand(t0, kPointerSize * 2));
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
+    __ Branch(&try_next_entry, ne, a2, Operand(t1));
+    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
+    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
+    __ bind(&try_next_entry);
+  }
+
+  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
   __ Branch(&slow, ne, a2, Operand(t1));
-  __ lw(t1, MemOperand(t0, kPointerSize * 3));
+  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
   __ Branch(&slow, ne, a0, Operand(t1));
 
   // Get field offset.
@@ -1063,25 +1069,20 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-  // Hit on second entry.
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
     __ li(t0, Operand(cache_field_offsets));
     __ sll(at, a3, kPointerSizeLog2);
     __ addu(at, t0, at);
-  __ lw(t1, MemOperand(at, kPointerSize));
+    __ lw(t1, MemOperand(at, kPointerSize * i));
     __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
     __ Subu(t1, t1, t2);
     __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+    if (i != 0) {
       __ Branch(&load_in_object_property);
-  // Hit on first entry.
-  __ bind(&hit_on_first_entry);
-  __ li(t0, Operand(cache_field_offsets));
-  __ sll(at, a3, kPointerSizeLog2);
-  __ addu(at, t0, at);
-  __ lw(t1, MemOperand(at));
-  __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
-  __ Subu(t1, t1, t2);
-  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+    }
+  }
 
   // Load in-object property.
   __ bind(&load_in_object_property);

src/x64/ic-x64.cc

@@ -467,43 +467,50 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label try_second_entry, hit_on_first_entry, load_in_object_property;
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys
       = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
     __ movq(rdi, rcx);
     __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
     __ LoadAddress(kScratchRegister, cache_keys);
-  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
-  __ j(not_equal, &try_second_entry);
-  __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
-  __ j(equal, &hit_on_first_entry);
+    int off = kPointerSize * i * 2;
+    __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+    __ j(not_equal, &try_next_entry);
+    __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+    __ j(equal, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
 
-  __ bind(&try_second_entry);
-  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, kPointerSize * 2));
+  int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
+  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
   __ j(not_equal, &slow);
-  __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize * 3));
+  __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
   __ j(not_equal, &slow);
 
   // Get field offset, which is a 32-bit integer.
   ExternalReference cache_field_offsets
       = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
 
-  // Hit on second entry.
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    if (i != 0) {
+      __ addl(rcx, Immediate(i));
+    }
     __ LoadAddress(kScratchRegister, cache_field_offsets);
-  __ addl(rcx, Immediate(1));
     __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
     __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
     __ subq(rdi, rcx);
     __ j(above_equal, &property_array_property);
+    if (i != 0) {
       __ jmp(&load_in_object_property);
-  // Hit on first entry.
-  __ bind(&hit_on_first_entry);
-  __ LoadAddress(kScratchRegister, cache_field_offsets);
-  __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
-  __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
-  __ subq(rdi, rcx);
-  __ j(above_equal, &property_array_property);
+    }
+  }
 
   // Load in-object property.
   __ bind(&load_in_object_property);