Unravel kHeapObjectTagSize from the stub cache.

The stub cache used kHeapObjectTagSize to scale indices, but there
doesn't appear to be a direct need for this. Instead, the stub cache
now has its own kCacheIndexShift constant.

BUG=
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/401613003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22466 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: Jacob.Bramley@arm.com
Date:   2014-07-18 09:57:39 +0000
Commit: 38cb7830aa (parent de8cac4450)
8 changed files with 39 additions and 38 deletions
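
The index math this CL renames is easiest to see outside the assembler. Below is a minimal standalone C++ sketch of the primary/secondary hashing that the header's PrimaryOffset/SecondaryOffset implement; the constants and the flag handling are illustrative stand-ins, not V8's real values.

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for V8's constants (the real ones live in the
// stub cache header and the Name class).
static const int kCacheIndexShift = 2;  // == Name::kHashShift in this CL
static const int kPrimaryTableSize = 2048;
static const int kSecondaryTableSize = 512;

// Returns an index scaled by 1 << kCacheIndexShift, as the header comments
// say. 'flags' is assumed to be pre-masked to the bits used in lookup.
static int PrimaryOffset(uint32_t hash_field, uint32_t flags,
                         uint32_t map_bits) {
  uint32_t key = (map_bits + hash_field) ^ flags;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

// The secondary probe reuses the primary offset as its seed.
static int SecondaryOffset(uint32_t name_bits, uint32_t flags, int seed) {
  uint32_t key = (static_cast<uint32_t>(seed) - name_bits) + flags;
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}

int main() {
  int primary = PrimaryOffset(0x12345678u, 0x9au, 0xdeadbeefu);
  int secondary = SecondaryOffset(0x55aa55aau, 0x9au, primary);
  // Both offsets come out pre-scaled by the index shift.
  assert(primary % (1 << kCacheIndexShift) == 0);
  assert(secondary % (1 << kCacheIndexShift) == 0);
  return 0;
}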

--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc

@@ -202,10 +202,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   uint32_t mask = kPrimaryTableSize - 1;
   // We shift out the last two bits because they are not part of the hash and
   // they are always 01 for maps.
-  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
+  __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
   // Mask down the eor argument to the minimum to keep the immediate
   // ARM-encodable.
-  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
   // Prefer and_ to ubfx here because ubfx takes 2 cycles.
   __ and_(scratch, scratch, Operand(mask));
@@ -222,9 +222,9 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              extra3);

   // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+  __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
   uint32_t mask2 = kSecondaryTableSize - 1;
-  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
   __ and_(scratch, scratch, Operand(mask2));

   // Probe the secondary table.
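
An aside on the "Mask down the eor argument" comment in the ARM hunk above: pre-masking the immediate cannot change the result, because the same mask is applied right after the eor. A standalone C++ check of that identity, with illustrative constants rather than V8's:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kCacheIndexShift = 2;  // illustrative value
  const uint32_t mask = 2048 - 1;       // kPrimaryTableSize - 1
  const uint32_t flags = 0xdeadbeefu;
  for (uint32_t scratch = 0; scratch < (1u << 16); ++scratch) {
    // Full-width eor followed by the mask...
    uint32_t full = (scratch ^ (flags >> kCacheIndexShift)) & mask;
    // ...equals eor with a pre-masked (ARM-encodable) immediate, because
    // bits outside 'mask' are discarded either way.
    uint32_t narrow = (scratch ^ ((flags >> kCacheIndexShift) & mask)) & mask;
    assert(full == narrow);
  }
  return 0;
}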

--- a/src/arm64/stub-cache-arm64.cc
+++ b/src/arm64/stub-cache-arm64.cc

@@ -177,7 +177,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ Add(scratch, scratch, extra);
   __ Eor(scratch, scratch, flags);
   // We shift out the last two bits because they are not part of the hash.
-  __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+  __ Ubfx(scratch, scratch, kCacheIndexShift,
           CountTrailingZeros(kPrimaryTableSize, 64));

   // Probe the primary table.
@@ -185,8 +185,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              scratch, extra, extra2, extra3);

   // Primary miss: Compute hash for secondary table.
-  __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
-  __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+  __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+  __ Add(scratch, scratch, flags >> kCacheIndexShift);
   __ And(scratch, scratch, kSecondaryTableSize - 1);

   // Probe the secondary table.
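
On ARM64 a single Ubfx replaces the shift-then-mask pair, since it extracts CountTrailingZeros(kPrimaryTableSize, 64) == log2(kPrimaryTableSize) bits starting at kCacheIndexShift. A sketch of that equivalence, modelling ubfx as shift-and-mask; the constants are illustrative, and the gcc/clang builtin __builtin_ctzll stands in for CountTrailingZeros:

#include <cassert>
#include <cstdint>

// Models Ubfx(dst, src, lsb, width): extract 'width' bits starting at 'lsb'.
static uint64_t Ubfx(uint64_t src, unsigned lsb, unsigned width) {
  return (src >> lsb) & ((uint64_t{1} << width) - 1);
}

int main() {
  const unsigned kCacheIndexShift = 2;    // illustrative value
  const uint64_t kPrimaryTableSize = 2048;
  // Trailing-zero count of a power of two is its log2.
  const unsigned width = __builtin_ctzll(kPrimaryTableSize);
  for (uint64_t scratch = 0; scratch < (1u << 16); ++scratch) {
    // One ubfx == shift out the index bits, then mask to the table size.
    uint64_t a = Ubfx(scratch, kCacheIndexShift, width);
    uint64_t b = (scratch >> kCacheIndexShift) & (kPrimaryTableSize - 1);
    assert(a == b);
  }
  return 0;
}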

--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc

@@ -205,10 +205,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ xor_(offset, flags);
   // We mask out the last two bits because they are not part of the hash and
   // they are always 01 for maps. Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   // ProbeTable expects the offset to be pointer scaled, which it is, because
   // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
+  ASSERT(kCacheIndexShift == kPointerSizeLog2);

   // Probe the primary table.
   ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
@@ -217,10 +217,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
   __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(offset, flags);
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   __ sub(offset, name);
   __ add(offset, Immediate(flags));
-  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);

   // Probe the secondary table.
   ProbeTable(
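
The ia32 ASSERT above relies on both shifts being 2 on a 32-bit target, so the offset the hash produces is already the byte offset of a pointer slot. A tiny check under those assumed values:

#include <cassert>
#include <cstdint>

int main() {
  // Assumed ia32 values: 4-byte pointers, index shift of 2.
  const unsigned kPointerSizeLog2 = 2;
  const unsigned kCacheIndexShift = 2;
  for (uint32_t index = 0; index < 2048; ++index) {
    uint32_t hash_offset = index << kCacheIndexShift;
    // index * sizeof(void*): what pointer-scaled addressing would need.
    uint32_t byte_offset = index << kPointerSizeLog2;
    assert(hash_offset == byte_offset);
  }
  return 0;
}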

--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc

@@ -196,8 +196,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   uint32_t mask = kPrimaryTableSize - 1;
   // We shift out the last two bits because they are not part of the hash and
   // they are always 01 for maps.
-  __ srl(scratch, scratch, kHeapObjectTagSize);
-  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  __ srl(scratch, scratch, kCacheIndexShift);
+  __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
   __ And(scratch, scratch, Operand(mask));

   // Probe the primary table.
@@ -213,10 +213,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              extra3);

   // Primary miss: Compute hash for secondary probe.
-  __ srl(at, name, kHeapObjectTagSize);
+  __ srl(at, name, kCacheIndexShift);
   __ Subu(scratch, scratch, at);
   uint32_t mask2 = kSecondaryTableSize - 1;
-  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ Addu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
   __ And(scratch, scratch, Operand(mask2));

   // Probe the secondary table.

--- a/src/mips64/stub-cache-mips64.cc
+++ b/src/mips64/stub-cache-mips64.cc

@@ -197,8 +197,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   uint64_t mask = kPrimaryTableSize - 1;
   // We shift out the last two bits because they are not part of the hash and
   // they are always 01 for maps.
-  __ dsrl(scratch, scratch, kHeapObjectTagSize);
-  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  __ dsrl(scratch, scratch, kCacheIndexShift);
+  __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
   __ And(scratch, scratch, Operand(mask));

   // Probe the primary table.
@@ -214,10 +214,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              extra3);

   // Primary miss: Compute hash for secondary probe.
-  __ dsrl(at, name, kHeapObjectTagSize);
+  __ dsrl(at, name, kCacheIndexShift);
   __ Dsubu(scratch, scratch, at);
   uint64_t mask2 = kSecondaryTableSize - 1;
-  __ Daddu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
   __ And(scratch, scratch, Operand(mask2));

   // Probe the secondary table.

--- a/src/stub-cache.h
+++ b/src/stub-cache.h

@@ -183,6 +183,11 @@ class StubCache {
   static const int kInterceptorArgsHolderIndex = 3;
   static const int kInterceptorArgsLength = 4;

+  // Setting the entry size such that the index is shifted by Name::kHashShift
+  // is convenient; shifting down the length field (to extract the hash code)
+  // automatically discards the hash bit field.
+  static const int kCacheIndexShift = Name::kHashShift;
+
  private:
   explicit StubCache(Isolate* isolate);
@@ -195,13 +200,9 @@ class StubCache {
   // Hash algorithm for the primary table. This algorithm is replicated in
   // assembler for every architecture. Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
+  // is scaled by 1 << kCacheIndexShift.
   static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
-    // This works well because the heap object tag size and the hash
-    // shift are equal. Shifting down the length field to get the
-    // hash code would effectively throw away two bits of the hash
-    // code.
-    STATIC_ASSERT(kHeapObjectTagSize == Name::kHashShift);
+    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
     // Compute the hash of the name (use entire hash field).
     ASSERT(name->HasHashCode());
     uint32_t field = name->hash_field();
@@ -216,12 +217,12 @@ class StubCache {
         (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
     // Base the offset on a simple combination of name, flags, and map.
     uint32_t key = (map_low32bits + field) ^ iflags;
-    return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
+    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
   }

   // Hash algorithm for the secondary table. This algorithm is replicated in
   // assembler for every architecture. Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
+  // is scaled by 1 << kCacheIndexShift.
   static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t name_low32bits =
@@ -231,7 +232,7 @@ class StubCache {
     uint32_t iflags =
         (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
     uint32_t key = (seed - name_low32bits) + iflags;
-    return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
+    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
   }

   // Compute the entry for a given offset in exactly the same way as
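
The new kCacheIndexShift == Name::kHashShift comment is the crux of the CL: shifting the raw hash field right by the cache index shift discards the hash-field flag bits for free. A hedged C++ model of that layout follows; the bit positions are illustrative, not V8's actual Name field layout.

#include <cassert>
#include <cstdint>

// Illustrative model of a Name hash field: the low kHashShift bits hold
// flags (e.g. "is this an array index?"), the rest hold the hash itself.
static const unsigned kHashShift = 2;

static uint32_t MakeHashField(uint32_t hash, uint32_t flag_bits) {
  return (hash << kHashShift) | (flag_bits & ((1u << kHashShift) - 1));
}

int main() {
  const uint32_t hash = 0x0badf00d & 0x3fffffff;  // a 30-bit hash
  for (uint32_t flags = 0; flags < (1u << kHashShift); ++flags) {
    uint32_t field = MakeHashField(hash, flags);
    // Shifting by kHashShift (== kCacheIndexShift) recovers the hash and
    // discards the flag bits, whatever they happen to be.
    assert((field >> kHashShift) == hash);
  }
  return 0;
}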

--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc

@@ -24,13 +24,13 @@ static void ProbeTable(Isolate* isolate,
                        Register receiver,
                        Register name,
                        // The offset is scaled by 4, based on
-                       // kHeapObjectTagSize, which is two bits
+                       // kCacheIndexShift, which is two bits
                        Register offset) {
   // We need to scale up the pointer by 2 when the offset is scaled by less
   // than the pointer size.
   ASSERT(kPointerSize == kInt64Size
-         ? kPointerSizeLog2 == kHeapObjectTagSize + 1
-         : kPointerSizeLog2 == kHeapObjectTagSize);
+         ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
+         : kPointerSizeLog2 == StubCache::kCacheIndexShift);
   ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;

   ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
@@ -175,7 +175,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ xorp(scratch, Immediate(flags));
   // We mask out the last two bits because they are not part of the hash and
   // they are always 01 for maps. Also in the two 'and' instructions below.
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));

   // Probe the primary table.
   ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -184,10 +184,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
   __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xorp(scratch, Immediate(flags));
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
   __ subl(scratch, name);
   __ addl(scratch, Immediate(flags));
-  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));

   // Probe the secondary table.
   ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
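
The x64 ProbeTable ASSERT encodes a scaling bridge: offsets arrive scaled by 1 << kCacheIndexShift (4), but x64 pointer slots are 8 bytes, so the addressing mode multiplies by a further times_2. A small check of that arithmetic under assumed 64-bit values:

#include <cassert>
#include <cstdint>

int main() {
  // Assumed x64 values.
  const unsigned kPointerSizeLog2 = 3;  // 8-byte pointers
  const unsigned kCacheIndexShift = 2;  // == Name::kHashShift in this CL
  // The 64-bit arm of the ASSERT: one extra doubling covers the gap.
  assert(kPointerSizeLog2 == kCacheIndexShift + 1);
  const uint64_t scale_factor = 2;      // ScaleFactor times_2
  for (uint64_t index = 0; index < 512; ++index) {
    uint64_t hash_offset = index << kCacheIndexShift;
    // offset * times_2 yields the byte offset of a pointer slot.
    assert(hash_offset * scale_factor == index << kPointerSizeLog2);
  }
  return 0;
}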

--- a/src/x87/stub-cache-x87.cc
+++ b/src/x87/stub-cache-x87.cc

@@ -205,10 +205,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ xor_(offset, flags);
   // We mask out the last two bits because they are not part of the hash and
   // they are always 01 for maps. Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   // ProbeTable expects the offset to be pointer scaled, which it is, because
   // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
+  ASSERT(kCacheIndexShift == kPointerSizeLog2);

   // Probe the primary table.
   ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
@@ -217,10 +217,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
   __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(offset, flags);
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   __ sub(offset, name);
   __ add(offset, Immediate(flags));
-  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);

   // Probe the secondary table.
   ProbeTable(