x87: Move IC code into a subdir and move ic-compilation related code from stub-cache into ic-compiler

port r23306.

original commit message:
   Move IC code into a subdir and move ic-compilation related code from stub-cache into ic-compiler.

BUG=
R=verwaest@chromium.org, weiliang.lin@intel.com

Review URL: https://codereview.chromium.org/500923002

Patch from Chunyang Dai <chunyang.dai@intel.com>.

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23339 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit d171b9d535
parent 6190ac0882
Author: weiliang.lin@intel.com
Date:   2014-08-25 09:00:04 +00:00
11 changed files with 303 additions and 385 deletions
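
For readers tracking the move, the mechanical effect on the x87 files is an include-path update; a minimal before/after sketch distilled from the hunks below (the exact include set varies per file):

    // Before: the IC and stub-cache headers sat at the top level of src/.
    #include "src/ic-inl.h"
    #include "src/stub-cache.h"

    // After: the headers live under src/ic/, and the ic-compilation
    // declarations come from ic-compiler.h rather than stub-cache.h.
    #include "src/ic/ic.h"
    #include "src/ic/ic-compiler.h"
    #include "src/ic/stub-cache.h"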

View File

@@ -6,114 +6,13 @@
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/ic-inl.h"
#include "src/stub-cache.h"
#include "src/ic/ic-compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset,
Register extra) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
if (extra.is_valid()) {
// Get the code entry from the cache.
__ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(extra);
__ bind(&miss);
} else {
// Save the offset on the stack.
__ push(offset);
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Restore offset register.
__ mov(offset, Operand(esp, 0));
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Restore offset and re-load code entry from cache.
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Jump to the first instruction in the code stub.
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(offset);
// Pop at miss.
__ bind(&miss);
__ pop(offset);
}
}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -147,89 +46,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_equal, miss_label);
Label done;
NameDictionaryLookupStub::GenerateNegativeLookup(masm,
miss_label,
&done,
properties,
name,
scratch1);
NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
Register scratch,
Register extra,
Register extra2,
Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
// being 12.
DCHECK(sizeof(Entry) == 12);
// Assert the flags do not name a specific type.
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Assert that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
DCHECK(!extra.is(receiver));
DCHECK(!extra.is(name));
DCHECK(!extra.is(scratch));
// Assert scratch and extra registers are valid, and extra2/3 are unused.
DCHECK(!scratch.is(no_reg));
DCHECK(extra2.is(no_reg));
DCHECK(extra3.is(no_reg));
Register offset = scratch;
scratch = no_reg;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
// ProbeTable expects the offset to be pointer scaled, which it is, because
// the heap object tag size is 2 and the pointer size log 2 is also 2.
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
__ sub(offset, name);
__ add(offset, Immediate(flags));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
ProbeTable(
isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Get the global function with the given index.
@@ -259,10 +82,8 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
}
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
@@ -281,12 +102,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
Handle<JSObject> holder_obj,
IC::UtilityId id) {
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
NamedLoadHandlerCompiler::kInterceptorArgsLength);
@@ -325,9 +142,8 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
receiver_map,
&holder_lookup);
Handle<JSObject> api_holder =
optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -376,8 +192,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
Handle<PropertyCell> cell =
JSGlobalObject::EnsurePropertyCell(global, name);
Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
if (masm->serializer_enabled()) {
@@ -491,12 +306,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
scratch2,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
DCHECK(value_reg.is(eax));
@@ -512,8 +323,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
// object and the number of in-object properties is not going to change.
index -= transition->inobject_properties();
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
SmiCheck smi_check =
representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
@@ -529,12 +340,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
offset,
storage_reg,
scratch1,
EMIT_REMEMBERED_SET,
smi_check);
__ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
@@ -552,12 +359,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition(
if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
offset,
storage_reg,
receiver_reg,
EMIT_REMEMBERED_SET,
smi_check);
__ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -599,8 +402,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
&& !scratch2.is(scratch1));
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
@@ -634,8 +437,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -660,9 +463,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(
masm(), Handle<JSGlobalObject>::cast(current), name,
scratch2, miss);
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
}
if (load_prototype_from_map) {
@@ -912,8 +714,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
__ push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
CALL_FUNCTION, NullCallWrapper());
__ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -1020,8 +822,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
CALL_FUNCTION, NullCallWrapper());
__ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -1180,7 +982,7 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement(
#undef __
} } // namespace v8::internal
}
} // namespace v8::internal
#endif // V8_TARGET_ARCH_X87
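
The ProbeTable code above multiplies the offset by three because each stub cache entry holds three pointer-size fields (name, code, map), which is also what the DCHECK(sizeof(Entry) == 12) in GenerateProbe asserts on this 32-bit target. A self-contained sketch of that layout; the class names are illustrative stand-ins, not V8's declarations:

    class Name;  // stand-ins for the V8 heap types
    class Code;
    class Map;

    struct Entry {   // mirrors the (name, code, map) triple probed above
      Name* key;     // compared against the probed property name
      Code* value;   // handler code jumped to on a hit
      Map* map;      // compared against the receiver's map
    };

    static_assert(sizeof(Entry) == 3 * sizeof(void*),
                  "three pointer-size fields per entry; 12 bytes on ia32/x87");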

View File

@@ -7,9 +7,8 @@
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/ic-inl.h"
#include "src/runtime.h"
#include "src/stub-cache.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
@@ -21,8 +20,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
Register type,
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
@@ -42,13 +40,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
// name is not internalized, and will jump to the miss_label in that
// case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register r0,
Register r1,
Register result) {
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
Register elements, Register name,
Register r0, Register r1, Register result) {
// Register use:
//
// elements - holds the property dictionary on entry and is unchanged.
@@ -66,13 +60,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
NameDictionaryLookupStub::GeneratePositiveLookup(masm,
miss_label,
&done,
elements,
name,
r0,
r1);
NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
elements, name, r0, r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -99,13 +88,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// call if name is not internalized, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
Label* miss_label,
Register elements,
Register name,
Register value,
Register r0,
Register r1) {
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
Register elements, Register name,
Register value, Register r0, Register r1) {
// Register use:
//
// elements - holds the property dictionary on entry and is clobbered.
@@ -121,13 +106,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Probe the dictionary.
NameDictionaryLookupStub::GeneratePositiveLookup(masm,
miss_label,
&done,
elements,
name,
r0,
r1);
NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
elements, name, r0, r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -139,7 +119,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
PropertyDetails::AttributesField::encode(READ_ONLY))
<< kSmiTagSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(kTypeAndReadOnlyMask));
__ j(not_zero, miss_label);
@@ -158,10 +139,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register map,
int interceptor_bit,
Label* slow) {
Register receiver, Register map,
int interceptor_bit, Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
@@ -190,12 +169,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register scratch,
Register result,
Label* not_fast_array,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
Register result, Label* not_fast_array,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged.
@@ -233,12 +209,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
Label* not_unique) {
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
@@ -266,13 +239,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
Register scratch1,
Register scratch2,
Label* unmapped_case,
Label* slow_case) {
static Operand GenerateMappedArgumentsLookup(
MacroAssembler* masm, Register object, Register key, Register scratch1,
Register scratch2, Label* unmapped_case, Label* slow_case) {
Heap* heap = masm->isolate()->heap();
Factory* factory = masm->isolate()->factory();
@@ -302,10 +271,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
__ mov(scratch2, FieldOperand(scratch1,
key,
times_half_pointer_size,
kHeaderSize));
__ mov(scratch2,
FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
__ cmp(scratch2, factory->the_hole_value());
__ j(equal, unmapped_case);
@@ -314,9 +281,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// map in scratch1).
const int kContextOffset = FixedArray::kHeaderSize;
__ mov(scratch1, FieldOperand(scratch1, kContextOffset));
return FieldOperand(scratch1,
scratch2,
times_half_pointer_size,
return FieldOperand(scratch1, scratch2, times_half_pointer_size,
Context::kHeaderSize);
}
@@ -336,9 +301,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, scratch);
__ j(greater_equal, slow_case);
return FieldOperand(backing_store,
key,
times_half_pointer_size,
return FieldOperand(backing_store, key, times_half_pointer_size,
FixedArray::kHeaderSize);
}
@@ -359,8 +322,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(
masm, receiver, eax, Map::kHasIndexedInterceptor, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
@@ -379,9 +342,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a number dictionary.
// ebx: untagged index
// eax: elements
__ CheckMap(eax,
isolate->factory()->hash_table_map(),
&slow,
__ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
DONT_DO_SMI_CHECK);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
@@ -404,8 +365,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, receiver, eax, Map::kHasNamedInterceptor, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
&slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -492,8 +453,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load property array property.
__ bind(&property_array_property);
__ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ mov(eax, FieldOperand(eax, edi, times_pointer_size,
FixedArray::kHeaderSize));
__ mov(eax,
FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -527,10 +488,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
Register result = eax;
DCHECK(!result.is(scratch));
StringCharAtGenerator char_at_generator(receiver,
index,
scratch,
result,
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
@@ -597,8 +555,7 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
Label slow, notin;
Factory* factory = masm->isolate()->factory();
Operand mapped_location =
GenerateMappedArgumentsLookup(
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, receiver, key, ebx, eax, &notin, &slow);
__ mov(eax, mapped_location);
__ Ret();
@@ -625,9 +582,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
DCHECK(name.is(ecx));
DCHECK(value.is(eax));
Operand mapped_location =
GenerateMappedArgumentsLookup(masm, receiver, name, ebx, edi, &notin,
&slow);
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, receiver, name, ebx, edi, &notin, &slow);
__ mov(mapped_location, value);
__ lea(ecx, mapped_location);
__ mov(edx, value);
@@ -648,12 +604,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
static void KeyedStoreGenerateGenericHelper(
MacroAssembler* masm,
Label* fast_object,
Label* fast_double,
Label* slow,
KeyedStoreCheckMap check_map,
KeyedStoreIncrementLength increment_length) {
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
@@ -713,8 +665,7 @@ static void KeyedStoreGenerateGenericHelper(
__ mov(FixedArrayElementOperand(ebx, key), value);
// Update write barrier for the elements array address.
__ mov(edx, value); // Preserve the value which is returned.
__ RecordWriteArray(
ebx, edx, key, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ RecordWriteArray(ebx, edx, key, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
__ bind(fast_double);
@@ -750,32 +701,24 @@ static void KeyedStoreGenerateGenericHelper(
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
__ CheckMap(value,
masm->isolate()->factory()->heap_number_map(),
&non_double_value,
DONT_DO_SMI_CHECK);
__ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
&non_double_value, DONT_DO_SMI_CHECK);
// Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
// and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
ebx,
edi,
slow);
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(
masm, receiver, key, value, ebx, mode, slow);
FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
slow);
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
edi, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, ebx, mode, slow);
@@ -787,14 +730,11 @@ static void KeyedStoreGenerateGenericHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
slow);
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
ebx, edi, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, ebx, mode, slow);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
value, ebx, mode, slow);
__ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -877,8 +817,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
&slow, kCheckMap, kDontIncrementLength);
KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
kCheckMap, kDontIncrementLength);
KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
&slow, kDontCheckMap, kIncrementLength);
}
@@ -894,8 +834,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, receiver, name, ebx, eax);
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, ebx,
eax);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -990,9 +930,7 @@ const Register StoreIC::NameRegister() { return ecx; }
const Register StoreIC::ValueRegister() { return eax; }
const Register KeyedStoreIC::MapRegister() {
return ebx;
}
const Register KeyedStoreIC::MapRegister() { return ebx; }
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
@@ -1008,9 +946,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Return address is on the stack.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, ReceiverRegister(), NameRegister(),
ebx, no_reg);
masm->isolate()->stub_cache()->GenerateProbe(masm, flags, ReceiverRegister(),
NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1186,8 +1123,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, test=%p, delta=%d\n",
address, test_instruction_address, delta);
PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
test_instruction_address, delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
@@ -1199,13 +1136,13 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
*jmp_address == Assembler::kJcShortOpcode)
: (*jmp_address == Assembler::kJnzShortOpcode ||
*jmp_address == Assembler::kJzShortOpcode));
Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
Condition cc =
(check == ENABLE_INLINED_SMI_CHECK)
? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
: (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
} } // namespace v8::internal
}
} // namespace v8::internal
#endif // V8_TARGET_ARCH_X87
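
The kTypeAndReadOnlyMask test in GenerateDictionaryStore above is the guard that keeps the fast dictionary store on normal, writable properties only. A rough C++ paraphrase of that masked test; the bit positions here are assumptions for the sketch, not V8's real PropertyDetails layout:

    #include <cstdint>

    // Assumed layout for illustration: low bit is the smi tag, then the
    // property-type bits, then the attribute bits including READ_ONLY.
    constexpr uint32_t kSmiTagSize = 1;
    constexpr uint32_t kTypeFieldMask = 0x7;        // assumption
    constexpr uint32_t kReadOnlyEncoded = 1u << 3;  // assumption

    // Mirrors: __ test(details, Immediate(kTypeAndReadOnlyMask));
    //          __ j(not_zero, miss_label);
    bool FastDictionaryStoreOk(uint32_t details_word) {
      uint32_t mask = (kTypeFieldMask | kReadOnlyEncoded) << kSmiTagSize;
      return (details_word & mask) == 0;  // any set bit jumps to miss_label
    }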

View File

@@ -0,0 +1,183 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table, Register name,
Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
if (extra.is_valid()) {
// Get the code entry from the cache.
__ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(extra);
__ bind(&miss);
} else {
// Save the offset on the stack.
__ push(offset);
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Restore offset register.
__ mov(offset, Operand(esp, 0));
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Restore offset and re-load code entry from cache.
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Jump to the first instruction in the code stub.
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(offset);
// Pop at miss.
__ bind(&miss);
__ pop(offset);
}
}
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
// being 12.
DCHECK(sizeof(Entry) == 12);
// Assert the flags do not name a specific type.
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Assert that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
DCHECK(!extra.is(receiver));
DCHECK(!extra.is(name));
DCHECK(!extra.is(scratch));
// Assert scratch and extra registers are valid, and extra2/3 are unused.
DCHECK(!scratch.is(no_reg));
DCHECK(extra2.is(no_reg));
DCHECK(extra3.is(no_reg));
Register offset = scratch;
scratch = no_reg;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
// ProbeTable expects the offset to be pointer scaled, which it is, because
// the heap object tag size is 2 and the pointer size log 2 is also 2.
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
__ sub(offset, name);
__ add(offset, Immediate(flags));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
ProbeTable(isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}
#undef __
}
} // namespace v8::internal
#endif // V8_TARGET_ARCH_X87
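
The new stub-cache-x87.cc above carries the probe sequence over unchanged; in plain C++ the hashing it emits looks roughly like the following sketch (the table sizes are illustrative stand-ins for the StubCache constants, and the arguments stand in for the register values):

    #include <cstdint>

    constexpr uint32_t kPrimaryTableSize = 2048;   // stand-in value
    constexpr uint32_t kSecondaryTableSize = 512;  // stand-in value
    constexpr uint32_t kCacheIndexShift = 2;       // == kPointerSizeLog2 on ia32/x87

    // Primary probe: name hash field plus map word, xor'ed with the code
    // flags, masked to the primary table (low bits are not part of the hash).
    uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_word,
                           uint32_t flags) {
      uint32_t offset = (name_hash_field + map_word) ^ flags;
      return offset & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    // Secondary probe on a primary miss: subtract the name pointer, add the
    // flags back in, then mask to the secondary table.
    uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_word,
                             uint32_t flags) {
      uint32_t offset = primary_offset - name_word + flags;
      return offset & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }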

View File

@@ -9,7 +9,6 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/stub-cache.h"
namespace v8 {
namespace internal {

View File

@@ -9,11 +9,11 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/ic-compiler.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime.h"
#include "src/stub-cache.h"
namespace v8 {
namespace internal {

View File

@@ -5,8 +5,7 @@
#ifndef V8_X87_CODE_STUBS_X87_H_
#define V8_X87_CODE_STUBS_X87_H_
#include "src/ic-inl.h"
#include "src/macro-assembler.h"
#include "src/code-stubs.h"
namespace v8 {
namespace internal {

View File

@@ -6,7 +6,7 @@
#define V8_X87_CODEGEN_X87_H_
#include "src/ast.h"
#include "src/ic-inl.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {

View File

@@ -14,7 +14,6 @@
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
#include "src/stub-cache.h"
namespace v8 {
namespace internal {

View File

@@ -10,8 +10,6 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ic.h"
#include "src/stub-cache.h"
#include "src/x87/lithium-codegen-x87.h"
namespace v8 {

View File

@@ -32,9 +32,9 @@
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;

View File

@@ -907,7 +907,6 @@
'../../src/x87/frames-x87.cc',
'../../src/x87/frames-x87.h',
'../../src/x87/full-codegen-x87.cc',
'../../src/x87/ic-x87.cc',
'../../src/x87/lithium-codegen-x87.cc',
'../../src/x87/lithium-codegen-x87.h',
'../../src/x87/lithium-gap-resolver-x87.cc',
@@ -918,7 +917,9 @@
'../../src/x87/macro-assembler-x87.h',
'../../src/x87/regexp-macro-assembler-x87.cc',
'../../src/x87/regexp-macro-assembler-x87.h',
'../../src/x87/stub-cache-x87.cc',
'../../src/ic/x87/ic-x87.cc',
'../../src/ic/x87/ic-compiler-x87.cc',
'../../src/ic/x87/stub-cache-x87.cc',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {