[ic] Delete old KeyedLoadIC code

RIP, handwritten KeyedLoadICStub, handwritten KeyedLoadIC_Megamorphic,
and hydrogenized KeyedLoadGeneric!

Review-Url: https://codereview.chromium.org/2424433002
Cr-Commit-Position: refs/heads/master@{#40354}
jkummerow 2016-10-17 03:31:03 -07:00 committed by Commit bot
parent ab5379074d
commit 3f6e0a4ef9
60 changed files with 8 additions and 4688 deletions

@@ -3051,14 +3051,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(r2);
CallICStub stub(isolate(), state());
@@ -3153,75 +3145,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
Register key = LoadWithVectorDescriptor::NameRegister(); // r2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ b(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
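
Since the ARM stub above is representative of all four platform versions deleted in this commit, it may help to restate the dispatch it hand-codes as ordinary C++. The sketch below uses invented types (FeedbackSlot, MapId, Handler), not V8's API: the feedback vector slot holds a weak cell (monomorphic), a fixed array of map/handler pairs (polymorphic), the megamorphic sentinel symbol (generic), or a name whose map/handler array lives in the next slot.

#include <cstdint>
#include <utility>
#include <vector>

using MapId = uint32_t;
using Handler = int;
constexpr Handler kMiss = -1;         // stands in for KeyedLoadIC::GenerateMiss
constexpr Handler kMegamorphic = -2;  // stands in for the megamorphic stub

struct FeedbackSlot {
  enum Kind { kWeakCell, kArray, kMegamorphicSymbol, kName } kind;
  MapId cell_map = 0;                            // kWeakCell: the cached map
  Handler cell_handler = 0;                      // kWeakCell: its handler
  std::vector<std::pair<MapId, Handler>> pairs;  // kArray/kName: (map, handler)
  uint32_t name = 0;                             // kName: the cached key
};

Handler Dispatch(const FeedbackSlot& slot, MapId receiver_map, uint32_t key) {
  switch (slot.kind) {
    case FeedbackSlot::kWeakCell:  // monomorphic: one map, one handler
      return slot.cell_map == receiver_map ? slot.cell_handler : kMiss;
    case FeedbackSlot::kName:      // named keyed load: compare the key first
      if (key != slot.name) return kMiss;
      // falls through to scan the pairs (stored in the next vector slot)
    case FeedbackSlot::kArray:     // polymorphic: scan the map/handler pairs
      for (const auto& p : slot.pairs)
        if (p.first == receiver_map) return p.second;
      return kMiss;
    case FeedbackSlot::kMegamorphicSymbol:
      return kMegamorphic;         // jump to the megamorphic stub
  }
  return kMiss;
}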

@@ -1883,85 +1883,6 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
bic(t0, t0, Operand(0xc0000000u));
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register t0,
Register t1,
Register t2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'elements' or 'key'.
// Unchanged on bailout so 'elements' and 'key' can be used
// in further computation.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
//
// t1 - used to hold the capacity mask of the dictionary
//
// t2 - used for the index into the dictionary.
Label done;
GetNumberHash(t0, t1);
// Compute the capacity mask.
ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
SmiUntag(t1);
sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(t2, t2, Operand(t1));
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
// Check if the key is identical to the name.
add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
cmp(key, Operand(ip));
if (i != kNumberDictionaryProbes - 1) {
b(eq, &done);
} else {
b(ne, miss);
}
}
bind(&done);
// Check that the value is a field property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ldr(t1, FieldMemOperand(t2, kDetailsOffset));
DCHECK_EQ(DATA, 0);
tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
b(ne, miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
ldr(result, FieldMemOperand(t2, kValueOffset));
}
void MacroAssembler::Allocate(int object_size,
Register result,
Register scratch1,
@@ -2418,20 +2339,6 @@ void MacroAssembler::CompareRoot(Register obj,
cmp(obj, ip);
}
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
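
The comments in the deleted LoadFromNumberDictionary fully determine the algorithm, so here is a minimal standalone restatement. The flat table layout and the probe count are assumptions for illustration (kNumberDictionaryProbes' actual value is not shown in this diff); the probe formula and the three-word entry size come straight from the code above.

#include <cstdint>

// Sketch of the probe sequence the deleted code unrolls:
// index_i = (hash + i + i*i) & mask, with three words (key, value, details)
// per entry, exactly as the comments above describe.
constexpr int kProbes = 6;     // assumed value of kNumberDictionaryProbes
constexpr int kEntrySize = 3;  // key, value, details

int FindEntry(const uint32_t* table, uint32_t capacity, uint32_t hash,
              uint32_t key) {
  const uint32_t mask = capacity - 1;  // capacity is a power of two
  for (int i = 0; i < kProbes; ++i) {
    const uint32_t index = (hash + i + i * i) & mask;
    if (table[index * kEntrySize] == key) return static_cast<int>(index);
  }
  return -1;  // all probes missed: the assembly jumps to the miss label here
}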

@@ -729,15 +729,6 @@ class MacroAssembler: public Assembler {
void GetNumberHash(Register t0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register t0,
Register t1,
Register t2);
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@@ -900,13 +891,6 @@
Register type_reg,
InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,

@@ -2974,14 +2974,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(x2);
CallICStub stub(isolate(), state());
@@ -3079,74 +3071,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(handler);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
Register key = LoadWithVectorDescriptor::NameRegister(); // x2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register receiver_map = x5;
Register scratch1 = x6;
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ Bind(&try_array);
// Is it a fixed array?
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ Bind(&polymorphic);
HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
__ Bind(&not_array);
// Is it generic?
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ Bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ Cmp(key, feedback);
__ B(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);
__ Bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ Bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());

@@ -3719,20 +3719,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
B(hi, fail);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
@@ -3944,69 +3930,6 @@ void MacroAssembler::GetNumberHash(Register key, Register scratch) {
Bic(key, key, Operand(0xc0000000u));
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register scratch0,
Register scratch1,
Register scratch2,
Register scratch3) {
DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
Label done;
SmiUntag(scratch0, key);
GetNumberHash(scratch0, scratch1);
// Compute the capacity mask.
Ldrsw(scratch1,
UntagSmiFieldMemOperand(elements,
SeededNumberDictionary::kCapacityOffset));
Sub(scratch1, scratch1, 1);
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
} else {
Mov(scratch2, scratch0);
}
And(scratch2, scratch2, scratch1);
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
Ldr(scratch3,
FieldMemOperand(scratch2,
SeededNumberDictionary::kElementsStartOffset));
Cmp(key, scratch3);
if (i != (kNumberDictionaryProbes - 1)) {
B(eq, &done);
} else {
B(ne, miss);
}
}
Bind(&done);
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
DCHECK_EQ(DATA, 0);
TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
Ldr(result, FieldMemOperand(scratch2, kValueOffset));
}
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
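
CheckFastElements, deleted from both macro-assemblers, leans on the STATIC_ASSERTed ordering FAST_SMI_ELEMENTS..FAST_HOLEY_ELEMENTS = 0..3 plus the elements kind occupying the topmost bits of Map::bit_field2, so a single unsigned byte compare covers all four fast kinds. A sketch with an assumed bit position:

#include <cstdint>

// The shift below is an assumption for illustration, not V8's real layout;
// what matters is that the kind sits above all other bit_field2 bits, so
// every byte value up to "kind 3 with all lower bits set" is a fast kind.
constexpr unsigned kElementsKindShift = 3;  // hypothetical bit position
constexpr uint8_t kMaximumBitField2FastHoleyElementValue =
    ((3 + 1) << kElementsKindShift) - 1;  // FAST_HOLEY_ELEMENTS, low bits set

bool HasFastElements(uint8_t bit_field2) {
  // Mirrors: cmp(scratch, kMaximumBitField2FastHoleyElementValue); b(hi, fail)
  return bit_field2 <= kMaximumBitField2FastHoleyElementValue;
}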

@@ -1576,10 +1576,6 @@ class MacroAssembler : public Assembler {
Label* if_any_set,
Label* fall_through);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map, Register scratch, Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -1622,27 +1618,6 @@ class MacroAssembler : public Assembler {
// It uses the same algorithm as ComputeIntegerHash in utils.h.
void GetNumberHash(Register key, Register scratch);
// Load value from the dictionary.
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'elements' or 'key'.
// Unchanged on bailout so 'elements' and 'key' can be used
// in further computation.
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register scratch0,
Register scratch1,
Register scratch2,
Register scratch3);
// ---------------------------------------------------------------------------
// Frames.

@@ -1215,19 +1215,6 @@ ExternalReference ExternalReference::log_leave_external_function(
Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
Isolate* isolate) {
return ExternalReference(
isolate->keyed_lookup_cache()->field_offsets_address());
}
ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_array_start());
}

@@ -951,10 +951,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
// Static variable Heap::roots_array_start()
static ExternalReference roots_array_start(Isolate* isolate);

@@ -10,10 +10,6 @@
namespace v8 {
namespace internal {
void Builtins::Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
KeyedLoadIC::GenerateMegamorphic(masm);
}
void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
CodeStubAssembler* assembler) {
typedef compiler::Node Node;

@@ -179,7 +179,6 @@ namespace internal {
TFS(Typeof, BUILTIN, kNoExtraICState, Typeof) \
\
/* Handlers */ \
ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState) \
TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
LoadWithVector) \
ASM(KeyedLoadIC_Miss) \

@@ -54,31 +54,19 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
 // static
 Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    KeyedLoadICTrampolineTFStub stub(isolate);
-    return make_callable(stub);
-  }
-  KeyedLoadICTrampolineStub stub(isolate);
+  KeyedLoadICTrampolineTFStub stub(isolate);
   return make_callable(stub);
 }
 // static
 Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    KeyedLoadICTFStub stub(isolate);
-    return make_callable(stub);
-  }
-  KeyedLoadICStub stub(isolate);
+  KeyedLoadICTFStub stub(isolate);
   return make_callable(stub);
 }
 // static
 Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
-  if (FLAG_tf_load_ic_stub) {
-    return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
-                    LoadWithVectorDescriptor(isolate));
-  }
-  return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic(),
+  return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
                   LoadWithVectorDescriptor(isolate));
 }

@@ -1204,253 +1204,5 @@ Handle<Code> RegExpConstructResultStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
class CodeStubGraphBuilder<KeyedLoadGenericStub>
: public CodeStubGraphBuilderBase {
public:
explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
: CodeStubGraphBuilderBase(info, stub) {}
typedef KeyedLoadGenericStub::Descriptor Descriptor;
protected:
virtual HValue* BuildCodeStub();
void BuildElementsKindLimitCheck(HGraphBuilder::IfBuilder* if_builder,
HValue* bit_field2,
ElementsKind kind);
void BuildFastElementLoad(HGraphBuilder::IfBuilder* if_builder,
HValue* receiver,
HValue* key,
HValue* instance_type,
HValue* bit_field2,
ElementsKind kind);
KeyedLoadGenericStub* casted_stub() {
return static_cast<KeyedLoadGenericStub*>(stub());
}
};
void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildElementsKindLimitCheck(
HGraphBuilder::IfBuilder* if_builder, HValue* bit_field2,
ElementsKind kind) {
ElementsKind next_kind = static_cast<ElementsKind>(kind + 1);
HValue* kind_limit = Add<HConstant>(
static_cast<int>(Map::ElementsKindBits::encode(next_kind)));
if_builder->If<HCompareNumericAndBranch>(bit_field2, kind_limit, Token::LT);
if_builder->Then();
}
void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad(
HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key,
HValue* instance_type, HValue* bit_field2, ElementsKind kind) {
BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
IfBuilder js_array_check(this);
js_array_check.If<HCompareNumericAndBranch>(
instance_type, Add<HConstant>(JS_ARRAY_TYPE), Token::EQ);
js_array_check.Then();
Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
true, kind,
LOAD, NEVER_RETURN_HOLE,
STANDARD_STORE));
js_array_check.Else();
Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
false, kind,
LOAD, NEVER_RETURN_HOLE,
STANDARD_STORE));
js_array_check.End();
}
HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
HValue* receiver = GetParameter(Descriptor::kReceiver);
HValue* key = GetParameter(Descriptor::kName);
// Split into a smi/integer case and unique string case.
HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
graph()->CreateBasicBlock());
BuildKeyedIndexCheck(key, &index_name_split_continuation);
IfBuilder index_name_split(this, &index_name_split_continuation);
index_name_split.Then();
{
// Key is an index (number)
key = Pop();
int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor);
BuildJSObjectCheck(receiver, bit_field_mask);
HValue* map =
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
HValue* bit_field2 =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
IfBuilder kind_if(this);
BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
FAST_HOLEY_ELEMENTS);
kind_if.Else();
{
BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
FAST_HOLEY_DOUBLE_ELEMENTS);
}
kind_if.Else();
// The DICTIONARY_ELEMENTS check generates a "kind_if.Then"
BuildElementsKindLimitCheck(&kind_if, bit_field2, DICTIONARY_ELEMENTS);
{
HValue* elements = AddLoadElements(receiver);
HValue* hash = BuildElementIndexHash(key);
Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash));
}
kind_if.Else();
// The SLOW_SLOPPY_ARGUMENTS_ELEMENTS check generates a "kind_if.Then"
STATIC_ASSERT(FAST_SLOPPY_ARGUMENTS_ELEMENTS <
SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
BuildElementsKindLimitCheck(&kind_if, bit_field2,
SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
// Non-strict elements are not handled.
Add<HDeoptimize>(DeoptimizeReason::kNonStrictElementsInKeyedLoadGenericStub,
Deoptimizer::EAGER);
Push(graph()->GetConstant0());
kind_if.ElseDeopt(
DeoptimizeReason::kElementsKindUnhandledInKeyedLoadGenericStub);
kind_if.End();
}
index_name_split.Else();
{
// Key is a unique string.
key = Pop();
int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor);
BuildJSObjectCheck(receiver, bit_field_mask);
HIfContinuation continuation;
BuildTestForDictionaryProperties(receiver, &continuation);
IfBuilder if_dict_properties(this, &continuation);
if_dict_properties.Then();
{
// Key is string, properties are dictionary mode
BuildNonGlobalObjectCheck(receiver);
HValue* properties = Add<HLoadNamedField>(
receiver, nullptr, HObjectAccess::ForPropertiesPointer());
HValue* hash =
Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForNameHashField());
hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
HValue* value =
BuildUncheckedDictionaryElementLoad(receiver, properties, key, hash);
Push(value);
}
if_dict_properties.Else();
{
// TODO(dcarney): don't use keyed lookup cache, but convert to use
// megamorphic stub cache.
UNREACHABLE();
// Key is string, properties are fast mode
HValue* hash = BuildKeyedLookupCacheHash(receiver, key);
ExternalReference cache_keys_ref =
ExternalReference::keyed_lookup_cache_keys(isolate());
HValue* cache_keys = Add<HConstant>(cache_keys_ref);
HValue* map =
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2));
base_index->ClearFlag(HValue::kCanOverflow);
HIfContinuation inline_or_runtime_continuation(
graph()->CreateBasicBlock(), graph()->CreateBasicBlock());
{
IfBuilder lookup_ifs[KeyedLookupCache::kEntriesPerBucket];
for (int probe = 0; probe < KeyedLookupCache::kEntriesPerBucket;
++probe) {
IfBuilder* lookup_if = &lookup_ifs[probe];
lookup_if->Initialize(this);
int probe_base = probe * KeyedLookupCache::kEntryLength;
HValue* map_index = AddUncasted<HAdd>(
base_index,
Add<HConstant>(probe_base + KeyedLookupCache::kMapIndex));
map_index->ClearFlag(HValue::kCanOverflow);
HValue* key_index = AddUncasted<HAdd>(
base_index,
Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
key_index->ClearFlag(HValue::kCanOverflow);
HValue* map_to_check =
Add<HLoadKeyed>(cache_keys, map_index, nullptr, nullptr,
FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
lookup_if->And();
HValue* key_to_check =
Add<HLoadKeyed>(cache_keys, key_index, nullptr, nullptr,
FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
lookup_if->Then();
{
ExternalReference cache_field_offsets_ref =
ExternalReference::keyed_lookup_cache_field_offsets(isolate());
HValue* cache_field_offsets =
Add<HConstant>(cache_field_offsets_ref);
HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
index->ClearFlag(HValue::kCanOverflow);
HValue* property_index =
Add<HLoadKeyed>(cache_field_offsets, index, nullptr, cache_keys,
INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
Push(property_index);
}
lookup_if->Else();
}
for (int i = 0; i < KeyedLookupCache::kEntriesPerBucket; ++i) {
lookup_ifs[i].JoinContinuation(&inline_or_runtime_continuation);
}
}
IfBuilder inline_or_runtime(this, &inline_or_runtime_continuation);
inline_or_runtime.Then();
{
// Found a cached index, load property inline.
Push(Add<HLoadFieldByIndex>(receiver, Pop()));
}
inline_or_runtime.Else();
{
// KeyedLookupCache miss; call runtime.
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2));
}
inline_or_runtime.End();
}
if_dict_properties.End();
}
index_name_split.End();
return Pop();
}
Handle<Code> KeyedLoadGenericStub::GenerateCode() {
return DoGenerateCode(this);
}
} // namespace internal
} // namespace v8
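
For the record, the hash that the now-unreachable fast-properties path fed into the KeyedLookupCache is easy to restate. The shift and mask constants below are placeholders (the real KeyedLookupCache::kMapHashShift, kCapacityMask, kHashMask, and String::kHashShift values are not shown in this diff); the structure mirrors BuildKeyedLookupCacheHash above.

#include <cstdint>

// Xor 32 bits of the (shifted) map pointer with the (shifted) name hash,
// then mask down to a cache bucket index.
constexpr uint32_t kMapHashShift = 5;       // assumed
constexpr uint32_t kStringHashShift = 2;    // assumed
constexpr uint32_t kCacheIndexMask = 0x7f;  // assumed capacity & hash mask

uint32_t KeyedLookupCacheHash(uint32_t map_word, uint32_t hash_field) {
  const uint32_t h =
      (map_word >> kMapHashShift) ^ (hash_field >> kStringHashShift);
  return h & kCacheIndexMask;  // bucket index; each bucket holds a few entries
}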

@@ -2099,14 +2099,6 @@ void LoadDictionaryElementStub::InitializeDescriptor(
FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
}
void KeyedLoadGenericStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
}
void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
if (kind() == Code::KEYED_LOAD_IC) {

@@ -44,7 +44,6 @@ class ObjectLiteral;
V(StoreElement) \
V(SubString) \
V(KeyedStoreIC) \
V(KeyedLoadIC) \
V(LoadGlobalIC) \
V(FastNewObject) \
V(FastNewRestParameter) \
@@ -59,7 +58,6 @@
/* version of the corresponding stub is */ \
/* used universally */ \
V(CallICTrampoline) \
V(KeyedLoadICTrampoline) \
V(KeyedStoreICTrampoline) \
/* --- HydrogenCodeStubs --- */ \
V(StringAdd) \
@@ -71,7 +69,6 @@
/* These will be ported/eliminated */ \
/* as part of the new IC system, ask */ \
/* ishell before doing anything */ \
V(KeyedLoadGeneric) \
V(LoadConstant) \
V(LoadDictionaryElement) \
V(LoadFastElement) \
@@ -1994,17 +1991,6 @@ class LoadDictionaryElementStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
};
class KeyedLoadGenericStub : public HydrogenCodeStub {
public:
explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
};
class LoadICTrampolineStub : public TurboFanCodeStub {
public:
explicit LoadICTrampolineStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -2037,17 +2023,6 @@ class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
DEFINE_CODE_STUB(LoadGlobalICTrampoline, TurboFanCodeStub);
};
class KeyedLoadICTrampolineStub : public PlatformCodeStub {
public:
explicit KeyedLoadICTrampolineStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, PlatformCodeStub);
};
class KeyedLoadICTrampolineTFStub : public LoadICTrampolineStub {
public:
explicit KeyedLoadICTrampolineTFStub(Isolate* isolate)
@@ -2157,21 +2132,6 @@ class LoadGlobalICStub : public TurboFanCodeStub {
DEFINE_CODE_STUB(LoadGlobalIC, TurboFanCodeStub);
};
class KeyedLoadICStub : public PlatformCodeStub {
public:
explicit KeyedLoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_PLATFORM_CODE_STUB(KeyedLoadIC, PlatformCodeStub);
protected:
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
class KeyedLoadICTFStub : public LoadICStub {
public:
explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICStub(isolate) {}

@@ -1647,190 +1647,6 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
return environment()->Pop();
}
void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
int bit_field_mask) {
// Check that the object isn't a smi.
Add<HCheckHeapObject>(receiver);
// Get the map of the receiver.
HValue* map =
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
// Check the instance type and whether an access check is needed; this can
// be done with a single load, since both bytes are adjacent in the map.
HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField());
HValue* instance_type_and_bit_field =
Add<HLoadNamedField>(map, nullptr, access);
HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8));
HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND,
instance_type_and_bit_field,
mask);
HValue* sub_result = AddUncasted<HSub>(and_result,
Add<HConstant>(JS_OBJECT_TYPE));
Add<HBoundsCheck>(sub_result,
Add<HConstant>(LAST_JS_OBJECT_TYPE + 1 - JS_OBJECT_TYPE));
}
void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
HIfContinuation* join_continuation) {
// The sometimes unintuitively backward ordering of the ifs below is
// convoluted, but necessary. All of the paths must guarantee that the
// if-true of the continuation returns a smi element index and the if-false of
// the continuation returns either a symbol or a unique string key. All other
// object types cause a deopt to fall back to the runtime.
IfBuilder key_smi_if(this);
key_smi_if.If<HIsSmiAndBranch>(key);
key_smi_if.Then();
{
Push(key); // Nothing to do, just continue to true of continuation.
}
key_smi_if.Else();
{
HValue* map = Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
// Non-unique string, check for a string with a hash code that is actually
// an index.
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
IfBuilder not_string_or_name_if(this);
not_string_or_name_if.If<HCompareNumericAndBranch>(
instance_type,
Add<HConstant>(LAST_UNIQUE_NAME_TYPE),
Token::GT);
not_string_or_name_if.Then();
{
// Non-smi, non-Name, non-String: Try to convert to smi in case of
// HeapNumber.
// TODO(danno): This could call some variant of ToString
Push(AddUncasted<HForceRepresentation>(key, Representation::Smi()));
}
not_string_or_name_if.Else();
{
// String or Name: check explicitly for Name, they can short-circuit
// directly to unique non-index key path.
IfBuilder not_symbol_if(this);
not_symbol_if.If<HCompareNumericAndBranch>(
instance_type,
Add<HConstant>(SYMBOL_TYPE),
Token::NE);
not_symbol_if.Then();
{
// String: check whether the String is a String of an index. If it is,
// extract the index value from the hash.
HValue* hash = Add<HLoadNamedField>(key, nullptr,
HObjectAccess::ForNameHashField());
HValue* not_index_mask = Add<HConstant>(static_cast<int>(
String::kContainsCachedArrayIndexMask));
HValue* not_index_test = AddUncasted<HBitwise>(
Token::BIT_AND, hash, not_index_mask);
IfBuilder string_index_if(this);
string_index_if.If<HCompareNumericAndBranch>(not_index_test,
graph()->GetConstant0(),
Token::EQ);
string_index_if.Then();
{
// String with index in hash: extract string and merge to index path.
Push(BuildDecodeField<String::ArrayIndexValueBits>(hash));
}
string_index_if.Else();
{
// Key is a non-index String, check for uniqueness/internalization.
// If it's not internalized yet, internalize it now.
HValue* not_internalized_bit = AddUncasted<HBitwise>(
Token::BIT_AND,
instance_type,
Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
IfBuilder internalized(this);
internalized.If<HCompareNumericAndBranch>(not_internalized_bit,
graph()->GetConstant0(),
Token::EQ);
internalized.Then();
Push(key);
internalized.Else();
Add<HPushArguments>(key);
HValue* intern_key = Add<HCallRuntime>(
Runtime::FunctionForId(Runtime::kInternalizeString), 1);
Push(intern_key);
internalized.End();
// Key guaranteed to be a unique string
}
string_index_if.JoinContinuation(join_continuation);
}
not_symbol_if.Else();
{
Push(key); // Key is symbol
}
not_symbol_if.JoinContinuation(join_continuation);
}
not_string_or_name_if.JoinContinuation(join_continuation);
}
key_smi_if.JoinContinuation(join_continuation);
}
void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
// Get the instance type of the receiver, and make sure that it is
// not one of the global object types.
HValue* map =
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
HValue* global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
IfBuilder if_global_object(this);
if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
Token::EQ);
if_global_object.ThenDeopt(DeoptimizeReason::kReceiverWasAGlobalObject);
if_global_object.End();
}
void HGraphBuilder::BuildTestForDictionaryProperties(
HValue* object,
HIfContinuation* continuation) {
HValue* properties = Add<HLoadNamedField>(
object, nullptr, HObjectAccess::ForPropertiesPointer());
HValue* properties_map =
Add<HLoadNamedField>(properties, nullptr, HObjectAccess::ForMap());
HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex);
IfBuilder builder(this);
builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map);
builder.CaptureContinuation(continuation);
}
HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object,
HValue* key) {
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
HValue* object_map =
Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMapAsInteger32());
HValue* shifted_map = AddUncasted<HShr>(
object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift));
HValue* string_hash =
Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForStringHashField());
HValue* shifted_hash = AddUncasted<HShr>(
string_hash, Add<HConstant>(String::kHashShift));
HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map,
shifted_hash);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
return AddUncasted<HBitwise>(Token::BIT_AND, xor_result,
Add<HConstant>(mask));
}
HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
HValue* seed = Add<HConstant>(seed_value);
@@ -1997,7 +1813,6 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
return Pop();
}
HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
HValue* done) {
NoObservableSideEffectsScope scope(this);
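
BuildJSObjectCheck, deleted above, folds two byte tests into one 16-bit load and one bounds check. In scalar form the trick is easier to see; the two type constants here are placeholders rather than V8's real instance-type values.

#include <cstdint>

// `word` stands for the single 16-bit load of the adjacent
// (instance type, bit field) bytes in the map.
constexpr uint32_t kJSObjectType = 180;      // assumed JS_OBJECT_TYPE
constexpr uint32_t kLastJSObjectType = 255;  // assumed LAST_JS_OBJECT_TYPE

bool PassesJSObjectCheck(uint16_t word, uint32_t bit_field_mask) {
  // Low byte: instance type. High byte: the map's bit field.
  const uint32_t masked = word & (0x00FF | (bit_field_mask << 8));
  // One unsigned range test ("bounds check") verifies both that the selected
  // bit-field bits are clear and that the type is within the JSObject range:
  // any set high-byte bit or out-of-range type falls outside the window.
  return masked - kJSObjectType <= kLastJSObjectType - kJSObjectType;
}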

@@ -1410,28 +1410,6 @@ class HGraphBuilder {
HValue* BuildToNumber(HValue* input);
HValue* BuildToObject(HValue* receiver);
void BuildJSObjectCheck(HValue* receiver,
int bit_field_mask);
// Checks a key value that's being used in a keyed element access context. If
// the key is an index, i.e. a smi or a number in a unique string with a cached
// numeric value, the "true" of the continuation is joined. Otherwise,
// if the key is a name or a unique string, the "false" of the continuation is
// joined. Otherwise, a deoptimization is triggered. In both paths of the
// continuation, the key is pushed on the top of the environment.
void BuildKeyedIndexCheck(HValue* key,
HIfContinuation* join_continuation);
// Checks whether the properties of an object are in dictionary mode, in which
// case the "true" of the continuation is taken, otherwise the "false".
void BuildTestForDictionaryProperties(HValue* object,
HIfContinuation* continuation);
void BuildNonGlobalObjectCheck(HValue* receiver);
HValue* BuildKeyedLookupCacheHash(HValue* object,
HValue* key);
HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
HValue* elements, HValue* key,
HValue* hash);

@@ -16,8 +16,6 @@
V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
V(ConversionOverflow, "conversion overflow") \
V(DivisionByZero, "division by zero") \
V(ElementsKindUnhandledInKeyedLoadGenericStub, \
"ElementsKind unhandled in KeyedLoadGenericStub") \
V(ExpectedHeapNumber, "Expected heap number") \
V(ExpectedSmi, "Expected smi") \
V(ForcedDeoptToRuntime, "Forced deopt to runtime") \
@@ -46,8 +44,6 @@
V(NegativeKeyEncountered, "Negative key encountered") \
V(NegativeValue, "negative value") \
V(NoCache, "no cache") \
V(NonStrictElementsInKeyedLoadGenericStub, \
"non-strict elements in KeyedLoadGenericStub") \
V(NotAHeapNumber, "not a heap number") \
V(NotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
V(NotAHeapNumberUndefined, "not a heap number/undefined") \

@@ -56,11 +56,6 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"Heap::NewSpaceAllocationTopAddress()");
Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
"mod_two_doubles");
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
"KeyedLookupCache::field_offsets()");
Add(ExternalReference::handle_scope_next_address(isolate).address(),
"HandleScope::next");
Add(ExternalReference::handle_scope_limit_address(isolate).address(),

@@ -92,30 +92,10 @@ inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
details.representation().IsDouble());
}
inline FieldIndex FieldIndex::ForKeyedLookupCacheIndex(Map* map, int index) {
if (FLAG_compiled_keyed_generic_loads) {
return ForLoadByFieldIndex(map, index);
} else {
return ForPropertyIndex(map, index);
}
}
inline FieldIndex FieldIndex::FromFieldAccessStubKey(int key) {
return FieldIndex(key);
}
inline int FieldIndex::GetKeyedLookupCacheIndex() const {
if (FLAG_compiled_keyed_generic_loads) {
return GetLoadByFieldIndex();
} else {
return property_index();
}
}
} // namespace internal
} // namespace v8

@@ -27,7 +27,6 @@ class FieldIndex final {
static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
static FieldIndex ForDescriptor(Map* map, int descriptor_index);
static FieldIndex ForLoadByFieldIndex(Map* map, int index);
static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index);
static FieldIndex FromFieldAccessStubKey(int key);
int GetLoadByFieldIndex() const;
@@ -67,8 +66,6 @@
return result;
}
int GetKeyedLookupCacheIndex() const;
int GetFieldAccessStubKey() const {
return bit_field_ &
(IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask);

@@ -255,8 +255,6 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
#undef FLAG_SHIPPING_FEATURES
// Flags for experimental implementation features.
DEFINE_BOOL(compiled_keyed_generic_loads, false,
"use optimizing compiler to generate keyed generic load stubs")
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")

@@ -1464,9 +1464,6 @@ void Heap::MarkCompactEpilogue() {
void Heap::MarkCompactPrologue() {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
isolate_->keyed_lookup_cache()->Clear();
isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
@@ -2887,9 +2884,6 @@
set_noscript_shared_function_infos(Smi::kZero);
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();

@@ -3016,183 +3016,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
static void HandleArrayCases(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register feedback, bool is_polymorphic,
Label* miss) {
// feedback initially contains the feedback array
Label next, next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
__ push(receiver);
__ push(vector);
Register receiver_map = receiver;
Register cached_map = vector;
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
__ mov(receiver_map, FieldOperand(receiver, 0));
__ bind(&compare_map);
__ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
// A named keyed load might have a 2-element array; all other cases can count
// on an array with at least 2 {map, handler} pairs, so they can go right
// into polymorphic array handling.
__ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
__ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
// found, now call handler.
Register handler = feedback;
__ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ pop(vector);
__ pop(receiver);
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
__ jmp(handler);
if (!is_polymorphic) {
__ bind(&next);
__ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(2)));
__ j(not_equal, &start_polymorphic);
__ pop(vector);
__ pop(receiver);
__ jmp(miss);
}
// Polymorphic, we have to loop from 2 to N
__ bind(&start_polymorphic);
__ push(key);
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(2)));
__ bind(&next_loop);
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize));
__ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
__ j(not_equal, &prepare_next);
__ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ pop(key);
__ pop(vector);
__ pop(receiver);
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
__ jmp(handler);
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(2)));
__ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
__ j(less, &next_loop);
// We exhausted our array of map handler pairs.
__ pop(key);
__ pop(vector);
__ pop(receiver);
__ jmp(miss);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register weak_cell, Label* miss) {
// feedback initially contains the feedback array
Label compare_smi_map;
// Move the weak map into the weak_cell register.
Register ic_map = weak_cell;
__ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &compare_smi_map);
__ cmp(ic_map, FieldOperand(receiver, 0));
__ j(not_equal, miss);
Register handler = weak_cell;
__ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
__ jmp(handler);
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
__ bind(&compare_smi_map);
__ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, miss);
__ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
__ jmp(handler);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
Register key = LoadWithVectorDescriptor::NameRegister(); // ecx
Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
Register feedback = edi;
__ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
__ j(not_equal, &try_array);
HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
__ bind(&try_array);
// Is it a fixed array?
__ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ j(not_equal, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
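
Stripped of the pushes and pops that free up ia32's scarce registers, HandleArrayCases is a short loop. A rough model with toy types (compare the dispatch sketch after the ARM stub near the top of this diff); the length-2 check exists because a named keyed load's array may hold just one map/handler pair, while truly polymorphic feedback always has at least two.

#include <cstdint>
#include <vector>

// The feedback array is laid out as map0, handler0, map1, handler1, ...;
// assumes at least one pair is present. Returns the handler or -1 for miss.
int FindArrayHandler(const std::vector<uint32_t>& feedback,
                     uint32_t receiver_map, bool is_polymorphic) {
  if (feedback[0] == receiver_map) return static_cast<int>(feedback[1]);
  if (!is_polymorphic && feedback.size() == 2) return -1;  // miss
  // start_polymorphic: scan the remaining pairs, starting at array slot 2.
  for (size_t i = 2; i + 1 < feedback.size(); i += 2) {
    if (feedback[i] == receiver_map) return static_cast<int>(feedback[i + 1]);
  }
  return -1;  // exhausted the map/handler pairs: miss
}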

@@ -810,20 +810,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
@@ -1413,82 +1399,6 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
and_(r0, 0x3fffffff);
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
// key - holds the smi key on entry and is unchanged.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
//
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
//
// result - holds the result on exit if the load succeeds and we fall through.
Label done;
GetNumberHash(r0, r1);
// Compute capacity mask.
mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
shr(r1, kSmiTagSize); // convert smi to int
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(r2, r1);
// Scale the index by multiplying by the entry size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
}
}
bind(&done);
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
DCHECK_EQ(DATA, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
j(not_zero, miss);
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
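
GetNumberHash, whose tail is visible above, inlines the integer hash that the arm64 header earlier in this diff attributes to ComputeIntegerHash in utils.h. The shift-and-xor mix below is reproduced from memory and should be treated as illustrative rather than authoritative; the final 30-bit mask is the part directly visible in the deleted assembly (and_(r0, 0x3fffffff) here, bic(t0, t0, 0xc0000000u) on ARM).

#include <cstdint>

// Illustrative ComputeIntegerHash-style mix; verify the constants against
// utils.h before relying on them.
uint32_t ComputeSeededHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // keep 30 bits, as the deleted masks do
}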

@@ -391,11 +391,6 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Label* fail,
@@ -612,10 +607,6 @@
void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
Register r0, Register r1, Register r2,
Register result);
// ---------------------------------------------------------------------------
// Allocation support

@@ -19,18 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, global_object);
__ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, global_object);
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -126,138 +114,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
Register scratch,
int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ b(ne, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects work as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ cmp(scratch, Operand(JS_OBJECT_TYPE));
__ b(lt, slow);
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
// elements - holds the elements of the receiver and its prototypes.
//
// scratch1 - used to hold elements length, bit fields, base addresses.
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch1));
__ b(lo, &in_bounds);
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ cmp(key, Operand(0));
__ b(lt, slow); // Negative keys can't take the fast OOB path.
__ bind(&check_prototypes);
__ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ CompareRoot(scratch2, Heap::kNullValueRootIndex);
__ b(eq, &absent);
__ ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch2: map of current prototype
__ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
__ b(lo, slow);
__ ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
__ tst(scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ b(ne, slow);
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(ne, slow);
__ jmp(&check_next_prototype);
__ bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
__ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to check the prototype chain.
__ b(eq, &check_prototypes);
__ mov(result, scratch2);
__ bind(&done);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
__ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
__ b(hi, not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ b(eq, &unique);
// Is the string an array index, with cached numeric value?
__ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
__ b(eq, index_string);
// Is the string internalized? We know it's a string, so a single
// bit test is enough.
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0);
__ tst(hash, Operand(kIsNotInternalizedMask));
__ b(ne, not_unique);
__ bind(&unique);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -340,106 +196,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = LoadDescriptor::NameRegister();
Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(r2));
DCHECK(receiver.is(r1));
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r0, r3, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r4,
r3);
__ Ret();
__ bind(&check_number_dictionary);
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
// Check whether the elements is a number dictionary.
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
__ SmiUntag(r0, key);
__ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
__ Ret();
// Slow case, key and receiver still in r2 and r1.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r4,
r3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r4, ip);
__ b(eq, &probe_dictionary);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r4, r5,
r6, r9);
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// r3: elements
__ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
r4, r3);
__ Ret();
__ bind(&index_name);
__ IndexFromHash(r3, key);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
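Taken together, the stub implements a three-tier dispatch. A hedged high-level restatement in plain C++ (not the real API):

#include <string>

enum class KeyKind { kSmi, kUniqueName, kIndexString, kOther };

// Smi keys try fast elements, then the number dictionary; unique names try
// the stub cache, then the property dictionary; an index-carrying string is
// converted to a smi and retried; everything else goes to the runtime.
std::string KeyedLoadPlan(KeyKind kind) {
  switch (kind) {
    case KeyKind::kSmi:
      return "fast elements -> number dictionary -> runtime";
    case KeyKind::kUniqueName:
      return "stub cache -> property dictionary -> runtime";
    case KeyKind::kIndexString:
      return KeyedLoadPlan(KeyKind::kSmi);  // IndexFromHash, then retry
    default:
      return "runtime";
  }
}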
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
View File
@ -15,18 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
// "type" holds an instance type on entry and is not clobbered.
// Generated code branches to "global_object" if type is any kind of global
// JS object.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
__ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
__ B(eq, global_object);
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@ -116,144 +104,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register map_scratch,
Register scratch,
int interceptor_bit, Label* slow) {
DCHECK(!AreAliased(map_scratch, scratch));
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
__ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
__ Tbnz(scratch, interceptor_bit, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object, we enter the
// runtime system to make sure that indexing into string objects works
// as intended.
STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
__ Cmp(scratch, JS_OBJECT_TYPE);
__ B(lt, slow);
}
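The instance-type comparison above is why indexing into new String("x") never takes this stub: JSValue wrappers sort below JS_OBJECT_TYPE. A sketch with invented type numbers:

#include <cstdint>

// Invented ordering for illustration; only the relation mirrors the
// STATIC_ASSERT above (JS_OBJECT_TYPE > JS_VALUE_TYPE).
constexpr int kSketchJSValueType = 10;
constexpr int kSketchJSObjectType = 11;

// The fast path admits only plain-enough JS objects: no access-check or
// interceptor bits set, and an instance type at or above JS_OBJECT_TYPE.
// Value wrappers therefore fall to the runtime, which keeps string
// indexing semantics intact.
bool ReceiverTakesFastPath(int instance_type, uint8_t bit_field,
                           uint8_t disallowed_bits) {
  if ((bit_field & disallowed_bits) != 0) return false;
  return instance_type >= kSketchJSObjectType;
}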
// Loads an indexed element from a fast case array.
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// elements - holds the elements of the receiver and its prototypes. Clobbered.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
Register result, Label* slow) {
DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
// Check for fast array.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Cmp(key, scratch1);
__ B(lo, &in_bounds);
// Out of bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ Cmp(key, Operand(Smi::kZero));
__ B(lt, slow); // Negative keys can't take the fast OOB path.
__ Bind(&check_prototypes);
__ Ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Bind(&check_next_prototype);
__ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &absent);
__ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch2: map of current prototype
__ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
__ B(lo, slow);
__ Ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
__ Tbnz(scratch1, Map::kIsAccessCheckNeeded, slow);
__ Tbnz(scratch1, Map::kHasIndexedInterceptor, slow);
__ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow);
__ B(&check_next_prototype);
__ Bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ B(&done);
__ Bind(&in_bounds);
// Fast case: Do the load.
__ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ SmiUntag(scratch2, key);
__ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
// In case the loaded value is the_hole we have to check the prototype chain.
__ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, &check_prototypes);
// Move the value to the result register.
// 'result' can alias with 'receiver' or 'key' but these two must be
// preserved if we jump to 'slow'.
__ Mov(result, scratch2);
__ Bind(&done);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map_scratch, Register hash_scratch,
Label* index_string, Label* not_unique) {
DCHECK(!AreAliased(key, map_scratch, hash_scratch));
// Is the key a name?
Label unique;
__ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
not_unique, hi);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ B(eq, &unique);
// Is the string an array index with cached numeric value?
__ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
__ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
index_string);
// Is the string internalized? We know it's a string, so a single bit test is
// enough.
__ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0);
__ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
__ Bind(&unique);
// Fall through if the key is a unique name.
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = x0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@ -323,127 +173,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
Register receiver, Register scratch1,
Register scratch2, Register scratch3,
Register scratch4, Register scratch5,
Label* slow) {
DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
scratch5));
Isolate* isolate = masm->isolate();
Label check_number_dictionary;
// If we can load the value, it should be returned in x0.
Register result = x0;
GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
Map::kHasIndexedInterceptor, slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
result, slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1,
scratch1, scratch2);
__ Ret();
__ Bind(&check_number_dictionary);
__ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
// Check whether we have a number dictionary.
__ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
__ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
scratch4, scratch5);
__ Ret();
}
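LoadFromNumberDictionary performs a seeded hash probe over the backing table. The sketch below uses plain linear probing with an xor "seed" purely for shape; V8's seeded number dictionary uses a different hash and probe sequence, so treat this as illustrative only:

#include <cstdint>
#include <optional>
#include <vector>

struct Entry {
  bool used = false;
  uint32_t key = 0;
  int value = 0;
};

// Table size is assumed to be a power of two so '& mask' wraps the probe.
std::optional<int> NumberDictLookup(const std::vector<Entry>& table,
                                    uint32_t key, uint32_t seed) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  const uint32_t hash = key ^ seed;  // stand-in for the seeded integer hash
  for (uint32_t i = 0; i <= mask; ++i) {
    const Entry& e = table[(hash + i) & mask];
    if (!e.used) return std::nullopt;  // free slot: miss, go to slow path
    if (e.key == key) return e.value;  // hit: result would land in x0
  }
  return std::nullopt;
}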
static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
Register receiver, Register scratch1,
Register scratch2, Register scratch3,
Register scratch4, Register scratch5,
Label* slow) {
DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
scratch5));
Isolate* isolate = masm->isolate();
Label probe_dictionary, property_array_property;
// If we can load the value, it should be returned in x0.
Register result = x0;
GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
Map::kHasNamedInterceptor, slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
masm->isolate()->load_stub_cache()->GenerateProbe(
masm, receiver, key, scratch1, scratch2, scratch3, scratch4);
// Cache miss.
KeyedLoadIC::GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it exists.
__ Bind(&probe_dictionary);
__ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
// Load the property.
GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
scratch1, scratch2);
__ Ret();
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name;
Register key = LoadDescriptor::NameRegister();
Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(x2));
DCHECK(receiver.is(x1));
__ JumpIfNotSmi(key, &check_name);
__ Bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
// Slow case.
__ Bind(&slow);
__ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_generic_slow(),
1, x4, x3);
GenerateRuntimeGetProperty(masm);
__ Bind(&check_name);
GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
GenerateKeyedLoadWithNameKey(masm, key, receiver, x4, x5, x6, x7, x3, &slow);
__ Bind(&index_name);
__ IndexFromHash(x3, key);
// Now jump to the place where smi keys are handled.
__ B(&index_smi);
}
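The dummy vector trick above works because, as the comment notes, this stub never updates feedback on downstream misses, so one shared vector/slot pair satisfies the handler calling convention. A sketch of that idea (names invented):

// Handlers expect a (vector, slot) pair but this path never writes through
// it, so a single immutable sentinel pair can be handed to every probe.
struct FeedbackSlotRef {
  const void* vector;
  int slot;
};

FeedbackSlotRef DummyKeyedLoadFeedback() {
  static const int kDummyVectorSentinel = 0;  // stands in for the root value
  return FeedbackSlotRef{&kDummyVectorSentinel, /*slot=*/0};
}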
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
View File
@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
__ j(equal, global_object);
}
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
@ -132,238 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
int interceptor_bit, Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ test_b(
FieldOperand(map, Map::kBitFieldOffset),
Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ j(not_zero, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type. In
// the case that the object is a value-wrapper object, we enter the runtime
// system to make sure that indexing into string objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpInstanceType(map, JS_OBJECT_TYPE);
__ j(below, slow);
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
Register scratch2, Register result,
Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
// Scratch registers:
// scratch - used to hold elements of the receiver and the loaded value.
// scratch2 - holds maps and prototypes during prototype chain check.
// result - holds the result on exit if the load succeeds and
// we fall through.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(scratch);
// Check that the key (index) is within bounds.
__ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
__ j(below, &in_bounds);
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ cmp(key, 0);
__ j(less, slow); // Negative keys can't take the fast OOB path.
__ bind(&check_prototypes);
__ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ cmp(scratch2, masm->isolate()->factory()->null_value());
__ j(equal, &absent);
__ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
__ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
// scratch: elements of current prototype
// scratch2: map of current prototype
__ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
__ j(below, slow);
__ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
Immediate((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ j(not_zero, slow);
__ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
__ j(not_equal, slow);
__ jmp(&check_next_prototype);
__ bind(&absent);
__ mov(result, masm->isolate()->factory()->undefined_value());
__ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
// In case the loaded value is the_hole we have to check the prototype chain.
__ j(equal, &check_prototypes);
__ Move(result, scratch);
__ bind(&done);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
Label unique;
__ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
__ j(above, not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ j(equal, &unique);
// Is the string an array index, with cached numeric value?
__ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
__ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
__ j(zero, index_string);
// Is the string internalized? We already know it's a string so a single
// bit test is enough.
STATIC_ASSERT(kNotInternalizedTag != 0);
__ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
Immediate(kIsNotInternalizedMask));
__ j(not_zero, not_unique);
__ bind(&unique);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
__ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
__ mov(ebx, key);
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
// Check whether the elements object is a number dictionary.
// ebx: untagged index
// eax: elements
__ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
DONT_DO_SMI_CHECK);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(receiver);
__ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
// Pop receiver before returning.
__ pop(receiver);
__ ret(0);
__ bind(&slow_pop_receiver);
// Pop the receiver from the stack and jump to runtime.
__ pop(receiver);
__ bind(&slow);
// Slow case: jump to runtime.
__ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
&slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(isolate);
int slot = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
edi);
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
__ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
__ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
__ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_name);
__ IndexFromHash(ebx, key);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
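Unlike the ARM ports, the ia32 version has no spare registers for the vector/slot pair, so it pushes them as immediates and pops them back into the descriptor registers after the probe (and likewise spills the receiver around the number-dictionary probe). A sketch of that spill/reload discipline:

#include <stack>

// Mimics the push(Immediate(...)) / pop(register) pairing above: values
// that cannot stay in registers ride on the stack across the call.
template <typename T>
class SpillSlot {
 public:
  void Spill(T v) { stack_.push(v); }
  T Reload() {
    T v = stack_.top();
    stack_.pop();
    return v;
  }

 private:
  std::stack<T> stack_;
};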
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
View File
@ -522,19 +522,6 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
}
// static
Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state) {
// TODO(ishell): remove extra_ic_state
if (FLAG_compiled_keyed_generic_loads) {
return KeyedLoadGenericStub(isolate).GetCode();
} else {
return isolate->builtins()->KeyedLoadIC_Megamorphic();
}
}
static bool MigrateDeprecated(Handle<Object> object) {
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
View File
@ -355,10 +355,6 @@ class KeyedLoadIC : public LoadIC {
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state);
static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
View File
@ -19,16 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@ -129,141 +119,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
Register scratch,
int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
// elements - holds the elements of the receiver and its prototypes.
//
// scratch1 - used to hold elements length, bit fields, base addresses.
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&in_bounds, lo, key, Operand(scratch1));
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
// Negative keys can't take the fast OOB path.
__ Branch(slow, lt, key, Operand(zero_reg));
__ bind(&check_prototypes);
__ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(&absent, eq, scratch2, Operand(at));
__ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch2: map of current prototype
__ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
__ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
__ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
__ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ Branch(slow, ne, at, Operand(zero_reg));
__ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
__ Branch(slow, ne, elements, Operand(at));
__ Branch(&check_next_prototype);
__ bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ Branch(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
__ Addu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
__ lw(scratch2, MemOperand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to check the prototype chain.
__ Branch(&check_prototypes, eq, scratch2, Operand(at));
__ Move(result, scratch2);
__ bind(&done);
}
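The Lsa above folds smi untagging into the address computation: a 32-bit smi stores value << 1, so shifting the tagged word left by (kPointerSizeLog2 - kSmiTagSize) yields the element byte offset directly. A self-checking sketch for a 32-bit build:

#include <cstdint>

constexpr uint32_t kSketchSmiTagSize = 1;       // 32-bit smi: value << 1
constexpr uint32_t kSketchPointerSizeLog2 = 2;  // 4-byte pointers

uint32_t ElementByteOffset(uint32_t tagged_smi_key) {
  return tagged_smi_key << (kSketchPointerSizeLog2 - kSketchSmiTagSize);
}

// Example: smi 5 is stored as 10; 10 << 1 == 20 == 5 * 4 bytes.
static_assert((10u << 1) == 5u * 4u, "smi-scaled offset");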
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
__ GetObjectType(key, map, hash);
__ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
// Is the string an array index, with cached numeric value?
__ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
__ Branch(index_string, eq, at, Operand(zero_reg));
// Is the string internalized? We know it's a string, so a single
// bit test is enough.
// map: key map
__ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0);
__ And(at, hash, Operand(kIsNotInternalizedMask));
__ Branch(not_unique, ne, at, Operand(zero_reg));
__ bind(&unique);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@ -345,105 +200,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = LoadDescriptor::NameRegister();
Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(a2));
DCHECK(receiver.is(a1));
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, t0,
a3);
__ Ret();
__ bind(&check_number_dictionary);
__ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
// Check whether the elements object is a number dictionary.
// a3: elements map
// t0: elements
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&slow, ne, a3, Operand(at));
__ sra(a0, key, kSmiTagSize);
__ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
__ Ret();
// Slow case, key and receiver still in a2 and a1.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, t0,
a3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&probe_dictionary, eq, t0, Operand(at));
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, t0, t1,
t2, t5);
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// a3: elements
__ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
// Load the property to v0.
GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
t0, a3);
__ Ret();
__ bind(&index_name);
__ IndexFromHash(a3, key);
// Now jump to the place where smi keys are handled.
__ Branch(&index_smi);
}
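The index_name exit above relies on strings that cache their numeric value in the hash field; IndexFromHash extracts it and re-enters the smi path. A sketch with an invented layout (the real shift and width come from Name's hash-field constants):

#include <cstdint>

constexpr uint32_t kSketchIndexShift = 2;
constexpr uint32_t kSketchIndexBits = 24;

// Pulls the cached numeric index out of the hash field so the key can be
// retagged as a smi and retried on the fast path.
uint32_t ArrayIndexFromHashField(uint32_t hash_field) {
  return (hash_field >> kSketchIndexShift) & ((1u << kSketchIndexBits) - 1);
}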
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
View File
@ -19,16 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@ -128,142 +118,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
Register scratch,
int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
// elements - holds the elements of the receiver and its prototypes.
//
// scratch1 - used to hold elements length, bit fields, base addresses.
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&in_bounds, lo, key, Operand(scratch1));
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
// Negative keys can't take the fast OOB path.
__ Branch(slow, lt, key, Operand(zero_reg));
__ bind(&check_prototypes);
__ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(&absent, eq, scratch2, Operand(at));
__ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch2: map of current prototype
__ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
__ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
__ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
__ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ Branch(slow, ne, at, Operand(zero_reg));
__ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
__ Branch(slow, ne, elements, Operand(at));
__ Branch(&check_next_prototype);
__ bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ Branch(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
__ Daddu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(at, at, scratch1);
__ ld(scratch2, MemOperand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to check the prototype chain.
__ Branch(&check_prototypes, eq, scratch2, Operand(at));
__ Move(result, scratch2);
__ bind(&done);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
__ GetObjectType(key, map, hash);
__ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
// Is the string an array index, with cached numeric value?
__ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
__ Branch(index_string, eq, at, Operand(zero_reg));
// Is the string internalized? We know it's a string, so a single
// bit test is enough.
// map: key map
__ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0);
__ And(at, hash, Operand(kIsNotInternalizedMask));
__ Branch(not_unique, ne, at, Operand(zero_reg));
__ bind(&unique);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@ -344,105 +198,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = LoadDescriptor::NameRegister();
Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(a2));
DCHECK(receiver.is(a1));
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, a4,
a3);
__ Ret();
__ bind(&check_number_dictionary);
__ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));
// Check whether the elements object is a number dictionary.
// a3: elements map
// a4: elements
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&slow, ne, a3, Operand(at));
__ dsra32(a0, key, 0);
__ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
__ Ret();
// Slow case, key and receiver still in a2 and a1.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, a4,
a3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&probe_dictionary, eq, a4, Operand(at));
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, a4, a5,
a6, t1);
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// a3: elements
__ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
// Load the property to v0.
GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
a4, a3);
__ Ret();
__ bind(&index_name);
__ IndexFromHash(a3, key);
// Now jump to the place where smi keys are handled.
__ Branch(&index_smi);
}
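The dsra32 above is the 64-bit counterpart of the smi shift tricks: on mips64 the 32-bit smi payload lives in the upper half of the word, so one arithmetic shift right by 32 untags the key. A self-checking sketch:

#include <cstdint>

// With upper-half smis, untagging is a single arithmetic shift by 32.
int32_t UntagSmi64(int64_t tagged) {
  return static_cast<int32_t>(tagged >> 32);
}

// Example: the smi for 7 is 7 << 32; shifting back recovers 7.
static_assert(static_cast<int32_t>((int64_t{7} << 32) >> 32) == 7,
              "dsra32-style untag");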
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
View File
@ -19,18 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ beq(global_object);
__ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ beq(global_object);
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@ -131,143 +119,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
Register scratch,
int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
__ andi(r0, scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ bne(slow, cr0);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ cmpi(scratch, Operand(JS_OBJECT_TYPE));
__ blt(slow);
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
// elements - holds the elements of the receiver and its prototypes.
//
// scratch1 - used to hold elements length, bit fields, base addresses.
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmpl(key, scratch1);
__ blt(&in_bounds);
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ cmpi(key, Operand::Zero());
__ blt(slow); // Negative keys can't take the fast OOB path.
__ bind(&check_prototypes);
__ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ CompareRoot(scratch2, Heap::kNullValueRootIndex);
__ beq(&absent);
__ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch2: map of current prototype
__ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
__ blt(slow);
__ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
__ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ bne(slow, cr0);
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ bne(slow);
__ jmp(&check_next_prototype);
__ bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
__ addi(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
__ SmiToPtrArrayOffset(scratch2, key);
__ LoadPX(scratch2, MemOperand(scratch2, scratch1));
__ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to check the prototype chain.
__ beq(&check_prototypes);
__ mr(result, scratch2);
__ bind(&done);
}
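The hole comparison just above exists because fast (holey) elements mark absent entries with the dedicated the_hole sentinel rather than undefined: an absent entry must still consult the prototype chain. A sketch with std::optional standing in for the sentinel:

#include <cstddef>
#include <optional>
#include <vector>

using Element = std::optional<int>;  // nullopt plays the role of the_hole

// A hole means "absent here, but maybe present up the chain", so the load
// falls back to the prototype walk instead of returning undefined.
int LoadElement(const std::vector<Element>& elements, size_t index,
                int (*load_from_prototypes)(size_t)) {
  const Element& e = elements.at(index);
  if (!e.has_value()) return load_from_prototypes(index);
  return *e;
}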
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
__ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
__ bgt(not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ beq(&unique);
// Is the string an array index, with cached numeric value?
__ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ mov(r8, Operand(Name::kContainsCachedArrayIndexMask));
__ and_(r0, hash, r8, SetRC);
__ beq(index_string, cr0);
// Is the string internalized? We know it's a string, so a single
// bit test is enough.
// map: key map
__ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0);
__ andi(r0, hash, Operand(kIsNotInternalizedMask));
__ bne(not_unique, cr0);
__ bind(&unique);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r3;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@ -349,107 +200,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = LoadDescriptor::NameRegister();
Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(r5));
DCHECK(receiver.is(r4));
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r3, r6, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r7,
r6);
__ Ret();
__ bind(&check_number_dictionary);
__ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset));
// Check whether the elements object is a number dictionary.
// r6: elements map
// r7: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r6, ip);
__ bne(&slow);
__ SmiUntag(r3, key);
__ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8);
__ Ret();
// Slow case, key and receiver still in r3 and r4.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r7,
r6);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r7, ip);
__ beq(&probe_dictionary);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r7, r8,
r9, r10);
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// r6: elements
__ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
// Load the property to r3.
GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
r7, r6);
__ Ret();
__ bind(&index_name);
__ IndexFromHash(r6, key);
// Now jump to the place where smi keys are handled.
__ b(&index_smi);
}
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
View File
@ -18,16 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ beq(global_object);
__ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ beq(global_object);
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@ -127,141 +117,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
Register scratch,
int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
__ mov(r0,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ AndP(r0, scratch);
__ bne(slow /*, cr0*/);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ CmpP(scratch, Operand(JS_OBJECT_TYPE));
__ blt(slow);
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
// elements - holds the elements of the receiver and its prototypes.
//
// scratch1 - used to hold elements length, bit fields, base addresses.
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ CmpLogicalP(key, scratch1);
__ blt(&in_bounds, Label::kNear);
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ CmpP(key, Operand::Zero());
__ blt(slow); // Negative keys can't take the fast OOB path.
__ bind(&check_prototypes);
__ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ CompareRoot(scratch2, Heap::kNullValueRootIndex);
__ beq(&absent, Label::kNear);
__ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch2: map of current prototype
__ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
__ blt(slow);
__ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
__ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ bne(slow);
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ bne(slow);
__ jmp(&check_next_prototype);
__ bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
__ AddP(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
__ SmiToPtrArrayOffset(scratch2, key);
__ LoadP(scratch2, MemOperand(scratch2, scratch1));
__ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to check the prototype chain.
__ beq(&check_prototypes);
__ LoadRR(result, scratch2);
__ bind(&done);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
__ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
__ bgt(not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ beq(&unique, Label::kNear);
// Is the string an array index, with cached numeric value?
__ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
__ AndP(r0, hash, r7);
__ beq(index_string);
// Is the string internalized? We know it's a string, so a single
// bit test is enough.
// map: key map
__ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0);
__ tmll(hash, Operand(kIsNotInternalizedMask));
__ bne(not_unique);
__ bind(&unique);
}
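The key classification above reduces to two bit tests once the type check has run. A sketch of the decision with the masks passed in as opaque constants (the real values live in Name and the instance-type definitions):

#include <cstdint>

enum class KeyKind { kArrayIndex, kUniqueName, kNotUnique };

// hash_field:    contents of Name::kHashFieldOffset.
// instance_type: from the key's map; the caller has already excluded smis
//                and anything above LAST_UNIQUE_NAME_TYPE.
inline KeyKind ClassifyStringKey(uint32_t hash_field, uint32_t instance_type,
                                 uint32_t contains_cached_index_mask,
                                 uint32_t is_not_internalized_mask) {
  // A zero masked hash field means the string caches an array index, so
  // the key can rejoin the smi path via IndexFromHash.
  if ((hash_field & contains_cached_index_mask) == 0)
    return KeyKind::kArrayIndex;
  // kInternalizedTag == 0: a clear bit means the string is internalized.
  if ((instance_type & is_not_internalized_mask) == 0)
    return KeyKind::kUniqueName;
  return KeyKind::kNotUnique;  // falls through to the miss path
}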
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r2;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@ -339,103 +194,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = LoadDescriptor::NameRegister();
Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(r4));
DCHECK(receiver.is(r3));
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r2, r5, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
r5);
__ Ret();
__ bind(&check_number_dictionary);
__ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));
// Check whether the elements is a number dictionary.
// r5: elements map
// r6: elements
__ CompareRoot(r5, Heap::kHashTableMapRootIndex);
__ bne(&slow, Label::kNear);
__ SmiUntag(r2, key);
__ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
__ Ret();
// Slow case, key and receiver still in r2 and r3.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
r5);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(r6, Heap::kHashTableMapRootIndex);
__ beq(&probe_dictionary);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r6, r7,
r8, r9);
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// r5: elements
__ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r2.
GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
r6, r5);
__ Ret();
__ bind(&index_name);
__ IndexFromHash(r5, key);
// Now jump to the place where smi keys are handled.
__ b(&index_smi);
}
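Taken together, the megamorphic stub is a five-way dispatch. Roughly this ladder, in a hedged C++ restatement where the predicates stand in for the checks the assembly performs:

enum class LoadPath {
  kFastElements,        // GenerateFastArrayLoad
  kNumberDictionary,    // LoadFromNumberDictionary
  kStubCache,           // load_stub_cache()->GenerateProbe with dummy slot
  kPropertyDictionary,  // GenerateDictionaryLoad
  kRuntime              // Runtime::kKeyedGetProperty via the 'slow' label
};

// Array-index strings are converted with IndexFromHash first and re-enter
// with key_is_smi == true, exactly like the jump back to 'index_smi'.
inline LoadPath DispatchKeyedLoad(bool key_is_smi, bool fast_elements,
                                  bool elements_are_hash_table,
                                  bool key_is_unique_name,
                                  bool properties_are_hash_table) {
  if (key_is_smi) {
    if (fast_elements) return LoadPath::kFastElements;
    if (elements_are_hash_table) return LoadPath::kNumberDictionary;
    return LoadPath::kRuntime;
  }
  if (!key_is_unique_name) return LoadPath::kRuntime;
  return properties_are_hash_table ? LoadPath::kPropertyDictionary
                                   : LoadPath::kStubCache;
}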
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),


@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
__ j(equal, global_object);
__ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
__ j(equal, global_object);
}
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
@ -133,237 +121,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
int interceptor_bit, Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
// into string objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
__ j(below, slow);
// Check bit field.
__ testb(
FieldOperand(map, Map::kBitFieldOffset),
Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ j(not_zero, slow);
}
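The receiver pre-check is the same on every port: three cheap rejections before any elements work. A compact model (the constants are parameters here because the illustrative values would otherwise be made up):

struct ReceiverModel {
  bool is_smi;
  unsigned instance_type;  // from the receiver's map
  unsigned bit_field;      // the Map::kBitFieldOffset byte
};

// JS_OBJECT_TYPE > JS_VALUE_TYPE, so one unsigned compare also routes
// value wrappers (String objects etc.) to the runtime, where indexing
// into the wrapped primitive is handled correctly.
inline bool PassesReceiverCheck(const ReceiverModel& r,
                                unsigned js_object_type,
                                unsigned access_check_bit,
                                unsigned interceptor_bit) {
  if (r.is_smi) return false;
  if (r.instance_type < js_object_type) return false;
  return (r.bit_field & (access_check_bit | interceptor_bit)) == 0;
}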
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch, Register result,
Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
// elements - holds the elements of the receiver and its prototypes.
//
// scratch - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
// Unsigned comparison rejects negative indices.
__ j(below, &in_bounds);
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ SmiCompare(key, Smi::kZero);
__ j(less, slow); // Negative keys can't take the fast OOB path.
__ bind(&check_prototypes);
__ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
// scratch: current prototype
__ CompareRoot(scratch, Heap::kNullValueRootIndex);
__ j(equal, &absent);
__ movp(elements, FieldOperand(scratch, JSObject::kElementsOffset));
__ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch: map of current prototype
__ CmpInstanceType(scratch, JS_OBJECT_TYPE);
__ j(below, slow);
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ j(not_zero, slow);
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ j(not_equal, slow);
__ jmp(&check_next_prototype);
__ bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
__ movp(scratch, FieldOperand(elements, index.reg, index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to check the prototype chain.
__ j(equal, &check_prototypes);
__ Move(result, scratch);
__ bind(&done);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
Label unique;
__ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
__ j(above, not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ j(equal, &unique);
// Is the string an array index, with cached numeric value?
__ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
__ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
__ j(zero, index_string); // The value in hash is used at jump target.
// Is the string internalized? We already know it's a string so a single
// bit test is enough.
STATIC_ASSERT(kNotInternalizedTag != 0);
__ testb(FieldOperand(map, Map::kInstanceTypeOffset),
Immediate(kIsNotInternalizedMask));
__ j(not_zero, not_unique);
__ bind(&unique);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
DCHECK(receiver.is(rdx));
DCHECK(key.is(rcx));
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, rax,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(rax, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
__ SmiToInteger32(rbx, key);
__ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rbx: key as untagged int32
// rax: elements
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
__ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax);
__ ret(0);
__ bind(&slow);
// Slow case: Jump to runtime.
__ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
KeyedLoadIC::GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
&slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
Register megamorphic_scratch = rdi;
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadDescriptor::SlotRegister();
DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
masm->isolate()->load_stub_cache()->GenerateProbe(
masm, receiver, key, megamorphic_scratch, no_reg);
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// rbx: elements
__ movp(rax, FieldOperand(receiver, JSObject::kMapOffset));
__ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
__ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_name);
__ IndexFromHash(rbx, key);
__ jmp(&index_smi);
}
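One design note worth preserving: handlers installed in the stub cache share their calling convention with the vector-based ICs, so they expect a feedback vector and slot even when, as here, no feedback will ever be recorded. The stub therefore materializes a process-shared dummy pair before probing. A sketch of the idea with stand-in types (illustrative only, not the V8 objects):

struct FeedbackPair {
  const void* vector;  // stands in for the dummy TypeFeedbackVector root
  int slot;            // stands in for kDummyKeyedLoadICSlot, as a smi
};

// Any fixed, well-formed pair suffices: the megamorphic path never
// updates the IC, so downstream misses cannot corrupt real feedback.
inline FeedbackPair DummyFeedback() {
  static const char kDummyVector = 0;  // shared, never written
  return FeedbackPair{&kDummyVector, 0};
}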
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {


@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
__ j(equal, global_object);
}
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
@ -132,238 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
int interceptor_bit, Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// Scratch registers:
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ test_b(
FieldOperand(map, Map::kBitFieldOffset),
Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ j(not_zero, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type. In
// the case that the object is a value-wrapper object, we enter the runtime
// system to make sure that indexing into string objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpInstanceType(map, JS_OBJECT_TYPE);
__ j(below, slow);
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
Register scratch2, Register result,
Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
// Scratch registers:
// scratch - used to hold elements of the receiver and the loaded value.
// scratch2 - holds maps and prototypes during prototype chain check.
// result - holds the result on exit if the load succeeds and
// we fall through.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(scratch);
// Check that the key (index) is within bounds.
__ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
__ j(below, &in_bounds);
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ cmp(key, 0);
__ j(less, slow); // Negative keys can't take the fast OOB path.
__ bind(&check_prototypes);
__ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ cmp(scratch2, masm->isolate()->factory()->null_value());
__ j(equal, &absent);
__ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
__ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
// scratch: elements of current prototype
// scratch2: map of current prototype
__ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
__ j(below, slow);
__ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
Immediate((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ j(not_zero, slow);
__ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
__ j(not_equal, slow);
__ jmp(&check_next_prototype);
__ bind(&absent);
__ mov(result, masm->isolate()->factory()->undefined_value());
__ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
// In case the loaded value is the_hole we have to check the prototype chain.
__ j(equal, &check_prototypes);
__ Move(result, scratch);
__ bind(&done);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
Label unique;
__ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
__ j(above, not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ j(equal, &unique);
// Is the string an array index, with cached numeric value?
__ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
__ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
__ j(zero, index_string);
// Is the string internalized? We already know it's a string so a single
// bit test is enough.
STATIC_ASSERT(kNotInternalizedTag != 0);
__ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
Immediate(kIsNotInternalizedMask));
__ j(not_zero, not_unique);
__ bind(&unique);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
__ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
__ mov(ebx, key);
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// ebx: untagged index
// eax: elements
__ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
DONT_DO_SMI_CHECK);
Label slow_pop_receiver;
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(receiver);
__ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
// Pop receiver before returning.
__ pop(receiver);
__ ret(0);
__ bind(&slow_pop_receiver);
// Pop the receiver from the stack and jump to runtime.
__ pop(receiver);
__ bind(&slow);
// Slow case: jump to runtime.
__ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
&slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(isolate);
int slot = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
edi);
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
__ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
__ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
__ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_name);
__ IndexFromHash(ebx, key);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {


@ -2064,7 +2064,6 @@ Isolate::Isolate(bool enable_serializer)
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
keyed_lookup_cache_(NULL),
context_slot_cache_(NULL),
descriptor_lookup_cache_(NULL),
handle_scope_implementer_(NULL),
@ -2295,8 +2294,6 @@ Isolate::~Isolate() {
descriptor_lookup_cache_ = NULL;
delete context_slot_cache_;
context_slot_cache_ = NULL;
delete keyed_lookup_cache_;
keyed_lookup_cache_ = NULL;
delete load_stub_cache_;
load_stub_cache_ = NULL;
@ -2438,7 +2435,6 @@ bool Isolate::Init(Deserializer* des) {
#undef ASSIGN_ELEMENT
compilation_cache_ = new CompilationCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
unicode_cache_ = new UnicodeCache();


@ -64,7 +64,6 @@ class HStatistics;
class HTracer;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
class KeyedLookupCache;
class Logger;
class MaterializedObjectStore;
class OptimizingCompileDispatcher;
@ -860,10 +859,6 @@ class Isolate {
return materialized_object_store_;
}
KeyedLookupCache* keyed_lookup_cache() {
return keyed_lookup_cache_;
}
ContextSlotCache* context_slot_cache() {
return context_slot_cache_;
}
@ -1325,7 +1320,6 @@ class Isolate {
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
HandleScopeData handle_scope_data_;


@ -13,72 +13,5 @@ void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}
int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
DisallowHeapAllocation no_gc;
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}
int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
DisallowHeapAllocation no_gc;
int index = (Hash(map, name) & kHashMask);
for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index + i];
if ((key.map == *map) && key.name->Equals(*name)) {
return field_offsets_[index + i];
}
}
return kNotFound;
}
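The hash and bucket arithmetic above, restated with the class constants inlined so the bucketing is visible (Map* and Name* reduced to raw values; a model, not the V8 types):

#include <cstdint>

constexpr int kLength = 256;
constexpr int kCapacityMask = kLength - 1;
constexpr int kMapHashShift = 5;
constexpr int kHashMask = -4;  // zero the last two bits
constexpr int kEntriesPerBucket = 4;

inline int CacheHash(uintptr_t map_addr, uint32_t name_hash) {
  // Uses only the lower 32 bits of the map pointer, like the original.
  uint32_t addr_hash = static_cast<uint32_t>(map_addr) >> kMapHashShift;
  return static_cast<int>((addr_hash ^ name_hash) & kCapacityMask);
}

// Lookup scans the four consecutive entries of the bucket that starts at
// the hash rounded down to a multiple of kEntriesPerBucket.
inline int CacheBucketStart(uintptr_t map_addr, uint32_t name_hash) {
  return CacheHash(map_addr, name_hash) & kHashMask;
}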
void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
int field_offset) {
DisallowHeapAllocation no_gc;
if (!name->IsUniqueName()) {
if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
Handle<String>::cast(name))
.ToHandle(&name)) {
return;
}
}
// This cache is cleared only between mark compact passes, so we expect the
// cache to only contain old space names.
DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
int index = (Hash(map, name) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
// help to get the most frequently used one in position 0).
for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index + i];
Object* free_entry_indicator = NULL;
if (key.map == free_entry_indicator) {
key.map = *map;
key.name = *name;
field_offsets_[index + i] = field_offset;
return;
}
}
// No free entry found in this bucket, so we move them all down one and
// put the new entry at position zero.
for (int i = kEntriesPerBucket - 1; i > 0; i--) {
Key& key = keys_[index + i];
Key& key2 = keys_[index + i - 1];
key = key2;
field_offsets_[index + i] = field_offsets_[index + i - 1];
}
// Write the new first entry.
Key& key = keys_[index];
key.map = *map;
key.name = *name;
field_offsets_[index] = field_offset;
}
void KeyedLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}
} // namespace internal
} // namespace v8


@ -52,65 +52,6 @@ class DescriptorLookupCache {
DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
public:
// Lookup field offset for (map, name). If absent, -1 is returned.
int Lookup(Handle<Map> map, Handle<Name> name);
// Update an element in the cache.
void Update(Handle<Map> map, Handle<Name> name, int field_offset);
// Clear the cache.
void Clear();
static const int kLength = 256;
static const int kCapacityMask = kLength - 1;
static const int kMapHashShift = 5;
static const int kHashMask = -4; // Zero the last two bits.
static const int kEntriesPerBucket = 4;
static const int kEntryLength = 2;
static const int kMapIndex = 0;
static const int kKeyIndex = 1;
static const int kNotFound = -1;
// kEntriesPerBucket should be a power of 2.
STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
private:
KeyedLookupCache() {
for (int i = 0; i < kLength; ++i) {
keys_[i].map = NULL;
keys_[i].name = NULL;
field_offsets_[i] = kNotFound;
}
}
static inline int Hash(Handle<Map> map, Handle<Name> name);
// Get the address of the keys and field_offsets arrays. Used in
// generated code to perform cache lookups.
Address keys_address() { return reinterpret_cast<Address>(&keys_); }
Address field_offsets_address() {
return reinterpret_cast<Address>(&field_offsets_);
}
struct Key {
Map* map;
Name* name;
};
Key keys_[kLength];
int field_offsets_[kLength];
friend class ExternalReference;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
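For the record, the constants work out as follows: 256 entries in buckets of 4 gives 64 buckets, and kHashMask == -4 is ...11111100 in two's complement, which is exactly "round down to the bucket's first slot"; the two STATIC_ASSERTs pin both facts down. The same checks in standalone form:

static_assert(256 / 4 == 64, "64 buckets of 4 entries each");
static_assert((4 & (4 - 1)) == 0, "entries per bucket is a power of two");
static_assert((-4 & 3) == 0, "kHashMask zeroes the two low index bits");
static_assert(4 == -(-4), "kEntriesPerBucket == -kHashMask");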
} // namespace internal
} // namespace v8


@ -3260,14 +3260,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Addu(sp, sp, a1);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(a2);
CallICStub stub(isolate(), state());
@ -3361,75 +3353,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(t9);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
Register key = LoadWithVectorDescriptor::NameRegister(); // a2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
Register feedback = t0;
Register receiver_map = t1;
Register scratch1 = t4;
__ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&not_array, ne, at, Operand(scratch1));
// We have a polymorphic element handler.
__ JumpIfNotSmi(key, &miss);
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
__ bind(&not_array);
// Is it generic?
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&try_poly_name, ne, at, Operand(feedback));
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ Branch(&miss, ne, key, Operand(feedback));
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
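Across all the ports, GenerateImpl decodes a single feedback slot into the same four states. A sketch of the discriminated union the assembly walks, with stand-in booleans for the map and root comparisons:

enum class FeedbackKind {
  kMonomorphic,  // WeakCell: one map, handler in the adjacent slot
  kPolymorphic,  // FixedArray of (map, handler) pairs
  kMegamorphic,  // the megamorphic_symbol root: jump to the generic stub
  kKeyedName     // a Name: if it equals the key, the (map, handler)
                 // array lives in the next slot; otherwise miss
};

inline FeedbackKind DecodeFeedback(bool is_weak_cell, bool is_fixed_array,
                                   bool is_megamorphic_symbol) {
  if (is_weak_cell) return FeedbackKind::kMonomorphic;
  if (is_fixed_array) return FeedbackKind::kPolymorphic;
  if (is_megamorphic_symbol) return FeedbackKind::kMegamorphic;
  return FeedbackKind::kKeyedName;
}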
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());


@ -619,87 +619,6 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
And(reg0, reg0, Operand(0x3fffffff));
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'elements'.
// Unchanged on bailout so 'key' and 'elements' can be used
// in further computation.
//
// Scratch registers:
//
// reg0 - holds the untagged key on entry and holds the hash once computed.
//
// reg1 - Used to hold the capacity mask of the dictionary.
//
// reg2 - Used for the index into the dictionary.
// at - Temporary (avoid MacroAssembler instructions also using 'at').
Label done;
GetNumberHash(reg0, reg1);
// Compute the capacity mask.
lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
sra(reg1, reg1, kSmiTagSize);
Subu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(reg2, reg2, reg1);
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
Lsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
Lsa(reg2, elements, reg2, kPointerSizeLog2);
lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kNumberDictionaryProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
Branch(miss, ne, key, Operand(at));
}
}
bind(&done);
// Check that the value is a field property.
// reg2: elements + (index * kPointerSize).
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
DCHECK_EQ(DATA, 0);
And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
Branch(miss, ne, at, Operand(zero_reg));
// Get the value at the masked, scaled index and return.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
lw(result, FieldMemOperand(reg2, kValueOffset));
}
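The probe sequence above is worth restating: per its own comments, probe i inspects index (hash + i + i*i) & mask, and each dictionary entry is three pointer-sized words (key, value, details). A direct C++ transliteration of the index math, as a hedged model:

#include <cstdint>

constexpr int kEntrySize = 3;  // key, value, details (per the DCHECK above)

// Word offset, relative to kElementsStartOffset, of probe i's key slot.
inline uint32_t ProbeWordOffset(uint32_t hash, uint32_t capacity_mask,
                                int i) {
  uint32_t index = (hash + static_cast<uint32_t>(i + i * i)) & capacity_mask;
  return index * kEntrySize;
}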
// ---------------------------------------------------------------------------
// Instruction macros.
@ -4735,20 +4654,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
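CheckFastElements relies on the enum layout asserted above: the four fast kinds are the first four values, so "is fast" collapses to one unsigned compare on the elements-kind field of bit_field2. A model that keeps only the kind (the real code compares the raw byte against kMaximumBitField2FastHoleyElementValue, which encodes the same bound):

enum ElementsKindModel {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3
  // everything >= 4 is non-fast in this model
};

inline bool IsFastElementsKind(unsigned kind) {
  return kind <= FAST_HOLEY_ELEMENTS;  // the 'hi' branch taken means fail
}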
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {


@ -504,15 +504,6 @@ class MacroAssembler: public Assembler {
void GetNumberHash(Register reg0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2);
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@ -1180,12 +1171,6 @@ class MacroAssembler: public Assembler {
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,


@ -3263,14 +3263,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Daddu(sp, sp, a1);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(a2);
CallICStub stub(isolate(), state());
@ -3364,77 +3356,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(t9);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
Register key = LoadWithVectorDescriptor::NameRegister(); // a2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
Register feedback = a4;
Register receiver_map = a5;
Register scratch1 = a6;
__ SmiScale(feedback, slot, kPointerSizeLog2);
__ Daddu(feedback, vector, Operand(feedback));
__ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&not_array, ne, scratch1, Operand(at));
// We have a polymorphic element handler.
__ JumpIfNotSmi(key, &miss);
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
__ bind(&not_array);
// Is it generic?
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&try_poly_name, ne, feedback, Operand(at));
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ Branch(&miss, ne, key, Operand(feedback));
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ SmiScale(feedback, slot, kPointerSizeLog2);
__ Daddu(feedback, vector, Operand(feedback));
__ ld(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ Branch(&compare_map);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());


@ -637,87 +637,6 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
And(reg0, reg0, Operand(0x3fffffff));
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'elements'.
// Unchanged on bailout so 'key' and 'elements' can be used
// in further computation.
//
// Scratch registers:
//
// reg0 - holds the untagged key on entry and holds the hash once computed.
//
// reg1 - Used to hold the capacity mask of the dictionary.
//
// reg2 - Used for the index into the dictionary.
// at - Temporary (avoid MacroAssembler instructions also using 'at').
Label done;
GetNumberHash(reg0, reg1);
// Compute the capacity mask.
ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
SmiUntag(reg1, reg1);
Dsubu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(reg2, reg2, reg1);
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
Dlsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
Dlsa(reg2, elements, reg2, kPointerSizeLog2);
ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kNumberDictionaryProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
Branch(miss, ne, key, Operand(at));
}
}
bind(&done);
// Check that the value is a field property.
// reg2: elements + (index * kPointerSize).
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
DCHECK_EQ(DATA, 0);
And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
Branch(miss, ne, at, Operand(zero_reg));
// Get the value at the masked, scaled index and return.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
ld(result, FieldMemOperand(reg2, kValueOffset));
}
// ---------------------------------------------------------------------------
// Instruction macros.
@ -4883,20 +4802,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {


@ -536,15 +536,6 @@ class MacroAssembler: public Assembler {
void GetNumberHash(Register reg0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2);
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@ -1238,12 +1229,6 @@ class MacroAssembler: public Assembler {
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,


@ -3196,14 +3196,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(r5);
CallICStub stub(isolate(), state());
@ -3304,77 +3296,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(ip);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r4
Register key = LoadWithVectorDescriptor::NameRegister(); // r5
Register vector = LoadWithVectorDescriptor::VectorRegister(); // r6
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r3
Register feedback = r7;
Register receiver_map = r8;
Register scratch1 = r9;
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ bne(&miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ b(&compare_map);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());


@ -1729,86 +1729,6 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
ExtractBitRange(t0, t0, 29, 0);
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
Register key, Register result,
Register t0, Register t1,
Register t2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'elements'.
// Unchanged on bailout so 'key' and 'elements' can be used
// in further computation.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
//
// t1 - used to hold the capacity mask of the dictionary
//
// t2 - used for the index into the dictionary.
Label done;
GetNumberHash(t0, t1);
// Compute the capacity mask.
LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
SmiUntag(t1);
subi(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
mr(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(t2, t2, t1);
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
slwi(ip, t2, Operand(1));
add(t2, t2, ip); // t2 = t2 * 3
// Check if the key is identical to the name.
slwi(t2, t2, Operand(kPointerSizeLog2));
add(t2, elements, t2);
LoadP(ip,
FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
cmp(key, ip);
if (i != kNumberDictionaryProbes - 1) {
beq(&done);
} else {
bne(miss);
}
}
bind(&done);
// Check that the value is a field property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
DCHECK_EQ(DATA, 0);
and_(r0, t1, ip, SetRC);
bne(miss, cr0);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
LoadP(result, FieldMemOperand(t2, kValueOffset));
}
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
@ -2234,20 +2154,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
cmp(obj, r0);
}
void MacroAssembler::CheckFastElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
bgt(fail);
}
void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);


@ -670,11 +670,6 @@ class MacroAssembler : public Assembler {
void GetNumberHash(Register t0, Register scratch);
void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
Register result, Register t0, Register t1,
Register t2);
inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
@ -819,11 +814,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map, Register scratch, Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);


@ -3139,12 +3139,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(r4);
CallICStub stub(isolate(), state());
@ -3241,72 +3235,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(ip);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r3
Register key = LoadWithVectorDescriptor::NameRegister(); // r4
Register vector = LoadWithVectorDescriptor::VectorRegister(); // r5
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r2
Register feedback = r6;
Register receiver_map = r7;
Register scratch1 = r8;
__ SmiToPtrArrayOffset(r1, slot);
__ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ CmpP(key, feedback);
__ bne(&miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ SmiToPtrArrayOffset(r1, slot);
__ LoadP(feedback,
FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ b(&compare_map);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());


@ -1624,85 +1624,6 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
ExtractBitRange(t0, t0, 29, 0);
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
Register key, Register result,
Register t0, Register t1,
Register t2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'elements'.
// Unchanged on bailout so 'key' and 'elements' can be used
// in further computation.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
//
// t1 - used to hold the capacity mask of the dictionary
//
// t2 - used for the index into the dictionary.
Label done;
GetNumberHash(t0, t1);
// Compute the capacity mask.
LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
SmiUntag(t1);
SubP(t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
LoadRR(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
AndP(t2, t1);
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
LoadRR(ip, t2);
sll(ip, Operand(1));
AddP(t2, ip); // t2 = t2 * 3
// Check if the key is identical to the name.
sll(t2, Operand(kPointerSizeLog2));
AddP(t2, elements);
LoadP(ip,
FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
CmpP(key, ip);
if (i != kNumberDictionaryProbes - 1) {
beq(&done, Label::kNear);
} else {
bne(miss);
}
}
bind(&done);
// Check that the value is a field property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
DCHECK_EQ(DATA, 0);
AndP(r0, ip, t1);
bne(miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
LoadP(result, FieldMemOperand(t2, kValueOffset));
}
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
@ -2116,18 +2037,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
}
void MacroAssembler::CheckFastElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
Operand(Map::kMaximumBitField2FastHoleyElementValue));
bgt(fail);
}
void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);

View File

@ -940,10 +940,6 @@ class MacroAssembler : public Assembler {
void GetNumberHash(Register t0, Register scratch);
void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
Register result, Register t0, Register t1,
Register t2);
inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
@@ -1086,10 +1082,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map, Register scratch, Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);

View File

@@ -2949,14 +2949,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Register scratch3,
@@ -3028,76 +3020,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ jmp(handler);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // rdx
Register key = LoadWithVectorDescriptor::NameRegister(); // rcx
Register vector = LoadWithVectorDescriptor::VectorRegister(); // rbx
Register slot = LoadWithVectorDescriptor::SlotRegister(); // rax
Register feedback = rdi;
Register integer_slot = r8;
Register receiver_map = r9;
__ SmiToInteger32(integer_slot, slot);
__ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
integer_slot, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
&miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmpp(key, feedback);
__ j(not_equal, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, false,
&miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
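Stripped of register allocation, the stub is a four-way dispatch on the feedback slot's contents. The types below are toy stand-ins (the real code inspects heap-object maps, not an enum tag), but the order of the checks mirrors the assembly: weak cell, handler array, megamorphic sentinel, then cached name.
#include <vector>

struct Map;   // stand-in for a receiver map
struct Name;  // stand-in for a property name used as the key

enum class FeedbackKind { kWeakCell, kHandlerArray, kMegamorphic, kName };
enum class Outcome { kMonomorphic, kPolymorphic, kMegamorphic, kPolyName, kMiss };

struct FeedbackSlotSketch {
  FeedbackKind kind;
  const Map* monomorphic_map = nullptr;       // valid for kWeakCell
  std::vector<const Map*> handler_maps;       // valid for kHandlerArray
  const Name* cached_name = nullptr;          // valid for kName
  std::vector<const Map*> name_handler_maps;  // the "next slot" for kName
};

Outcome Dispatch(const FeedbackSlotSketch& slot, const Map* receiver_map,
                 const Name* key) {
  // (The real stub also substitutes the HeapNumber map for Smi receivers
  // before comparing - the load_smi_map path above - omitted here.)
  // 1. Monomorphic: a WeakCell holding one map, with its handler alongside.
  if (slot.kind == FeedbackKind::kWeakCell)
    return slot.monomorphic_map == receiver_map ? Outcome::kMonomorphic
                                                : Outcome::kMiss;
  // 2. Polymorphic: a FixedArray of map/handler pairs.
  if (slot.kind == FeedbackKind::kHandlerArray) {
    for (const Map* m : slot.handler_maps)
      if (m == receiver_map) return Outcome::kPolymorphic;
    return Outcome::kMiss;
  }
  // 3. Generic: the megamorphic sentinel sends us to the stub cache probe.
  if (slot.kind == FeedbackKind::kMegamorphic) return Outcome::kMegamorphic;
  // 4. A cached Name: if the key matches, the next vector slot holds a
  //    map/handler array specialized to that name.
  if (slot.cached_name == key) {
    for (const Map* m : slot.name_handler_maps)
      if (m == receiver_map) return Outcome::kPolyName;
  }
  return Outcome::kMiss;
}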
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());

View File

@@ -3677,20 +3677,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
Immediate(static_cast<int8_t>(type)));
}
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
@@ -4800,87 +4786,6 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
andl(r0, Immediate(0x3fffffff));
}
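This is the hash that feeds the probe loops: GetNumberHash emits V8's seeded integer mixer, ending in the 0x3fffffff mask visible in the context line above so the result always fits in a Smi. A host-C++ rendering, assuming the Thomas-Wang-style mixing steps V8 used at the time (worth checking against ComputeIntegerHash in the tree):
#include <cstdint>

// Seeded 32-bit integer hash; the final mask keeps the hash Smi-sized and
// matches the andl(r0, Immediate(0x3fffffff)) above.
uint32_t ComputeNumberHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}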
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
//
// r1 - used to hold the capacity mask of the dictionary.
//
// r2 - used for the index into the dictionary.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'elements' or 'key'.
// Unchanged on bailout so 'elements' and 'key' can be used
// in further computation.
Label done;
GetNumberHash(r0, r1);
// Compute capacity mask.
SmiToInteger32(r1, FieldOperand(elements,
SeededNumberDictionary::kCapacityOffset));
decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
movp(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
andp(r2, r1);
// Scale the index by multiplying by the entry size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
}
}
bind(&done);
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
DCHECK_EQ(DATA, 0);
Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::kMask));
j(not_zero, miss);
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
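The offset constants spell out the entry layout: each entry is kEntrySize == 3 pointers, [key, value, details], so value and details live one and two words past the key, and the lea(r2, Operand(r2, r2, times_2, 0)) above is the index-times-three. A small sketch of the address arithmetic; the pointer size and the kElementsStart value are assumptions for illustration, not the real constants:
#include <cstdint>

constexpr intptr_t kPointerSizeSketch = 8;  // x64
constexpr intptr_t kEntrySizeSketch = 3;    // key, value, details
constexpr intptr_t kElementsStart = 24;     // stand-in for kElementsStartOffset

// Byte offsets of the probed entry's three words, given the masked index.
constexpr intptr_t KeyOffset(intptr_t index) {
  return kElementsStart + index * kEntrySizeSketch * kPointerSizeSketch;
}
constexpr intptr_t ValueOffset(intptr_t index) {
  return KeyOffset(index) + 1 * kPointerSizeSketch;  // kValueOffset
}
constexpr intptr_t DetailsOffset(intptr_t index) {
  return KeyOffset(index) + 2 * kPointerSizeSketch;  // kDetailsOffset
}

static_assert(ValueOffset(0) == kElementsStart + kPointerSizeSketch,
              "value is the second word of the entry");
static_assert(DetailsOffset(4) ==
                  kElementsStart + (4 * 3 + 2) * kPointerSizeSketch,
              "entry stride is three words");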
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {

View File

@@ -1112,12 +1112,6 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
@@ -1305,15 +1299,6 @@ class MacroAssembler: public Assembler {
void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
Register result);
// ---------------------------------------------------------------------------
// Allocation support

View File

@@ -2834,14 +2834,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
static void HandleArrayCases(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register feedback, bool is_polymorphic,
@@ -2953,64 +2945,6 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ jmp(handler);
}
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
Register key = LoadWithVectorDescriptor::NameRegister(); // ecx
Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
Register feedback = edi;
__ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
__ j(not_equal, &try_array);
HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
__ bind(&try_array);
// Is it a fixed array?
__ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ j(not_equal, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
}
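One ia32-specific detail worth spelling out: the stub indexes the vector with times_half_pointer_size because slot is still a Smi. A 32-bit Smi is the value shifted left by one, so scaling the tagged slot by kPointerSize / 2 yields slot * kPointerSize without untagging first. A self-checking sketch of that arithmetic:
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSizeSketch = 1;   // 32-bit Smis: value << 1, tag bit 0
constexpr int kPointerSizeSketch = 4;  // ia32

int32_t SmiUntagSketch(int32_t smi) { return smi >> kSmiTagSizeSketch; }

// FieldOperand(vector, slot, times_half_pointer_size, kHeaderSize) computes
// vector + smi_slot * 2 + kHeaderSize; this is the smi_slot * 2 part.
int32_t TaggedSlotByteOffset(int32_t smi_slot) {
  return smi_slot * (kPointerSizeSketch / 2);
}

int main() {
  for (int32_t slot = 0; slot < 16; slot++) {
    int32_t smi_slot = slot << kSmiTagSizeSketch;
    // Same byte offset as untagging first and scaling by the pointer size.
    assert(TaggedSlotByteOffset(smi_slot) ==
           SmiUntagSketch(smi_slot) * kPointerSizeSketch);
  }
  return 0;
}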
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());

View File

@@ -701,20 +701,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
@@ -1355,82 +1341,6 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
and_(r0, 0x3fffffff);
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
// key - holds the smi key on entry and is unchanged.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
//
// r1 - used to hold the capacity mask of the dictionary.
//
// r2 - used for the index into the dictionary.
//
// result - holds the result on exit if the load succeeds and we fall through.
Label done;
GetNumberHash(r0, r1);
// Compute capacity mask.
mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
shr(r1, kSmiTagSize); // convert smi to int
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(r2, r1);
// Scale the index by multiplying by the entry size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
}
}
bind(&done);
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
DCHECK_EQ(DATA, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
j(not_zero, miss);
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {

View File

@@ -381,11 +381,6 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Label* fail,
@@ -601,10 +596,6 @@ class MacroAssembler: public Assembler {
void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
Register r0, Register r1, Register r2,
Register result);
// ---------------------------------------------------------------------------
// Allocation support

View File

@@ -21828,10 +21828,10 @@ void TestStubCache(bool primary) {
// Enforce recompilation of IC stubs that access megamorphic stub cache
// to respect enabled native code counters and stub cache test flags.
i::CodeStub::Major code_stub_keys[] = {
i::CodeStub::LoadIC, i::CodeStub::LoadICTrampoline,
i::CodeStub::KeyedLoadIC, i::CodeStub::KeyedLoadICTrampoline,
i::CodeStub::StoreIC, i::CodeStub::StoreICTrampoline,
i::CodeStub::KeyedStoreIC, i::CodeStub::KeyedStoreICTrampoline,
i::CodeStub::LoadIC, i::CodeStub::LoadICTrampoline,
i::CodeStub::KeyedLoadICTF, i::CodeStub::KeyedLoadICTrampolineTF,
i::CodeStub::StoreIC, i::CodeStub::StoreICTrampoline,
i::CodeStub::KeyedStoreIC, i::CodeStub::KeyedStoreICTrampoline,
};
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Heap* heap = i_isolate->heap();

View File

@@ -2443,7 +2443,7 @@ TEST(CheckCodeNames) {
CHECK(node);
const char* builtin_path1[] = {"::(GC roots)", "::(Builtins)",
"::(KeyedLoadIC_Megamorphic builtin)"};
"::(KeyedLoadIC_Slow builtin)"};
node = GetNodeByPath(snapshot, builtin_path1, arraysize(builtin_path1));
CHECK(node);