Make randomized allocations along 64k granularity boundaries to avoid committing unused memory.
BUG=56036
TEST=None.
Patch by Justin Schuh <jschuh@chromium.org>
Review URL: http://codereview.chromium.org/3849004
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5883 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 277b908150
commit 0c064efdb0

 AUTHORS | 1 -
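A minimal sketch of the allocation-granularity idea stated in the commit message above; the constant and helper name are illustrative assumptions, not code from this patch.

// Illustrative sketch only -- not part of the patch. Rounding a randomized
// address hint down to the 64 KiB allocation granularity keeps reservations
// on granule boundaries, so no partially used granule has to be committed.
#include <cstdint>

static const uintptr_t kAllocationGranularity = 64 * 1024;  // assumed 64 KiB

static uintptr_t AlignDownToGranularity(uintptr_t randomized_hint) {
  return randomized_hint & ~(kAllocationGranularity - 1);
}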
@@ -9,7 +9,6 @@ ARM Ltd.
 Hewlett-Packard Development Company, LP
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexandre Rames <alexandre.rames@arm.com>
 Alexandre Vassalotti <avassalotti@gmail.com>
 Andreas Anyuru <andreas.anyuru@gmail.com>
 Burcu Dogan <burcujdogan@gmail.com>
@@ -164,7 +164,7 @@ bool RelocInfo::IsPatchedReturnSequence() {

 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
   Instr current_instr = Assembler::instr_at(pc_);
-  return !Assembler::IsNop(current_instr, 2);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
 }
@@ -288,9 +288,7 @@ Address Assembler::target_address_address_at(Address pc) {
 }
 #endif

-  // Verify that the instruction to patch is a
-  // ldr<cond> <Rd>, [pc +/- offset_12].
-  ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+  ASSERT(IsLdrPcImmediateOffset(instr));
   int offset = instr & 0xfff;  // offset_12 is unsigned
   if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
   // Verify that the constant pool comes after the instruction referencing it.
@@ -397,13 +397,6 @@ void Assembler::CodeTargetAlign() {
 }


-bool Assembler::IsNop(Instr instr, int type) {
-  // Check for mov rx, rx.
-  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
-  return instr == (al | 13*B21 | type*B12 | type);
-}
-
-
 bool Assembler::IsBranch(Instr instr) {
   return (instr & (B27 | B25)) == (B27 | B25);
 }
@@ -510,6 +503,13 @@ bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
 }


+bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
+  // Check the instruction is indeed a
+  // ldr<cond> <Rd>, [pc +/- offset_12].
+  return (instr & 0x0f7f0000) == 0x051f0000;
+}
+
+
 // Labels refer to positions in the (to be) generated code.
 // There are bound, linked, and unused labels.
 //
@@ -1113,8 +1113,8 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
     positions_recorder()->WriteRecordedPositions();
   }
   // Don't allow nop instructions in the form mov rn, rn to be generated using
-  // the mov instruction. They must be generated using nop(int)
-  // pseudo instructions.
+  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
+  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
   ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
   addrmod1(cond | 13*B21 | s, r0, dst, src);
 }
@@ -2376,6 +2376,13 @@ void Assembler::nop(int type) {
 }


+bool Assembler::IsNop(Instr instr, int type) {
+  // Check for mov rx, rx.
+  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
+  return instr == (al | 13*B21 | type*B12 | type);
+}
+
+
 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
@@ -1079,7 +1079,22 @@ class Assembler : public Malloced {
            const Condition cond = al);

   // Pseudo instructions
-  void nop(int type = 0);
+
+  // Different nop operations are used by the code generator to detect certain
+  // states of the generated code.
+  enum NopMarkerTypes {
+    NON_MARKING_NOP = 0,
+    DEBUG_BREAK_NOP,
+    // IC markers.
+    PROPERTY_ACCESS_INLINED,
+    PROPERTY_ACCESS_INLINED_CONTEXT,
+    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+    // Helper values.
+    LAST_CODE_MARKER,
+    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+  };
+
+  void nop(int type = 0);   // 0 is the default non-marking type.

   void push(Register src, Condition cond = al) {
     str(src, MemOperand(sp, 4, NegPreIndex), cond);
@@ -1151,7 +1166,6 @@ class Assembler : public Malloced {
   static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
   }
-  static bool IsNop(Instr instr, int type = 0);
   static bool IsBranch(Instr instr);
   static int GetBranchOffset(Instr instr);
   static bool IsLdrRegisterImmediate(Instr instr);
@@ -1168,6 +1182,8 @@ class Assembler : public Malloced {
   static bool IsLdrRegFpOffset(Instr instr);
   static bool IsStrRegFpNegOffset(Instr instr);
   static bool IsLdrRegFpNegOffset(Instr instr);
+  static bool IsLdrPcImmediateOffset(Instr instr);
+  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);


  protected:
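The NopMarkerTypes added above are emitted as "mov rN, rN" marker nops. The following standalone sketch shows that encoding with the condition/opcode constants written out; it mirrors the al | 13*B21 | type*B12 | type formula used by Assembler::IsNop and MacroAssembler::GetCodeMarker in this patch, but it is not V8 source and the helper names are illustrative.

#include <cstdint>

// al | 13*B21 evaluates to 0xE1A00000: an unconditional "mov rd, rm".
static const uint32_t kMovAlBase = 0xE1A00000u;

// Encode marker N as "mov rN, rN" (what nop(type) / MarkCode(type) emit).
static uint32_t EncodeMarkerNop(int type) {
  return kMovAlBase | (static_cast<uint32_t>(type) << 12) | type;
}

// Decode a marker: return N for "mov rN, rN", else -1. GetCodeMarker in the
// patch additionally requires FIRST_IC_MARKER <= N < LAST_CODE_MARKER.
static int DecodeMarkerNop(uint32_t instr) {
  int dst = (instr >> 12) & 0xF;
  int src = instr & 0xF;
  if ((instr & ~((0xFu << 12) | 0xFu)) != kMovAlBase || dst != src) return -1;
  return dst;
}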
@@ -6537,16 +6537,29 @@ void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
   explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name)
-      : receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
+                                          Handle<String> name,
+                                          bool is_contextual)
+      : receiver_(receiver),
+        name_(name),
+        is_contextual_(is_contextual),
+        is_dont_delete_(false) {
+    set_comment(is_contextual
+                ? "[ DeferredReferenceGetNamedValue (contextual)"
+                : "[ DeferredReferenceGetNamedValue");
   }

   virtual void Generate();

+  void set_is_dont_delete(bool value) {
+    ASSERT(is_contextual_);
+    is_dont_delete_ = value;
+  }
+
  private:
   Register receiver_;
   Handle<String> name_;
+  bool is_contextual_;
+  bool is_dont_delete_;
 };
@@ -6573,10 +6586,20 @@ void DeferredReferenceGetNamedValue::Generate() {
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop(1) instruction to indicate that the
-    // in-object has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    RelocInfo::Mode mode = is_contextual_
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    __ Call(ic, mode);
+    // We must mark the code just after the call with the correct marker.
+    MacroAssembler::NopMarkerTypes code_marker;
+    if (is_contextual_) {
+      code_marker = is_dont_delete_
+                    ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
+                    : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
+    } else {
+      code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
+    }
+    __ MarkCode(code_marker);

     // At this point the answer is in r0. We move it to the expected register
     // if necessary.
@@ -6640,7 +6663,7 @@ void DeferredReferenceGetKeyedValue::Generate() {
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed load has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);

     // Now go back to the frame that we entered with. This will not overwrite
     // the receiver or key registers since they were not in use when we came
@@ -6697,7 +6720,7 @@ void DeferredReferenceSetKeyedValue::Generate() {
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed store has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);

     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
@@ -6745,7 +6768,7 @@ void DeferredReferenceSetNamedValue::Generate() {
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // named store has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);

     // Go back to the frame we entered with. The instructions
     // generated by this merge are skipped over by the inline store
@@ -6763,7 +6786,14 @@ void DeferredReferenceSetNamedValue::Generate() {

 // Consumes the top of stack (the receiver) and pushes the result instead.
 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+  bool contextual_load_in_builtin =
+      is_contextual &&
+      (Bootstrapper::IsActive() ||
+       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
+
+  if (scope()->is_global_scope() ||
+      loop_nesting() == 0 ||
+      contextual_load_in_builtin) {
     Comment cmnt(masm(), "[ Load from named Property");
     // Setup the name register and call load IC.
     frame_->CallLoadIC(name,
@@ -6773,12 +6803,19 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
     frame_->EmitPush(r0);  // Push answer.
   } else {
     // Inline the in-object property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
+    Comment cmnt(masm(), is_contextual
+                             ? "[ Inlined contextual property load"
+                             : "[ Inlined named property load");

     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(&Counters::named_load_inline, 1,
-                        frame_->scratch0(), frame_->scratch1());
+    if (is_contextual) {
+      __ IncrementCounter(&Counters::named_load_global_inline, 1,
+                          frame_->scratch0(), frame_->scratch1());
+    } else {
+      __ IncrementCounter(&Counters::named_load_inline, 1,
+                          frame_->scratch0(), frame_->scratch1());
+    }

     // The following instructions are the inlined load of an in-object property.
     // Parts of this code is patched, so the exact instructions generated needs
@@ -6789,18 +6826,56 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
     Register receiver = frame_->PopToRegister();

     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(receiver, name);
+        new DeferredReferenceGetNamedValue(receiver, name, is_contextual);

-#ifdef DEBUG
-    int kInlinedNamedLoadInstructions = 7;
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
-#endif
+    bool is_dont_delete = false;
+    if (is_contextual) {
+      if (!info_->closure().is_null()) {
+        // When doing lazy compilation we can check if the global cell
+        // already exists and use its "don't delete" status as a hint.
+        AssertNoAllocation no_gc;
+        v8::internal::GlobalObject* global_object =
+            info_->closure()->context()->global();
+        LookupResult lookup;
+        global_object->LocalLookupRealNamedProperty(*name, &lookup);
+        if (lookup.IsProperty() && lookup.type() == NORMAL) {
+          ASSERT(lookup.holder() == global_object);
+          ASSERT(global_object->property_dictionary()->ValueAt(
+              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
+          is_dont_delete = lookup.IsDontDelete();
+        }
+      }
+      if (is_dont_delete) {
+        __ IncrementCounter(&Counters::dont_delete_hint_hit, 1,
+                            frame_->scratch0(), frame_->scratch1());
+      }
+    }

     { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      // Check that the receiver is a heap object.
-      __ tst(receiver, Operand(kSmiTagMask));
-      deferred->Branch(eq);
+      if (!is_contextual) {
+        // Check that the receiver is a heap object.
+        __ tst(receiver, Operand(kSmiTagMask));
+        deferred->Branch(eq);
+      }
+
+      // Check for the_hole_value if necessary.
+      // Below we rely on the number of instructions generated, and we can't
+      // cope with the Check macro which does not generate a fixed number of
+      // instructions.
+      Label skip, check_the_hole, cont;
+      if (FLAG_debug_code && is_contextual && is_dont_delete) {
+        __ b(&skip);
+        __ bind(&check_the_hole);
+        __ Check(ne, "DontDelete cells can't contain the hole");
+        __ b(&cont);
+        __ bind(&skip);
+      }
+
+#ifdef DEBUG
+      int InlinedNamedLoadInstructions = 5;
+      Label check_inlined_codesize;
+      masm_->bind(&check_inlined_codesize);
+#endif

       Register scratch = VirtualFrame::scratch0();
       Register scratch2 = VirtualFrame::scratch1();
@@ -6812,12 +6887,42 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
       __ cmp(scratch, scratch2);
       deferred->Branch(ne);

-      // Initially use an invalid index. The index will be patched by the
-      // inline cache code.
-      __ ldr(receiver, MemOperand(receiver, 0));
+      if (is_contextual) {
+#ifdef DEBUG
+        InlinedNamedLoadInstructions += 1;
+#endif
+        // Load the (initially invalid) cell and get its value.
+        masm()->mov(receiver, Operand(Factory::null_value()));
+        __ ldr(receiver,
+               FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
+
+        deferred->set_is_dont_delete(is_dont_delete);
+
+        if (!is_dont_delete) {
+#ifdef DEBUG
+          InlinedNamedLoadInstructions += 3;
+#endif
+          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          deferred->Branch(eq);
+        } else if (FLAG_debug_code) {
+#ifdef DEBUG
+          InlinedNamedLoadInstructions += 3;
+#endif
+          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          __ b(&check_the_hole, eq);
+          __ bind(&cont);
+        }
+      } else {
+        // Initially use an invalid index. The index will be patched by the
+        // inline cache code.
+        __ ldr(receiver, MemOperand(receiver, 0));
+      }

       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedNamedLoadInstructions,
+      // If the code before is updated, the offsets in ic-arm.cc
+      // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
+      // to be updated.
+      ASSERT_EQ(InlinedNamedLoadInstructions,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
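A small self-contained sketch (not V8 code) of the instruction-count bookkeeping that the ASSERT_EQ above relies on. The base count of 5 and the +1/+3 increments are taken from the diff; the function name, parameters, and structure are assumptions for illustration.

#include <cassert>

static int ExpectedInlinedNamedLoadInstructions(bool is_contextual,
                                                bool is_dont_delete,
                                                bool debug_code) {
  int count = 5;           // Base inlined load, counted from the DEBUG bind.
  if (is_contextual) {
    count += 1;            // mov of the (initially invalid) cell.
    if (!is_dont_delete || debug_code) {
      count += 3;          // cmp/branch sequence for the_hole_value.
    }
  }
  return count;
}

int main() {
  // The non-contextual in-object load keeps a fixed size of 5 instructions.
  assert(ExpectedInlinedNamedLoadInstructions(false, false, false) == 5);
  // A contextual load of a DontDelete cell skips the hole check in release.
  assert(ExpectedInlinedNamedLoadInstructions(true, true, false) == 6);
  return 0;
}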
@@ -194,14 +194,6 @@ enum ArgumentsAllocationMode {
 };


-// Different nop operations are used by the code generator to detect certain
-// states of the generated code.
-enum NopMarkerTypes {
-  NON_MARKING_NOP = 0,
-  PROPERTY_ACCESS_INLINED
-};
-
-
 // -------------------------------------------------------------------------
 // CodeGenerator
@@ -279,7 +279,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
   for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
-    __ nop(2);
+    __ nop(MacroAssembler::DEBUG_BREAK_NOP);
   }
   ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
             masm->InstructionsGeneratedSince(&check_codesize));
@@ -904,9 +904,9 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
   __ TailCallExternalReference(ref, 2, 1);
 }


-static inline bool IsInlinedICSite(Address address,
-                                   Address* inline_end_address) {
+// Returns the code marker, or the 0 if the code is not marked.
+static inline int InlinedICSiteMarker(Address address,
+                                      Address* inline_end_address) {
   // If the instruction after the call site is not the pseudo instruction nop1
   // then this is not related to an inlined in-object property load. The nop1
   // instruction is located just after the call to the IC in the deferred code
@@ -914,9 +914,11 @@ static inline bool IsInlinedICSite(Address address,
   // a branch instruction for jumping back from the deferred code.
   Address address_after_call = address + Assembler::kCallTargetAddressOffset;
   Instr instr_after_call = Assembler::instr_at(address_after_call);
-  if (!Assembler::IsNop(instr_after_call, PROPERTY_ACCESS_INLINED)) {
-    return false;
-  }
+  int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
+
+  // A negative result means the code is not marked.
+  if (code_marker <= 0) return 0;
+
   Address address_after_nop = address_after_call + Assembler::kInstrSize;
   Instr instr_after_nop = Assembler::instr_at(address_after_nop);
   // There may be some reg-reg move and frame merging code to skip over before
@@ -933,7 +935,7 @@ static inline bool IsInlinedICSite(Address address,
   ASSERT(b_offset < 0);  // Jumping back from deferred code.
   *inline_end_address = address_after_nop + b_offset;

-  return true;
+  return code_marker;
 }
@@ -941,7 +943,10 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // Find the end of the inlined code for handling the load if this is an
   // inlined IC call site.
   Address inline_end_address;
-  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+  if (InlinedICSiteMarker(address, &inline_end_address)
+      != Assembler::PROPERTY_ACCESS_INLINED) {
+    return false;
+  }

   // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
   // The immediate must be representable in 12 bits.
@@ -959,8 +964,12 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);

   // Patch the map check.
+  // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
+  // 4 instructions before the end of the inlined code.
+  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
+  int ldr_map_offset = -4;
   Address ldr_map_instr_address =
-      inline_end_address - 4 * Assembler::kInstrSize;
+      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
@@ -971,8 +980,41 @@ bool LoadIC::PatchInlinedContextualLoad(Address address,
                                         Object* map,
                                         Object* cell,
                                         bool is_dont_delete) {
-  // TODO(<bug#>): implement this.
-  return false;
+  // Find the end of the inlined code for handling the contextual load if
+  // this is inlined IC call site.
+  Address inline_end_address;
+  int marker = InlinedICSiteMarker(address, &inline_end_address);
+  if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
+        (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
+    return false;
+  }
+  // On ARM we don't rely on the is_dont_delete argument as the hint is already
+  // embedded in the code marker.
+  bool marker_is_dont_delete =
+      marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
+
+  // These are the offsets from the end of the inlined code.
+  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
+  int ldr_map_offset = marker_is_dont_delete ? -5: -8;
+  int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
+  if (FLAG_debug_code && marker_is_dont_delete) {
+    // Three extra instructions were generated to check for the_hole_value.
+    ldr_map_offset -= 3;
+    ldr_cell_offset -= 3;
+  }
+  Address ldr_map_instr_address =
+      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
+  Address ldr_cell_instr_address =
+      inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
+
+  // Patch the map check.
+  Assembler::set_target_address_at(ldr_map_instr_address,
+                                   reinterpret_cast<Address>(map));
+  // Patch the cell address.
+  Assembler::set_target_address_at(ldr_cell_instr_address,
+                                   reinterpret_cast<Address>(cell));
+
+  return true;
 }
@@ -980,7 +1022,10 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // Find the end of the inlined code for the store if there is an
   // inlined version of the store.
   Address inline_end_address;
-  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+  if (InlinedICSiteMarker(address, &inline_end_address)
+      != Assembler::PROPERTY_ACCESS_INLINED) {
+    return false;
+  }

   // Compute the address of the map load instruction.
   Address ldr_map_instr_address =
@@ -1025,7 +1070,10 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {

 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
   Address inline_end_address;
-  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+  if (InlinedICSiteMarker(address, &inline_end_address)
+      != Assembler::PROPERTY_ACCESS_INLINED) {
+    return false;
+  }

   // Patch the map check.
   Address ldr_map_instr_address =
@@ -1042,7 +1090,10 @@ bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
   // Find the end of the inlined code for handling the store if this is an
   // inlined IC call site.
   Address inline_end_address;
-  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+  if (InlinedICSiteMarker(address, &inline_end_address)
+      != Assembler::PROPERTY_ACCESS_INLINED) {
+    return false;
+  }

   // Patch the map check.
   Address ldr_map_instr_address =
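The fixed offsets used above by PatchInlinedLoad and PatchInlinedContextualLoad can be summarized in a small standalone sketch. The offset values come straight from the diff; the struct, function names, and the hard-coded kInstrSize = 4 (the ARM instruction width) are illustrative assumptions rather than the library's API.

#include <cstdint>

struct ContextualPatchOffsets { int ldr_map; int ldr_cell; };

// Offsets are in instructions, counted back from the end of the inlined code.
static ContextualPatchOffsets GetContextualLoadOffsets(bool dont_delete,
                                                       bool debug_code) {
  ContextualPatchOffsets offsets = dont_delete
      ? ContextualPatchOffsets{-5, -2}
      : ContextualPatchOffsets{-8, -5};
  if (debug_code && dont_delete) {
    // Three extra instructions are emitted to check for the_hole_value.
    offsets.ldr_map -= 3;
    offsets.ldr_cell -= 3;
  }
  return offsets;
}

// The patched instruction addresses are then computed the same way the diff
// does: inline_end_address + offset * instruction size.
static uint8_t* InstrAddress(uint8_t* inline_end_address, int offset) {
  const int kInstrSize = 4;
  return inline_end_address + offset * kInstrSize;
}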
@@ -319,6 +319,40 @@ class MacroAssembler: public Assembler {
                            Register scratch,
                            Label* miss);

+  inline void MarkCode(NopMarkerTypes type) {
+    nop(type);
+  }
+
+  // Check if the given instruction is a 'type' marker.
+  // ie. check if is is a mov r<type>, r<type> (referenced as nop(type))
+  // These instructions are generated to mark special location in the code,
+  // like some special IC code.
+  static inline bool IsMarkedCode(Instr instr, int type) {
+    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+    return IsNop(instr, type);
+  }
+
+
+  static inline int GetCodeMarker(Instr instr) {
+    int dst_reg_offset = 12;
+    int dst_mask = 0xf << dst_reg_offset;
+    int src_mask = 0xf;
+    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
+    int src_reg = instr & src_mask;
+    uint32_t non_register_mask = ~(dst_mask | src_mask);
+    uint32_t mov_mask = al | 13 << 21;
+
+    // Return <n> if we have a mov rn rn, else return -1.
+    int type = ((instr & non_register_mask) == mov_mask) &&
+               (dst_reg == src_reg) &&
+               (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
+                   ? src_reg
+                   : -1;
+    ASSERT((type == -1) ||
+           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+    return type;
+  }
+
+
   // ---------------------------------------------------------------------------
   // Allocation support