X64: Added inline keyed load/store and a bunch of other missing functions.

Review URL: http://codereview.chromium.org/160272


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2585 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: lrn@chromium.org
Date:   2009-07-30 09:18:14 +00:00
Parent: cffc051177
Commit: dc8ca16931

10 changed files with 1496 additions and 347 deletions


@@ -839,7 +839,8 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
-  Address test_instruction_address = address + 4;
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
   if (*test_instruction_address != kTestEaxByte) return false;
@@ -865,7 +866,8 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
 static bool PatchInlinedMapCheck(Address address, Object* map) {
-  Address test_instruction_address = address + 4;  // 4 = stub address
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
   if (*test_instruction_address != kTestEaxByte) return false;
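For context on the two hunks above: the hard-coded "+ 4" worked on ia32 because a call there is the opcode E8 followed by a 4-byte relative operand, so the patch point for the call target sits exactly 4 bytes before the return address. Naming that distance is what lets the same patching logic carry over to x64, where the call sequence is longer. A sketch of the ia32 layout (standard encoding, not taken from this diff):

// ia32 call site, as the IC patching code sees it:
//
//   E8 xx xx xx xx      ; call rel32
//   ^  ^
//   |  'address': the patch point for the call target
//   +-- opcode; return address = address + 4 = end of the instruction
//
// Hence Assembler::kTargetAddrToReturnAddrDist == 4 on ia32, and
// test_instruction_address lands on the first byte after the call.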


@@ -389,6 +389,10 @@ class KeyedStoreIC: public IC {
   // Support for patching the map that is checked in an inlined
   // version of keyed store.
+  // The address is the patch point for the IC call
+  // (Assembler::kTargetAddrToReturnAddrDist before the end of
+  // the call/return address).
+  // The map is the new map that the inlined code should check against.
   static bool PatchInlinedStore(Address address, Object* map);

   friend class IC;


@@ -1140,6 +1140,9 @@ void Assembler::movq(const Operand& dst, Register src) {
 void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+  // This method must not be used with heap object references. The stored
+  // address is not GC safe. Use the handle version instead.
+  ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
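The new assert encodes the rule stated in the comment: reloc modes above LAST_GCED_ENUM are the ones the GC does not visit, so a raw void* immediate is only legal for non-heap addresses. A hypothetical pair of call sites (variable names assumed, not from this diff):

// Fine: an external (non-heap) address, invisible to the GC by design.
masm.movq(rdx, reinterpret_cast<void*>(external_function_address),
          RelocInfo::EXTERNAL_REFERENCE);

// Wrong, and now caught by the ASSERT: a raw heap pointer would be embedded
// as an untracked immediate that the GC could never relocate. The
// Handle<Object> overload (RelocInfo::EMBEDDED_OBJECT) is the GC-safe path:
masm.movq(rdx, some_handle, RelocInfo::EMBEDDED_OBJECT);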


@@ -292,6 +292,7 @@ enum ScaleFactor {
   times_4 = 2,
   times_8 = 3,
   times_int_size = times_4,
+  times_half_pointer_size = times_4,
   times_pointer_size = times_8
 };
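times_half_pointer_size exists for indexing with a still-tagged smi key, as the inlined keyed store later in this commit does: with a one-bit smi tag, a smi holds value << 1, so scaling the tagged word by 4 yields value * 8, exactly one pointer per index on x64. A standalone sketch of the arithmetic (plain C++; the one-bit, zero-valued smi tag is the assumption the keyed store code asserts):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kPointerSize = 8;  // x64
  const intptr_t kSmiTagSize = 1;   // assumed, matching ASSERT_EQ(1, kSmiTagSize)
  for (intptr_t index = 0; index < 1000; index++) {
    intptr_t tagged_smi = index << kSmiTagSize;  // smi encoding of index
    // times_half_pointer_size == times_4: scaling the *tagged* value by 4
    // lands on the same byte offset as index * kPointerSize.
    assert(tagged_smi * 4 == index * kPointerSize);
  }
  return 0;
}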


@@ -389,6 +389,112 @@ bool CodeGenerator::HasValidEntryRegisters() {
 #endif

+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  explicit DeferredReferenceGetKeyedValue(Register dst,
+                                          Register receiver,
+                                          Register key,
+                                          bool is_global)
+      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Register key_;
+  bool is_global_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  __ push(receiver_);  // First IC argument.
+  __ push(key_);       // Second IC argument.
+
+  // Calculate the delta from the IC call instruction to the map check
+  // movq instruction in the inlined version. This delta is stored in
+  // a test(rax, delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the movq instruction.
+  // This means that we cannot allow test instructions after calls to
+  // KeyedLoadIC stubs in other places.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  RelocInfo::Mode mode = is_global_
+      ? RelocInfo::CODE_TARGET_CONTEXT
+      : RelocInfo::CODE_TARGET;
+  __ Call(ic, mode);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction. We use masm_-> directly here instead of the __
+  // macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value. This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  // TODO(X64): Consider whether it's worth switching the test to a
+  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+  // be generated normally.
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
+  Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+  // Push receiver and key arguments on the stack.
+  __ push(receiver_);
+  __ push(key_);
+  // Move value argument to rax as expected by the IC stub.
+  if (!value_.is(rax)) __ movq(rax, value_);
+  // Call the IC stub.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // The delta from the start of the map-compare instructions (initial movq)
+  // to the test instruction. We use masm_-> directly here instead of the
+  // __ macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value. This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
+  // Restore value (returned from store IC), key and receiver
+  // registers.
+  if (!value_.is(rax)) __ movq(value_, rax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
 class DeferredStackCheck: public DeferredCode {
  public:
   DeferredStackCheck() {
@@ -2193,9 +2299,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
       // The receiver is the argument to the runtime call. It is the
       // first value pushed when the reference was loaded to the
       // frame.
-      // TODO(X64): Enable this and the switch back to fast, once they work.
-      // frame_->PushElementAt(target.size() - 1);
-      // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+      frame_->PushElementAt(target.size() - 1);
+      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
     }
     if (node->op() == Token::ASSIGN ||
         node->op() == Token::INIT_VAR ||
@@ -2203,20 +2308,18 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
       Load(node->value());
     } else {
-      // Literal* literal = node->value()->AsLiteral();
+      Literal* literal = node->value()->AsLiteral();
       bool overwrite_value =
           (node->value()->AsBinaryOperation() != NULL &&
            node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-      // Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
       // There are two cases where the target is not read in the right hand
       // side, that are easy to test for: the right hand side is a literal,
       // or the right hand side is a different variable. TakeValue invalidates
       // the target, with an implicit promise that it will be written to again
       // before it is read.
-      // TODO(X64): Implement TakeValue optimization. Check issue 150016.
-      if (false) {
-        // if (literal != NULL || (right_var != NULL && right_var != var)) {
-        // target.TakeValue(NOT_INSIDE_TYPEOF);
+      if (literal != NULL || (right_var != NULL && right_var != var)) {
+        target.TakeValue(NOT_INSIDE_TYPEOF);
       } else {
         target.GetValue(NOT_INSIDE_TYPEOF);
       }
@@ -2247,9 +2350,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
       // argument to the runtime call is the receiver, which is the
       // first value pushed as part of the reference, which is below
       // the lhs value.
-      // TODO(X64): Enable this once ToFastProperties works.
-      // frame_->PushElementAt(target.size());
-      // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+      frame_->PushElementAt(target.size());
+      Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
     }
   }
 }
@@ -3645,7 +3747,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
   // Smi => false iff zero.
   ASSERT(kSmiTag == 0);
-  __ testq(value.reg(), value.reg());
+  __ testl(value.reg(), value.reg());
   dest->false_target()->Branch(zero);
   __ testl(value.reg(), Immediate(kSmiTagMask));
   dest->true_target()->Branch(zero);
@@ -4130,7 +4232,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
   // A test rax instruction following the call signals that the inobject
   // property case was inlined. Ensure that there is not a test eax
   // instruction here.
-  __ nop();
+  masm_->nop();
   // Discard the global object. The result is in answer.
   frame_->Drop();
   return answer;
@@ -4700,7 +4802,7 @@ void DeferredReferenceGetNamedValue::Generate() {
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
-  masm_->testq(rax, Immediate(-delta_to_patch_site));
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
   __ IncrementCounter(&Counters::named_load_inline_miss, 1);

   if (!dst_.is(rax)) __ movq(dst_, rax);
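The switch from testq to testl in this hunk (and in the new deferred code earlier) matters for the patching protocol rather than for the value being tested: test rax, imm32 carries a REX.W prefix, so its first byte is 0x48, while test eax, imm32 starts with the single opcode byte 0xA9 that the IC code checks for (kTestEaxByte). A sketch of the two encodings (standard x86-64 encoding, not from this diff):

// testl(rax, Immediate(imm32))  ->  A9 xx xx xx xx      ; 5 bytes
// testq(rax, Immediate(imm32))  ->  48 A9 xx xx xx xx   ; REX.W prefix first
//
// The patcher reads *test_instruction_address and compares it against 0xA9,
// so the REX.W-prefixed form would never be recognized as an inline marker.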
@@ -5287,7 +5389,8 @@ void Reference::GetValue(TypeofState typeof_state) {
             kScratchRegister);
     // This branch is always a forwards branch so it's always a fixed
     // size which allows the assert below to succeed and patching to work.
-    deferred->Branch(not_equal);
+    // Don't use deferred->Branch(...), since that might add coverage code.
+    masm->j(not_equal, deferred->entry_label());

     // The delta from the patch label to the load offset must be
     // statically known.
@@ -5314,26 +5417,117 @@ void Reference::GetValue(TypeofState typeof_state) {
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-      // TODO(x64): Implement inlined loads for keyed properties.
-      // Make sure to load length field as a 32-bit quantity.
-      // Comment cmnt(masm, "[ Load from keyed Property");
-      RelocInfo::Mode mode = is_global
-          ? RelocInfo::CODE_TARGET_CONTEXT
-          : RelocInfo::CODE_TARGET;
-      Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
-      // Make sure that we do not have a test instruction after the
-      // call. A test instruction after the call is used to
-      // indicate that we have generated an inline version of the
-      // keyed load. The explicit nop instruction is here because
-      // the push that follows might be peep-hole optimized away.
-      __ nop();
-      cgen_->frame()->Push(&answer);
+
+      // Inline array load code if inside of a loop. We do not know
+      // the receiver map yet, so we initially generate the code with
+      // a check against an invalid map. In the inline cache code, we
+      // patch the map check if appropriate.
+      if (cgen_->loop_nesting() > 0) {
+        Comment cmnt(masm, "[ Inlined load from keyed Property");
+
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        // Use a fresh temporary to load the elements without destroying
+        // the receiver which is needed for the deferred slow case.
+        Result elements = cgen_->allocator()->Allocate();
+        ASSERT(elements.is_valid());
+
+        // Use a fresh temporary for the index and later the loaded
+        // value.
+        Result index = cgen_->allocator()->Allocate();
+        ASSERT(index.is_valid());
+
+        DeferredReferenceGetKeyedValue* deferred =
+            new DeferredReferenceGetKeyedValue(index.reg(),
+                                               receiver.reg(),
+                                               key.reg(),
+                                               is_global);
+
+        // Check that the receiver is not a smi (only needed if this
+        // is not a load from the global context) and that it has the
+        // expected map.
+        if (!is_global) {
+          __ testl(receiver.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(zero);
+        }
+
+        // Initially, use an invalid map. The map is patched in the IC
+        // initialization code.
+        __ bind(deferred->patch_site());
+        // Use masm-> here instead of the double underscore macro since extra
+        // coverage code can interfere with the patching.
+        masm->movq(kScratchRegister, Factory::null_value(),
+                   RelocInfo::EMBEDDED_OBJECT);
+        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                   kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Check that the key is a non-negative smi.
+        __ testl(key.reg(),
+                 Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
+        deferred->Branch(not_zero);
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ movq(elements.reg(),
+                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+               Factory::fixed_array_map());
+        deferred->Branch(not_equal);
+
+        // Shift the key to get the actual index value and check that
+        // it is within bounds.
+        __ movl(index.reg(), key.reg());
+        __ shrl(index.reg(), Immediate(kSmiTagSize));
+        __ cmpl(index.reg(),
+                FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+        deferred->Branch(above_equal);
+
+        // The index register holds the un-smi-tagged key. It has been
+        // zero-extended to 64 bits, so it can be used directly as the
+        // index in the operand below.
+        // Load and check that the result is not the hole. We could
+        // reuse the index or elements register for the value.
+        //
+        // TODO(206): Consider whether it makes sense to try some
+        // heuristic about which register to reuse. For example, if
+        // one is rax, then we can reuse that one because the value
+        // coming from the deferred code will be in rax.
+        Result value = index;
+        __ movq(value.reg(),
+                Operand(elements.reg(),
+                        index.reg(),
+                        times_pointer_size,
+                        FixedArray::kHeaderSize - kHeapObjectTag));
+        elements.Unuse();
+        index.Unuse();
+        __ Cmp(value.reg(), Factory::the_hole_value());
+        deferred->Branch(equal);
+        __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+        deferred->BindExit();
+        // Restore the receiver and key to the frame and push the
+        // result on top of it.
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+      } else {
+        Comment cmnt(masm, "[ Load from keyed Property");
+        RelocInfo::Mode mode = is_global
+            ? RelocInfo::CODE_TARGET_CONTEXT
+            : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+        // Make sure that we do not have a test instruction after the
+        // call. A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed load. The explicit nop instruction is here because
+        // the push that follows might be peep-hole optimized away.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
       break;
     }
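The kSmiTagMask | 0x80000000 test above folds two checks into one instruction: bit 0 must be clear for the word to be a smi at all, and bit 31 must be clear for the 32-bit smi to be non-negative. A runnable sketch of the predicate, assuming the same one-bit, zero-valued smi tag:

#include <cassert>
#include <cstdint>

// True iff the low 32 bits look like a non-negative smi.
static bool IsNonNegativeSmi(uint32_t word) {
  const uint32_t kSmiTagMask = 1;  // assumed: one-bit tag, kSmiTag == 0
  return (word & (kSmiTagMask | 0x80000000u)) == 0;
}

int main() {
  assert(IsNonNegativeSmi(7u << 1));                           // smi 7
  assert(!IsNonNegativeSmi((7u << 1) | 1));                    // tagged pointer-like word
  assert(!IsNonNegativeSmi(static_cast<uint32_t>(-7) << 1));   // negative smi
  return 0;
}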
@@ -5400,15 +5594,105 @@ void Reference::SetValue(InitState init_state) {
     case KEYED: {
       Comment cmnt(masm, "[ Store to keyed Property");
-      // TODO(x64): Implement inlined version of keyed stores.
-      Result answer = cgen_->frame()->CallKeyedStoreIC();
-      // Make sure that we do not have a test instruction after the
-      // call. A test instruction after the call is used to
-      // indicate that we have generated an inline version of the
-      // keyed store.
-      __ nop();
-      cgen_->frame()->Push(&answer);
+
+      // Generate inlined version of the keyed store if the code is in
+      // a loop and the key is likely to be a smi.
+      Property* property = expression()->AsProperty();
+      ASSERT(property != NULL);
+      SmiAnalysis* key_smi_analysis = property->key()->type();
+
+      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+        Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+        // Get the receiver, key and value into registers.
+        Result value = cgen_->frame()->Pop();
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+
+        Result tmp = cgen_->allocator_->Allocate();
+        ASSERT(tmp.is_valid());
+
+        // Determine whether the value is a constant before putting it
+        // in a register.
+        bool value_is_constant = value.is_constant();
+
+        // Make sure that value, key and receiver are in registers.
+        value.ToRegister();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        DeferredReferenceSetKeyedValue* deferred =
+            new DeferredReferenceSetKeyedValue(value.reg(),
+                                               key.reg(),
+                                               receiver.reg());
+
+        // Check that the value is a smi if it is not a constant.
+        // We can skip the write barrier for smis and constants.
+        if (!value_is_constant) {
+          __ testl(value.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+        }
+
+        // Check that the key is a non-negative smi.
+        __ testl(key.reg(),
+                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+        deferred->Branch(not_zero);
+
+        // Check that the receiver is not a smi.
+        __ testl(receiver.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(zero);
+
+        // Check that the receiver is a JSArray.
+        __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Check that the key is within bounds. Both the key and the
+        // length of the JSArray are smis, so compare only low 32 bits.
+        __ cmpl(key.reg(),
+                FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+        deferred->Branch(greater_equal);
+
+        // Get the elements array from the receiver and check that it
+        // is a flat array (not a dictionary).
+        __ movq(tmp.reg(),
+                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        // Bind the deferred code patch site to be able to locate the
+        // fixed array map comparison. When debugging, we patch this
+        // comparison to always fail so that we will hit the IC call
+        // in the deferred code which will allow the debugger to
+        // break for fast case stores.
+        __ bind(deferred->patch_site());
+        // Avoid using __ to ensure the distance from patch_site
+        // to the map address is always the same.
+        masm->movq(kScratchRegister, Factory::fixed_array_map(),
+                   RelocInfo::EMBEDDED_OBJECT);
+        __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+                kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Store the value.
+        ASSERT_EQ(1, kSmiTagSize);
+        ASSERT_EQ(0, kSmiTag);
+        __ movq(Operand(tmp.reg(),
+                        key.reg(),
+                        times_half_pointer_size,
+                        FixedArray::kHeaderSize - kHeapObjectTag),
+                value.reg());
+        __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+        deferred->BindExit();
+
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+      } else {
+        Result answer = cgen_->frame()->CallKeyedStoreIC();
+        // Make sure that we do not have a test instruction after the
+        // call. A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed store.
+        masm->nop();
+        cgen_->frame()->Push(&answer);
+      }
       break;
     }
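Two details of the store fast path are worth spelling out. The bounds check compares the key against the JSArray length while both are still tagged: with a zero tag in the low bit, tagging is just a doubling, which preserves order for values that fit in 31 bits, so cmpl on the tagged words answers the same question as comparing the raw indices. The store itself then scales the still-tagged key with times_half_pointer_size, per the note at the ScaleFactor enum above. A small sketch of the order-preservation claim (assumed smi encoding: value << 1 with kSmiTag == 0):

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t key = 0; key < 100; key++) {
    for (int32_t length = 0; length < 100; length++) {
      bool raw = key < length;
      bool tagged = (key << 1) < (length << 1);  // what cmpl on smis sees
      assert(raw == tagged);
    }
  }
  return 0;
}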


@@ -159,16 +159,64 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
 }

-void KeyedLoadIC::ClearInlinedVersion(Address address) {
-  // TODO(X64): Implement this when LoadIC is enabled.
-}
+// One byte opcode for test eax,0xXXXXXXXX.
+static const byte kTestEaxByte = 0xA9;
+
+
+static bool PatchInlinedMapCheck(Address address, Object* map) {
+  // The argument is the address of the start of the call sequence
+  // that called the IC.
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
+  // The keyed load has a fast inlined case if the IC call instruction
+  // is immediately followed by a test instruction.
+  if (*test_instruction_address != kTestEaxByte) return false;
+  // Fetch the offset from the test instruction to the map compare
+  // instructions (starting with the 64-bit immediate mov of the map
+  // address). This offset is stored in the last 4 bytes of the 5
+  // byte test instruction.
+  Address delta_address = test_instruction_address + 1;
+  int delta = *reinterpret_cast<int*>(delta_address);
+  // Compute the map address. The map address is in the last 8 bytes
+  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
+  // to the offset to get the map address.
+  Address map_address = test_instruction_address + delta + 2;
+  // Patch the map check.
+  *(reinterpret_cast<Object**>(map_address)) = map;
+  return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+  // Insert null as the map to check for to make sure the map check fails,
+  // sending control flow to the IC instead of the inlined version.
+  PatchInlinedLoad(address, Heap::null_value());
+}
+

 void KeyedStoreIC::ClearInlinedVersion(Address address) {
-  // TODO(X64): Implement this when LoadIC is enabled.
+  // Insert null as the elements map to check for. This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
 }


 void KeyedStoreIC::RestoreInlinedVersion(Address address) {
-  UNIMPLEMENTED();
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
 }
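The "+ 1" and "+ 2" offsets in PatchInlinedMapCheck come straight from the instruction encodings. A sketch of the layout the patcher walks (generic x86-64 encoding; the exact REX and opcode bytes depend on the registers used):

// test_instruction_address (the return address of the IC call) points here:
//
//   A9 dd dd dd dd                     ; test eax, imm32, imm32 == delta (< 0)
//      ^ delta_address == test_instruction_address + 1
//
// test_instruction_address + delta points back at the map check:
//
//   <REX.W> <B8+reg> mm mm mm mm mm mm mm mm   ; movq scratch, <map>, 10 bytes
//                    ^ map_address == start + 2 (prefix + opcode, then imm64)
//   cmpq(FieldOperand(receiver, HeapObject::kMapOffset), scratch)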
@@ -310,18 +358,6 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
 }

-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  // Never patch the map in the map check, so the check always fails.
-  return false;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  // Never patch the map in the map check, so the check always fails.
-  return false;
-}
-
 void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
   // ----------- S t a t e -------------
   //  -- rax    : value
@@ -539,7 +575,10 @@ const int LoadIC::kOffsetToLoadInstruction = 20;

 void LoadIC::ClearInlinedVersion(Address address) {
-  // TODO(X64): Implement this when LoadIC is enabled.
+  // Reset the map check of the inlined inobject property load (if
+  // present) to guarantee failure by holding an invalid map (the null
+  // value). The offset can be patched to anything.
+  PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
 }
@@ -605,13 +644,37 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

 void LoadIC::GenerateStringLength(MacroAssembler* masm) {
   Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
-  // TODO(X64): Implement this function. Until then, the code is not patched.
-  return false;
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
+  // If the instruction following the call is not a test eax, nothing
+  // was inlined.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 8 bytes of the 10-byte
+  // immediate move instruction, so we add 2 to get the
+  // offset to the last 8 bytes.
+  Address map_address = test_instruction_address + delta + 2;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The offset is in the 32-bit displacement of a seven byte
+  // memory-to-register move instruction (REX.W 0x8B ModR/M disp32),
+  // so we add 3 to get the offset of the displacement.
+  Address offset_address =
+      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+  return true;
 }


 void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
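A note on that last hunk: the opcode cited in the comment has been corrected to 0x8B, the memory-to-register form of MOV (0x88 is the byte-sized register-to-memory form). kOffsetToLoadInstruction skips from the map check to the inlined property load, a REX.W 0x8B load with a 32-bit displacement, so the displacement lives 3 bytes in; the value stored there is offset - kHeapObjectTag because FieldOperand subtracts the object tag from HeapObject-relative offsets. A sketch (assumes a base register that needs no SIB byte):

//   <REX.W> 8B <ModRM> dd dd dd dd   ; movq dst, [receiver + disp32], 7 bytes
//                      ^ offset_address == instruction start + 3
//
// disp32 is patched to (offset - kHeapObjectTag), matching what
// FieldOperand(receiver, offset) would have encoded at code-generation time.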


@@ -318,6 +318,17 @@ void MacroAssembler::Push(Handle<Object> source) {
 }

+
+void MacroAssembler::Push(Smi* source) {
+  if (IsUnsafeSmi(source)) {
+    LoadUnsafeSmi(kScratchRegister, source);
+    push(kScratchRegister);
+  } else {
+    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
+    push(Immediate(smi));
+  }
+}
+

 void MacroAssembler::Jump(ExternalReference ext) {
   movq(kScratchRegister, ext);
   jmp(kScratchRegister);
@@ -363,6 +374,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   movq(kScratchRegister, code_object, rmode);
 #ifdef DEBUG
+  // Patch target is kPointerSize bytes *before* target label.
   Label target;
   bind(&target);
 #endif


@@ -164,6 +164,7 @@ class MacroAssembler: public Assembler {
   void Cmp(Register dst, Handle<Object> source);
   void Cmp(const Operand& dst, Handle<Object> source);
   void Push(Handle<Object> source);
+  void Push(Smi* smi);

   // Control Flow
   void Jump(Address destination, RelocInfo::Mode rmode);

(One file's diff is suppressed because it is too large.)


@@ -118,6 +118,7 @@ test-api/HugeConsStringOutOfMemory: CRASH || FAIL
 test-api/OutOfMemory: CRASH || FAIL
 test-api/OutOfMemoryNested: CRASH || FAIL
 test-api/Threading: CRASH || FAIL
+test-api/Threading2: PASS || TIMEOUT
 test-api/TryCatchSourceInfo: CRASH || FAIL
 test-api/RegExpInterruption: PASS || TIMEOUT
 test-api/RegExpStringModification: PASS || TIMEOUT