Fix potential overwriting of debug jumps in following code.

Add JSArrayLength, CallKnownFunction, and InstanceType operations. Remove LoadGlobal and StoreGlobal again (they fail).

Review URL: http://codereview.chromium.org/6347067

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6645 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: d86ac17a6e
Commit: 19b734fd82
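The problem being fixed: patching a lazy-deoptimization call on x64 takes 13 bytes (a 64-bit immediate move into a register plus an indirect call), and writing that over a gap shorter than 13 bytes would clobber whatever follows, including debug jumps. A minimal sketch of the space check this commit introduces (hypothetical helper, not code from the commit):

#include <cstddef>
#include <cstdint>

const int kCallInstructionLength = 13;      // movq r10, imm64 + call r10
const int kShortCallInstructionLength = 5;  // call rel32

// A long call is safe only if it ends before the next deoptimization point.
bool LongCallFits(const uint8_t* call_address, const uint8_t* next_deopt_pc) {
  return next_deopt_pc == NULL ||
         next_deopt_pc >= call_address + kCallInstructionLength;
}

When the long call does not fit, the commit instead writes a 5-byte relative call into a jump table placed just before the safepoint table.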
@@ -5925,7 +5925,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
     } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
       Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
       it.rinfo()->set_target_cell(*cell);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
@@ -230,4 +230,24 @@ uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
 }
 
 
+int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
+  int result = 0;
+  if (!deoptimization_info_.is_empty()) {
+    unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
+    for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
+      DeoptimizationInfo info = deoptimization_info_[i];
+      if (static_cast<int>(info.deoptimization_index) !=
+          Safepoint::kNoDeoptimizationIndex) {
+        if (previous_gap_end + limit > info.pc) {
+          result++;
+        }
+        previous_gap_end = info.pc_after_gap;
+      }
+    }
+  }
+  return result;
+}
+
+
 } }  // namespace v8::internal
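Restated outside the builder (assuming, as above, that pc is a point's code offset and pc_after_gap the end of its gap code), the counting logic looks like this self-contained sketch:

#include <vector>

struct DeoptPoint {
  unsigned pc;            // start of the deoptimization point
  unsigned pc_after_gap;  // end of the gap code following it
};

// Counts points whose successor starts less than `limit` bytes after the
// end of this point's gap. (The real builder also skips entries without a
// deoptimization index; this sketch assumes every element is a deopt point.)
int CountShortIntervals(const std::vector<DeoptPoint>& points, unsigned limit) {
  int result = 0;
  if (points.empty()) return result;
  unsigned previous_gap_end = points[0].pc_after_gap;
  for (size_t i = 1; i < points.size(); i++) {
    if (previous_gap_end + limit > points[i].pc) result++;
    previous_gap_end = points[i].pc_after_gap;
  }
  return result;
}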
@@ -220,8 +220,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
                         int arguments,
                         int deoptimization_index);
 
-  // Update the last safepoint with the size of the code generated for the gap
-  // following it.
+  // Update the last safepoint with the size of the code generated until the
+  // end of the gap following it.
   void SetPcAfterGap(int pc) {
     ASSERT(!deoptimization_info_.is_empty());
     int index = deoptimization_info_.length() - 1;
@@ -232,6 +232,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
   // entry must be enough to hold all the pointer indexes.
   void Emit(Assembler* assembler, int bits_per_entry);
 
+  // Count the number of deoptimization points where the next
+  // following deoptimization point comes less than limit bytes
+  // after the end of this point's gap.
+  int CountShortDeoptimizationIntervals(unsigned limit);
+
  private:
   struct DeoptimizationInfo {
     unsigned pc;
@@ -247,8 +252,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
   ZoneList<ZoneList<int>*> indexes_;
   ZoneList<ZoneList<int>*> registers_;
 
-  bool emitted_;
   unsigned offset_;
+  bool emitted_;
 
   DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
 };
@@ -916,6 +916,23 @@ void Assembler::call(const Operand& op) {
 }
 
 
+// Calls directly to the given address using a relative offset.
+// Should only ever be used in Code objects for calls within the
+// same Code object. Should not be used when generating new code (use labels),
+// but only when patching existing code.
+void Assembler::call(Address target) {
+  positions_recorder()->WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1000 #32-bit disp.
+  emit(0xE8);
+  Address source = pc_ + 4;
+  intptr_t displacement = target - source;
+  ASSERT(is_int32(displacement));
+  emitl(static_cast<int32_t>(displacement));
+}
+
+
 void Assembler::clc() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
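The displacement math above is the standard rel32 encoding: the 32-bit operand is relative to the end of the five-byte instruction, and source is pc_ + 4 because the opcode byte has already been emitted. A standalone sketch of the same encoding, written against a plain byte buffer:

#include <cassert>
#include <cstdint>
#include <cstring>

// Writes a `call rel32` at `code`, targeting `target` in the same buffer.
void EncodeShortCall(uint8_t* code, const uint8_t* target) {
  const uint8_t* end_of_call = code + 5;         // the instruction is 5 bytes
  intptr_t displacement = target - end_of_call;  // relative to the next insn
  assert(displacement == static_cast<int32_t>(displacement));
  code[0] = 0xE8;                                // 1110 1000
  int32_t disp32 = static_cast<int32_t>(displacement);
  std::memcpy(code + 1, &disp32, sizeof(disp32));
}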
@@ -553,10 +553,12 @@ class Assembler : public Malloced {
   // TODO(X64): Rename this, removing the "Real", after changing the above.
   static const int kRealPatchReturnSequenceAddressOffset = 2;
 
-  // The x64 JS return sequence is padded with int3 to make it large
-  // enough to hold a call instruction when the debugger patches it.
+  // Some x64 JS code is padded with int3 to make it large
+  // enough to hold an instruction when the debugger patches it.
+  static const int kJumpInstructionLength = 13;
   static const int kCallInstructionLength = 13;
   static const int kJSReturnSequenceLength = 13;
+  static const int kShortCallInstructionLength = 5;
 
   // The debug break slot must be able to contain a call instruction.
   static const int kDebugBreakSlotLength = kCallInstructionLength;
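The two 13-byte constants presumably reflect the x64 sequences used for patching: a full 64-bit address cannot be a call or jump immediate, so it goes through a register, while the short variant is the ordinary rel32 call. Plausible encodings behind the numbers (an assumption; the patch does not spell these out):

// movq r10, imm64 : 49 BA + 8 immediate bytes  = 10 bytes
// call r10        : 41 FF D2                   =  3 bytes  -> 10 + 3 = 13
// jmp  r10        : 41 FF E2                   =  3 bytes  -> 10 + 3 = 13
// call rel32      : E8 + 4 displacement bytes  =  5 bytes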
@@ -585,7 +587,7 @@ class Assembler : public Malloced {
 
   // Insert the smallest number of nop instructions
   // possible to align the pc offset to a multiple
-  // of m. m must be a power of 2.
+  // of m, where m must be a power of 2.
   void Align(int m);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
@@ -1127,6 +1129,12 @@ class Assembler : public Malloced {
   void call(Label* L);
   void call(Handle<Code> target, RelocInfo::Mode rmode);
 
+  // Calls directly to the given address using a relative offset.
+  // Should only ever be used in Code objects for calls within the
+  // same Code object. Should not be used when generating new code (use labels),
+  // but only when patching existing code.
+  void call(Address target);
+
   // Call near absolute indirect, address in register
   void call(Register adr);
 
@@ -42,10 +42,65 @@ int Deoptimizer::table_entry_size_ = 10;
 
 
 int Deoptimizer::patch_size() {
-  return Assembler::kCallInstructionLength;
+  return MacroAssembler::kCallInstructionLength;
 }
 
 
+#ifdef DEBUG
+// Overwrites code with int3 instructions.
+static void ZapCodeRange(Address from, Address to) {
+  CHECK(from <= to);
+  int length = static_cast<int>(to - from);
+  CodePatcher destroyer(from, length);
+  while (length-- > 0) {
+    destroyer.masm()->int3();
+  }
+}
+#endif
+
+
+// Iterate through the entries of a SafepointTable that correspond to
+// deoptimization points.
+class SafepointTableDeoptimiztionEntryIterator {
+ public:
+  explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
+      : code_(code), table_(code), index_(-1), limit_(table_.length()) {
+    FindNextIndex();
+  }
+
+  SafepointEntry Next(Address* pc) {
+    if (index_ >= limit_) {
+      *pc = NULL;
+      return SafepointEntry();  // Invalid entry.
+    }
+    *pc = code_->instruction_start() + table_.GetPcOffset(index_);
+    SafepointEntry entry = table_.GetEntry(index_);
+    FindNextIndex();
+    return entry;
+  }
+
+ private:
+  void FindNextIndex() {
+    ASSERT(index_ < limit_);
+    while (++index_ < limit_) {
+      if (table_.GetEntry(index_).deoptimization_index() !=
+          Safepoint::kNoDeoptimizationIndex) {
+        return;
+      }
+    }
+  }
+
+  Code* code_;
+  SafepointTable table_;
+  // Index of the next deoptimization entry. If it has reached limit_ after
+  // calling FindNextIndex, there are no more, and Next will return an
+  // invalid SafepointEntry.
+  int index_;
+  // Table length.
+  int limit_;
+};
+
+
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   AssertNoAllocation no_allocation;
 
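Usage follows the familiar iterator shape; a sketch of how the patched DeoptimizeFunction below drives it (names taken from the class above, not standalone code):

// Visit each deoptimizing safepoint together with its pc in the code object.
Address pc = NULL;
SafepointTableDeoptimiztionEntryIterator it(code);
for (SafepointEntry entry = it.Next(&pc);
     entry.is_valid();
     entry = it.Next(&pc)) {
  // `pc` points at the deoptimization point; `entry` carries its
  // deoptimization index and gap code size.
}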
@@ -59,42 +114,74 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   code->InvalidateRelocation();
 
   // For each return after a safepoint insert an absolute call to the
-  // corresponding deoptimization entry.
-  unsigned last_pc_offset = 0;
-  SafepointTable table(function->code());
-  for (unsigned i = 0; i < table.length(); i++) {
-    unsigned pc_offset = table.GetPcOffset(i);
-    SafepointEntry safepoint_entry = table.GetEntry(i);
-    int deoptimization_index = safepoint_entry.deoptimization_index();
-    int gap_code_size = safepoint_entry.gap_code_size();
+  // corresponding deoptimization entry, or a short call to an absolute
+  // jump if space is short. The absolute jumps are put in a table just
+  // before the safepoint table (space was allocated there when the Code
+  // object was created, if necessary).
+
+  Address instruction_start = function->code()->instruction_start();
+  Address jump_table_address =
+      instruction_start + function->code()->safepoint_table_offset();
+  Address previous_pc = instruction_start;
+
+  SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
+  Address entry_pc = NULL;
+
+  SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
+  while (current_entry.is_valid()) {
+    int gap_code_size = current_entry.gap_code_size();
+    unsigned deoptimization_index = current_entry.deoptimization_index();
+
 #ifdef DEBUG
     // Destroy the code which is not supposed to run again.
-    unsigned instructions = pc_offset - last_pc_offset;
-    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                          instructions);
-    for (unsigned i = 0; i < instructions; i++) {
-      destroyer.masm()->int3();
-    }
+    ZapCodeRange(previous_pc, entry_pc);
 #endif
-    last_pc_offset = pc_offset;
-    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      last_pc_offset += gap_code_size;
-      CodePatcher patcher(code->instruction_start() + last_pc_offset,
-                          patch_size());
-      patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
-                           RelocInfo::NONE);
-      last_pc_offset += patch_size();
-    }
-  }
+    // Position where Call will be patched in.
+    Address call_address = entry_pc + gap_code_size;
+    // End of call instruction, if using a direct call to a 64-bit address.
+    Address call_end_address =
+        call_address + MacroAssembler::kCallInstructionLength;
+
+    // Find next deoptimization entry, if any.
+    Address next_pc = NULL;
+    SafepointEntry next_entry = deoptimizations.Next(&next_pc);
+
+    if (!next_entry.is_valid() || next_pc >= call_end_address) {
+      // Enough room to write a long call instruction.
+      CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
+      patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+                           RelocInfo::NONE);
+      previous_pc = call_end_address;
+    } else {
+      // Not enough room for a long Call instruction. Write a short call
+      // instruction to a long jump placed elsewhere in the code.
+      Address short_call_end_address =
+          call_address + MacroAssembler::kShortCallInstructionLength;
+      ASSERT(next_pc >= short_call_end_address);
+
+      // Write jump in jump-table.
+      jump_table_address -= MacroAssembler::kJumpInstructionLength;
+      CodePatcher jump_patcher(jump_table_address,
+                               MacroAssembler::kJumpInstructionLength);
+      jump_patcher.masm()->Jump(
+          GetDeoptimizationEntry(deoptimization_index, LAZY),
+          RelocInfo::NONE);
+
+      // Write call to jump at call_address.
+      CodePatcher call_patcher(call_address,
+                               MacroAssembler::kShortCallInstructionLength);
+      call_patcher.masm()->call(jump_table_address);
+      previous_pc = short_call_end_address;
+    }
+
+    // Continue with next deoptimization entry.
+    current_entry = next_entry;
+    entry_pc = next_pc;
+  }
 
 #ifdef DEBUG
   // Destroy the code which is not supposed to run again.
-  CHECK(code->safepoint_table_offset() >= last_pc_offset);
-  unsigned instructions = code->safepoint_table_offset() - last_pc_offset;
-  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                        instructions);
-  for (unsigned i = 0; i < instructions; i++) {
-    destroyer.masm()->int3();
-  }
+  ZapCodeRange(previous_pc, jump_table_address);
 #endif
 
   // Add the deoptimizing code to the list.
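After patching, the code object plausibly looks as follows (a reading of the hunk above, not a diagram from the commit):

// instruction_start()
//   ...code with patched deopt points:
//      either  call <64-bit entry via r10>   (13 bytes)
//      or      call rel32 ------------+      (5 bytes)
// jump_table_address (grows down)     |
//   jmp <64-bit entry via r10>  <-----+      (13 bytes per slot)
// safepoint_table_offset()
//   safepoint table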
@@ -390,7 +477,7 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ pop(Operand(rbx, offset));
   }
 
   // Fill in the double input registers.
   int double_regs_offset = FrameDescription::double_registers_offset();
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
     int dst_offset = i * kDoubleSize + double_regs_offset;
@@ -404,7 +491,7 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ addq(rsp, Immediate(2 * kPointerSize));
   }
 
-  // Compute a pointer to the unwinding limit in register ecx; that is
+  // Compute a pointer to the unwinding limit in register rcx; that is
   // the first stack slot not part of the input frame.
   __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
   __ addq(rcx, rsp);
@@ -31,7 +31,7 @@
 namespace v8 {
 namespace internal {
 
-static const int kNumRegs = 8;
+static const int kNumRegs = 16;
 static const RegList kJSCallerSaved =
     1 << 0 |  // rax
     1 << 1 |  // rcx
@@ -44,8 +44,7 @@ static const int kNumJSCallerSaved = 5;
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 // Number of registers for which space is reserved in safepoints.
-// TODO(x64): This should not be 0.
-static const int kNumSafepointRegisters = 8;
+static const int kNumSafepointRegisters = 16;
 
 // ----------------------------------------------------
 
@@ -188,9 +188,19 @@ bool LCodeGen::GenerateDeferredCode() {
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  // Ensure that patching a deoptimization point won't overwrite the table.
-  for (int i = 0; i < Assembler::kCallInstructionLength; i++) {
-    masm()->int3();
+  // Ensure that there is space at the end of the code to write a number
+  // of jump instructions, as well as to afford writing a call near the end
+  // of the code.
+  // The jumps are used when there isn't room in the code stream to write
+  // a long call instruction. Instead it writes a shorter call to a
+  // jump instruction in the same code object.
+  // The calls are used when lazily deoptimizing a function; they call a
+  // deoptimization entry.
+  int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
+      static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
+  int byte_count = (short_deopts) * MacroAssembler::kJumpInstructionLength;
+  while (byte_count-- > 0) {
+    __ int3();
   }
   safepoints_.Emit(masm(), StackSlotCount());
   return !is_aborted();
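The space reserved is thus exactly one 13-byte jump slot per short interval, emitted as int3 padding that DeoptimizeFunction later overwrites with jumps. A toy restatement of the arithmetic (hypothetical helper):

int ReservedJumpTableBytes(int short_deopts, int jump_instruction_length) {
  // E.g. three short intervals at 13 bytes each reserve 39 bytes of int3.
  return short_deopts * jump_instruction_length;
}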
@@ -505,6 +515,7 @@ void LCodeGen::RecordSafepoint(
                                int arguments,
                                int deoptimization_index) {
   const ZoneList<LOperand*>* operands = pointers->operands();
+
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
@@ -773,7 +784,9 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
 
 
 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
-  Abort("Unimplemented: %s", "DoJSArrayLength");
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
 }
 
 
@@ -1503,32 +1516,32 @@ void LCodeGen::DoReturn(LReturn* instr) {
 
 
 void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
-  Register result = ToRegister(instr->result());
-  if (result.is(rax)) {
-    __ load_rax(instr->hydrogen()->cell().location(),
-                RelocInfo::GLOBAL_PROPERTY_CELL);
-  } else {
-    __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(result, Operand(result, 0));
-  }
-  if (instr->hydrogen()->check_hole_value()) {
-    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(equal, instr->environment());
-  }
+  Abort("Unimplemented: %s", "DoLoadGlobal");
 }
 
 
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
-  if (value.is(rax)) {
+  Register temp = ToRegister(instr->TempAt(0));
+  ASSERT(!value.is(temp));
+  bool check_hole = instr->hydrogen()->check_hole_value();
+  if (!check_hole && value.is(rax)) {
     __ store_rax(instr->hydrogen()->cell().location(),
                  RelocInfo::GLOBAL_PROPERTY_CELL);
-  } else {
-    __ movq(kScratchRegister,
-            Handle<Object>::cast(instr->hydrogen()->cell()),
-            RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(Operand(kScratchRegister, 0), value);
+    return;
   }
+  // If the cell we are storing to contains the hole it could have
+  // been deleted from the property dictionary. In that case, we need
+  // to update the property details in the property dictionary to mark
+  // it as no longer deleted. We deoptimize in that case.
+  __ movq(temp,
+          Handle<Object>::cast(instr->hydrogen()->cell()),
+          RelocInfo::GLOBAL_PROPERTY_CELL);
+  if (check_hole) {
+    __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(equal, instr->environment());
+  }
+  __ movq(Operand(temp, 0), value);
 }
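The rewritten DoStoreGlobal splits on whether a hole check is needed; only the no-check, value-already-in-rax case may take the single-instruction store_rax fast path. As a plain boolean (mirror of the condition above, for illustration only):

bool CanUseStoreRaxFastPath(bool check_hole, bool value_in_rax) {
  return !check_hole && value_in_rax;
}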
@@ -1775,7 +1788,9 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
-  Abort("Unimplemented: %s", "DoCallKnownGlobal");
+  ASSERT(ToRegister(instr->result()).is(rax));
+  __ Move(rdi, instr->target());
+  CallKnownFunction(instr->target(), instr->arity(), instr);
 }
 
 
@@ -2046,7 +2061,33 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
 
 
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoCheckInstanceType");
+  Register input = ToRegister(instr->InputAt(0));
+  InstanceType first = instr->hydrogen()->first();
+  InstanceType last = instr->hydrogen()->last();
+
+  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+
+  // If there is only one type in the interval, check for equality.
+  if (first == last) {
+    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+            Immediate(static_cast<int8_t>(first)));
+    DeoptimizeIf(not_equal, instr->environment());
+  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+    // String has a dedicated bit in instance type.
+    __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+             Immediate(kIsNotStringMask));
+    DeoptimizeIf(not_zero, instr->environment());
+  } else {
+    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+            Immediate(static_cast<int8_t>(first)));
+    DeoptimizeIf(below, instr->environment());
+    // Omit check for the last type.
+    if (last != LAST_TYPE) {
+      __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+              Immediate(static_cast<int8_t>(last)));
+      DeoptimizeIf(above, instr->environment());
+    }
+  }
 }
 
 
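The three branches reduce to a simple predicate on the instance type byte, assuming the usual ordering of V8 instance types (the dedicated string-bit test is just a faster special case). A standalone restatement under that assumption:

#include <cstdint>

// True when `type` lies in [first, last]; `last_type` is the largest
// possible instance type, which lets the upper-bound check be skipped.
bool InstanceTypeInRange(uint8_t type, uint8_t first, uint8_t last,
                         uint8_t last_type) {
  if (first == last) return type == first;  // single-type interval
  if (type < first) return false;
  return last == last_type || type <= last;
}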
@@ -1185,8 +1185,8 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
 
 
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
-  Abort("Unimplemented: %s", "DoCallNamed");
-  return NULL;
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
 }
 
 
@@ -1197,8 +1197,8 @@ LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
-  Abort("Unimplemented: %s", "DoCallKnownGlobal");
-  return NULL;
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
 }
 
 
@@ -1408,8 +1408,8 @@ LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
-  Abort("Unimplemented: %s", "DoJSArrayLength");
-  return NULL;
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LJSArrayLength(array));
 }
 
 
@@ -1512,8 +1512,9 @@ LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoCheckInstanceType");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LCheckInstanceType* result = new LCheckInstanceType(value);
+  return AssignEnvironment(result);
 }
 
 
@@ -1575,7 +1576,9 @@ LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
 
 
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+  LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
+                                          TempRegister());
+  return instr->check_hole_value() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1300,10 +1300,11 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
 };
 
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
  public:
-  explicit LStoreGlobal(LOperand* value) {
+  explicit LStoreGlobal(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
@@ -1637,11 +1638,10 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
 };
 
 
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
  public:
-  LCheckInstanceType(LOperand* value, LOperand* temp) {
+  explicit LCheckInstanceType(LOperand* value) {
     inputs_[0] = value;
-    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
@@ -68,7 +68,9 @@ void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
 }
 
 
-void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(const Operand& with,
+                                 Heap::RootListIndex index) {
+  ASSERT(!with.AddressUsesRegister(kScratchRegister));
   LoadRoot(kScratchRegister, index);
   cmpq(with, kScratchRegister);
 }
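The new ASSERT guards a real hazard: LoadRoot clobbers kScratchRegister, so an operand that addresses memory through kScratchRegister would compare against garbage once the root has been loaded. A hypothetical misuse the assert now catches:

// Rejected by the ASSERT above: the operand's base register is overwritten
// by LoadRoot before cmpq executes.
//   masm->CompareRoot(Operand(kScratchRegister, 0),
//                     Heap::kTheHoleValueRootIndex);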
@@ -74,7 +74,7 @@ class MacroAssembler: public Assembler {
 
   void LoadRoot(Register destination, Heap::RootListIndex index);
   void CompareRoot(Register with, Heap::RootListIndex index);
-  void CompareRoot(Operand with, Heap::RootListIndex index);
+  void CompareRoot(const Operand& with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
   void StoreRoot(Register source, Heap::RootListIndex index);
 
@@ -607,7 +607,7 @@ class MacroAssembler: public Assembler {
   // Emit call to the code we are currently generating.
   void CallSelf() {
     Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
-    call(self, RelocInfo::CODE_TARGET);
+    Call(self, RelocInfo::CODE_TARGET);
   }
 
   // Non-x64 instructions.
@@ -66,7 +66,7 @@ test-deoptimization/DeoptimizeBinaryOperationMOD: FAIL
 test-deoptimization/DeoptimizeBinaryOperationDIV: FAIL
 test-deoptimization/DeoptimizeLoadICStoreIC: FAIL
 test-deoptimization/DeoptimizeLoadICStoreICNested: FAIL
-test-deoptimization/DeoptimizeCompare: FAIL
+test-deoptimization/DeoptimizeCompare: PASS || FAIL
 
 # Tests that time out with crankshaft.
 test-api/Threading: SKIP