Further improve deopt reason output.

* Make the detailed deopt reason mandatory on x64; other platforms
  will follow in separate CLs.

* Extract and improve the jump table entry sharing logic: When
  --trace-deopt is on, we now get separate entries for different deopt
  reasons. This lets us distinguish the several reasons a single
  instruction can have (see the sketch after this list).

* Don't emit superfluous jump table comments: The bailout ID is still
  visible, and the jump table entry number is not interesting (but
  easy to determine if really needed).

* Unify the internal name of the jump table member across platforms.
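
For illustration, here is a minimal, self-contained sketch of the new
sharing logic (condensed from the JumpTableEntry/Reason changes in the
diff below; the Address/BailoutType/FLAG_trace_deopt stand-ins and the
sample data in main() are simplifications for this sketch, not V8's
real definitions):

  #include <cstring>

  typedef unsigned char* Address;          // stand-in for V8's Address
  enum BailoutType { EAGER, LAZY, SOFT };  // stand-in
  static bool FLAG_trace_deopt = false;    // stand-in for the real flag

  // Same as strcmp, but can handle NULL arguments (the helper this CL
  // adds to utils.h, see below).
  inline bool CStringEquals(const char* s1, const char* s2) {
    return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
  }

  struct Reason {
    int raw_position;
    const char* mnemonic;
    const char* detail;
    bool operator==(const Reason& other) const {
      return raw_position == other.raw_position &&
             CStringEquals(mnemonic, other.mnemonic) &&
             CStringEquals(detail, other.detail);
    }
  };

  struct JumpTableEntry {
    Address address;
    Reason reason;
    BailoutType bailout_type;
    bool needs_frame;
    // Entries to the same deopt target can share one jump table slot,
    // except that with --trace-deopt each distinct reason needs its own
    // slot so the trace can tell the reasons apart.
    bool IsEquivalentTo(const JumpTableEntry& other) const {
      return address == other.address && bailout_type == other.bailout_type &&
             needs_frame == other.needs_frame &&
             (!FLAG_trace_deopt || reason == other.reason);
    }
  };

  int main() {
    unsigned char target[1];  // made-up deopt entry address
    JumpTableEntry a = {target, {17, "DoDivI", "division by zero"}, EAGER, false};
    JumpTableEntry b = {target, {42, "DoModI", "minus zero"}, EAGER, false};
    bool shared = a.IsEquivalentTo(b);        // true: reasons are ignored
    FLAG_trace_deopt = true;
    bool still_shared = a.IsEquivalentTo(b);  // false: reasons now differ
    return (shared && !still_shared) ? 0 : 1;
  }

Each platform's DeoptimizeIf() then collapses its old three-way
comparison (address, bailout type, needs_frame) into a single call:

  if (jump_table_.is_empty() ||
      !table_entry.IsEquivalentTo(jump_table_.last())) {
    jump_table_.Add(table_entry, zone());
  }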

R=jarin@chromium.org

Review URL: https://codereview.chromium.org/595513002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24123 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: svenpanne@chromium.org
Date:   2014-09-22 14:16:38 +00:00
Parent: 1adad89746
Commit: 7384467552

13 changed files with 164 additions and 179 deletions

@ -319,29 +319,25 @@ bool LCodeGen::GenerateJumpTable() {
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
deopt_jump_table_.length() * 7)) {
jump_table_.length() * 7)) {
Abort(kGeneratedCodeIsTooLarge);
}
if (deopt_jump_table_.length() > 0) {
if (jump_table_.length() > 0) {
Label needs_frame, call_deopt_entry;
Comment(";;; -------------------- Jump table --------------------");
Address base = deopt_jump_table_[0].address;
Address base = jump_table_[0].address;
Register entry_offset = scratch0();
int length = deopt_jump_table_.length();
int length = jump_table_.length();
for (int i = 0; i < length; i++) {
Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i];
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Deoptimizer::BailoutType type = table_entry->bailout_type;
DCHECK(type == deopt_jump_table_[0].bailout_type);
DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
Address entry = table_entry->address;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
@ -909,17 +905,15 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry) ||
(deopt_jump_table_.last().bailout_type != bailout_type) ||
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
if (jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
__ b(condition, &deopt_jump_table_.last().label);
__ b(condition, &jump_table_.last().label);
}
}

@ -26,7 +26,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@ -332,7 +332,7 @@ class LCodeGen: public LCodeGenBase {
void EmitVectorLoadICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;

@ -839,11 +839,7 @@ bool LCodeGen::GenerateJumpTable() {
Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
__ Bind(&table_entry->label);
Deoptimizer::BailoutType type = table_entry->bailout_type;
Address entry = table_entry->address;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
@ -1053,14 +1049,13 @@ void LCodeGen::DeoptimizeBranch(
DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry* table_entry =
new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() || (jump_table_.last()->address != entry) ||
(jump_table_.last()->bailout_type != bailout_type) ||
(jump_table_.last()->needs_frame != !frame_is_built_)) {
Deoptimizer::JumpTableEntry* table_entry =
new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
!frame_is_built_);
if (jump_table_.is_empty() ||
!table_entry->IsEquivalentTo(*jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
__ B(&jump_table_.last()->label, branch_type, reg, bit);

@ -104,6 +104,15 @@ class Deoptimizer : public Malloced {
struct Reason {
Reason(int r, const char* m, const char* d)
: raw_position(r), mnemonic(m), detail(d) {}
bool operator==(const Reason& other) const {
return raw_position == other.raw_position &&
CStringEquals(mnemonic, other.mnemonic) &&
CStringEquals(detail, other.detail);
}
bool operator!=(const Reason& other) const { return !(*this == other); }
int raw_position;
const char* mnemonic;
const char* detail;
@ -117,6 +126,13 @@ class Deoptimizer : public Malloced {
reason(the_reason),
bailout_type(type),
needs_frame(frame) {}
bool IsEquivalentTo(const JumpTableEntry& other) const {
return address == other.address && bailout_type == other.bailout_type &&
needs_frame == other.needs_frame &&
(!FLAG_trace_deopt || reason == other.reason);
}
Label label;
Address address;
Reason reason;

@ -386,10 +386,6 @@ bool LCodeGen::GenerateJumpTable() {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
Deoptimizer::BailoutType type = table_entry->bailout_type;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
DeoptComment(table_entry->reason);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
@ -874,14 +870,12 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
DeoptComment(reason);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
jump_table_.last().bailout_type != bailout_type) {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {

@ -324,25 +324,21 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateJumpTable() {
if (deopt_jump_table_.length() > 0) {
if (jump_table_.length() > 0) {
Label needs_frame, call_deopt_entry;
Comment(";;; -------------------- Jump table --------------------");
Address base = deopt_jump_table_[0].address;
Address base = jump_table_[0].address;
Register entry_offset = t9;
int length = deopt_jump_table_.length();
int length = jump_table_.length();
for (int i = 0; i < length; i++) {
Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i];
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Deoptimizer::BailoutType type = table_entry->bailout_type;
DCHECK(type == deopt_jump_table_[0].bailout_type);
DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
Address entry = table_entry->address;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
@ -872,17 +868,15 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry) ||
(deopt_jump_table_.last().bailout_type != bailout_type) ||
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
if (jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
__ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
__ Branch(&jump_table_.last().label, condition, src1, src2);
}
}

@ -25,7 +25,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@ -364,7 +364,7 @@ class LCodeGen: public LCodeGenBase {
void EmitVectorLoadICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;

@ -300,21 +300,17 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateJumpTable() {
if (deopt_jump_table_.length() > 0) {
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label table_start;
__ bind(&table_start);
Label needs_frame;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i];
for (int i = 0; i < jump_table_.length(); i++) {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
Deoptimizer::BailoutType type = table_entry->bailout_type;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
DeoptComment(table_entry->reason);
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (table_entry->needs_frame) {
@ -822,17 +818,15 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry) ||
(deopt_jump_table_.last().bailout_type != bailout_type) ||
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
if (jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
__ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
__ Branch(&jump_table_.last().label, condition, src1, src2);
}
}

@ -25,7 +25,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
@ -365,7 +365,7 @@ class LCodeGen: public LCodeGenBase {
void EmitVectorLoadICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;

@ -26,6 +26,13 @@ namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
// Same as strcmp, but can handle NULL arguments.
inline bool CStringEquals(const char* s1, const char* s2) {
return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
}
// X must be a power of 2. Returns the number of trailing zeros.
inline int WhichPowerOf2(uint32_t x) {
DCHECK(base::bits::IsPowerOfTwo32(x));
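
As a quick aside, the NULL handling of the helper just added above can
be exercised like this (a stand-alone toy with the helper copied in so
it compiles on its own; the example strings are made up):

  #include <cassert>
  #include <cstring>

  // Copied verbatim from the utils.h hunk above.
  inline bool CStringEquals(const char* s1, const char* s2) {
    return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
  }

  int main() {
    assert(CStringEquals("minus zero", "minus zero"));  // equal contents
    assert(CStringEquals(NULL, NULL));                  // two NULLs are equal
    assert(!CStringEquals("overflow", NULL));           // NULL vs. non-NULL
    return 0;
  }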

@ -306,10 +306,6 @@ bool LCodeGen::GenerateJumpTable() {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
Deoptimizer::BailoutType type = table_entry->bailout_type;
int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
DeoptComment(table_entry->reason);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
@ -784,14 +780,12 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
DeoptComment(reason);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
jump_table_.last().bailout_type != bailout_type) {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
@ -1039,7 +1033,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
@ -1056,7 +1050,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr);
DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@ -1071,7 +1065,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
DeoptimizeIf(less, instr);
DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@ -1093,7 +1087,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@ -1104,7 +1098,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
@ -1124,7 +1118,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@ -1150,13 +1144,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
@ -1183,7 +1177,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr);
DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@ -1191,7 +1185,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -1238,7 +1232,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@ -1247,7 +1241,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
DeoptimizeIf(sign, instr);
DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@ -1257,7 +1251,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@ -1286,19 +1280,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
DeoptimizeIf(not_zero, instr);
DeoptimizeIf(not_zero, instr, "remainder not zero");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@ -1319,7 +1313,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr);
DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
@ -1327,7 +1321,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
@ -1337,7 +1331,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "remainder not zero");
}
}
@ -1357,7 +1351,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@ -1366,7 +1360,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
DeoptimizeIf(sign, instr);
DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@ -1376,7 +1370,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
@ -1387,7 +1381,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr);
DeoptimizeIf(not_zero, instr, "remainder not zero");
}
}
@ -1464,7 +1458,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (can_overflow) {
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -1483,10 +1477,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr);
DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
DeoptimizeIf(less, instr);
DeoptimizeIf(less, instr, "minus zero");
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
@ -1494,7 +1488,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
DeoptimizeIf(sign, instr);
DeoptimizeIf(sign, instr, "minus zero");
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
@ -1502,7 +1496,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
DeoptimizeIf(sign, instr);
DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
@ -1615,7 +1609,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr);
DeoptimizeIf(negative, instr, "value to shift was negative");
}
break;
case Token::SHL:
@ -1644,7 +1638,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr);
DeoptimizeIf(negative, instr, "value to shift was negative");
}
break;
case Token::SHL:
@ -1659,7 +1653,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@ -1702,7 +1696,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
}
}
@ -1767,9 +1761,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(rax));
Condition cc = masm()->CheckSmi(object);
DeoptimizeIf(cc, instr);
DeoptimizeIf(cc, instr, "not an object");
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
@ -1933,7 +1927,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
}
}
}
@ -2178,7 +2172,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "Smi");
}
const Register map = kScratchRegister;
@ -2232,7 +2226,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr);
DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
@ -2848,7 +2842,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "hole");
}
}
@ -2896,7 +2890,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
DCHECK(!value.is(cell));
__ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "hole");
// Store the value.
__ movp(Operand(cell, 0), value);
} else {
@ -2915,7 +2909,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@ -2936,7 +2930,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment);
}
@ -3034,7 +3028,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@ -3146,7 +3140,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
DeoptimizeIf(negative, instr);
DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
@ -3185,7 +3179,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
@ -3242,10 +3236,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
DeoptimizeIf(NegateCondition(smi), instr);
DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "hole");
}
}
}
@ -3394,9 +3388,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr);
DeoptimizeIf(is_smi, instr, "not an object");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr);
DeoptimizeIf(below, instr, "not a spec object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@ -3423,7 +3417,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr);
DeoptimizeIf(above, instr, "too many arguments");
__ Push(receiver);
__ movp(receiver, length);
@ -3618,7 +3612,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
@ -3664,7 +3658,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
DeoptimizeIf(negative, instr);
DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
@ -3675,7 +3669,7 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr);
DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
@ -3730,18 +3724,18 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Deoptimize if minus zero.
__ movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "minus zero");
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr);
DeoptimizeIf(parity_even, instr, "unordered");
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3750,7 +3744,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ testq(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr);
DeoptimizeIf(not_zero, instr, "minus zero");
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
@ -3760,7 +3754,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@ -3771,7 +3765,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
__ bind(&done);
}
@ -3908,7 +3902,7 @@ void LCodeGen::DoPower(LPower* instr) {
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -4280,7 +4274,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
DeoptimizeIf(cc, instr);
DeoptimizeIf(cc, instr, "out of bounds");
}
}
@ -4528,7 +4522,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
@ -4847,12 +4841,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
DeoptimizeIf(NegateCondition(is_smi), instr);
DeoptimizeIf(NegateCondition(is_smi), instr, "not a smi");
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
}
}
@ -4862,7 +4856,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
DeoptimizeIf(NegateCondition(is_smi), instr);
DeoptimizeIf(NegateCondition(is_smi), instr, "not a smi");
} else {
__ AssertSmi(input);
}
@ -4893,7 +4887,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "not a heap number");
}
if (deoptimize_on_minus_zero) {
@ -4903,7 +4897,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
DeoptimizeIf(not_zero, instr);
DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
@ -4912,7 +4906,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "neither a heap number nor undefined");
__ xorps(result_reg, result_reg);
__ divsd(result_reg, result_reg);
@ -5049,7 +5043,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ jmp(&done, Label::kNear);
__ bind(&bailout);
DeoptimizeIf(no_condition, instr);
DeoptimizeIf(no_condition, instr, "conversion failed");
__ bind(&done);
}
}
@ -5071,18 +5065,18 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ jmp(&done, Label::kNear);
__ bind(&bailout);
DeoptimizeIf(no_condition, instr);
DeoptimizeIf(no_condition, instr, "conversion failed");
__ bind(&done);
__ Integer32ToSmi(result_reg, result_reg);
DeoptimizeIf(overflow, instr);
DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(NegateCondition(cc), instr);
DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
}
@ -5090,7 +5084,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr);
DeoptimizeIf(cc, instr, "Smi");
}
}
@ -5110,14 +5104,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
DeoptimizeIf(below, instr);
DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
DeoptimizeIf(above, instr);
DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
@ -5129,13 +5123,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr);
DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
@ -5144,7 +5138,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "value mismatch");
}
@ -5159,7 +5153,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr);
DeoptimizeIf(zero, instr, "instance migration failed");
}
@ -5212,7 +5206,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
@ -5251,7 +5245,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "neither a heap number nor undefined");
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
@ -5732,19 +5726,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "undefined");
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpp(rax, null_value);
DeoptimizeIf(equal, instr);
DeoptimizeIf(equal, instr, "null");
Condition cc = masm()->CheckSmi(rax);
DeoptimizeIf(cc, instr);
DeoptimizeIf(cc, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
DeoptimizeIf(below_equal, instr);
DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@ -5759,7 +5753,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "not a meta map");
__ bind(&use_cache);
}
@ -5781,7 +5775,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
DeoptimizeIf(cc, instr);
DeoptimizeIf(cc, instr, "Smi");
}
@ -5789,7 +5783,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr);
DeoptimizeIf(not_equal, instr, "wrong map");
}

@ -208,8 +208,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
const char* detail = NULL);
void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();

@ -1155,14 +1155,12 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
DeoptComment(reason);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry ||
jump_table_.last().needs_frame != !frame_is_built_ ||
jump_table_.last().bailout_type != bailout_type) {
Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
!frame_is_built_);
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {