[turbofan] Add support for eager/soft deoptimization reasons.

So far TurboFan wasn't adding the deoptimization reasons for eager/soft
deoptimization exits that can be used by either the DevTools profiler or
the --trace-deopt flag. This adds basic support for deopt reasons on
Deoptimize, DeoptimizeIf and DeoptimizeUnless nodes and threads through
the reasons to the code generation.

Also moves the DeoptReason to its own file (to resolve include cycles)
and drops unused reasons.

R=jarin@chromium.org

Review-Url: https://codereview.chromium.org/2161543002
Cr-Commit-Position: refs/heads/master@{#37823}
This commit is contained in:
bmeurer 2016-07-18 02:23:28 -07:00 committed by Commit bot
parent a4053e027d
commit db635d5b72
80 changed files with 1585 additions and 1386 deletions

View File

@ -1166,6 +1166,8 @@ v8_source_set("v8_base") {
"src/debug/debug.h",
"src/debug/liveedit.cc",
"src/debug/liveedit.h",
"src/deoptimize-reason.cc",
"src/deoptimize-reason.h",
"src/deoptimizer.cc",
"src/deoptimizer.h",
"src/disasm.h",

View File

@ -1404,7 +1404,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Record the emission of a constant pool.
//

View File

@ -933,7 +933,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
int buffer_space() const;

View File

@ -774,8 +774,8 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
} else if (rmode_ == DEOPT_POSITION) {
os << " (" << data() << ")";
} else if (rmode_ == DEOPT_REASON) {
os << " (" << Deoptimizer::GetDeoptReason(
static_cast<Deoptimizer::DeoptReason>(data_)) << ")";
os << " ("
<< DeoptimizeReasonToString(static_cast<DeoptimizeReason>(data_)) << ")";
} else if (rmode_ == EMBEDDED_OBJECT) {
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
@ -1860,11 +1860,12 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
// Platform specific but identical code for all the platforms.
void Assembler::RecordDeoptReason(const int reason, int raw_position, int id) {
void Assembler::RecordDeoptReason(DeoptimizeReason reason, int raw_position,
int id) {
if (FLAG_trace_deopt || isolate()->is_profiling()) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::DEOPT_POSITION, raw_position);
RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
}

View File

@ -37,6 +37,7 @@
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/deoptimize-reason.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/register-configuration.h"

View File

@ -207,7 +207,7 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
IfBuilder builder(this);
builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
builder.Then();
builder.ElseDeopt(Deoptimizer::kForcedDeoptToRuntime);
builder.ElseDeopt(DeoptimizeReason::kForcedDeoptToRuntime);
return undefined;
}
@ -447,7 +447,8 @@ HValue* CodeStubGraphBuilder<FastCloneRegExpStub>::BuildCodeStub() {
}
Push(result);
}
if_notundefined.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
if_notundefined.ElseDeopt(
DeoptimizeReason::kUninitializedBoilerplateInFastClone);
if_notundefined.End();
return Pop();
@ -526,7 +527,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
if_fixed_cow.End();
zero_capacity.End();
checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateLiterals);
checker.ElseDeopt(DeoptimizeReason::kUninitializedBoilerplateLiterals);
checker.End();
return environment()->Pop();
@ -699,7 +700,7 @@ HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
can_store.IfNot<HCompareMap>(argument,
isolate()->factory()->heap_number_map());
}
can_store.ThenDeopt(Deoptimizer::kFastPathFailed);
can_store.ThenDeopt(DeoptimizeReason::kFastPathFailed);
can_store.End();
}
builder.EndBody();
@ -750,7 +751,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(
bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
check.ThenDeopt(Deoptimizer::kFastPathFailed);
check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
check.End();
}
@ -763,7 +764,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
check.ThenDeopt(Deoptimizer::kFastPathFailed);
check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
check.End();
}
@ -781,7 +782,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
IfBuilder readonly(this);
readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
readonly.ThenDeopt(Deoptimizer::kFastPathFailed);
readonly.ThenDeopt(DeoptimizeReason::kFastPathFailed);
readonly.End();
}
@ -809,14 +810,14 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
check_instance_type.If<HCompareNumericAndBranch>(
instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
Token::LTE);
check_instance_type.ThenDeopt(Deoptimizer::kFastPathFailed);
check_instance_type.ThenDeopt(DeoptimizeReason::kFastPathFailed);
check_instance_type.End();
HValue* elements = Add<HLoadNamedField>(
prototype, nullptr, HObjectAccess::ForElementsPointer());
IfBuilder no_elements(this);
no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
no_elements.ThenDeopt(Deoptimizer::kFastPathFailed);
no_elements.ThenDeopt(DeoptimizeReason::kFastPathFailed);
no_elements.End();
environment()->Push(prototype_map);
@ -866,7 +867,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
FAST_HOLEY_DOUBLE_ELEMENTS);
environment()->Push(new_length);
}
has_double_elements.ElseDeopt(Deoptimizer::kFastPathFailed);
has_double_elements.ElseDeopt(DeoptimizeReason::kFastPathFailed);
has_double_elements.End();
}
has_object_elements.End();
@ -898,7 +899,7 @@ HValue* CodeStubGraphBuilder<FastFunctionBindStub>::BuildCodeStub() {
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
check.ThenDeopt(Deoptimizer::kFastPathFailed);
check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
check.End();
}
@ -914,7 +915,7 @@ HValue* CodeStubGraphBuilder<FastFunctionBindStub>::BuildCodeStub() {
IfBuilder range(this);
range.If<HCompareNumericAndBranch>(descriptors_length,
graph()->GetConstant1(), Token::LTE);
range.ThenDeopt(Deoptimizer::kFastPathFailed);
range.ThenDeopt(DeoptimizeReason::kFastPathFailed);
range.End();
// Verify .length.
@ -991,7 +992,7 @@ HValue* CodeStubGraphBuilder<FastFunctionBindStub>::BuildCodeStub() {
IfBuilder equal_prototype(this);
equal_prototype.IfNot<HCompareObjectEqAndBranch>(prototype,
expected_prototype);
equal_prototype.ThenDeopt(Deoptimizer::kFastPathFailed);
equal_prototype.ThenDeopt(DeoptimizeReason::kFastPathFailed);
equal_prototype.End();
}
@ -1181,7 +1182,7 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
Add<HStoreKeyed>(backing_store, key, value, nullptr, FAST_HOLEY_ELEMENTS);
}
}
in_unmapped_range.ElseDeopt(Deoptimizer::kOutsideOfRange);
in_unmapped_range.ElseDeopt(DeoptimizeReason::kOutsideOfRange);
in_unmapped_range.End();
return result;
}
@ -1222,7 +1223,7 @@ HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
IfBuilder positive_smi(this);
positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
Token::LT);
positive_smi.ThenDeopt(Deoptimizer::kKeyIsNegative);
positive_smi.ThenDeopt(DeoptimizeReason::kKeyIsNegative);
positive_smi.End();
HValue* constant_two = Add<HConstant>(2);
@ -1807,7 +1808,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Add<HLoadNamedField>(global, nullptr, HObjectAccess::ForMap());
IfBuilder map_check(this);
map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
map_check.ThenDeopt(Deoptimizer::kUnknownMap);
map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
map_check.End();
}
@ -1830,14 +1831,14 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
builder.Then();
builder.ElseDeopt(
Deoptimizer::kUnexpectedCellContentsInConstantGlobalStore);
DeoptimizeReason::kUnexpectedCellContentsInConstantGlobalStore);
builder.End();
} else {
IfBuilder builder(this);
HValue* hole_value = graph()->GetConstantHole();
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
builder.Deopt(Deoptimizer::kUnexpectedCellContentsInGlobalStore);
builder.Deopt(DeoptimizeReason::kUnexpectedCellContentsInGlobalStore);
builder.Else();
// When dealing with constant types, the type may be allowed to change, as
// long as optimized code remains valid.
@ -1860,7 +1861,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
IfBuilder map_check(this);
map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
map_check.ThenDeopt(Deoptimizer::kUnknownMap);
map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
map_check.End();
access = access.WithRepresentation(Representation::HeapObject());
break;
@ -1889,7 +1890,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
Add<HDeoptimize>(Deoptimizer::kTracingElementsTransitions,
Add<HDeoptimize>(DeoptimizeReason::kTracingElementsTransitions,
Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
@ -2084,12 +2085,12 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
BuildElementsKindLimitCheck(&kind_if, bit_field2,
SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
// Non-strict elements are not handled.
Add<HDeoptimize>(Deoptimizer::kNonStrictElementsInKeyedLoadGenericStub,
Add<HDeoptimize>(DeoptimizeReason::kNonStrictElementsInKeyedLoadGenericStub,
Deoptimizer::EAGER);
Push(graph()->GetConstant0());
kind_if.ElseDeopt(
Deoptimizer::kElementsKindUnhandledInKeyedLoadGenericStub);
DeoptimizeReason::kElementsKindUnhandledInKeyedLoadGenericStub);
kind_if.End();
}

View File

@ -1565,6 +1565,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
return kSuccess;

View File

@ -273,7 +273,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -758,7 +758,7 @@ void VisitShift(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1115,7 +1115,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {temp_operand, result_operand, shift_31};
selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->frame_state());
selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
@ -1546,7 +1547,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1632,7 +1633,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1759,7 +1760,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@ -1776,14 +1777,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -1761,6 +1761,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -409,7 +409,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1288,7 +1288,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {result, result};
selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->frame_state());
selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
@ -1915,7 +1916,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -2328,7 +2329,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
DCHECK(cont->IsDeoptimize());
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
cont->frame_state());
cont->reason(), cont->frame_state());
}
}
@ -2341,14 +2342,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -83,6 +83,7 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
node->opcode() == IrOpcode::kDeoptimizeUnless);
bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
Node* condition = NodeProperties::GetValueInput(node, 0);
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@ -102,8 +103,9 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect, control);
control =
graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());

View File

@ -616,7 +616,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
// code address).
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetFrameStateDescriptor(instr, frame_state_offset);
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = masm()->pc_offset();
int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
descriptor->state_combine());
@ -653,15 +653,19 @@ int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
return result;
}
FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
Instruction* instr, size_t frame_state_offset) {
InstructionOperandConverter i(this, instr);
InstructionSequence::StateId state_id =
InstructionSequence::StateId::FromInt(i.InputInt32(frame_state_offset));
return code()->GetFrameStateDescriptor(state_id);
int const state_id = i.InputInt32(frame_state_offset);
return code()->GetDeoptimizationEntry(state_id);
}
// Returns the DeoptimizeReason that was recorded for the deoptimization
// exit {deoptimization_id} when its DeoptimizationState was pushed in
// BuildTranslation.
DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
    int deoptimization_id) const {
  size_t const index = static_cast<size_t>(deoptimization_id);
  DCHECK_LT(index, deoptimization_states_.size());
  return deoptimization_states_[index]->reason();
}
void CodeGenerator::TranslateStateValueDescriptor(
StateValueDescriptor* desc, Translation* translation,
@ -780,8 +784,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
size_t frame_state_offset,
OutputFrameStateCombine state_combine) {
FrameStateDescriptor* descriptor =
GetFrameStateDescriptor(instr, frame_state_offset);
DeoptimizationEntry const& entry =
GetDeoptimizationEntry(instr, frame_state_offset);
FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
Translation translation(
@ -794,7 +799,8 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
int deoptimization_id = static_cast<int>(deoptimization_states_.size());
deoptimization_states_.push_back(new (zone()) DeoptimizationState(
descriptor->bailout_id(), translation.index(), pc_offset));
descriptor->bailout_id(), translation.index(), pc_offset,
entry.reason()));
return deoptimization_id;
}

View File

@ -202,8 +202,9 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
size_t frame_state_offset);
DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
size_t frame_state_offset);
DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const;
int BuildTranslation(Instruction* instr, int pc_offset,
size_t frame_state_offset,
OutputFrameStateCombine state_combine);
@ -227,21 +228,25 @@ class CodeGenerator final : public GapResolver::Assembler {
// ===========================================================================
struct DeoptimizationState : ZoneObject {
class DeoptimizationState final : public ZoneObject {
public:
DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset,
DeoptimizeReason reason)
: bailout_id_(bailout_id),
translation_id_(translation_id),
pc_offset_(pc_offset),
reason_(reason) {}
BailoutId bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
int pc_offset() const { return pc_offset_; }
DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset)
: bailout_id_(bailout_id),
translation_id_(translation_id),
pc_offset_(pc_offset) {}
DeoptimizeReason reason() const { return reason_; }
private:
BailoutId bailout_id_;
int translation_id_;
int pc_offset_;
DeoptimizeReason reason_;
};
struct HandlerInfo {

View File

@ -122,6 +122,7 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
node->opcode() == IrOpcode::kDeoptimizeUnless);
bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
Node* condition = NodeProperties::GetValueInput(node, 0);
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@ -133,8 +134,8 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(node, condition_is_true
? common()->DeoptimizeIf()
: common()->DeoptimizeUnless());
? common()->DeoptimizeIf(reason)
: common()->DeoptimizeUnless(reason));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
@ -142,8 +143,9 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
} else {
control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect, control);
control =
graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());

View File

@ -35,10 +35,14 @@ BranchHint BranchHintOf(const Operator* const op) {
return OpParameter<BranchHint>(op);
}
// Extracts the DeoptimizeReason parameter from a DeoptimizeIf or
// DeoptimizeUnless operator.
DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
  DCHECK(op->opcode() == IrOpcode::kDeoptimizeIf ||
         op->opcode() == IrOpcode::kDeoptimizeUnless);
  return OpParameter<DeoptimizeReason>(op);
}
size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
@ -50,12 +54,26 @@ std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
return os;
}
DeoptimizeKind DeoptimizeKindOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
return OpParameter<DeoptimizeKind>(op);
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return !(lhs == rhs);
}
size_t hash_value(DeoptimizeParameters p) {
return base::hash_combine(p.kind(), p.reason());
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
return os << p.kind() << ":" << p.reason();
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
return OpParameter<DeoptimizeParameters>(op);
}
size_t hash_value(IfExceptionHint hint) { return static_cast<size_t>(hint); }
@ -203,8 +221,6 @@ std::ostream& operator<<(std::ostream& os,
#define CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 1, 1) \
V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 1, 1) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
@ -320,18 +336,6 @@ struct CommonOperatorGlobalCache final {
CACHED_OP_LIST(CACHED)
#undef CACHED
template <DeoptimizeKind kKind>
struct DeoptimizeOperator final : public Operator1<DeoptimizeKind> {
DeoptimizeOperator()
: Operator1<DeoptimizeKind>( // --
IrOpcode::kDeoptimize, Operator::kNoThrow, // opcode
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
kKind) {} // parameter
};
DeoptimizeOperator<DeoptimizeKind::kEager> kDeoptimizeEagerOperator;
DeoptimizeOperator<DeoptimizeKind::kSoft> kDeoptimizeSoftOperator;
template <IfExceptionHint kCaughtLocally>
struct IfExceptionOperator final : public Operator1<IfExceptionHint> {
IfExceptionOperator()
@ -563,18 +567,38 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
return nullptr;
}
const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
return &cache_.kDeoptimizeEagerOperator;
case DeoptimizeKind::kSoft:
return &cache_.kDeoptimizeSoftOperator;
}
UNREACHABLE();
return nullptr;
// Creates an (uncached) Deoptimize operator carrying both the bailout kind
// (eager/soft) and the reason that is later threaded through to code
// generation for --trace-deopt / the profiler.
const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
                                                  DeoptimizeReason reason) {
  // TODO(turbofan): Cache the most common versions of this.
  DeoptimizeParameters parameter(kind, reason);
  return new (zone()) Operator1<DeoptimizeParameters>(  // --
      IrOpcode::kDeoptimize,                            // opcode
      Operator::kFoldable | Operator::kNoThrow,         // properties
      "Deoptimize",                                     // name
      1, 1, 1, 0, 0, 1,                                 // counts
      parameter);                                       // parameter
}
const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeReason reason) {
// TODO(turbofan): Cache the most common versions of this.
return new (zone()) Operator1<DeoptimizeReason>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
reason); // parameter
}
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeReason reason) {
// TODO(turbofan): Cache the most common versions of this.
return new (zone()) Operator1<DeoptimizeReason>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
reason); // parameter
}
const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
switch (hint) {

View File

@ -7,6 +7,7 @@
#include "src/assembler.h"
#include "src/compiler/frame-states.h"
#include "src/deoptimize-reason.h"
#include "src/machine-type.h"
#include "src/zone-containers.h"
@ -42,6 +43,8 @@ std::ostream& operator<<(std::ostream&, BranchHint);
BranchHint BranchHintOf(const Operator* const);
// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
// Deoptimize bailout kind.
enum class DeoptimizeKind : uint8_t { kEager, kSoft };
@ -50,8 +53,28 @@ size_t hash_value(DeoptimizeKind kind);
std::ostream& operator<<(std::ostream&, DeoptimizeKind);
DeoptimizeKind DeoptimizeKindOf(const Operator* const);
// Parameters for the {Deoptimize} operator: the bailout kind (eager/soft)
// plus the reason reported to --trace-deopt / the profiler.
class DeoptimizeParameters final {
 public:
  DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason)
      : kind_(kind), reason_(reason) {}

  DeoptimizeKind kind() const { return kind_; }
  DeoptimizeReason reason() const { return reason_; }

 private:
  DeoptimizeKind const kind_;
  DeoptimizeReason const reason_;
};

bool operator==(DeoptimizeParameters, DeoptimizeParameters);
bool operator!=(DeoptimizeParameters, DeoptimizeParameters);

// NOTE(review): declaration fixed from "hast_value" to "hash_value" so it
// matches the definition in common-operator.cc (hash_combine of kind/reason).
size_t hash_value(DeoptimizeParameters p);

std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);

DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const);
// Prediction whether throw-site is surrounded by any local catch-scope.
enum class IfExceptionHint { kLocallyUncaught, kLocallyCaught };
@ -170,9 +193,9 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind);
const Operator* DeoptimizeIf();
const Operator* DeoptimizeUnless();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
const Operator* DeoptimizeIf(DeoptimizeReason reason);
const Operator* DeoptimizeUnless(DeoptimizeReason reason);
const Operator* Return(int value_input_count = 1);
const Operator* Terminate();

View File

@ -1002,8 +1002,9 @@ EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
Node* limit = node->InputAt(1);
Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check,
frame_state, effect, control);
return ValueEffectControl(index, effect, control);
}
@ -1028,8 +1029,9 @@ EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state,
value, efalse0, if_false0);
Node* check1 = graph()->NewNode(machine()->WordEqual(), value_map,
jsgraph()->HeapNumberMapConstant());
if_false0 = efalse0 = graph()->NewNode(common()->DeoptimizeUnless(), check1,
frame_state, efalse0, if_false0);
if_false0 = efalse0 = graph()->NewNode(
common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check1,
frame_state, efalse0, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
@ -1043,8 +1045,9 @@ EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
Node* effect, Node* control) {
Node* value = node->InputAt(0);
control = effect = graph()->NewNode(common()->DeoptimizeUnless(), value,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNoReason),
value, frame_state, effect, control);
return ValueEffectControl(value, effect, control);
}
@ -1055,8 +1058,9 @@ EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
frame_state, effect, control);
return ValueEffectControl(value, effect, control);
}
@ -1067,8 +1071,9 @@ EffectControlLinearizer::LowerCheckTaggedSigned(Node* node, Node* frame_state,
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
check, frame_state, effect, control);
return ValueEffectControl(value, effect, control);
}
@ -1083,8 +1088,9 @@ EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
Node* check = graph()->NewNode(common()->Projection(1), value, control);
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
check, frame_state, effect, control);
value = graph()->NewNode(common()->Projection(0), value, control);
@ -1101,8 +1107,9 @@ EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
Node* check = graph()->NewNode(common()->Projection(1), value, control);
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
check, frame_state, effect, control);
value = graph()->NewNode(common()->Projection(0), value, control);
@ -1138,13 +1145,15 @@ EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
{
// Check if {rhs} is zero.
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
if_false0 = efalse0 = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, efalse0, if_false0);
if_false0 = efalse0 = graph()->NewNode(
common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
frame_state, efalse0, if_false0);
// Check if {lhs} is zero, as that would produce minus zero.
check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
if_false0 = efalse0 = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, efalse0, if_false0);
if_false0 = efalse0 =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
check, frame_state, efalse0, if_false0);
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable.
@ -1157,8 +1166,9 @@ EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
{
// Check if {rhs} is -1.
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
if_true1 = etrue1 = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, etrue1, if_true1);
if_true1 = etrue1 =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
check, frame_state, etrue1, if_true1);
}
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
@ -1182,8 +1192,9 @@ EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
Node* check =
graph()->NewNode(machine()->Word32Equal(), lhs,
graph()->NewNode(machine()->Int32Mul(), rhs, value));
control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
frame_state, effect, control);
return ValueEffectControl(value, effect, control);
}
@ -1200,8 +1211,9 @@ EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
frame_state, effect, control);
// Check if {lhs} is positive or zero.
Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), zero, lhs);
@ -1231,8 +1243,9 @@ EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
{
// Check if {rhs} is -1.
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
if_true1 = etrue1 = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, etrue1, if_true1);
if_true1 = etrue1 =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
check, frame_state, etrue1, if_true1);
}
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
@ -1248,8 +1261,9 @@ EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
// Check if the result is zero, because in that case we'd have to return
// -0 here since we always take the sign of the {lhs} which is negative.
Node* check = graph()->NewNode(machine()->Word32Equal(), vfalse0, zero);
if_false0 = efalse0 = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, efalse0, if_false0);
if_false0 = efalse0 =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
check, frame_state, efalse0, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
@ -1271,8 +1285,9 @@ EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
frame_state, effect, control);
// Perform the actual unsigned integer division.
Node* value = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, control);
@ -1280,8 +1295,9 @@ EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
// Check if the remainder is non-zero.
check = graph()->NewNode(machine()->Word32Equal(), lhs,
graph()->NewNode(machine()->Int32Mul(), rhs, value));
control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
frame_state, effect, control);
return ValueEffectControl(value, effect, control);
}
@ -1296,8 +1312,9 @@ EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, Node* frame_state,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
frame_state, effect, control);
// Perform the actual unsigned integer modulus.
Node* value = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, control);
@ -1316,8 +1333,9 @@ EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
graph()->NewNode(machine()->Int32MulWithOverflow(), lhs, rhs, control);
Node* check = graph()->NewNode(common()->Projection(1), projection, control);
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
check, frame_state, effect, control);
Node* value = graph()->NewNode(common()->Projection(0), projection, control);
@ -1332,8 +1350,9 @@ EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
Node* or_inputs = graph()->NewNode(machine()->Word32Or(), lhs, rhs);
Node* check_or =
graph()->NewNode(machine()->Int32LessThan(), or_inputs, zero);
if_zero = e_if_zero = graph()->NewNode(common()->DeoptimizeIf(), check_or,
frame_state, e_if_zero, if_zero);
if_zero = e_if_zero =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
check_or, frame_state, e_if_zero, if_zero);
}
Node* if_not_zero = graph()->NewNode(common()->IfFalse(), branch_zero);
@ -1355,8 +1374,9 @@ EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
Node* is_safe =
graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
control = effect = graph()->NewNode(common()->DeoptimizeUnless(), is_safe,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), is_safe,
frame_state, effect, control);
return ValueEffectControl(value, effect, control);
}
@ -1370,8 +1390,9 @@ EffectControlLinearizer::BuildCheckedFloat64ToInt32(Node* value,
Node* check_same = graph()->NewNode(
machine()->Float64Equal(), value,
graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check_same,
frame_state, effect, control);
control = effect = graph()->NewNode(
common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN),
check_same, frame_state, effect, control);
// Check if {value} is -0.
Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
@ -1388,8 +1409,9 @@ EffectControlLinearizer::BuildCheckedFloat64ToInt32(Node* value,
graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
jsgraph()->Int32Constant(0));
Node* deopt_minus_zero = graph()->NewNode(
common()->DeoptimizeIf(), check_negative, frame_state, effect, if_zero);
Node* deopt_minus_zero =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
check_negative, frame_state, effect, if_zero);
Node* merge =
graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
@ -1437,8 +1459,9 @@ EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
value, efalse, if_false);
Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
jsgraph()->HeapNumberMapConstant());
if_false = efalse = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, efalse, if_false);
if_false = efalse = graph()->NewNode(
common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check,
frame_state, efalse, if_false);
vfalse = efalse = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
efalse, if_false);
@ -1482,8 +1505,9 @@ EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
graph()->NewNode(machine()->Word32Equal(), instance_type,
jsgraph()->Int32Constant(ODDBALL_TYPE));
if_false = efalse =
graph()->NewNode(common()->DeoptimizeUnless(), check_oddball, frame_state,
efalse, if_false);
graph()->NewNode(common()->DeoptimizeUnless(
DeoptimizeReason::kNotAHeapNumberUndefinedBoolean),
check_oddball, frame_state, efalse, if_false);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@ -1882,8 +1906,9 @@ EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
machine()->Word32Equal(),
graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
jsgraph()->Int32Constant(kHoleNanUpper32));
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
frame_state, effect, control);
return ValueEffectControl(value, effect, control);
}
@ -1902,8 +1927,9 @@ EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
check, jsgraph()->UndefinedConstant(), value);
break;
case CheckTaggedHoleMode::kNeverReturnHole:
control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
frame_state, effect, control);
control = effect =
graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole),
check, frame_state, effect, control);
break;
}

View File

@ -1699,6 +1699,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -498,7 +498,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1169,7 +1169,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@ -1187,7 +1187,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1326,7 +1326,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1440,14 +1440,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -325,13 +325,14 @@ class FlagsContinuation final {
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(FlagsCondition condition,
DeoptimizeReason reason,
Node* frame_state) {
return FlagsContinuation(kFlags_deoptimize, condition, frame_state);
return FlagsContinuation(condition, reason, frame_state);
}
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
return FlagsContinuation(kFlags_set, condition, result);
return FlagsContinuation(condition, result);
}
bool IsNone() const { return mode_ == kFlags_none; }
@ -342,6 +343,10 @@ class FlagsContinuation final {
DCHECK(!IsNone());
return condition_;
}
DeoptimizeReason reason() const {
DCHECK(IsDeoptimize());
return reason_;
}
Node* frame_state() const {
DCHECK(IsDeoptimize());
return frame_state_or_result_;
@ -387,16 +392,24 @@ class FlagsContinuation final {
}
private:
FlagsContinuation(FlagsMode mode, FlagsCondition condition,
Node* frame_state_or_result)
: mode_(mode),
FlagsContinuation(FlagsCondition condition, DeoptimizeReason reason,
Node* frame_state)
: mode_(kFlags_deoptimize),
condition_(condition),
frame_state_or_result_(frame_state_or_result) {
DCHECK_NOT_NULL(frame_state_or_result);
reason_(reason),
frame_state_or_result_(frame_state) {
DCHECK_NOT_NULL(frame_state);
}
FlagsContinuation(FlagsCondition condition, Node* result)
: mode_(kFlags_set),
condition_(condition),
frame_state_or_result_(result) {
DCHECK_NOT_NULL(result);
}
FlagsMode const mode_;
FlagsCondition condition_;
DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.

View File

@ -644,9 +644,9 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
}
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
int const state_id = sequence()->AddDeoptimizationEntry(
buffer->frame_state_descriptor, DeoptimizeReason::kNoReason);
buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
@ -833,9 +833,9 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
return VisitReturn(input);
}
case BasicBlock::kDeoptimize: {
DeoptimizeKind kind = DeoptimizeKindOf(input->op());
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
Node* value = input->InputAt(0);
return VisitDeoptimize(kind, value);
return VisitDeoptimize(p.kind(), p.reason(), value);
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@ -1883,21 +1883,20 @@ void InstructionSelector::VisitReturn(Node* ret) {
}
}
Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
InstructionOperand output,
InstructionOperand a,
InstructionOperand b,
Node* frame_state) {
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
InstructionOperand b, DeoptimizeReason reason, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
frame_state);
reason, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, Node* frame_state) {
size_t input_count, InstructionOperand* inputs, DeoptimizeReason reason,
Node* frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
InstructionOperandVector args(instruction_zone());
@ -1906,9 +1905,8 @@ Instruction* InstructionSelector::EmitDeoptimize(
args.push_back(inputs[i]);
}
opcode |= MiscField::encode(static_cast<int>(input_count));
InstructionSequence::StateId const state_id =
sequence()->AddFrameStateDescriptor(descriptor);
args.push_back(g.TempImmediate(state_id.ToInt()));
int const state_id = sequence()->AddDeoptimizationEntry(descriptor, reason);
args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
&args, FrameStateInputKind::kAny,
@ -1923,7 +1921,9 @@ void InstructionSelector::EmitIdentity(Node* node) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
}
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
Node* value) {
InstructionCode opcode = kArchDeoptimize;
switch (kind) {
case DeoptimizeKind::kEager:
@ -1933,7 +1933,7 @@ void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
opcode |= MiscField::encode(Deoptimizer::SOFT);
break;
}
EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, value);
EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, reason, value);
}

View File

@ -105,10 +105,11 @@ class InstructionSelector final {
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
Node* frame_state);
DeoptimizeReason reason, Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, Node* frame_state);
InstructionOperand* inputs,
DeoptimizeReason reason, Node* frame_state);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
@ -291,7 +292,8 @@ class InstructionSelector final {
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, Node* value);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
Node* value);
void VisitReturn(Node* ret);
void VisitThrow(Node* value);

View File

@ -836,22 +836,16 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
representations_[virtual_register] = rep;
}
InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
FrameStateDescriptor* descriptor) {
int InstructionSequence::AddDeoptimizationEntry(
FrameStateDescriptor* descriptor, DeoptimizeReason reason) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
deoptimization_entries_.push_back(descriptor);
return StateId::FromInt(deoptimization_id);
deoptimization_entries_.push_back(DeoptimizationEntry(descriptor, reason));
return deoptimization_id;
}
FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
InstructionSequence::StateId state_id) {
return deoptimization_entries_[state_id.ToInt()];
}
int InstructionSequence::GetFrameStateDescriptorCount() {
return static_cast<int>(deoptimization_entries_.size());
DeoptimizationEntry const& InstructionSequence::GetDeoptimizationEntry(
int state_id) {
return deoptimization_entries_[state_id];
}

View File

@ -1161,9 +1161,23 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* outer_state_;
};
// A deoptimization entry is a pair of the reason why we deoptimize and the
// frame state descriptor that we have to go back to.
class DeoptimizationEntry final {
public:
DeoptimizationEntry() {}
DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeReason reason)
: descriptor_(descriptor), reason_(reason) {}
typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
FrameStateDescriptor* descriptor() const { return descriptor_; }
DeoptimizeReason reason() const { return reason_; }
private:
FrameStateDescriptor* descriptor_ = nullptr;
DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
};
typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
class PhiInstruction final : public ZoneObject {
public:
@ -1414,21 +1428,11 @@ class InstructionSequence final : public ZoneObject {
return Constant(static_cast<int32_t>(0));
}
class StateId {
public:
static StateId FromInt(int id) { return StateId(id); }
int ToInt() const { return id_; }
private:
explicit StateId(int id) : id_(id) {}
int id_;
};
StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
int GetFrameStateDescriptorCount();
DeoptimizationVector const& frame_state_descriptors() const {
return deoptimization_entries_;
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
DeoptimizeReason reason);
DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
int GetDeoptimizationEntryCount() const {
return static_cast<int>(deoptimization_entries_.size());
}
RpoNumber InputRpo(Instruction* instr, size_t index);

View File

@ -104,9 +104,9 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
Node* deoptimize =
graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect, control);
Node* deoptimize = graph()->NewNode(
common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());

View File

@ -399,7 +399,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (nexus.IsUninitialized()) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
return ReduceSoftDeoptimize(node);
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
}
return NoChange();
}
@ -411,7 +413,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
} else if (receiver_maps.length() == 0) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
return ReduceSoftDeoptimize(node);
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
}
return NoChange();
}
@ -830,7 +834,9 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
if (nexus.IsUninitialized()) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
return ReduceSoftDeoptimize(node);
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
}
return NoChange();
}
@ -842,7 +848,9 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
} else if (receiver_maps.length() == 0) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
return ReduceSoftDeoptimize(node);
return ReduceSoftDeoptimize(
node,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
}
return NoChange();
}
@ -880,14 +888,14 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
language_mode, store_mode);
}
Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
Node* node, DeoptimizeReason reason) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
Node* deoptimize =
graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
effect, control);
graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());

View File

@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
#include "src/deoptimize-reason.h"
namespace v8 {
namespace internal {
@ -77,7 +78,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
LanguageMode language_mode,
Node* index = nullptr);
Reduction ReduceSoftDeoptimize(Node* node);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.

View File

@ -1835,6 +1835,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -124,7 +124,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1100,7 +1100,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1300,7 +1300,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->frame_state());
g.TempImmediate(0), cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@ -1317,14 +1318,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -2128,6 +2128,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -129,7 +129,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1512,7 +1512,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1637,7 +1637,8 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->frame_state());
g.TempImmediate(0), cont->reason(),
cont->frame_state());
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@ -1774,14 +1775,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -2006,6 +2006,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -154,7 +154,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1498,7 +1498,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1741,14 +1741,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -2016,6 +2016,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -148,7 +148,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1365,7 +1365,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1597,14 +1597,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -2138,6 +2138,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -440,7 +440,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1506,7 +1506,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@ -1524,7 +1524,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1641,7 +1641,7 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1825,14 +1825,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -2217,6 +2217,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
__ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}

View File

@ -459,7 +459,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@ -1178,7 +1178,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@ -1196,7 +1196,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1259,7 +1259,7 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float32Cmp),
@ -1280,7 +1280,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
cont->frame_state());
cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float64Cmp),
@ -1359,7 +1359,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@ -1474,14 +1474,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

View File

@ -769,9 +769,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@ -846,9 +845,8 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@ -981,7 +979,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done);
}
@ -999,7 +997,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1014,7 +1012,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@ -1034,7 +1032,7 @@ void LCodeGen::DoModI(LModI* instr) {
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
@ -1045,7 +1043,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
@ -1066,7 +1064,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
@ -1091,7 +1089,7 @@ void LCodeGen::DoModI(LModI* instr) {
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
__ Move(result_reg, left_reg);
@ -1121,7 +1119,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@ -1139,19 +1137,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@ -1179,7 +1177,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1187,7 +1185,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@ -1197,7 +1195,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1212,7 +1210,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1224,7 +1222,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&positive);
}
@ -1236,7 +1234,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@ -1259,7 +1257,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1310,13 +1308,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
return;
}
@ -1339,7 +1337,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1347,7 +1345,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -1388,7 +1386,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1400,7 +1398,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&positive);
}
@ -1412,7 +1410,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@ -1458,14 +1456,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ rsb(result, left, Operand::Zero());
}
@ -1475,7 +1473,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ mov(result, Operand::Zero());
break;
@ -1525,7 +1523,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@ -1541,7 +1539,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -1604,7 +1602,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(mi, instr, DeoptimizeReason::kNegativeValue);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
@ -1641,7 +1639,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
}
__ Move(result, left);
}
@ -1656,7 +1654,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTag(result, left, SetCC);
}
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
@ -1688,7 +1686,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (can_overflow) {
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@ -1709,7 +1707,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
}
if (can_overflow) {
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@ -1850,7 +1848,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
if (can_overflow) {
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@ -2089,7 +2087,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
const Register map = scratch0();
@ -2151,7 +2149,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@ -2515,10 +2513,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ldrb(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
@ -2636,7 +2634,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
@ -2657,7 +2655,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ b(ne, &skip_assignment);
}
@ -2735,7 +2733,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@ -2849,7 +2847,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(cs, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@ -2904,7 +2902,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
}
@ -2938,11 +2936,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -2957,7 +2955,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
DeoptimizeIf(ne, instr, Deoptimizer::kHole);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@ -3102,9 +3100,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@ -3138,7 +3136,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@ -3292,7 +3290,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@ -3360,7 +3358,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
@ -3407,7 +3405,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3415,7 +3413,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@ -3441,7 +3439,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
// [-0.5, -0].
DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
@ -3455,7 +3453,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&done);
}
@ -3519,7 +3517,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -3893,7 +3891,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@ -4184,7 +4182,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
@ -4231,7 +4229,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@ -4555,12 +4553,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ SmiTag(output, input);
}
@ -4574,7 +4572,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(cs, instr, DeoptimizeReason::kNotASmi);
} else {
__ SmiUntag(result, input);
}
@ -4602,7 +4600,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
@ -4612,7 +4610,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
@ -4620,7 +4618,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
@ -4688,22 +4686,22 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ mov(input_reg, Operand::Zero());
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
}
}
__ bind(&done);
@ -4772,14 +4770,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -4797,26 +4795,26 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
}
@ -4824,7 +4822,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
}
@ -4837,7 +4835,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
}
@ -4857,13 +4855,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@ -4874,11 +4872,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(tag == 0 ? ne : eq, instr,
DeoptimizeReason::kWrongInstanceType);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@ -4897,7 +4896,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ cmp(reg, Operand(object));
}
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@ -4912,7 +4911,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@ -4970,7 +4969,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@ -5009,7 +5008,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
@ -5442,7 +5441,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
__ bind(&done);
}
@ -5453,7 +5452,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}

View File

@ -231,10 +231,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void AddToTranslation(LEnvironment* environment,
Translation* translation,

View File

@ -824,11 +824,9 @@ void LCodeGen::FinishCode(Handle<Code> code) {
PopulateDeoptimizationData(code);
}
void LCodeGen::DeoptimizeBranch(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
BranchType branch_type, Register reg, int bit,
Deoptimizer::BailoutType* override_bailout_type) {
LInstruction* instr, DeoptimizeReason deopt_reason, BranchType branch_type,
Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
@ -900,70 +898,59 @@ void LCodeGen::DeoptimizeBranch(
}
}
void LCodeGen::Deoptimize(LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
void LCodeGen::Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type) {
DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
override_bailout_type);
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
__ CompareRoot(rt, index);
DeoptimizeIf(eq, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
__ CompareRoot(rt, index);
DeoptimizeIf(ne, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
__ TestForMinusZero(input);
DeoptimizeIf(vs, instr, deopt_reason);
}
@ -971,18 +958,16 @@ void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}
@ -1361,7 +1346,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (can_overflow) {
__ Adds(result, left, right);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, right);
}
@ -1375,7 +1360,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, right);
}
@ -1538,7 +1523,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@ -1730,7 +1715,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(cond, instr, DeoptimizeReason::kOutOfBounds);
}
}
@ -1810,7 +1795,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
}
Register map = NoReg;
@ -1877,7 +1862,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
Deoptimize(instr, Deoptimizer::kUnexpectedObject);
Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@ -2013,7 +1998,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
DeoptimizeIfSmi(temp, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@ -2068,7 +2053,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ Bind(&success);
@ -2077,7 +2062,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
DeoptimizeIfSmi(ToRegister(instr->value()), instr, DeoptimizeReason::kSmi);
}
}
@ -2085,7 +2070,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
DeoptimizeIfNotSmi(value, instr, DeoptimizeReason::kNotASmi);
}
@ -2098,7 +2083,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
}
@ -2116,15 +2101,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
}
} else {
uint8_t mask;
@ -2135,10 +2120,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
Deoptimizer::kWrongInstanceType);
DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
Deoptimizer::kWrongInstanceType);
DeoptimizeReason::kWrongInstanceType);
}
} else {
if (tag == 0) {
@ -2147,7 +2132,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@ -2187,7 +2172,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is coverted to zero for clamping conversion.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeReason::kNotAHeapNumberUndefined);
__ Mov(result, 0);
__ B(&done);
@ -2440,7 +2425,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmp(reg, Operand(object));
}
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@ -2477,21 +2462,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@ -2519,14 +2504,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
Deoptimize(instr, Deoptimizer::kDivisionByZero);
Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@ -2538,7 +2523,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
DeoptimizeIfNotZero(temp, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -2561,7 +2546,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) as that will produce negative zero.
@ -2573,7 +2558,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
@ -2585,13 +2570,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
DeoptimizeIfNotZero(remainder, instr, DeoptimizeReason::kLostPrecision);
}
@ -2600,11 +2585,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->tag_result()) {
__ SmiTag(result.X());
@ -2644,7 +2629,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache);
__ Bind(&done);
}
@ -2790,10 +2775,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ Ldrb(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
@ -2981,7 +2966,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
Deoptimizer::kHole);
DeoptimizeReason::kHole);
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@ -3003,7 +2988,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
Deoptimizer::kHole);
DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@ -3150,7 +3135,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@ -3246,7 +3231,7 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
Register scratch = ToRegister(instr->temp());
__ Fmov(scratch, result);
__ Eor(scratch, scratch, kHoleNanInt64);
DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
DeoptimizeIfZero(scratch, instr, DeoptimizeReason::kHole);
}
}
@ -3284,10 +3269,10 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
DeoptimizeIfNotSmi(result, instr, DeoptimizeReason::kNotASmi);
} else {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
Deoptimizer::kHole);
DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -3301,7 +3286,7 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
DeoptimizeIf(ne, instr, Deoptimizer::kHole);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ Bind(&done);
@ -3395,7 +3380,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@ -3553,7 +3538,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
}
__ Fcvtms(result, input);
@ -3563,7 +3548,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
}
@ -3589,13 +3574,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
return;
}
@ -3618,14 +3603,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
Deoptimize(instr, Deoptimizer::kDivisionByZero);
Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -3668,14 +3653,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Check for (0 / -x) that will produce negative zero.
@ -3685,7 +3670,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
Label done;
@ -3843,18 +3828,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
DeoptimizeIf(hi, instr, DeoptimizeReason::kOverflow);
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
DeoptimizeIfNegative(result, instr, DeoptimizeReason::kMinusZero);
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
DeoptimizeIf(vs, instr, DeoptimizeReason::kNaN);
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@ -3932,7 +3917,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ B(&done);
}
@ -3951,7 +3936,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
Deoptimize(instr, Deoptimizer::kDivisionByZero);
Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -3965,7 +3950,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@ -3980,12 +3965,12 @@ void LCodeGen::DoModI(LModI* instr) {
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
}
__ Bind(&done);
}
@ -4008,10 +3993,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
DeoptimizeIfZero(left, instr, DeoptimizeReason::kMinusZero);
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
DeoptimizeIfNegative(left, instr, DeoptimizeReason::kMinusZero);
}
}
@ -4021,7 +4006,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Neg(result, left);
}
@ -4037,7 +4022,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
case 2:
if (can_overflow) {
__ Adds(result, left, left);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, left);
}
@ -4056,7 +4041,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow);
}
if (right >= 0) {
@ -4066,7 +4051,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@ -4124,13 +4109,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
__ Mul(result, left, right);
}
@ -4154,7 +4139,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@ -4162,7 +4147,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@ -4333,14 +4318,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
DeoptimizeIfMinusZero(result, instr, DeoptimizeReason::kMinusZero);
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@ -4528,7 +4513,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
DeoptimizeIfNegative(input.W(), instr, DeoptimizeReason::kOverflow);
}
__ SmiTag(output, input);
}
@ -4540,7 +4525,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Label done, untag;
if (instr->needs_check()) {
DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
DeoptimizeIfNotSmi(input, instr, DeoptimizeReason::kNotASmi);
}
__ Bind(&untag);
@ -4565,7 +4550,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
}
break;
default: UNREACHABLE();
@ -4575,7 +4560,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@ -4628,7 +4613,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
}
break;
default: UNREACHABLE();
@ -4638,7 +4623,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
}
__ Mov(result, left);
} else {
@ -4769,7 +4754,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
Deoptimizer::kHole);
DeoptimizeReason::kHole);
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@ -5043,7 +5028,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
}
// Deopt on smi, which means the elements array changed to dictionary mode.
DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
DeoptimizeIfSmi(result, instr, DeoptimizeReason::kSmi);
}
@ -5285,7 +5270,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
if (can_overflow) {
__ Subs(result, left, right);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Sub(result, left, right);
}
@ -5299,7 +5284,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Sub(result, left, right);
}
@ -5340,7 +5325,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// Output contains zero, undefined is converted to zero for truncating
// conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
Deoptimizer::kNotAHeapNumberUndefinedBoolean);
DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@ -5351,13 +5336,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
DeoptimizeIfNegative(scratch1, instr, DeoptimizeReason::kMinusZero);
}
}
__ Bind(&done);
@ -5447,7 +5432,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ Bind(&no_memento_found);
}
@ -5593,7 +5578,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
@ -5627,10 +5612,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
DeoptimizeIfSmi(receiver, instr, DeoptimizeReason::kSmi);
__ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
__ B(ge, &copy_receiver);
Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
Deoptimize(instr, DeoptimizeReason::kNotAJavaScriptObject);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));

View File

@ -199,38 +199,35 @@ class LCodeGen: public LCodeGenBase {
Register temp,
LOperand* index,
String::Encoding encoding);
void DeoptimizeBranch(LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
void DeoptimizeBranch(LInstruction* instr, DeoptimizeReason deopt_reason,
BranchType branch_type, Register reg = NoReg,
int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL);
void Deoptimize(LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
void Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type = NULL);
void DeoptimizeIf(Condition cond, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfZero(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfNegative(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfSmi(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
LInstruction* instr, DeoptimizeReason deopt_reason);
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
LInstruction* instr, DeoptimizeReason deopt_reason);
void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,

View File

@ -1301,7 +1301,7 @@ class HGoto final : public HTemplateControlInstruction<1, 0> {
class HDeoptimize final : public HTemplateControlInstruction<1, 0> {
public:
static HDeoptimize* New(Isolate* isolate, Zone* zone, HValue* context,
Deoptimizer::DeoptReason reason,
DeoptimizeReason reason,
Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation) {
return new(zone) HDeoptimize(reason, type, unreachable_continuation);
@ -1316,20 +1316,19 @@ class HDeoptimize final : public HTemplateControlInstruction<1, 0> {
return Representation::None();
}
Deoptimizer::DeoptReason reason() const { return reason_; }
DeoptimizeReason reason() const { return reason_; }
Deoptimizer::BailoutType type() { return type_; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
private:
explicit HDeoptimize(Deoptimizer::DeoptReason reason,
Deoptimizer::BailoutType type,
explicit HDeoptimize(DeoptimizeReason reason, Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation)
: reason_(reason), type_(type) {
SetSuccessorAt(0, unreachable_continuation);
}
Deoptimizer::DeoptReason reason_;
DeoptimizeReason reason_;
Deoptimizer::BailoutType type_;
};

View File

@ -1106,8 +1106,7 @@ void HGraphBuilder::IfBuilder::Else() {
did_else_ = true;
}
void HGraphBuilder::IfBuilder::Deopt(Deoptimizer::DeoptReason reason) {
void HGraphBuilder::IfBuilder::Deopt(DeoptimizeReason reason) {
DCHECK(did_then_);
builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
AddMergeAtJoinBlock(true);
@ -1513,9 +1512,7 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
return Add<HCheckHeapObject>(obj);
}
void HGraphBuilder::FinishExitWithHardDeoptimization(
Deoptimizer::DeoptReason reason) {
void HGraphBuilder::FinishExitWithHardDeoptimization(DeoptimizeReason reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
FinishExitCurrentBlock(New<HAbnormalExit>());
}
@ -1833,7 +1830,7 @@ void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
IfBuilder if_global_object(this);
if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
Token::EQ);
if_global_object.ThenDeopt(Deoptimizer::kReceiverWasAGlobalObject);
if_global_object.ThenDeopt(DeoptimizeReason::kReceiverWasAGlobalObject);
if_global_object.End();
}
@ -2183,7 +2180,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
if_objectissmi.Else();
{
if (type->Is(Type::SignedSmall())) {
if_objectissmi.Deopt(Deoptimizer::kExpectedSmi);
if_objectissmi.Deopt(DeoptimizeReason::kExpectedSmi);
} else {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
@ -2239,7 +2236,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
if_objectisnumber.Else();
{
if (type->Is(Type::Number())) {
if_objectisnumber.Deopt(Deoptimizer::kExpectedHeapNumber);
if_objectisnumber.Deopt(DeoptimizeReason::kExpectedHeapNumber);
}
}
if_objectisnumber.JoinContinuation(&found);
@ -2332,7 +2329,7 @@ HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
constructor_function_index,
Add<HConstant>(Map::kNoConstructorFunctionIndex), Token::EQ);
constructor_function_index_is_invalid.ThenDeopt(
Deoptimizer::kUndefinedOrNullInToObject);
DeoptimizeReason::kUndefinedOrNullInToObject);
constructor_function_index_is_invalid.End();
// Use the global constructor function.
@ -2840,7 +2837,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HInstruction* result = AddElementAccess(
backing_store, key, val, bounds_check, checked_object->ActualValue(),
elements_kind, access_type);
negative_checker.ElseDeopt(Deoptimizer::kNegativeKeyEncountered);
negative_checker.ElseDeopt(DeoptimizeReason::kNegativeKeyEncountered);
negative_checker.End();
length_checker.End();
return result;
@ -5272,7 +5269,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
if_undefined_or_null.Or();
if_undefined_or_null.If<HCompareObjectEqAndBranch>(
enumerable, graph()->GetConstantNull());
if_undefined_or_null.ThenDeopt(Deoptimizer::kUndefinedOrNullInForIn);
if_undefined_or_null.ThenDeopt(DeoptimizeReason::kUndefinedOrNullInForIn);
if_undefined_or_null.End();
BuildForInBody(stmt, each_var, enumerable);
}
@ -6684,7 +6681,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
// use a generic IC.
if (count == maps->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization(
Deoptimizer::kUnknownMapInPolymorphicAccess);
DeoptimizeReason::kUnknownMapInPolymorphicAccess);
} else {
HInstruction* instr =
BuildNamedGeneric(access_type, expr, slot, object, name, value);
@ -6870,7 +6867,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (value->IsConstant()) {
HConstant* c_value = HConstant::cast(value);
if (!constant.is_identical_to(c_value->handle(isolate()))) {
Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
Deoptimizer::EAGER);
}
} else {
@ -6883,7 +6880,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
builder.Then();
builder.Else();
Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
Deoptimizer::EAGER);
builder.End();
}
@ -7232,7 +7229,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForGenericNamedAccess,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess,
Deoptimizer::SOFT);
}
if (access_type == LOAD) {
@ -7590,7 +7587,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
FinishExitWithHardDeoptimization(
Deoptimizer::kUnknownMapInPolymorphicElementAccess);
DeoptimizeReason::kUnknownMapInPolymorphicElementAccess);
set_current_block(join);
return access_type == STORE ? val : Pop();
}
@ -7707,13 +7704,15 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedStore,
Deoptimizer::SOFT);
Add<HDeoptimize>(
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess,
Deoptimizer::SOFT);
}
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedLoad,
Deoptimizer::SOFT);
Add<HDeoptimize>(
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess,
Deoptimizer::SOFT);
}
}
instr = AddInstruction(
@ -8190,7 +8189,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == maps->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization(Deoptimizer::kUnknownMapInPolymorphicCall);
FinishExitWithHardDeoptimization(
DeoptimizeReason::kUnknownMapInPolymorphicCall);
} else {
Property* prop = expr->expression()->AsProperty();
HInstruction* function =
@ -9800,7 +9800,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// We have to use EAGER deoptimization here because Deoptimizer::SOFT
// gets ignored by the always-opt flag, which leads to incorrect code.
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForCallWithArguments,
DeoptimizeReason::kInsufficientTypeFeedbackForCallWithArguments,
Deoptimizer::EAGER);
arguments_flag = ARGUMENTS_FAKED;
}
@ -11038,7 +11038,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
if (!left_type->IsInhabited()) {
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
DeoptimizeReason::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
Deoptimizer::SOFT);
left_type = Type::Any();
left_rep = RepresentationFor(left_type);
@ -11047,7 +11047,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
if (!right_type->IsInhabited()) {
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
DeoptimizeReason::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
Deoptimizer::SOFT);
right_type = Type::Any();
right_rep = RepresentationFor(right_type);
@ -11231,7 +11231,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
IfBuilder if_same(this);
if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
if_same.Then();
if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
if_same.ElseDeopt(DeoptimizeReason::kUnexpectedRHSOfBinaryOperation);
right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
@ -11603,7 +11603,8 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
// soft deoptimize when there is no type feedback.
if (!combined_type->IsInhabited()) {
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
DeoptimizeReason::
kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
Deoptimizer::SOFT);
combined_type = left_type = right_type = Type::Any();
}
@ -11620,8 +11621,9 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HConstant::cast(left)->HasNumberValue()) ||
(right->IsConstant() &&
HConstant::cast(right)->HasNumberValue())) {
Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
Deoptimizer::SOFT);
Add<HDeoptimize>(
DeoptimizeReason::kTypeMismatchBetweenFeedbackAndConstant,
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
}
@ -11703,8 +11705,9 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
!HConstant::cast(left)->HasInternalizedStringValue()) ||
(right->IsConstant() &&
!HConstant::cast(right)->HasInternalizedStringValue())) {
Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
Deoptimizer::SOFT);
Add<HDeoptimize>(
DeoptimizeReason::kTypeMismatchBetweenFeedbackAndConstant,
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
}

View File

@ -1517,7 +1517,7 @@ class HGraphBuilder {
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
void FinishExitWithHardDeoptimization(Deoptimizer::DeoptReason reason);
void FinishExitWithHardDeoptimization(DeoptimizeReason reason);
void AddIncrementCounter(StatsCounter* counter);
@ -1666,12 +1666,12 @@ class HGraphBuilder {
void End();
void EndUnreachable();
void Deopt(Deoptimizer::DeoptReason reason);
void ThenDeopt(Deoptimizer::DeoptReason reason) {
void Deopt(DeoptimizeReason reason);
void ThenDeopt(DeoptimizeReason reason) {
Then();
Deopt(reason);
}
void ElseDeopt(Deoptimizer::DeoptReason reason) {
void ElseDeopt(DeoptimizeReason reason) {
Else();
Deopt(reason);
}
@ -1928,10 +1928,9 @@ class HGraphBuilder {
int start_position_;
};
template <>
inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
DeoptimizeReason reason, Deoptimizer::BailoutType type) {
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return NULL;
@ -1948,10 +1947,9 @@ inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
return instr;
}
template <>
inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
DeoptimizeReason reason, Deoptimizer::BailoutType type) {
return Add<HDeoptimize>(reason, type);
}

View File

@ -682,9 +682,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@ -749,9 +748,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@ -882,7 +880,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@ -899,7 +897,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -914,7 +912,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@ -936,7 +934,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@ -947,7 +945,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@ -966,7 +964,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@ -986,19 +984,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@ -1019,7 +1017,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1027,7 +1025,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@ -1037,7 +1035,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1057,7 +1055,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1066,7 +1064,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1076,7 +1074,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@ -1087,7 +1085,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1109,13 +1107,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
return;
}
@ -1142,7 +1140,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1150,7 +1148,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -1197,7 +1195,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1206,7 +1204,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1216,7 +1214,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@ -1294,7 +1292,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -1304,15 +1302,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@ -1385,7 +1383,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@ -1402,7 +1400,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
} else {
__ ror(ToRegister(left), shift_count);
}
@ -1417,7 +1415,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@ -1428,7 +1426,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
__ shl(ToRegister(left), shift_count);
}
@ -1454,7 +1452,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -1626,7 +1624,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
}
@ -1885,7 +1883,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
@ -1948,7 +1946,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@ -2306,10 +2304,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, factory()->null_value());
@ -2431,7 +2429,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@ -2452,7 +2450,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@ -2549,7 +2547,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@ -2633,7 +2631,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@ -2665,7 +2663,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@ -2692,10 +2690,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
} else {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -2709,7 +2707,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ cmp(FieldOperand(result, PropertyCell::kValueOffset),
Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ mov(result, isolate()->factory()->undefined_value());
__ bind(&done);
@ -2859,9 +2857,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@ -2885,7 +2883,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
__ push(receiver);
__ mov(receiver, length);
@ -3061,7 +3059,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
Label slow, allocated, done;
uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
@ -3119,7 +3117,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@ -3184,20 +3182,20 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ j(not_equal, &non_zero, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ bind(&non_zero);
}
__ roundsd(xmm_scratch, input_reg, kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3206,7 +3204,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
@ -3216,7 +3214,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@ -3227,7 +3225,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ bind(&done);
}
@ -3270,7 +3268,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, dist);
__ bind(&below_one_half);
@ -3285,7 +3283,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmp(output_reg, 0x1);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@ -3301,7 +3299,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ Move(output_reg, Immediate(0));
__ bind(&done);
@ -3377,7 +3375,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!ecx.is(tagged_exponent));
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -3738,7 +3736,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@ -3897,7 +3895,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@ -3995,7 +3993,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
}
@ -4335,12 +4333,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -4351,7 +4349,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
} else {
__ AssertSmi(result);
}
@ -4378,7 +4376,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
}
// Heap number to XMM conversion.
@ -4391,7 +4389,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
@ -4400,7 +4398,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined to NaN.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(not_equal, instr,
DeoptimizeReason::kNotAHeapNumberUndefined);
__ pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
@ -4453,26 +4452,26 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
DeoptimizeIf(not_equal, instr,
Deoptimizer::kNotAHeapNumberUndefinedBoolean);
DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ Move(input_reg, Immediate(0));
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ Cvtsi2sd(scratch, Operand(input_reg));
__ ucomisd(xmm0, scratch);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
}
}
@ -4552,11 +4551,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -4578,21 +4577,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
__ SmiTag(result_reg);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
}
@ -4600,7 +4599,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
}
@ -4613,7 +4612,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}
@ -4632,13 +4631,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@ -4650,12 +4649,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
DeoptimizeReason::kWrongInstanceType);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@ -4671,7 +4670,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}
@ -4686,7 +4685,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@ -4740,7 +4739,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@ -4779,7 +4778,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
@ -5204,7 +5203,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
}
@ -5212,7 +5211,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}

View File

@ -204,10 +204,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();

View File

@ -372,7 +372,7 @@ void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason, int deopt_id) {
LInstruction* instr, DeoptimizeReason deopt_reason, int deopt_id) {
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
deopt_reason, deopt_id);
return deopt_info;

View File

@ -41,8 +41,9 @@ class LCodeGenBase BASE_EMBEDDED {
void PRINTF_FORMAT(2, 3) Comment(const char* format, ...);
void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
static Deoptimizer::DeoptInfo MakeDeoptInfo(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason, int deopt_id);
static Deoptimizer::DeoptInfo MakeDeoptInfo(LInstruction* instr,
DeoptimizeReason deopt_reason,
int deopt_id);
bool GenerateBody();
virtual void GenerateBodyInstructionPre(LInstruction* instr) {}

View File

@ -744,9 +744,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1, const Operand& src2) {
LEnvironment* environment = instr->environment();
@ -811,10 +810,9 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
Register src1, const Operand& src2) {
DeoptimizeReason deopt_reason, Register src1,
const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@ -946,7 +944,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ subu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
@ -979,7 +977,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
__ bind(&remainder_not_zero);
}
@ -999,7 +997,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
Operand(zero_reg));
}
@ -1009,7 +1007,8 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@ -1021,7 +1020,7 @@ void LCodeGen::DoModI(LModI* instr) {
// If we care about -0, test if the dividend is <0 and the result is 0.
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
Operand(zero_reg));
}
__ bind(&done);
@ -1038,19 +1037,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@ -1087,7 +1088,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@ -1097,7 +1098,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Mul(scratch0(), result, Operand(divisor));
__ Subu(scratch0(), scratch0(), dividend);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
Operand(zero_reg));
}
}
@ -1117,7 +1118,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@ -1125,7 +1126,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@ -1135,12 +1136,12 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
Operand(zero_reg));
}
}
@ -1187,14 +1188,15 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Subu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
}
// Dividing by -1 is basically negation, unless we overflow.
__ Xor(scratch, scratch, result);
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
Operand(zero_reg));
}
return;
@ -1230,7 +1232,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@ -1275,7 +1277,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@ -1283,7 +1285,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@ -1293,7 +1295,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@ -1324,7 +1326,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
switch (constant) {
@ -1342,7 +1345,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
__ mov(result, zero_reg);
@ -1394,7 +1397,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@ -1409,7 +1413,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
__ bind(&done);
}
@ -1474,7 +1478,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
Operand(zero_reg));
}
break;
@ -1510,7 +1514,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
Operand(zero_reg));
}
__ Move(result, left);
@ -1526,7 +1530,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch,
Operand(zero_reg));
} else {
__ sll(result, left, shift_count);
@ -1959,7 +1963,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
@ -2023,7 +2027,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
Operand(zero_reg));
}
}
@ -2402,12 +2406,12 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ And(object_instance_type, object_instance_type,
Operand(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
Operand(zero_reg));
// Deoptimize for proxies.
__ lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
Operand(JS_PROXY_TYPE));
__ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
@ -2528,7 +2532,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@ -2552,7 +2556,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@ -2629,7 +2633,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@ -2749,7 +2753,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
result, Operand(0x80000000));
}
break;
@ -2804,7 +2808,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
Operand(kHoleNanUpper32));
}
}
@ -2839,11 +2843,12 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
Operand(scratch));
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -2856,7 +2861,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ lw(result, FieldMemOperand(result, Cell::kValueOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@ -3006,10 +3011,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
@ -3043,7 +3048,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
@ -3196,7 +3201,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
Label done;
Register exponent = scratch0();
@ -3263,7 +3269,8 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
Operand(zero_reg));
__ bind(&done);
}
@ -3318,7 +3325,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3327,7 +3334,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@ -3361,7 +3368,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@ -3376,7 +3383,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@ -3395,7 +3403,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3404,7 +3412,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&check_sign_on_zero);
__ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(zero_reg));
}
__ bind(&done);
@ -3471,7 +3479,7 @@ void LCodeGen::DoPower(LPower* instr) {
DCHECK(!t3.is(tagged_exponent));
__ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -3825,7 +3833,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
}
}
@ -4131,7 +4139,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result, at);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
@ -4498,12 +4506,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0xc0000000));
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@ -4519,7 +4527,8 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
@ -4544,7 +4553,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
}
// Load heap number.
@ -4553,7 +4562,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
__ Mfhc1(scratch, result_reg);
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
@ -4561,8 +4570,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
@ -4626,12 +4635,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
Operand(at));
// Load the double value.
@ -4647,7 +4656,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -4655,7 +4664,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
}
}
@ -4732,7 +4741,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -4740,7 +4749,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@ -4767,7 +4776,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -4775,20 +4784,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
}
__ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch1,
Operand(zero_reg));
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
}
@ -4796,7 +4806,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
}
@ -4809,7 +4819,8 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
Operand(zero_reg));
}
@ -4826,14 +4837,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
} else {
DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(last));
}
}
@ -4845,11 +4856,11 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
at, Operand(zero_reg));
DeoptimizeIf(tag == 0 ? ne : eq, instr,
DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(tag));
}
}
@ -4865,9 +4876,10 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(cell));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
Operand(object));
}
}
@ -4883,7 +4895,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
Operand(zero_reg));
}
@ -4938,7 +4950,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
}
__ bind(&success);
@ -4976,7 +4988,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@ -5442,7 +5454,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
Operand(zero_reg));
__ bind(&done);
}
@ -5452,7 +5465,8 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
Operand(scratch0()));
}

View File

@ -225,14 +225,14 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(
Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,

View File

@ -732,9 +732,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1, const Operand& src2) {
LEnvironment* environment = instr->environment();
@ -800,10 +799,9 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
Register src1, const Operand& src2) {
DeoptimizeReason deopt_reason, Register src1,
const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@ -935,7 +933,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ dsubu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
@ -955,7 +953,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -968,7 +966,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
__ bind(&remainder_not_zero);
}
@ -988,7 +986,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
Operand(zero_reg));
}
@ -998,7 +996,8 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@ -1011,7 +1010,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
Operand(zero_reg));
}
__ bind(&done);
@ -1028,19 +1027,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@ -1070,14 +1071,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@ -1087,7 +1088,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Dmul(scratch0(), result, Operand(divisor));
__ Dsubu(scratch0(), scratch0(), dividend);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
Operand(zero_reg));
}
}
@ -1106,7 +1107,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@ -1114,7 +1115,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@ -1124,7 +1125,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@ -1136,7 +1137,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
} else {
__ dmod(remainder, dividend, divisor);
}
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
Operand(zero_reg));
}
}
@ -1182,14 +1183,16 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Dsubu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
}
__ Xor(scratch, scratch, result);
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
DeoptimizeIf(gt, instr, DeoptimizeReason::kOverflow, result,
Operand(kMaxInt));
}
return;
}
@ -1217,14 +1220,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@ -1269,7 +1272,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@ -1277,7 +1280,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@ -1287,7 +1290,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@ -1324,7 +1327,8 @@ void LCodeGen::DoMulS(LMulS* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
switch (constant) {
@ -1342,7 +1346,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
__ mov(result, zero_reg);
@ -1390,7 +1394,8 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ dsra32(scratch, result, 0);
__ sra(at, result, 31);
__ SmiTag(result);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
Operand(at));
} else {
__ SmiUntag(result, left);
__ dmul(result, result, right);
@ -1401,7 +1406,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
__ bind(&done);
}
@ -1426,7 +1431,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
switch (constant) {
@ -1444,7 +1450,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
__ mov(result, zero_reg);
@ -1493,7 +1499,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ dsra32(scratch, result, 0);
__ sra(at, result, 31);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
Operand(at));
} else {
__ mul(result, left, right);
}
@ -1503,7 +1510,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
__ bind(&done);
}
@ -1568,10 +1575,10 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
// TODO(yy): (-1) >>> 0. anything else?
DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
Operand(zero_reg));
DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
Operand(kMaxInt));
DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
Operand(zero_reg));
DeoptimizeIf(gt, instr, DeoptimizeReason::kNegativeValue, result,
Operand(kMaxInt));
}
break;
case Token::SHL:
@ -1606,7 +1613,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
Operand(zero_reg));
}
__ Move(result, left);
@ -2078,7 +2085,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
@ -2142,7 +2149,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
Operand(zero_reg));
}
}
@ -2523,11 +2530,11 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ And(object_instance_type, object_instance_type,
Operand(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
Operand(zero_reg));
__ lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
Operand(JS_PROXY_TYPE));
__ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
@ -2649,7 +2656,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@ -2673,7 +2680,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@ -2765,7 +2772,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@ -2895,7 +2902,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
result, Operand(0x80000000));
}
break;
@ -2958,7 +2965,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ FmoveHigh(scratch, result);
DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
Operand(static_cast<int32_t>(kHoleNanUpper32)));
}
}
@ -3012,11 +3019,12 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (hinstr->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
Operand(scratch));
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -3030,7 +3038,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
// The comparison only needs LS bits of value, which is a smi.
__ ld(result, FieldMemOperand(result, Cell::kValueOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@ -3190,10 +3198,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
@ -3227,7 +3235,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
@ -3380,7 +3388,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
Label done;
Register exponent = scratch0();
@ -3447,7 +3456,8 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
Operand(zero_reg));
__ bind(&done);
}
@ -3461,7 +3471,8 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ dsubu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000 00000000.
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
Operand(zero_reg));
__ bind(&done);
}
@ -3518,7 +3529,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3527,7 +3538,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ mfhc1(scratch1, input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@ -3561,7 +3572,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@ -3579,7 +3590,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@ -3598,7 +3610,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3607,7 +3619,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&check_sign_on_zero);
__ mfhc1(scratch, input); // Get exponent/sign bits.
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(zero_reg));
}
__ bind(&done);
@ -3674,7 +3686,7 @@ void LCodeGen::DoPower(LPower* instr) {
DCHECK(!a7.is(tagged_exponent));
__ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -4034,7 +4046,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
}
}
@ -4362,7 +4374,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result, at);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
@ -4408,7 +4420,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(al, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@ -4702,12 +4714,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0x80000000));
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@ -4723,7 +4735,8 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
@ -4748,7 +4761,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
}
// Load heap number.
@ -4757,7 +4770,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
__ mfhc1(scratch, result_reg); // Get exponent/sign bits.
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
@ -4765,8 +4778,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
@ -4830,12 +4843,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
Operand(at));
// Load the double value.
@ -4851,7 +4864,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -4859,7 +4872,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
}
}
@ -4936,7 +4949,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -4944,7 +4957,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@ -4971,7 +4984,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -4979,7 +4992,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@ -4991,7 +5004,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
}
@ -4999,7 +5012,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
}
@ -5012,7 +5025,8 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
Operand(zero_reg));
}
@ -5029,14 +5043,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
} else {
DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(last));
}
}
@ -5048,11 +5062,11 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
at, Operand(zero_reg));
DeoptimizeIf(tag == 0 ? ne : eq, instr,
DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(tag));
}
}
@ -5068,9 +5082,10 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(cell));
__ ld(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
Operand(object));
}
}
@ -5086,7 +5101,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
Operand(zero_reg));
}
@ -5141,7 +5156,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
}
__ bind(&success);
@ -5179,7 +5194,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@ -5648,7 +5663,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ld(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
Operand(zero_reg));
__ bind(&done);
}
@ -5658,7 +5674,8 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
Operand(scratch0()));
}

View File

@ -227,14 +227,14 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(
Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,

View File

@ -713,9 +713,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
CRegister cr) {
LEnvironment* environment = instr->environment();
@ -777,10 +776,8 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
}
}
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
CRegister cr) {
DeoptimizeReason deopt_reason, CRegister cr) {
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
@ -902,12 +899,12 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ ExtractBitRange(dividend, dividend, shift - 1, 0);
__ neg(dividend, dividend, LeaveOE, SetRC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
}
} else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ li(dividend, Operand::Zero());
} else {
DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done);
}
@ -929,7 +926,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -944,7 +941,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ bne(&remainder_not_zero, cr0);
__ cmpwi(dividend, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@ -969,14 +966,14 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, divw will return undefined, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
if (can_overflow) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero, cr0);
} else {
if (CpuFeatures::IsSupported(ISELECT)) {
__ isel(overflow, result_reg, r0, result_reg, cr0);
@ -998,7 +995,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bne(&done, cr0);
__ cmpwi(left_reg, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
@ -1016,13 +1013,13 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
int32_t shift = WhichPowerOf2Abs(divisor);
@ -1030,7 +1027,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
__ TestBitRange(dividend, shift - 1, 0, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@ -1060,7 +1057,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1068,7 +1065,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@ -1079,7 +1076,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ mullw(scratch, result, ip);
__ cmpw(scratch, dividend);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1105,7 +1102,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1114,14 +1111,14 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
if (CpuFeatures::IsSupported(ISELECT)) {
@ -1144,7 +1141,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register scratch = scratch0();
__ mullw(scratch, divisor, result);
__ cmpw(dividend, scratch);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1172,7 +1169,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
if (divisor == -1 && can_overflow) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
#else
if (can_overflow) {
@ -1184,7 +1181,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ neg(result, dividend, oe, SetRC);
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
}
// If the negation could not overflow, simply shifting is OK.
@ -1200,7 +1197,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
return;
}
@ -1222,7 +1219,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1230,7 +1227,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -1282,7 +1279,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1291,14 +1288,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
if (CpuFeatures::IsSupported(ISELECT)) {
@ -1376,7 +1373,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmpi(left, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
switch (constant) {
@ -1388,12 +1385,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
__ neg(result, left, SetOE, SetRC);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
#if V8_TARGET_ARCH_PPC64
} else {
__ neg(result, left);
__ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@ -1413,7 +1410,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ cmpwi(left, Operand::Zero());
}
#endif
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ li(result, Operand::Zero());
break;
@ -1466,7 +1463,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Mul(result, left, right);
}
__ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
@ -1481,7 +1478,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ mullw(result, left, right);
}
__ TestIfInt32(scratch, result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
#endif
} else {
if (instr->hydrogen()->representation().IsSmi()) {
@ -1508,7 +1505,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
#endif
// Bail out if the result is minus zero.
__ cmpi(result, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -1594,7 +1591,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
#if V8_TARGET_ARCH_PPC64
__ extsw(result, result, SetRC);
#endif
DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
} else {
__ srw(result, left, scratch);
}
@ -1634,7 +1631,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ cmpwi(left, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
}
__ Move(result, left);
}
@ -1653,7 +1650,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
} else {
__ slwi(result, left, Operand(shift_count));
@ -1692,7 +1689,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
#if V8_TARGET_ARCH_PPC64
if (can_overflow) {
__ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@ -1703,7 +1700,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
}
}
@ -1869,7 +1866,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
#if V8_TARGET_ARCH_PPC64
if (can_overflow) {
__ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@ -1880,7 +1877,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
}
}
@ -2131,7 +2128,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
const Register map = scratch0();
@ -2195,7 +2192,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@ -2579,10 +2576,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ lbz(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
@ -2712,7 +2709,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
__ cmp(result, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
if (CpuFeatures::IsSupported(ISELECT)) {
Register scratch = scratch0();
@ -2744,7 +2741,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ bne(&skip_assignment);
}
@ -2831,7 +2828,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
if (CpuFeatures::IsSupported(ISELECT)) {
@ -2983,7 +2980,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmplw(result, r0);
DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@ -3048,7 +3045,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
}
__ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
}
@ -3099,11 +3096,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
__ TestIfSmi(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -3118,7 +3115,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
__ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
DeoptimizeIf(ne, instr, Deoptimizer::kHole);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@ -3268,9 +3265,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@ -3304,7 +3301,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpli(length, Operand(kArgumentsLimit));
DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@ -3458,7 +3455,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, ip);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@ -3528,7 +3525,7 @@ void LCodeGen::EmitMathAbs(LMathAbs* instr) {
__ mtxer(r0);
__ neg(result, result, SetOE, SetRC);
// Deoptimize on overflow.
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
__ bind(&done);
}
@ -3545,7 +3542,7 @@ void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
// Deoptimize on overflow.
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(input, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
__ neg(result, result);
__ bind(&done);
@ -3609,7 +3606,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
&exact);
DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3617,7 +3614,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ cmpi(result, Operand::Zero());
__ bne(&done);
__ cmpwi(input_high, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@ -3658,7 +3655,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ LoadDoubleLiteral(dot_five, 0.5, r0);
__ fabs(double_scratch1, input);
__ fcmpu(double_scratch1, dot_five);
DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
// If input is in [-0.5, -0], the result is -0.
// If input is in [+0, +0.5[, the result is +0.
// If the input is +0.5, the result is 1.
@ -3666,7 +3663,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// [-0.5, -0] (negative) yields minus zero.
__ TestDoubleSign(input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ fcmpu(input, dot_five);
if (CpuFeatures::IsSupported(ISELECT)) {
@ -3690,7 +3687,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
double_scratch0(), &done, &done);
DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&done);
}
@ -3755,7 +3752,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r10, ip);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -4139,7 +4136,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@ -4457,7 +4454,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ TestIfSmi(result, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
@ -4503,7 +4500,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp2 = ToRegister(instr->temp2());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@ -4814,13 +4811,13 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ TestUnsignedSmiCandidate(input, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
}
#if !V8_TARGET_ARCH_PPC64
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, r0);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
} else {
#endif
__ SmiTag(output, input);
@ -4838,7 +4835,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ andi(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
} else {
__ SmiUntag(result, input);
}
@ -4868,13 +4865,13 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ bne(&convert);
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
__ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ TestDoubleIsMinusZero(result_reg, scratch, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done);
if (can_convert_undefined_to_nan) {
@ -4882,7 +4879,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, ip);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done);
@ -4944,10 +4941,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(input_reg, ip);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ li(input_reg, Operand::Zero());
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ lfd(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@ -4957,13 +4954,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(input_reg, Operand::Zero());
__ bne(&done);
__ TestHeapNumberSign(scratch2, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
}
__ bind(&done);
@ -5032,13 +5029,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ bne(&done);
__ TestDoubleSign(double_input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -5057,13 +5054,13 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ bne(&done);
__ TestDoubleSign(double_input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -5071,7 +5068,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ SmiTag(result_reg);
#else
__ SmiTagCheckOverflow(result_reg, r0);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
}
@ -5079,7 +5076,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
}
@ -5087,7 +5084,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
}
@ -5100,7 +5097,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
}
@ -5120,13 +5117,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpli(scratch, Operand(last));
DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@ -5137,12 +5134,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ andi(r0, scratch, Operand(mask));
DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
cr0);
DeoptimizeIf(tag == 0 ? ne : eq, instr,
DeoptimizeReason::kWrongInstanceType, cr0);
} else {
__ andi(scratch, scratch, Operand(mask));
__ cmpi(scratch, Operand(tag));
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@ -5161,7 +5158,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmpi(reg, Operand(object), r0);
}
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@ -5177,7 +5174,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r3, temp);
}
__ TestIfSmi(temp, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
}
@ -5232,7 +5229,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ bne(deferred->entry());
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@ -5271,7 +5268,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ li(result_reg, Operand::Zero());
__ b(&done);
@ -5702,7 +5699,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmpi(result, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
__ bind(&done);
}
@ -5713,7 +5710,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}

View File

@ -207,10 +207,10 @@ class LCodeGen : public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
DeoptimizeReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,

View File

@ -675,7 +675,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
CRegister cr) {
LEnvironment* environment = instr->environment();
@ -768,8 +768,7 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
CRegister cr) {
DeoptimizeReason deopt_reason, CRegister cr) {
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
@ -878,12 +877,12 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ ExtractBitRange(dividend, dividend, shift - 1, 0);
__ LoadComplementRR(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
} else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ mov(dividend, Operand::Zero());
} else {
DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done, Label::kNear);
}
@ -904,7 +903,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -919,7 +918,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
__ Cmp32(dividend, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@ -934,7 +933,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ Cmp32(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, dr will return undefined, which is not what we
@ -945,7 +944,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bne(&no_overflow_possible, Label::kNear);
__ Cmp32(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
} else {
__ b(ne, &no_overflow_possible, Label::kNear);
__ mov(result_reg, Operand::Zero());
@ -969,7 +968,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bne(&done, Label::kNear);
__ Cmp32(left_reg, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
@ -986,12 +985,12 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ Cmp32(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ Cmp32(dividend, Operand(0x80000000));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
int32_t shift = WhichPowerOf2Abs(divisor);
@ -999,7 +998,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
__ TestBitRange(dividend, shift - 1, 0, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@ -1031,7 +1030,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1039,7 +1038,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ Cmp32(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@ -1050,7 +1049,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ Mul(scratch, result, ip);
__ Cmp32(scratch, dividend);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1067,7 +1066,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ Cmp32(divisor, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1076,7 +1075,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ Cmp32(dividend, Operand::Zero());
__ bne(&dividend_not_zero, Label::kNear);
__ Cmp32(divisor, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1086,7 +1085,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ Cmp32(dividend, Operand(kMinInt));
__ bne(&dividend_not_min_int, Label::kNear);
__ Cmp32(divisor, Operand(-1));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@ -1099,7 +1098,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ Cmp32(r0, Operand::Zero());
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1127,13 +1126,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
#if V8_TARGET_ARCH_S390X
if (divisor == -1 && can_overflow) {
__ Cmp32(dividend, Operand(0x80000000));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
#endif
__ LoadComplementRR(result, dividend);
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
}
// If the negation could not overflow, simply shifting is OK.
@ -1149,7 +1148,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
return;
}
@ -1173,7 +1172,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1181,7 +1180,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ Cmp32(dividend, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -1224,7 +1223,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ Cmp32(divisor, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1233,7 +1232,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Cmp32(dividend, Operand::Zero());
__ bne(&dividend_not_zero, Label::kNear);
__ Cmp32(divisor, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1244,7 +1243,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ bne(&no_overflow_possible, Label::kNear);
__ Cmp32(divisor, Operand(-1));
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
} else {
__ bne(&no_overflow_possible, Label::kNear);
__ LoadRR(result, dividend);
@ -1320,7 +1319,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ CmpP(left, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
switch (constant) {
@ -1330,12 +1329,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->representation().IsSmi()) {
#endif
__ LoadComplementRR(result, left);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
#if V8_TARGET_ARCH_S390X
} else {
__ LoadComplementRR(result, left);
__ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@ -1355,7 +1354,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Cmp32(left, Operand::Zero());
}
#endif
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ LoadImmP(result, Operand::Zero());
break;
@ -1409,7 +1408,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ msgr(result, right);
}
__ TestIfInt32(result, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
@ -1426,7 +1425,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ LoadRR(result, scratch);
}
__ TestIfInt32(r0, result, scratch);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
#endif
} else {
if (instr->hydrogen()->representation().IsSmi()) {
@ -1454,7 +1453,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
#endif
// Bail out if the result is minus zero.
__ CmpP(result, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -1555,7 +1554,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
#else
__ ltr(result, result); // Set the <,==,> condition
#endif
DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
}
break;
case Token::SHL:
@ -1602,7 +1601,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ Cmp32(left, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
}
__ Move(result, left);
}
@ -1624,7 +1623,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
} else {
__ ShiftLeft(result, left, Operand(shift_count));
@ -1700,7 +1699,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ lgfr(ToRegister(result), ToRegister(result));
#endif
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -1888,7 +1887,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
#endif
// Doptimize on overflow
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -2131,7 +2130,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
const Register map = scratch0();
@ -2195,7 +2194,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@ -2558,10 +2557,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ LoadlB(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
@ -2682,7 +2681,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
Label skip;
__ bne(&skip, Label::kNear);
@ -2704,7 +2703,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadP(scratch, target);
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ bne(&skip_assignment);
}
@ -2787,7 +2786,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@ -2920,7 +2919,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ LoadlW(result, mem_operand, r0);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ CmpLogical32(result, Operand(0x80000000));
DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@ -2999,7 +2998,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
base_offset + Register::kExponentOffset));
}
__ Cmp32(r0, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
}
@ -3051,10 +3050,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
__ TestIfSmi(result);
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -3069,7 +3068,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
__ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
DeoptimizeIf(ne, instr, Deoptimizer::kHole);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@ -3213,9 +3212,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ b(&result_in_receiver, Label::kNear);
__ bind(&global_object);
@ -3248,7 +3247,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ CmpLogicalP(length, Operand(kArgumentsLimit));
DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@ -3393,7 +3392,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@ -3461,7 +3460,7 @@ void LCodeGen::EmitMathAbs(LMathAbs* instr) {
__ bge(&done, Label::kNear);
__ LoadComplementRR(result, result);
// Deoptimize on overflow.
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
__ bind(&done);
}
@ -3476,7 +3475,7 @@ void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
// Deoptimize on overflow.
__ Cmp32(input, Operand(0x80000000));
DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
__ LoadComplementRR(result, result);
__ bind(&done);
@ -3533,7 +3532,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
&exact);
DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3541,7 +3540,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ CmpP(result, Operand::Zero());
__ bne(&done, Label::kNear);
__ Cmp32(input_high, Operand::Zero());
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@ -3559,7 +3558,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ LoadDoubleLiteral(dot_five, 0.5, r0);
__ lpdbr(double_scratch1, input);
__ cdbr(double_scratch1, dot_five);
DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
// If input is in [-0.5, -0], the result is -0.
// If input is in [+0, +0.5[, the result is +0.
// If the input is +0.5, the result is 1.
@ -3567,7 +3566,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// [-0.5, -0] (negative) yields minus zero.
__ TestDoubleSign(input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
Label return_zero;
__ cdbr(input, dot_five);
@ -3586,7 +3585,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
double_scratch0(), &done, &done);
DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&done);
}
@ -3651,7 +3650,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ JumpIfSmi(tagged_exponent, &no_deopt);
__ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -4035,7 +4034,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@ -4395,7 +4394,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ TestIfSmi(result);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
@ -4438,7 +4437,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp2 = ToRegister(instr->temp2());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@ -4736,13 +4735,13 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ TestUnsignedSmiCandidate(input, r0);
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
}
#if !V8_TARGET_ARCH_S390X
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, r0);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
} else {
#endif
__ SmiTag(output, input);
@ -4756,7 +4755,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
__ tmll(input, Operand(kHeapObjectTag));
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
__ SmiUntag(result, input);
} else {
__ SmiUntag(result, input);
@ -4786,20 +4785,20 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ bne(&convert, Label::kNear);
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
__ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ TestDoubleIsMinusZero(result_reg, scratch, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done, Label::kNear);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done, Label::kNear);
@ -4856,11 +4855,11 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ LoadImmP(input_reg, Operand::Zero());
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ ld(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@ -4870,13 +4869,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ CmpP(input_reg, Operand::Zero());
__ bne(&done, Label::kNear);
__ TestHeapNumberSign(scratch2, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
}
__ bind(&done);
@ -4942,13 +4941,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ CmpP(result_reg, Operand::Zero());
__ bne(&done, Label::kNear);
__ TestDoubleSign(double_input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -4966,13 +4965,13 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ CmpP(result_reg, Operand::Zero());
__ bne(&done, Label::kNear);
__ TestDoubleSign(double_input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -4980,21 +4979,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ SmiTag(result_reg);
#else
__ SmiTagCheckOverflow(result_reg, r0);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input));
DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input));
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
}
@ -5006,7 +5005,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
@ -5025,14 +5024,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
Operand(last));
DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@ -5045,11 +5044,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ AndP(scratch, Operand(mask));
DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(tag == 0 ? ne : eq, instr,
DeoptimizeReason::kWrongInstanceType);
} else {
__ AndP(scratch, Operand(mask));
__ CmpP(scratch, Operand(tag));
DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@ -5066,7 +5066,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ CmpP(reg, Operand(object));
}
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
@ -5081,7 +5081,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r2, temp);
}
__ TestIfSmi(temp);
DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
@ -5134,7 +5134,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ bne(deferred->entry());
} else {
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@ -5170,7 +5170,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ CmpP(input_reg, Operand(factory()->undefined_value()));
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadImmP(result_reg, Operand::Zero());
__ b(&done, Label::kNear);
@ -5591,7 +5591,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ CmpP(result, Operand::Zero());
DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
__ bind(&done);
}
@ -5601,7 +5601,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ CmpP(map, scratch0());
DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,

View File

@ -207,10 +207,10 @@ class LCodeGen : public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
DeoptimizeReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,

View File

@ -702,9 +702,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@ -775,9 +774,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@ -910,7 +908,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@ -927,7 +925,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -942,7 +940,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@ -964,7 +962,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@ -975,7 +973,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
@ -995,7 +993,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@ -1021,13 +1019,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
return;
}
@ -1054,7 +1052,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1062,7 +1060,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -1109,7 +1107,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1118,7 +1116,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1128,7 +1126,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@ -1157,19 +1155,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@ -1190,7 +1188,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1198,7 +1196,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@ -1208,7 +1206,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1228,7 +1226,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1237,7 +1235,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1247,7 +1245,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@ -1258,7 +1256,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1335,7 +1333,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (can_overflow) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -1354,10 +1352,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
@ -1365,7 +1363,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
@ -1373,7 +1371,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@ -1486,7 +1484,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@ -1515,7 +1513,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@ -1530,7 +1528,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@ -1573,7 +1571,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -1748,7 +1746,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
}
@ -2026,7 +2024,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
const Register map = kScratchRegister;
@ -2086,7 +2084,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@ -2455,10 +2453,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ testb(FieldOperand(object_map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
__ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
@ -2573,7 +2571,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@ -2594,7 +2592,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
__ j(not_equal, &skip_assignment);
}
@ -2691,7 +2689,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@ -2793,7 +2791,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@ -2834,7 +2832,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@ -2891,10 +2889,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
DeoptimizeIf(NegateCondition(smi), instr, DeoptimizeReason::kNotASmi);
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
} else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -2908,7 +2906,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Cmp(FieldOperand(result, Cell::kValueOffset),
Smi::FromInt(Isolate::kArrayProtectorValid));
DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ Move(result, isolate()->factory()->undefined_value());
__ bind(&done);
@ -3057,9 +3055,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@ -3084,7 +3082,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
__ Push(receiver);
__ movp(receiver, length);
@ -3255,7 +3253,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
Label slow, allocated, done;
uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
@ -3312,7 +3310,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@ -3323,7 +3321,7 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@ -3385,18 +3383,18 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
// Deoptimize if minus zero.
__ Movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero);
}
__ Roundsd(xmm_scratch, input_reg, kRoundDown);
__ Cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ Xorpd(xmm_scratch, xmm_scratch); // Zero the register.
__ Ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3405,7 +3403,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ j(above, &positive_sign, Label::kNear);
__ Movmskpd(output_reg, input_reg);
__ testl(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
@ -3415,7 +3413,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@ -3426,7 +3424,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ bind(&done);
}
@ -3468,7 +3466,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ Cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, dist);
__ bind(&below_one_half);
@ -3484,7 +3482,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ Cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ Ucomisd(xmm_scratch, input_temp);
@ -3499,7 +3497,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(negative, instr, DeoptimizeReason::kMinusZero);
}
__ Set(output_reg, 0);
__ bind(&done);
@ -3578,7 +3576,7 @@ void LCodeGen::DoPower(LPower* instr) {
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@ -3974,7 +3972,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@ -4273,7 +4271,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
Condition is_smi = __ CheckSmi(result);
DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
}
@ -4313,7 +4311,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@ -4629,12 +4627,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kOverflow);
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -4644,7 +4642,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kNotASmi);
} else {
__ AssertSmi(input);
}
@ -4675,7 +4673,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
}
if (deoptimize_on_minus_zero) {
@ -4685,7 +4683,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ j(not_equal, &done, Label::kNear);
__ Movmskpd(kScratchRegister, result_reg);
__ testl(kScratchRegister, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
@ -4694,7 +4692,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(not_equal, instr,
DeoptimizeReason::kNotAHeapNumberUndefined);
__ Pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
@ -4741,27 +4740,27 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
DeoptimizeIf(not_equal, instr,
Deoptimizer::kNotAHeapNumberUndefinedBoolean);
DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ Set(input_reg, 0);
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(double_scratch0()));
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ Movsd(double_scratch0(),
FieldOperand(input_reg, HeapNumber::kValueOffset));
__ Cvttsd2si(input_reg, double_scratch0());
__ Cvtlsi2sd(scratch, input_reg);
__ Ucomisd(double_scratch0(), scratch);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
__ Movmskpd(input_reg, double_scratch0());
__ andl(input_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
}
}
@ -4832,11 +4831,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -4859,21 +4858,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
__ Integer32ToSmi(result_reg, result_reg);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
DeoptimizeIf(NegateCondition(cc), instr, DeoptimizeReason::kNotASmi);
}
@ -4881,7 +4880,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
DeoptimizeIf(cc, instr, DeoptimizeReason::kSmi);
}
}
@ -4894,7 +4893,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}
@ -4913,14 +4912,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@ -4933,13 +4932,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
DeoptimizeReason::kWrongInstanceType);
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@ -4948,7 +4947,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}
@ -4963,7 +4962,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@ -5017,7 +5016,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@ -5056,7 +5055,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
@ -5487,7 +5486,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
DeoptimizeIf(cc, instr, DeoptimizeReason::kNoCache);
}
@ -5495,7 +5494,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}

View File

@ -205,10 +205,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();

View File

@ -952,9 +952,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@ -1040,9 +1039,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@ -1183,7 +1181,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@ -1200,7 +1198,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1215,7 +1213,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@ -1237,7 +1235,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@ -1248,7 +1246,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@ -1267,7 +1265,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@ -1287,19 +1285,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@ -1320,7 +1318,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1328,7 +1326,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@ -1338,7 +1336,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1358,7 +1356,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1367,7 +1365,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1377,7 +1375,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@ -1388,7 +1386,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
}
@ -1410,13 +1408,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
return;
}
@ -1443,7 +1441,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@ -1451,7 +1449,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@ -1498,7 +1496,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@ -1507,7 +1505,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@ -1517,7 +1515,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@ -1595,7 +1593,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -1605,15 +1603,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ j(not_zero, &done);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@ -1686,7 +1684,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@ -1703,7 +1701,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
} else {
__ ror(ToRegister(left), shift_count);
}
@ -1718,7 +1716,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@ -1729,7 +1727,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
__ shl(ToRegister(left), shift_count);
}
@ -1755,7 +1753,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -1903,7 +1901,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
}
@ -2157,7 +2155,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
@ -2220,7 +2218,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@ -2592,10 +2590,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, factory()->null_value());
@ -2716,7 +2714,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@ -2737,7 +2735,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@ -2828,7 +2826,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@ -2910,7 +2908,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@ -2942,7 +2940,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@ -2968,10 +2966,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
} else {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@ -2985,7 +2983,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ cmp(FieldOperand(result, PropertyCell::kValueOffset),
Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ mov(result, isolate()->factory()->undefined_value());
__ bind(&done);
@ -3135,9 +3133,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@ -3161,7 +3159,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
__ push(receiver);
__ mov(receiver, length);
@ -3337,7 +3335,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
Label slow, allocated, done;
uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
@ -3395,7 +3393,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@ -3448,7 +3446,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ fldz();
__ fld(1);
__ FCmp();
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
__ j(below, &not_minus_zero, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@ -3457,7 +3455,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// +- 0.0.
__ fld(0);
__ FXamSign();
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kFar);
}
@ -3472,7 +3470,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ pop(output_reg);
__ X87SetRC(0x0000);
__ X87CheckIA();
DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
DeoptimizeIf(equal, instr, DeoptimizeReason::kOverflow);
__ fnclex();
__ X87SetRC(0x0000);
__ bind(&done);
@ -3508,7 +3506,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check overflow.
__ X87CheckIA();
__ pop(result);
DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
@ -3525,7 +3523,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// If the sign is positive, we return +0.
__ fld(0);
__ FXamSign();
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ Move(result, Immediate(0));
__ jmp(&done);
@ -3544,7 +3542,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check overflow.
__ X87CheckIA();
__ pop(result);
DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
@ -3622,7 +3620,7 @@ void LCodeGen::DoPower(LPower* instr) {
X87LoadForUsage(base);
__ JumpIfSmi(exponent, &no_deopt);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
// Heap number(double)
__ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
__ jmp(&done);
@ -4012,7 +4010,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@ -4220,7 +4218,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@ -4320,7 +4318,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
}
@ -4686,12 +4684,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@ -4702,7 +4700,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
} else {
__ AssertSmi(result);
}
@ -4728,14 +4726,15 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!can_convert_undefined_to_nan) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
} else {
Label heap_number, convert;
__ j(equal, &heap_number);
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(not_equal, instr,
DeoptimizeReason::kNotAHeapNumberUndefined);
__ bind(&convert);
__ push(Immediate(0xffffffff));
@ -4761,7 +4760,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
// Pop FPU stack before deoptimizing.
__ fstp(0);
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
} else {
@ -4815,14 +4814,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
DeoptimizeIf(not_equal, instr,
Deoptimizer::kNotAHeapNumberUndefinedBoolean);
DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ Move(input_reg, Immediate(0));
} else {
// TODO(olivf) Converting a number on the fpu is actually quite slow. We
// should first try a fast conversion and then bailout to this slow case.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ sub(esp, Immediate(kPointerSize));
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
@ -4838,12 +4837,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ j(equal, &no_precision_lost, Label::kNear);
__ fstp(0);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&no_precision_lost);
__ j(parity_odd, &not_nan);
__ fstp(0);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&not_nan);
__ test(input_reg, Operand(input_reg));
@ -4858,14 +4857,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ fstp_s(Operand(esp, 0));
__ pop(input_reg);
__ test(input_reg, Operand(input_reg));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
} else {
__ fist_s(MemOperand(esp, 0));
__ fild_s(MemOperand(esp, 0));
__ FCmp();
__ pop(input_reg);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
}
}
}
@ -4946,11 +4945,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@ -4970,21 +4969,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
__ SmiTag(result_reg);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
}
@ -4992,7 +4991,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
}
@ -5005,7 +5004,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}
@ -5024,13 +5023,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@ -5042,12 +5041,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
DeoptimizeReason::kWrongInstanceType);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@ -5063,7 +5062,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}
@ -5078,7 +5077,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@ -5135,7 +5134,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@ -5176,7 +5175,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ jmp(&zero_result, Label::kNear);
// Heap number
@ -5692,7 +5691,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
}
@ -5700,7 +5699,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}

View File

@ -232,10 +232,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason);
DeoptimizeReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();

38
src/deoptimize-reason.cc Normal file
View File

@ -0,0 +1,38 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimize-reason.h"
namespace v8 {
namespace internal {
// Streams the symbolic enumerator name of |reason| (e.g. "Hole"), not the
// human-readable message text; #Name stringizes the identifier from the
// X-macro list in src/deoptimize-reason.h.
std::ostream& operator<<(std::ostream& os, DeoptimizeReason reason) {
  switch (reason) {
    // Expands to one case per entry of DEOPTIMIZE_REASON_LIST; every case
    // returns immediately, so control only falls out of the switch for a
    // value outside the enumeration.
#define DEOPTIMIZE_REASON(Name, message) \
  case DeoptimizeReason::k##Name:        \
    return os << #Name;
    DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
#undef DEOPTIMIZE_REASON
  }
  // All enumerators are handled above; reaching here means |reason| held an
  // out-of-range value.
  UNREACHABLE();
  return os;
}
// Hashes a deoptimization reason by its underlying numeric value; the enum
// is uint8_t-based, so the raw byte is already a stable, collision-free hash.
size_t hash_value(DeoptimizeReason reason) {
  const uint8_t raw = static_cast<uint8_t>(reason);
  return raw;
}
// Returns the human-readable message for |reason| — the second column of
// DEOPTIMIZE_REASON_LIST (e.g. DeoptimizeReason::kHole -> "hole"). The
// returned pointer refers to a string literal and stays valid forever.
// NOTE(review): the top-level const on the return type has no effect on the
// function type; consider dropping it here and in the header declaration.
char const* const DeoptimizeReasonToString(DeoptimizeReason reason) {
  // Message table generated in enumerator order, so the enum's numeric value
  // indexes its message directly.
  static char const* kDeoptimizeReasonStrings[] = {
#define DEOPTIMIZE_REASON(Name, message) message,
      DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
#undef DEOPTIMIZE_REASON
  };
  size_t const index = static_cast<size_t>(reason);
  // Debug-only guard against values outside the generated table.
  DCHECK_LT(index, arraysize(kDeoptimizeReasonStrings));
  return kDeoptimizeReasonStrings[index];
}
} // namespace internal
} // namespace v8

100
src/deoptimize-reason.h Normal file
View File

@ -0,0 +1,100 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_DEOPTIMIZE_REASON_H_
#define V8_DEOPTIMIZE_REASON_H_
#include "src/globals.h"
namespace v8 {
namespace internal {
// All deoptimization reasons as (EnumeratorName, "human-readable message")
// pairs. The enumerator's numeric value is what gets recorded as
// RelocInfo::DEOPT_REASON data in generated code and decoded back in
// Deoptimizer::GetDeoptInfo, and it also indexes the string table built in
// DeoptimizeReasonToString — so keep the list and any consumers in sync.
#define DEOPTIMIZE_REASON_LIST(V)                                            \
  V(AccessCheck, "Access check needed")                                      \
  V(NoReason, "no reason")                                                   \
  V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
  V(ConversionOverflow, "conversion overflow")                               \
  V(DivisionByZero, "division by zero")                                      \
  V(ElementsKindUnhandledInKeyedLoadGenericStub,                             \
    "ElementsKind unhandled in KeyedLoadGenericStub")                        \
  V(ExpectedHeapNumber, "Expected heap number")                              \
  V(ExpectedSmi, "Expected smi")                                             \
  V(ForcedDeoptToRuntime, "Forced deopt to runtime")                         \
  V(Hole, "hole")                                                            \
  V(InstanceMigrationFailed, "instance migration failed")                    \
  V(InsufficientTypeFeedbackForCallWithArguments,                            \
    "Insufficient type feedback for call with arguments")                    \
  V(FastPathFailed, "Falling off the fast path")                             \
  V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,                \
    "Insufficient type feedback for combined type of binary operation")      \
  V(InsufficientTypeFeedbackForGenericNamedAccess,                           \
    "Insufficient type feedback for generic named access")                   \
  V(InsufficientTypeFeedbackForGenericKeyedAccess,                           \
    "Insufficient type feedback for generic keyed access")                   \
  V(InsufficientTypeFeedbackForLHSOfBinaryOperation,                         \
    "Insufficient type feedback for LHS of binary operation")                \
  V(InsufficientTypeFeedbackForRHSOfBinaryOperation,                         \
    "Insufficient type feedback for RHS of binary operation")                \
  V(KeyIsNegative, "key is negative")                                        \
  V(LostPrecision, "lost precision")                                         \
  V(LostPrecisionOrNaN, "lost precision or NaN")                             \
  V(MementoFound, "memento found")                                           \
  V(MinusZero, "minus zero")                                                 \
  V(NaN, "NaN")                                                              \
  V(NegativeKeyEncountered, "Negative key encountered")                      \
  V(NegativeValue, "negative value")                                         \
  V(NoCache, "no cache")                                                     \
  V(NonStrictElementsInKeyedLoadGenericStub,                                 \
    "non-strict elements in KeyedLoadGenericStub")                           \
  V(NotAHeapNumber, "not a heap number")                                     \
  V(NotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
  V(NotAHeapNumberUndefined, "not a heap number/undefined")                  \
  V(NotAJavaScriptObject, "not a JavaScript object")                         \
  V(NotASmi, "not a Smi")                                                    \
  V(OutOfBounds, "out of bounds")                                            \
  V(OutsideOfRange, "Outside of range")                                      \
  V(Overflow, "overflow")                                                    \
  V(Proxy, "proxy")                                                          \
  V(ReceiverWasAGlobalObject, "receiver was a global object")                \
  V(Smi, "Smi")                                                              \
  V(TooManyArguments, "too many arguments")                                  \
  V(TracingElementsTransitions, "Tracing elements transitions")              \
  V(TypeMismatchBetweenFeedbackAndConstant,                                  \
    "Type mismatch between feedback and constant")                           \
  V(UnexpectedCellContentsInConstantGlobalStore,                             \
    "Unexpected cell contents in constant global store")                     \
  V(UnexpectedCellContentsInGlobalStore,                                     \
    "Unexpected cell contents in global store")                              \
  V(UnexpectedObject, "unexpected object")                                   \
  V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation")    \
  V(UninitializedBoilerplateInFastClone,                                     \
    "Uninitialized boilerplate in fast clone")                               \
  V(UninitializedBoilerplateLiterals, "Uninitialized boilerplate literals")  \
  V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access")      \
  V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call")          \
  V(UnknownMapInPolymorphicElementAccess,                                    \
    "Unknown map in polymorphic element access")                             \
  V(UnknownMap, "Unknown map")                                               \
  V(ValueMismatch, "value mismatch")                                         \
  V(WrongInstanceType, "wrong instance type")                                \
  V(WrongMap, "wrong map")                                                   \
  V(UndefinedOrNullInForIn, "null or undefined in for-in")                   \
  V(UndefinedOrNullInToObject, "null or undefined in ToObject")

// A deoptimization reason, one enumerator per DEOPTIMIZE_REASON_LIST entry.
// uint8_t-based, so a reason fits in a single byte.
enum class DeoptimizeReason : uint8_t {
#define DEOPTIMIZE_REASON(Name, message) k##Name,
  DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
#undef DEOPTIMIZE_REASON
};

// Streams the symbolic enumerator name (e.g. "Hole"), not the message text.
std::ostream& operator<<(std::ostream&, DeoptimizeReason);

// Hashes a reason by its underlying numeric value.
size_t hash_value(DeoptimizeReason reason);

// Returns the human-readable message for |reason| (points at a string
// literal with static storage duration).
char const* const DeoptimizeReasonToString(DeoptimizeReason reason);
} // namespace internal
} // namespace v8
#endif // V8_DEOPTIMIZE_REASON_H_

View File

@ -2736,19 +2736,9 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
}
const char* Deoptimizer::GetDeoptReason(DeoptReason deopt_reason) {
DCHECK(deopt_reason < kLastDeoptReason);
#define DEOPT_MESSAGES_TEXTS(C, T) T,
static const char* deopt_messages_[] = {
DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_TEXTS)};
#undef DEOPT_MESSAGES_TEXTS
return deopt_messages_[deopt_reason];
}
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
SourcePosition last_position = SourcePosition::Unknown();
Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
DeoptimizeReason last_reason = DeoptimizeReason::kNoReason;
int last_deopt_id = Deoptimizer::DeoptInfo::kNoDeoptId;
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
@ -2765,10 +2755,10 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
} else if (info->rmode() == RelocInfo::DEOPT_ID) {
last_deopt_id = static_cast<int>(info->data());
} else if (info->rmode() == RelocInfo::DEOPT_REASON) {
last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
last_reason = static_cast<DeoptimizeReason>(info->data());
}
}
return DeoptInfo(SourcePosition::Unknown(), Deoptimizer::kNoReason, -1);
return DeoptInfo(SourcePosition::Unknown(), DeoptimizeReason::kNoReason, -1);
}

View File

@ -6,6 +6,7 @@
#define V8_DEOPTIMIZER_H_
#include "src/allocation.h"
#include "src/deoptimize-reason.h"
#include "src/macro-assembler.h"
@ -322,87 +323,6 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
virtual void LeaveContext(Context* context) = 0;
};
#define DEOPT_MESSAGES_LIST(V) \
V(kAccessCheck, "Access check needed") \
V(kNoReason, "no reason") \
V(kConstantGlobalVariableAssignment, "Constant global variable assignment") \
V(kConversionOverflow, "conversion overflow") \
V(kDivisionByZero, "division by zero") \
V(kElementsKindUnhandledInKeyedLoadGenericStub, \
"ElementsKind unhandled in KeyedLoadGenericStub") \
V(kExpectedHeapNumber, "Expected heap number") \
V(kExpectedSmi, "Expected smi") \
V(kForcedDeoptToRuntime, "Forced deopt to runtime") \
V(kHole, "hole") \
V(kHoleyArrayDespitePackedElements_kindFeedback, \
"Holey array despite packed elements_kind feedback") \
V(kInstanceMigrationFailed, "instance migration failed") \
V(kInsufficientTypeFeedbackForCallWithArguments, \
"Insufficient type feedback for call with arguments") \
V(kFastPathFailed, "Falling off the fast path") \
V(kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
"Insufficient type feedback for combined type of binary operation") \
V(kInsufficientTypeFeedbackForGenericNamedAccess, \
"Insufficient type feedback for generic named access") \
V(kInsufficientTypeFeedbackForKeyedLoad, \
"Insufficient type feedback for keyed load") \
V(kInsufficientTypeFeedbackForKeyedStore, \
"Insufficient type feedback for keyed store") \
V(kInsufficientTypeFeedbackForLHSOfBinaryOperation, \
"Insufficient type feedback for LHS of binary operation") \
V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
"Insufficient type feedback for RHS of binary operation") \
V(kKeyIsNegative, "key is negative") \
V(kLiteralsWereDisposed, "literals have been disposed") \
V(kLostPrecision, "lost precision") \
V(kLostPrecisionOrNaN, "lost precision or NaN") \
V(kMementoFound, "memento found") \
V(kMinusZero, "minus zero") \
V(kNaN, "NaN") \
V(kNegativeKeyEncountered, "Negative key encountered") \
V(kNegativeValue, "negative value") \
V(kNoCache, "no cache") \
V(kNonStrictElementsInKeyedLoadGenericStub, \
"non-strict elements in KeyedLoadGenericStub") \
V(kNotADateObject, "not a date object") \
V(kNotAHeapNumber, "not a heap number") \
V(kNotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
V(kNotAHeapNumberUndefined, "not a heap number/undefined") \
V(kNotAJavaScriptObject, "not a JavaScript object") \
V(kNotASmi, "not a Smi") \
V(kNull, "null") \
V(kOutOfBounds, "out of bounds") \
V(kOutsideOfRange, "Outside of range") \
V(kOverflow, "overflow") \
V(kProxy, "proxy") \
V(kReceiverWasAGlobalObject, "receiver was a global object") \
V(kSmi, "Smi") \
V(kTooManyArguments, "too many arguments") \
V(kTooManyUndetectableTypes, "Too many undetectable types") \
V(kTracingElementsTransitions, "Tracing elements transitions") \
V(kTypeMismatchBetweenFeedbackAndConstant, \
"Type mismatch between feedback and constant") \
V(kUndefined, "undefined") \
V(kUnexpectedCellContentsInConstantGlobalStore, \
"Unexpected cell contents in constant global store") \
V(kUnexpectedCellContentsInGlobalStore, \
"Unexpected cell contents in global store") \
V(kUnexpectedObject, "unexpected object") \
V(kUnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
V(kUninitializedBoilerplateInFastClone, \
"Uninitialized boilerplate in fast clone") \
V(kUninitializedBoilerplateLiterals, "Uninitialized boilerplate literals") \
V(kUnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
V(kUnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
V(kUnknownMapInPolymorphicElementAccess, \
"Unknown map in polymorphic element access") \
V(kUnknownMap, "Unknown map") \
V(kValueMismatch, "value mismatch") \
V(kWrongInstanceType, "wrong instance type") \
V(kWrongMap, "wrong map") \
V(kUndefinedOrNullInForIn, "null or undefined in for-in") \
V(kUndefinedOrNullInToObject, "null or undefined in ToObject")
class Deoptimizer : public Malloced {
public:
enum BailoutType { EAGER, LAZY, SOFT, kLastBailoutType = SOFT };
@ -423,19 +343,13 @@ class Deoptimizer : public Malloced {
return nullptr;
}
#define DEOPT_MESSAGES_CONSTANTS(C, T) C,
enum DeoptReason {
DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_CONSTANTS) kLastDeoptReason
};
#undef DEOPT_MESSAGES_CONSTANTS
static const char* GetDeoptReason(DeoptReason deopt_reason);
struct DeoptInfo {
DeoptInfo(SourcePosition position, DeoptReason deopt_reason, int deopt_id)
DeoptInfo(SourcePosition position, DeoptimizeReason deopt_reason,
int deopt_id)
: position(position), deopt_reason(deopt_reason), deopt_id(deopt_id) {}
SourcePosition position;
DeoptReason deopt_reason;
DeoptimizeReason deopt_reason;
int deopt_id;
static const int kNoDeoptId = -1;

View File

@ -174,10 +174,10 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
out.AddFormatted(" ;; debug: deopt position '%d'",
static_cast<int>(relocinfo.data()));
} else if (rmode == RelocInfo::DEOPT_REASON) {
Deoptimizer::DeoptReason reason =
static_cast<Deoptimizer::DeoptReason>(relocinfo.data());
DeoptimizeReason reason =
static_cast<DeoptimizeReason>(relocinfo.data());
out.AddFormatted(" ;; debug: deopt reason '%s'",
Deoptimizer::GetDeoptReason(reason));
DeoptimizeReasonToString(reason));
} else if (rmode == RelocInfo::DEOPT_ID) {
out.AddFormatted(" ;; debug: deopt index %d",
static_cast<int>(relocinfo.data()));

View File

@ -1446,7 +1446,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.

View File

@ -1048,7 +1048,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);

View File

@ -1110,7 +1110,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);

View File

@ -13823,14 +13823,14 @@ Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
void Code::PrintDeoptLocation(FILE* out, Address pc) {
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
class SourcePosition pos = info.position;
if (info.deopt_reason != Deoptimizer::kNoReason || !pos.IsUnknown()) {
if (info.deopt_reason != DeoptimizeReason::kNoReason || !pos.IsUnknown()) {
if (FLAG_hydrogen_track_positions) {
PrintF(out, " ;;; deoptimize at %d_%d: %s\n",
pos.inlining_id(), pos.position(),
Deoptimizer::GetDeoptReason(info.deopt_reason));
DeoptimizeReasonToString(info.deopt_reason));
} else {
PrintF(out, " ;;; deoptimize at %d: %s\n", pos.raw(),
Deoptimizer::GetDeoptReason(info.deopt_reason));
DeoptimizeReasonToString(info.deopt_reason));
}
}
}

View File

@ -1213,7 +1213,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.

View File

@ -155,7 +155,7 @@ void ProfilerListener::CodeDeoptEvent(Code* code, Address pc,
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
rec->start = code->address();
rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
rec->deopt_reason = DeoptimizeReasonToString(info.deopt_reason);
rec->position = info.position;
rec->deopt_id = info.deopt_id;
rec->pc = reinterpret_cast<void*>(pc);

View File

@ -1241,7 +1241,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.

View File

@ -783,6 +783,8 @@
'debug/debug.h',
'debug/liveedit.cc',
'debug/liveedit.h',
'deoptimize-reason.cc',
'deoptimize-reason.h',
'deoptimizer.cc',
'deoptimizer.h',
'disasm.h',

View File

@ -1744,7 +1744,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,

View File

@ -969,7 +969,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position, int id);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.

View File

@ -63,8 +63,8 @@ static size_t offset(const char* src, const char* substring) {
return static_cast<size_t>(it - src);
}
static const char* reason(const i::Deoptimizer::DeoptReason reason) {
return i::Deoptimizer::GetDeoptReason(reason);
static const char* reason(const i::DeoptimizeReason reason) {
return i::DeoptimizeReasonToString(reason);
}
TEST(StartStop) {
@ -1853,21 +1853,21 @@ TEST(CollectDeoptEvents) {
{
const char* branch[] = {"", "opt_function0", "opt_function0"};
CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber),
CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber),
GetBranchDeoptReason(env, iprofile, branch, arraysize(branch)));
}
{
const char* branch[] = {"", "opt_function1", "opt_function1"};
const char* deopt_reason =
GetBranchDeoptReason(env, iprofile, branch, arraysize(branch));
if (deopt_reason != reason(i::Deoptimizer::kNaN) &&
deopt_reason != reason(i::Deoptimizer::kLostPrecisionOrNaN)) {
if (deopt_reason != reason(i::DeoptimizeReason::kNaN) &&
deopt_reason != reason(i::DeoptimizeReason::kLostPrecisionOrNaN)) {
FATAL(deopt_reason);
}
}
{
const char* branch[] = {"", "opt_function2", "opt_function2"};
CHECK_EQ(reason(i::Deoptimizer::kDivisionByZero),
CHECK_EQ(reason(i::DeoptimizeReason::kDivisionByZero),
GetBranchDeoptReason(env, iprofile, branch, arraysize(branch)));
}
iprofiler->DeleteProfile(iprofile);
@ -1957,7 +1957,7 @@ TEST(DeoptAtFirstLevelInlinedSource) {
CHECK_EQ(1U, deopt_infos.size());
const v8::CpuProfileDeoptInfo& info = deopt_infos[0];
CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber), info.deopt_reason);
CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber), info.deopt_reason);
CHECK_EQ(2U, info.stack.size());
CHECK_EQ(inlined_script_id, info.stack[0].script_id);
CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);
@ -2030,7 +2030,7 @@ TEST(DeoptAtSecondLevelInlinedSource) {
CHECK_EQ(1U, deopt_infos.size());
const v8::CpuProfileDeoptInfo info = deopt_infos[0];
CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber), info.deopt_reason);
CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber), info.deopt_reason);
CHECK_EQ(3U, info.stack.size());
CHECK_EQ(inlined_script_id, info.stack[0].script_id);
CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);

View File

@ -446,8 +446,9 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
nullptr),
state_values1, state_values2, state_values3, UndefinedConstant(),
graph()->start(), graph()->start());
Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect1, ifFalse);
Node* deopt = graph()->NewNode(
common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);
@ -486,8 +487,9 @@ TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
nullptr),
state_values1, state_values2, state_values3, UndefinedConstant(),
graph()->start(), graph()->start());
Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect1, ifFalse);
Node* deopt = graph()->NewNode(
common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);

View File

@ -102,9 +102,9 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
s.references_.insert(virtual_register);
}
}
for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
InstructionSequence::StateId::FromInt(i)));
for (int i = 0; i < sequence.GetDeoptimizationEntryCount(); i++) {
s.deoptimization_entries_.push_back(
sequence.GetDeoptimizationEntry(i).descriptor());
}
return s;
}