[turbofan] Change NULL to nullptr and CHECK(x != nullptr) to CHECK_NOT_NULL(x).
R=bmeurer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1578723002

Cr-Commit-Position: refs/heads/master@{#33202}
Parent: b5a34b3d29
Commit: 3ae141c121
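The change applied across the hunks below is mechanical: every literal `NULL` becomes `nullptr`, and null-checking assertions move from the generic `DCHECK(x != NULL)` form to the dedicated `DCHECK_NOT_NULL(x)` macro. A minimal sketch of the pattern follows; the `DCHECK_NOT_NULL` definition and the `Node` type here are illustrative stand-ins, not V8's actual definitions:

```cpp
#include <cassert>

// Illustrative stand-in for V8's DCHECK_NOT_NULL macro: it asserts that the
// pointer is non-null and yields it, so a failure report names the offending
// expression itself rather than a generic "x != NULL" comparison.
#define DCHECK_NOT_NULL(p) (assert((p) != nullptr), (p))

struct Node {};  // hypothetical placeholder type

void UseNode(Node* node) {
  // Before: DCHECK(node != NULL);
  // After:
  DCHECK_NOT_NULL(node);
}

// Why nullptr over NULL: NULL is an integer constant, so it takes part in
// integer overload resolution; nullptr has its own type (std::nullptr_t)
// and converts only to pointer types.
void Overloaded(int) {}
void Overloaded(Node*) {}

int main() {
  Overloaded(nullptr);  // unambiguous: calls Overloaded(Node*)
  // Overloaded(NULL); // may pick Overloaded(int) or be ambiguous,
                       // depending on how NULL is defined
  Node n;
  UseNode(&n);
  return 0;
}
```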
@@ -147,7 +147,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
   }
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
-    DCHECK(op != NULL);
+    DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
     FrameOffset offset = frame_access_state()->GetFrameOffset(
         AllocatedOperand::cast(op)->index());
@@ -1175,7 +1175,7 @@ void CodeGenerator::AssembleReturn() {
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  ArmOperandConverter g(this, NULL);
+  ArmOperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1283,7 +1283,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  ArmOperandConverter g(this, NULL);
+  ArmOperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1358,7 +1358,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
     if (ProjectionIndexOf(value->op()) == 1u) {
       // We cannot combine the <Operation>WithOverflow with this branch
       // unless the 0th projection (the use of the actual value of the
-      // <Operation> is either NULL, which means there's no use of the
+      // <Operation> is either nullptr, which means there's no use of the
       // actual value, or was already defined, which means it is scheduled
       // *AFTER* this branch).
       Node* const node = value->InputAt(0);
@@ -205,7 +205,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
   }
 
   MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
-    DCHECK(op != NULL);
+    DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
     FrameOffset offset = frame_access_state()->GetFrameOffset(
         AllocatedOperand::cast(op)->index());
@@ -1461,7 +1461,7 @@ void CodeGenerator::AssembleReturn() {
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  Arm64OperandConverter g(this, NULL);
+  Arm64OperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1558,7 +1558,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  Arm64OperandConverter g(this, NULL);
+  Arm64OperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1832,12 +1832,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
     if (ProjectionIndexOf(value->op()) == 1u) {
       // We cannot combine the <Operation>WithOverflow with this branch
      // unless the 0th projection (the use of the actual value of the
-      // <Operation> is either NULL, which means there's no use of the
+      // <Operation> is either nullptr, which means there's no use of the
       // actual value, or was already defined, which means it is scheduled
       // *AFTER* this branch).
       Node* const node = value->InputAt(0);
       Node* const result = NodeProperties::FindProjection(node, 0);
-      if (result == NULL || IsDefined(result)) {
+      if (result == nullptr || IsDefined(result)) {
        switch (node->opcode()) {
          case IrOpcode::kInt32AddWithOverflow:
            cont.OverwriteAndNegateIfEqual(kOverflow);
@@ -219,7 +219,7 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
   // One recorded control-flow command.
   struct Entry {
     Command command;       // The command type being applied on this path.
-    Statement* statement;  // The target statement for the command or {NULL}.
+    Statement* statement;  // The target statement for the command or {nullptr}.
     Node* token;           // A token identifying this particular path.
   };
 
@@ -512,7 +512,7 @@ Node* AstGraphBuilder::GetNewTarget() {
 
 bool AstGraphBuilder::CreateGraph(bool stack_check) {
   Scope* scope = info()->scope();
-  DCHECK(graph() != NULL);
+  DCHECK_NOT_NULL(graph());
 
   // Set up the basic structure of the graph. Outputs for {Start} are the formal
   // parameters (including the receiver) plus new target, number of arguments,
@@ -829,7 +829,7 @@ void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
                                                      int offset, int count) {
   bool should_update = false;
   Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
-  if (*state_values == NULL || (*state_values)->InputCount() != count) {
+  if (*state_values == nullptr || (*state_values)->InputCount() != count) {
    should_update = true;
   } else {
     DCHECK(static_cast<size_t>(offset + count) <= values()->size());
@@ -939,7 +939,7 @@ void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
 }
 
 
-Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return nullptr; }
 
 
 Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
@@ -967,14 +967,14 @@ void AstGraphBuilder::ControlScope::PerformCommand(Command command,
                                                    Node* value) {
   Environment* env = environment()->CopyAsUnreachable();
   ControlScope* current = this;
-  while (current != NULL) {
+  while (current != nullptr) {
     environment()->TrimStack(current->stack_height());
     environment()->TrimContextChain(current->context_length());
     if (current->Execute(command, target, value)) break;
     current = current->outer_;
   }
   builder()->set_environment(env);
-  DCHECK(current != NULL);  // Always handled (unless stack is malformed).
+  DCHECK_NOT_NULL(current);  // Always handled (unless stack is malformed).
 }
 
 
@@ -999,7 +999,7 @@ void AstGraphBuilder::ControlScope::ThrowValue(Node* exception_value) {
 
 
 void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
-  if (expr == NULL) {
+  if (expr == nullptr) {
     return environment()->Push(jsgraph()->NullConstant());
   }
   VisitForValue(expr);
@@ -1007,7 +1007,7 @@ void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
 
 
 void AstGraphBuilder::VisitForValueOrTheHole(Expression* expr) {
-  if (expr == NULL) {
+  if (expr == nullptr) {
     return environment()->Push(jsgraph()->TheHoleConstant());
   }
   VisitForValue(expr);
@@ -1141,8 +1141,8 @@ void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
 void AstGraphBuilder::VisitBlock(Block* stmt) {
   BlockBuilder block(this);
   ControlScopeForBreakable scope(this, stmt, &block);
-  if (stmt->labels() != NULL) block.BeginBlock();
-  if (stmt->scope() == NULL) {
+  if (stmt->labels() != nullptr) block.BeginBlock();
+  if (stmt->scope() == nullptr) {
     // Visit statements in the same scope, no declarations.
     VisitStatements(stmt->statements());
   } else {
@@ -1157,7 +1157,7 @@ void AstGraphBuilder::VisitBlock(Block* stmt) {
       VisitStatements(stmt->statements());
     }
   }
-  if (stmt->labels() != NULL) block.EndBlock();
+  if (stmt->labels() != nullptr) block.EndBlock();
 }
 
 
@@ -1298,7 +1298,7 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
   LoopBuilder for_loop(this);
   VisitIfNotNull(stmt->init());
   for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
-  if (stmt->cond() != NULL) {
+  if (stmt->cond() != nullptr) {
     VisitForTest(stmt->cond());
     Node* condition = environment()->Pop();
     for_loop.BreakUnless(condition);
@@ -2112,7 +2112,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
   // Evaluate the value and potentially handle compound assignments by loading
   // the left-hand side value and performing a binary operation.
   if (expr->is_compound()) {
-    Node* old_value = NULL;
+    Node* old_value = nullptr;
     switch (assign_type) {
       case VARIABLE: {
         VariableProxy* proxy = expr->target()->AsVariableProxy();
@@ -2640,7 +2640,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
   }
 
   // Evaluate LHS expression and get old value.
-  Node* old_value = NULL;
+  Node* old_value = nullptr;
   int stack_depth = -1;
   switch (assign_type) {
     case VARIABLE: {
@@ -2866,7 +2866,7 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
       op = javascript()->HasProperty();
       break;
     default:
-      op = NULL;
+      op = nullptr;
       UNREACHABLE();
   }
   VisitForValue(expr->left());
@@ -2938,7 +2938,7 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
 
 
 void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
-  if (stmt == NULL) return;
+  if (stmt == nullptr) return;
   Visit(stmt);
 }
 
@@ -3206,7 +3206,7 @@ Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
 
 
 Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
-  if (arguments == NULL) return NULL;
+  if (arguments == nullptr) return nullptr;
 
   // Allocate and initialize a new arguments object.
   CreateArgumentsParameters::Type type =
@@ -3228,7 +3228,7 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
 
 
 Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
-  if (rest == NULL) return NULL;
+  if (rest == nullptr) return nullptr;
 
   // Allocate and initialize a new arguments object.
   CreateArgumentsParameters::Type type = CreateArgumentsParameters::kRestArray;
@@ -3412,7 +3412,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
     }
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 
@@ -3447,7 +3447,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
     }
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 
@@ -3575,7 +3575,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
     }
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 
@@ -3872,7 +3872,7 @@ Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
       break;
     default:
       UNREACHABLE();
-      js_op = NULL;
+      js_op = nullptr;
   }
   return NewNode(js_op, left, right);
 }
@@ -4055,7 +4055,7 @@ void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
 
 BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
     IterationStatement* stmt) {
-  if (loop_assignment_analysis_ == NULL) return NULL;
+  if (loop_assignment_analysis_ == nullptr) return nullptr;
   return loop_assignment_analysis_->GetVariablesAssignedInLoop(stmt);
 }
 
@@ -4082,7 +4082,7 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
   DCHECK(op->ControlInputCount() < 2);
   DCHECK(op->EffectInputCount() < 2);
 
-  Node* result = NULL;
+  Node* result = nullptr;
   if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
@@ -169,7 +169,7 @@ class AstGraphBuilder : public AstVisitor {
 
   // Node creation helpers.
   Node* NewNode(const Operator* op, bool incomplete = false) {
-    return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+    return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
   }
 
   Node* NewNode(const Operator* op, Node* n1) {
@@ -365,7 +365,7 @@ class AstGraphBuilder : public AstVisitor {
 
   // ===========================================================================
   // The following build methods have the same contract as the above ones, but
-  // they can also return {NULL} to indicate that no fragment was built. Note
+  // they can also return {nullptr} to indicate that no fragment was built. Note
   // that these are optimizations, disabling any of them should still produce
   // correct graphs.
 
@@ -22,7 +22,7 @@ LoopAssignmentAnalysis* ALAA::Analyze() {
   LoopAssignmentAnalysis* a = new (zone_) LoopAssignmentAnalysis(zone_);
   result_ = a;
   VisitStatements(info()->literal()->body());
-  result_ = NULL;
+  result_ = nullptr;
   return a;
 }
 
@@ -26,7 +26,7 @@ class LoopAssignmentAnalysis : public ZoneObject {
       if (list_[i].first == loop) return list_[i].second;
     }
     UNREACHABLE();  // should never ask for loops that aren't here!
-    return NULL;
+    return nullptr;
   }
 
   int GetAssignmentCountForTesting(Scope* scope, Variable* var);
@@ -63,7 +63,7 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
   void Exit(IterationStatement* loop);
 
   void VisitIfNotNull(AstNode* node) {
-    if (node != NULL) Visit(node);
+    if (node != nullptr) Visit(node);
   }
 
   void AnalyzeAssignment(Variable* var);
@@ -1875,7 +1875,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
   DCHECK_LT(op->ControlInputCount(), 2);
   DCHECK_LT(op->EffectInputCount(), 2);
 
-  Node* result = NULL;
+  Node* result = nullptr;
   if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
@@ -65,7 +65,7 @@ class BytecodeGraphBuilder {
 
   // Node creation helpers
   Node* NewNode(const Operator* op, bool incomplete = false) {
-    return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+    return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
   }
 
   Node* NewNode(const Operator* op, Node* n1) {
@@ -27,7 +27,7 @@ LiveRange* LiveRangeConflictIterator::Current() const {
 
 
 void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
-  DCHECK(query_ != nullptr);
+  DCHECK_NOT_NULL(query_);
   auto end = intervals_->end();
   LifetimePosition q_start = query_->start();
   LifetimePosition q_end = query_->end();
@@ -89,7 +89,7 @@ class LiveRangeConflictIterator {
   }
 
   bool QueryIntersectsAllocatedInterval() const {
-    DCHECK(query_ != nullptr);
+    DCHECK_NOT_NULL(query_);
     return pos_ != intervals_->end() &&
            Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
   }
@@ -41,7 +41,7 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
       labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
       current_block_(RpoNumber::Invalid()),
      current_source_position_(SourcePosition::Unknown()),
-      masm_(info->isolate(), NULL, 0, CodeObjectRequired::kYes),
+      masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kYes),
       resolver_(this),
       safepoints_(code->zone()),
       handlers_(code->zone()),
@@ -41,8 +41,8 @@ class IfBuilder final : public ControlBuilder {
  public:
   explicit IfBuilder(AstGraphBuilder* builder)
       : ControlBuilder(builder),
-        then_environment_(NULL),
-        else_environment_(NULL) {}
+        then_environment_(nullptr),
+        else_environment_(nullptr) {}
 
   // Primitive control commands.
   void If(Node* condition, BranchHint hint = BranchHint::kNone);
@@ -61,9 +61,9 @@ class LoopBuilder final : public ControlBuilder {
  public:
   explicit LoopBuilder(AstGraphBuilder* builder)
       : ControlBuilder(builder),
-        loop_environment_(NULL),
-        continue_environment_(NULL),
-        break_environment_(NULL) {}
+        loop_environment_(nullptr),
+        continue_environment_(nullptr),
+        break_environment_(nullptr) {}
 
   // Primitive control commands.
   void BeginLoop(BitVector* assigned, bool is_osr = false);
@@ -90,9 +90,9 @@ class SwitchBuilder final : public ControlBuilder {
  public:
   explicit SwitchBuilder(AstGraphBuilder* builder, int case_count)
       : ControlBuilder(builder),
-        body_environment_(NULL),
-        label_environment_(NULL),
-        break_environment_(NULL),
+        body_environment_(nullptr),
+        label_environment_(nullptr),
+        break_environment_(nullptr),
         body_environments_(case_count, zone()) {}
 
   // Primitive control commands.
@@ -122,7 +122,7 @@ class SwitchBuilder final : public ControlBuilder {
 class BlockBuilder final : public ControlBuilder {
  public:
   explicit BlockBuilder(AstGraphBuilder* builder)
-      : ControlBuilder(builder), break_environment_(NULL) {}
+      : ControlBuilder(builder), break_environment_(nullptr) {}
 
   // Primitive control commands.
   void BeginBlock();
@@ -145,9 +145,9 @@ class TryCatchBuilder final : public ControlBuilder {
  public:
   explicit TryCatchBuilder(AstGraphBuilder* builder)
       : ControlBuilder(builder),
-        catch_environment_(NULL),
-        exit_environment_(NULL),
-        exception_node_(NULL) {}
+        catch_environment_(nullptr),
+        exit_environment_(nullptr),
+        exception_node_(nullptr) {}
 
   // Primitive control commands.
   void BeginTry();
@@ -170,9 +170,9 @@ class TryFinallyBuilder final : public ControlBuilder {
  public:
   explicit TryFinallyBuilder(AstGraphBuilder* builder)
       : ControlBuilder(builder),
-        finally_environment_(NULL),
-        token_node_(NULL),
-        value_node_(NULL) {}
+        finally_environment_(nullptr),
+        token_node_(nullptr),
+        value_node_(nullptr) {}
 
   // Primitive control commands.
   void BeginTry();
@@ -71,7 +71,7 @@ void ControlEquivalence::VisitPost(Node* node, Node* parent_node,
   BracketListDelete(blist, node, direction);
 
   // Propagate bracket list up the DFS tree [line:13].
-  if (parent_node != NULL) {
+  if (parent_node != nullptr) {
     BracketList& parent_blist = GetBracketList(parent_node);
     parent_blist.splice(parent_blist.end(), blist);
   }
@@ -91,7 +91,7 @@ void ControlEquivalence::VisitBackedge(Node* from, Node* to,
 
 void ControlEquivalence::RunUndirectedDFS(Node* exit) {
   ZoneStack<DFSStackEntry> stack(zone_);
-  DFSPush(stack, exit, NULL, kInputDirection);
+  DFSPush(stack, exit, nullptr, kInputDirection);
   VisitPre(exit);
 
   while (!stack.empty()) {  // Undirected depth-first backwards traversal.
@@ -18,8 +18,8 @@ Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
       frame_slot_count_(fixed_frame_size_in_slots),
       callee_saved_slot_count_(0),
       spill_slot_count_(0),
-      allocated_registers_(NULL),
-      allocated_double_registers_(NULL) {}
+      allocated_registers_(nullptr),
+      allocated_double_registers_(nullptr) {}
 
 
 void FrameAccessState::SetFrameAccessToDefault() {
@@ -121,12 +121,12 @@ class Frame : public ZoneObject {
   }
 
   void SetAllocatedRegisters(BitVector* regs) {
-    DCHECK(allocated_registers_ == NULL);
+    DCHECK(allocated_registers_ == nullptr);
     allocated_registers_ = regs;
   }
 
   void SetAllocatedDoubleRegisters(BitVector* regs) {
-    DCHECK(allocated_double_registers_ == NULL);
+    DCHECK(allocated_double_registers_ == nullptr);
     allocated_double_registers_ = regs;
   }
 
@@ -73,7 +73,8 @@ class AdvancedReducer : public Reducer {
   // Revisit the {node} again later.
   virtual void Revisit(Node* node) = 0;
   // Replace value uses of {node} with {value} and effect uses of {node} with
-  // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+  // {effect}. If {effect == nullptr}, then use the effect input to {node}.
+  // All
   // control uses will be relaxed assuming {node} cannot throw.
   virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
                                 Node* control) = 0;
@@ -149,7 +150,7 @@ class GraphReducer : public AdvancedReducer::Editor {
   void Replace(Node* node, Node* replacement) final;
 
   // Replace value uses of {node} with {value} and effect uses of {node} with
-  // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+  // {effect}. If {effect == nullptr}, then use the effect input to {node}. All
   // control uses will be relaxed assuming {node} cannot throw.
   void ReplaceWithValue(Node* node, Node* value, Node* effect,
                         Node* control) final;
@@ -41,7 +41,7 @@ FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
                     '_');
 
   EmbeddedVector<char, 256> full_filename;
-  if (phase == NULL) {
+  if (phase == nullptr) {
     SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
   } else {
     SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
@@ -50,9 +50,9 @@ FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
 }
 
 
-static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
+static int SafeId(Node* node) { return node == nullptr ? -1 : node->id(); }
 static const char* SafeMnemonic(Node* node) {
-  return node == NULL ? "null" : node->op()->mnemonic();
+  return node == nullptr ? "null" : node->op()->mnemonic();
 }
 
 #define DEAD_COLOR "#999999"
@@ -158,7 +158,7 @@ class JSONGraphEdgeWriter {
   void PrintEdges(Node* node) {
     for (int i = 0; i < node->InputCount(); i++) {
       Node* input = node->InputAt(i);
-      if (input == NULL) continue;
+      if (input == nullptr) continue;
       PrintEdge(node, i, input);
     }
   }
@@ -169,7 +169,7 @@ class JSONGraphEdgeWriter {
     } else {
      os_ << ",\n";
     }
-    const char* edge_type = NULL;
+    const char* edge_type = nullptr;
     if (index < NodeProperties::FirstValueIndex(from)) {
       edge_type = "unknown";
     } else if (index < NodeProperties::FirstContextIndex(from)) {
@@ -397,7 +397,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
     PrintIndent();
     os_ << "flags\n";
 
-    if (current->dominator() != NULL) {
+    if (current->dominator() != nullptr) {
       PrintBlockProperty("dominator", current->dominator()->rpo_number());
     }
 
@@ -455,7 +455,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
           os_ << " ";
           PrintType(node);
         }
-        if (positions != NULL) {
+        if (positions != nullptr) {
           SourcePosition position = positions->GetSourcePosition(node);
           if (position.IsKnown()) {
             os_ << " pos:" << position.raw();
@@ -468,7 +468,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
       if (control != BasicBlock::kNone) {
         PrintIndent();
         os_ << "0 0 ";
-        if (current->control_input() != NULL) {
+        if (current->control_input() != nullptr) {
           PrintNode(current->control_input());
         } else {
           os_ << -1 - current->rpo_number() << " Goto";
@@ -477,7 +477,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
         for (BasicBlock* successor : current->successors()) {
           os_ << " B" << successor->rpo_number();
         }
-        if (FLAG_trace_turbo_types && current->control_input() != NULL) {
+        if (FLAG_trace_turbo_types && current->control_input() != nullptr) {
          os_ << " ";
          PrintType(current->control_input());
         }
@@ -485,7 +485,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
       }
     }
 
-    if (instructions != NULL) {
+    if (instructions != nullptr) {
      Tag LIR_tag(this, "LIR");
      for (int j = instruction_block->first_instruction_index();
           j <= instruction_block->last_instruction_index(); j++) {
@@ -531,7 +531,7 @@ void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
 
 void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
                                        int vreg) {
-  if (range != NULL && !range->IsEmpty()) {
+  if (range != nullptr && !range->IsEmpty()) {
     PrintIndent();
     os_ << vreg << ":" << range->relative_id() << " " << type;
     if (range->HasRegisterAssigned()) {
@@ -571,7 +571,7 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
     }
 
     UsePosition* current_pos = range->first_pos();
-    while (current_pos != NULL) {
+    while (current_pos != nullptr) {
      if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
        os_ << " " << current_pos->pos().value() << " M";
      }
@@ -48,8 +48,8 @@ struct AsC1VCompilation {
 
 struct AsC1V {
   AsC1V(const char* phase, const Schedule* schedule,
-        const SourcePositionTable* positions = NULL,
-        const InstructionSequence* instructions = NULL)
+        const SourcePositionTable* positions = nullptr,
+        const InstructionSequence* instructions = nullptr)
       : schedule_(schedule),
         instructions_(instructions),
         positions_(positions),
@@ -1500,7 +1500,7 @@ void CodeGenerator::AssembleReturn() {
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  IA32OperandConverter g(this, NULL);
+  IA32OperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1610,7 +1610,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  IA32OperandConverter g(this, NULL);
+  IA32OperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister() && destination->IsRegister()) {
@@ -50,18 +50,18 @@ class IA32OperandGenerator final : public OperandGenerator {
                                              InstructionOperand inputs[],
                                              size_t* input_count) {
     AddressingMode mode = kMode_MRI;
-    int32_t displacement = (displacement_node == NULL)
+    int32_t displacement = (displacement_node == nullptr)
                                ? 0
                                : OpParameter<int32_t>(displacement_node);
-    if (base != NULL) {
+    if (base != nullptr) {
       if (base->opcode() == IrOpcode::kInt32Constant) {
         displacement += OpParameter<int32_t>(base);
-        base = NULL;
+        base = nullptr;
       }
     }
-    if (base != NULL) {
+    if (base != nullptr) {
       inputs[(*input_count)++] = UseRegister(base);
-      if (index != NULL) {
+      if (index != nullptr) {
         DCHECK(scale >= 0 && scale <= 3);
         inputs[(*input_count)++] = UseRegister(index);
         if (displacement != 0) {
@@ -84,7 +84,7 @@ class IA32OperandGenerator final : public OperandGenerator {
         }
       } else {
         DCHECK(scale >= 0 && scale <= 3);
-        if (index != NULL) {
+        if (index != nullptr) {
          inputs[(*input_count)++] = UseRegister(index);
          if (displacement != 0) {
            inputs[(*input_count)++] = TempImmediate(displacement);
@@ -109,7 +109,7 @@ class IA32OperandGenerator final : public OperandGenerator {
                                     size_t* input_count) {
     BaseWithIndexAndDisplacement32Matcher m(node, true);
     DCHECK(m.matches());
-    if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+    if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
       return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
                                          m.displacement(), inputs, input_count);
     } else {
@@ -298,7 +298,8 @@ void InstructionSelector::VisitStore(Node* node) {
     InstructionCode code =
         opcode | AddressingModeField::encode(addressing_mode);
     inputs[input_count++] = val;
-    Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
+    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+         inputs);
   }
 }
 
@@ -555,8 +556,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
   Int32ScaleMatcher m(node, true);
   if (m.matches()) {
     Node* index = node->InputAt(0);
-    Node* base = m.power_of_two_plus_one() ? index : NULL;
-    EmitLea(this, node, index, m.scale(), base, NULL);
+    Node* base = m.power_of_two_plus_one() ? index : nullptr;
+    EmitLea(this, node, index, m.scale(), base, nullptr);
     return;
   }
   VisitShift(this, node, kIA32Shl);
@@ -602,7 +603,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
   // Try to match the Add to a lea pattern
   BaseWithIndexAndDisplacement32Matcher m(node);
   if (m.matches() &&
-      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
     InstructionOperand inputs[4];
     size_t input_count = 0;
     AddressingMode mode = g.GenerateMemoryOperandInputs(
@@ -639,8 +640,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
   Int32ScaleMatcher m(node, true);
   if (m.matches()) {
     Node* index = node->InputAt(0);
-    Node* base = m.power_of_two_plus_one() ? index : NULL;
-    EmitLea(this, node, index, m.scale(), base, NULL);
+    Node* base = m.power_of_two_plus_one() ? index : nullptr;
+    EmitLea(this, node, index, m.scale(), base, nullptr);
     return;
   }
   IA32OperandGenerator g(this);
@@ -1104,12 +1105,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
     if (ProjectionIndexOf(value->op()) == 1u) {
      // We cannot combine the <Operation>WithOverflow with this branch
      // unless the 0th projection (the use of the actual value of the
-      // <Operation> is either NULL, which means there's no use of the
+      // <Operation> is either nullptr, which means there's no use of the
      // actual value, or was already defined, which means it is scheduled
      // *AFTER* this branch).
      Node* const node = value->InputAt(0);
      Node* const result = NodeProperties::FindProjection(node, 0);
-      if (result == NULL || selector->IsDefined(result)) {
+      if (result == nullptr || selector->IsDefined(result)) {
        switch (node->opcode()) {
          case IrOpcode::kInt32AddWithOverflow:
            cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -86,7 +86,7 @@ void InstructionSelector::SelectInstructions() {
 void InstructionSelector::StartBlock(RpoNumber rpo) {
   if (FLAG_turbo_instruction_scheduling &&
       InstructionScheduler::SchedulerSupported()) {
-    DCHECK(scheduler_ != nullptr);
+    DCHECK_NOT_NULL(scheduler_);
     scheduler_->StartBlock(rpo);
   } else {
     sequence()->StartBlock(rpo);
@@ -97,7 +97,7 @@ void InstructionSelector::StartBlock(RpoNumber rpo) {
 void InstructionSelector::EndBlock(RpoNumber rpo) {
   if (FLAG_turbo_instruction_scheduling &&
       InstructionScheduler::SchedulerSupported()) {
-    DCHECK(scheduler_ != nullptr);
+    DCHECK_NOT_NULL(scheduler_);
     scheduler_->EndBlock(rpo);
   } else {
     sequence()->EndBlock(rpo);
@@ -108,7 +108,7 @@ void InstructionSelector::EndBlock(RpoNumber rpo) {
 void InstructionSelector::AddInstruction(Instruction* instr) {
   if (FLAG_turbo_instruction_scheduling &&
       InstructionScheduler::SchedulerSupported()) {
-    DCHECK(scheduler_ != nullptr);
+    DCHECK_NOT_NULL(scheduler_);
     scheduler_->AddInstruction(instr);
   } else {
     sequence()->AddInstruction(instr);
@@ -121,7 +121,7 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                        size_t temp_count,
                                        InstructionOperand* temps) {
   size_t output_count = output.IsInvalid() ? 0 : 1;
-  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+  return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
 }
 
 
@@ -482,7 +482,7 @@ struct CallBuffer {
   size_t frame_state_count() const { return descriptor->FrameStateCount(); }
 
   size_t frame_state_value_count() const {
-    return (frame_state_descriptor == NULL)
+    return (frame_state_descriptor == nullptr)
                ? 0
               : (frame_state_descriptor->GetTotalSize() +
                  1);  // Include deopt id.
@@ -519,13 +519,13 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
 
     // Filter out the outputs that aren't live because no projection uses them.
     size_t outputs_needed_by_framestate =
-        buffer->frame_state_descriptor == NULL
+        buffer->frame_state_descriptor == nullptr
            ? 0
            : buffer->frame_state_descriptor->state_combine()
                 .ConsumedOutputCount();
     for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
-      bool output_is_live =
-          buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+      bool output_is_live = buffer->output_nodes[i] != nullptr ||
+                            i < outputs_needed_by_framestate;
      if (output_is_live) {
        MachineType type =
            buffer->descriptor->GetReturnType(static_cast<int>(i));
@@ -534,7 +534,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
 
        Node* output = buffer->output_nodes[i];
        InstructionOperand op =
-            output == NULL
+            output == nullptr
               ? g.TempLocation(location, type.representation())
               : g.DefineAsLocation(output, location, type.representation());
        MarkAsRepresentation(type.representation(), op);
@@ -580,7 +580,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
   // arg 2 - arg (n + 1) : value inputs to the frame state.
   size_t frame_state_entries = 0;
   USE(frame_state_entries);  // frame_state_entries is only used for debug.
-  if (buffer->frame_state_descriptor != NULL) {
+  if (buffer->frame_state_descriptor != nullptr) {
     InstructionSequence::StateId state_id =
         sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
     buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
@@ -688,7 +688,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
   instruction_block->set_code_start(static_cast<int>(instructions_.size()));
   instruction_block->set_code_end(current_block_end);
 
-  current_block_ = NULL;
+  current_block_ = nullptr;
 }
 
 
@@ -1605,7 +1605,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
   DCHECK_EQ(parameters, state_info.parameter_count());
   DCHECK_EQ(locals, state_info.local_count());
 
-  FrameStateDescriptor* outer_state = NULL;
+  FrameStateDescriptor* outer_state = nullptr;
   Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
   if (outer_node->opcode() == IrOpcode::kFrameState) {
     outer_state = GetFrameStateDescriptor(outer_node);
@@ -68,35 +68,35 @@ class InstructionSelector final {
   // ===========================================================================
 
   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
-                    size_t temp_count = 0, InstructionOperand* temps = NULL);
+                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                     InstructionOperand a, size_t temp_count = 0,
-                    InstructionOperand* temps = NULL);
+                    InstructionOperand* temps = nullptr);
   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                     InstructionOperand a, InstructionOperand b,
-                    size_t temp_count = 0, InstructionOperand* temps = NULL);
+                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                     InstructionOperand a, InstructionOperand b,
                     InstructionOperand c, size_t temp_count = 0,
-                    InstructionOperand* temps = NULL);
+                    InstructionOperand* temps = nullptr);
   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                     InstructionOperand a, InstructionOperand b,
                     InstructionOperand c, InstructionOperand d,
-                    size_t temp_count = 0, InstructionOperand* temps = NULL);
+                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                     InstructionOperand a, InstructionOperand b,
                     InstructionOperand c, InstructionOperand d,
                     InstructionOperand e, size_t temp_count = 0,
-                    InstructionOperand* temps = NULL);
+                    InstructionOperand* temps = nullptr);
   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                     InstructionOperand a, InstructionOperand b,
                     InstructionOperand c, InstructionOperand d,
                     InstructionOperand e, InstructionOperand f,
-                    size_t temp_count = 0, InstructionOperand* temps = NULL);
+                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
   Instruction* Emit(InstructionCode opcode, size_t output_count,
                     InstructionOperand* outputs, size_t input_count,
                     InstructionOperand* inputs, size_t temp_count = 0,
-                    InstructionOperand* temps = NULL);
+                    InstructionOperand* temps = nullptr);
   Instruction* Emit(Instruction* instr);
 
   // ===========================================================================
@@ -253,7 +253,7 @@ Instruction::Instruction(InstructionCode opcode)
     : opcode_(opcode),
       bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
                  TempCountField::encode(0) | IsCallField::encode(false)),
-      reference_map_(NULL) {
+      reference_map_(nullptr) {
   parallel_moves_[0] = nullptr;
   parallel_moves_[1] = nullptr;
 }
@@ -268,7 +268,7 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
                  InputCountField::encode(input_count) |
                  TempCountField::encode(temp_count) |
                  IsCallField::encode(false)),
-      reference_map_(NULL) {
+      reference_map_(nullptr) {
   parallel_moves_[0] = nullptr;
   parallel_moves_[1] = nullptr;
   size_t offset = 0;
@@ -459,7 +459,7 @@ std::ostream& operator<<(std::ostream& os,
   for (int i = Instruction::FIRST_GAP_POSITION;
        i <= Instruction::LAST_GAP_POSITION; i++) {
     os << "(";
-    if (instr.parallel_moves()[i] != NULL) {
+    if (instr.parallel_moves()[i] != nullptr) {
      PrintableParallelMove ppm = {printable.register_configuration_,
                                   instr.parallel_moves()[i]};
      os << ppm;
@@ -568,7 +568,7 @@ size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
 
 
 static RpoNumber GetRpo(const BasicBlock* block) {
-  if (block == NULL) return RpoNumber::Invalid();
+  if (block == nullptr) return RpoNumber::Invalid();
   return RpoNumber::FromInt(block->rpo_number());
 }
 
@@ -603,7 +603,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
     Zone* zone, const Schedule* schedule) {
   InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
   new (blocks) InstructionBlocks(
-      static_cast<int>(schedule->rpo_order()->size()), NULL, zone);
+      static_cast<int>(schedule->rpo_order()->size()), nullptr, zone);
   size_t rpo_number = 0;
   for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
        it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
@@ -689,7 +689,7 @@ int InstructionSequence::AddInstruction(Instruction* instr) {
   int index = static_cast<int>(instructions_.size());
   instructions_.push_back(instr);
   if (instr->NeedsReferenceMap()) {
-    DCHECK(instr->reference_map() == NULL);
+    DCHECK(instr->reference_map() == nullptr);
     ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
     reference_map->set_instruction_position(index);
     instr->set_reference_map(reference_map);
@@ -853,7 +853,7 @@ size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
 
 size_t FrameStateDescriptor::GetTotalSize() const {
   size_t total_size = 0;
-  for (const FrameStateDescriptor* iter = this; iter != NULL;
+  for (const FrameStateDescriptor* iter = this; iter != nullptr;
        iter = iter->outer_state_) {
     total_size += iter->GetSize();
   }
@@ -863,7 +863,7 @@ size_t FrameStateDescriptor::GetTotalSize() const {
 
 size_t FrameStateDescriptor::GetFrameCount() const {
   size_t count = 0;
-  for (const FrameStateDescriptor* iter = this; iter != NULL;
+  for (const FrameStateDescriptor* iter = this; iter != nullptr;
       iter = iter->outer_state_) {
    ++count;
   }
@@ -873,7 +873,7 @@ size_t FrameStateDescriptor::GetFrameCount() const {
 
 size_t FrameStateDescriptor::GetJSFrameCount() const {
   size_t count = 0;
-  for (const FrameStateDescriptor* iter = this; iter != NULL;
+  for (const FrameStateDescriptor* iter = this; iter != nullptr;
       iter = iter->outer_state_) {
    if (FrameStateFunctionInfo::IsJSFunctionType(iter->type_)) {
      ++count;
@@ -730,7 +730,7 @@ class Instruction final {
 
   // TODO(titzer): make call into a flags.
   static Instruction* New(Zone* zone, InstructionCode opcode) {
-    return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+    return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
   }
 
   static Instruction* New(Zone* zone, InstructionCode opcode,
@@ -738,9 +738,9 @@ class Instruction final {
                           size_t input_count, InstructionOperand* inputs,
                           size_t temp_count, InstructionOperand* temps) {
     DCHECK(opcode >= 0);
-    DCHECK(output_count == 0 || outputs != NULL);
-    DCHECK(input_count == 0 || inputs != NULL);
-    DCHECK(temp_count == 0 || temps != NULL);
+    DCHECK(output_count == 0 || outputs != nullptr);
+    DCHECK(input_count == 0 || inputs != nullptr);
+    DCHECK(temp_count == 0 || temps != nullptr);
     size_t total_extra_ops = output_count + input_count + temp_count;
     if (total_extra_ops != 0) total_extra_ops--;
     int size = static_cast<int>(
@@ -756,7 +756,7 @@ class Instruction final {
   }
   bool IsCall() const { return IsCallField::decode(bit_field_); }
   bool NeedsReferenceMap() const { return IsCall(); }
-  bool HasReferenceMap() const { return reference_map_ != NULL; }
+  bool HasReferenceMap() const { return reference_map_ != nullptr; }
 
   bool ClobbersRegisters() const { return IsCall(); }
   bool ClobbersTemps() const { return IsCall(); }
@@ -772,7 +772,7 @@ class Instruction final {
   void OverwriteWithNop() {
     opcode_ = ArchOpcodeField::encode(kArchNop);
     bit_field_ = 0;
-    reference_map_ = NULL;
+    reference_map_ = nullptr;
   }
 
   bool IsNop() const {
@@ -120,7 +120,7 @@ Node* JSGraph::Constant(int32_t value) {
 
 Node* JSGraph::Int32Constant(int32_t value) {
   Node** loc = cache_.FindInt32Constant(value);
-  if (*loc == NULL) {
+  if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->Int32Constant(value));
   }
   return *loc;
@@ -129,7 +129,7 @@ Node* JSGraph::Int32Constant(int32_t value) {
 
 Node* JSGraph::Int64Constant(int64_t value) {
   Node** loc = cache_.FindInt64Constant(value);
-  if (*loc == NULL) {
+  if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->Int64Constant(value));
   }
   return *loc;
@@ -138,7 +138,7 @@ Node* JSGraph::Int64Constant(int64_t value) {
 
 Node* JSGraph::NumberConstant(double value) {
   Node** loc = cache_.FindNumberConstant(value);
-  if (*loc == NULL) {
+  if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->NumberConstant(value));
   }
   return *loc;
@@ -147,7 +147,7 @@ Node* JSGraph::NumberConstant(double value) {
 
 Node* JSGraph::Float32Constant(float value) {
   Node** loc = cache_.FindFloat32Constant(value);
-  if (*loc == NULL) {
+  if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->Float32Constant(value));
   }
   return *loc;
@@ -156,7 +156,7 @@ Node* JSGraph::Float32Constant(float value) {
 
 Node* JSGraph::Float64Constant(double value) {
   Node** loc = cache_.FindFloat64Constant(value);
-  if (*loc == NULL) {
+  if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->Float64Constant(value));
   }
   return *loc;
@@ -165,7 +165,7 @@ Node* JSGraph::Float64Constant(double value) {
 
 Node* JSGraph::ExternalConstant(ExternalReference reference) {
   Node** loc = cache_.FindExternalConstant(reference);
-  if (*loc == NULL) {
+  if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->ExternalConstant(reference));
   }
   return *loc;
@@ -1271,7 +1271,7 @@ Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
                        jsgraph()->FalseConstant(), control);
 
   if (if_is_smi != nullptr) {
-    DCHECK(e_is_smi != nullptr);
+    DCHECK_NOT_NULL(e_is_smi);
     control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
     effect =
         graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
@@ -120,7 +120,7 @@ bool CallDescriptor::CanTailCall(const Node* node,
 
 
 CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
-  if (info->code_stub() != NULL) {
+  if (info->code_stub() != nullptr) {
     // Use the code stub interface descriptor.
     CodeStub* stub = info->code_stub();
     CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
@@ -143,7 +143,7 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
         1 + shared->internal_formal_parameter_count(),
         CallDescriptor::kNoFlags);
   }
-  return NULL;  // TODO(titzer): ?
+  return nullptr;  // TODO(titzer): ?
 }
 
 
@@ -113,7 +113,7 @@ class LoopTree : public ZoneObject {
      if (node->opcode() == IrOpcode::kLoop) return node;
    }
    UNREACHABLE();
-    return NULL;
+    return nullptr;
   }
 
  private:
@@ -944,8 +944,8 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
   }
   if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
 
-  Node* shl = NULL;
-  Node* shr = NULL;
+  Node* shl = nullptr;
+  Node* shr = nullptr;
   // Recognize rotation, we are matching either:
   //  * x << y | x >>> (32 - y) => x ror (32 - y), i.e x rol y
   //  * x << (32 - y) | x >>> y => x ror y
@@ -968,8 +968,8 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
     // Case where y is a constant.
     if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
   } else {
-    Node* sub = NULL;
-    Node* y = NULL;
+    Node* sub = nullptr;
+    Node* y = nullptr;
     if (mshl.right().IsInt32Sub()) {
       sub = mshl.right().node();
       y = mshr.right().node();
@@ -118,7 +118,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
   MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
-    DCHECK(op != NULL);
+    DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
     FrameOffset offset = frame_access_state()->GetFrameOffset(
         AllocatedOperand::cast(op)->index());
@@ -1221,7 +1221,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
         !__ IsDoubleZeroRegSet()) {
       __ Move(kDoubleRegZero, 0.0);
     }
-    __ BranchF32(tlabel, NULL, cc, left, right);
+    __ BranchF32(tlabel, nullptr, cc, left, right);
   } else if (instr->arch_opcode() == kMipsCmpD) {
     if (!convertCondition(branch->condition, cc)) {
       UNSUPPORTED_COND(kMips64CmpD, branch->condition);
@@ -1232,7 +1232,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
     }
-    __ BranchF64(tlabel, NULL, cc, left, right);
+    __ BranchF64(tlabel, nullptr, cc, left, right);
   } else {
     PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
            instr->arch_opcode());
@@ -1541,7 +1541,7 @@ void CodeGenerator::AssembleReturn() {
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  MipsOperandConverter g(this, NULL);
+  MipsOperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1647,7 +1647,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  MipsOperandConverter g(this, NULL);
+  MipsOperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1109,7 +1109,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
     if (ProjectionIndexOf(value->op()) == 1u) {
      // We cannot combine the <Operation>WithOverflow with this branch
      // unless the 0th projection (the use of the actual value of the
-      // <Operation> is either NULL, which means there's no use of the
+      // <Operation> is either nullptr, which means there's no use of the
      // actual value, or was already defined, which means it is scheduled
      // *AFTER* this branch).
      Node* const node = value->InputAt(0);
@@ -118,7 +118,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
   MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
-    DCHECK(op != NULL);
+    DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
     FrameOffset offset = frame_access_state()->GetFrameOffset(
         AllocatedOperand::cast(op)->index());
@@ -1447,7 +1447,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
     }
-    __ BranchF32(tlabel, NULL, cc, left, right);
+    __ BranchF32(tlabel, nullptr, cc, left, right);
   } else if (instr->arch_opcode() == kMips64CmpD) {
     if (!convertCondition(branch->condition, cc)) {
       UNSUPPORTED_COND(kMips64CmpD, branch->condition);
@@ -1458,7 +1458,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
     }
-    __ BranchF64(tlabel, NULL, cc, left, right);
+    __ BranchF64(tlabel, nullptr, cc, left, right);
   } else {
     PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
            instr->arch_opcode());
@@ -1778,7 +1778,7 @@ void CodeGenerator::AssembleReturn() {
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  MipsOperandConverter g(this, NULL);
+  MipsOperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1884,7 +1884,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
-  MipsOperandConverter g(this, NULL);
+  MipsOperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
   if (source->IsRegister()) {
@@ -1595,12 +1595,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
     if (ProjectionIndexOf(value->op()) == 1u) {
      // We cannot combine the <Operation>WithOverflow with this branch
      // unless the 0th projection (the use of the actual value of the
-      // <Operation> is either NULL, which means there's no use of the
+      // <Operation> is either nullptr, which means there's no use of the
      // actual value, or was already defined, which means it is scheduled
      // *AFTER* this branch).
      Node* const node = value->InputAt(0);
      Node* const result = NodeProperties::FindProjection(node, 0);
-      if (result == NULL || selector->IsDefined(result)) {
+      if (result == nullptr || selector->IsDefined(result)) {
        switch (node->opcode()) {
          case IrOpcode::kInt32AddWithOverflow:
            cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -255,7 +255,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
     if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
       break;
   }
-  DCHECK(instr != nullptr);
+  DCHECK_NOT_NULL(instr);
   bool gap_initialized = true;
   if (instr->parallel_moves()[0] == nullptr ||
       instr->parallel_moves()[0]->empty()) {
@@ -35,7 +35,8 @@ class NodeCache final {
 
   // Search for node associated with {key} and return a pointer to a memory
   // location in this cache that stores an entry for the key. If the location
-  // returned by this method contains a non-NULL node, the caller can use that
+  // returned by this method contains a non-nullptr node, the caller can use
+  // that
   // node. Otherwise it is the responsibility of the caller to fill the entry
   // with a new node.
   // Note that a previous cache entry may be overwritten if the cache becomes
@@ -384,19 +384,19 @@ template <class AddMatcher>
 struct BaseWithIndexAndDisplacementMatcher {
   BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
       : matches_(false),
-        index_(NULL),
+        index_(nullptr),
         scale_(0),
-        base_(NULL),
-        displacement_(NULL) {
+        base_(nullptr),
+        displacement_(nullptr) {
     Initialize(node, allow_input_swap);
   }
 
   explicit BaseWithIndexAndDisplacementMatcher(Node* node)
       : matches_(false),
-        index_(NULL),
+        index_(nullptr),
         scale_(0),
-        base_(NULL),
-        displacement_(NULL) {
+        base_(nullptr),
+        displacement_(nullptr) {
     Initialize(node, node->op()->HasProperty(Operator::kCommutative));
   }
 
@@ -434,10 +434,10 @@ struct BaseWithIndexAndDisplacementMatcher {
     AddMatcher m(node, allow_input_swap);
     Node* left = m.left().node();
     Node* right = m.right().node();
-    Node* displacement = NULL;
-    Node* base = NULL;
-    Node* index = NULL;
-    Node* scale_expression = NULL;
+    Node* displacement = nullptr;
+    Node* base = nullptr;
+    Node* index = nullptr;
+    Node* scale_expression = nullptr;
     bool power_of_two_plus_one = false;
     int scale = 0;
     if (m.HasIndexInput() && left->OwnedBy(node)) {
@@ -519,7 +519,7 @@ struct BaseWithIndexAndDisplacementMatcher {
      }
    }
    int64_t value = 0;
-    if (displacement != NULL) {
+    if (displacement != nullptr) {
      switch (displacement->opcode()) {
        case IrOpcode::kInt32Constant: {
          value = OpParameter<int32_t>(displacement);
@@ -534,11 +534,11 @@ struct BaseWithIndexAndDisplacementMatcher {
          break;
        }
      if (value == 0) {
-        displacement = NULL;
+        displacement = nullptr;
      }
    }
    if (power_of_two_plus_one) {
-      if (base != NULL) {
+      if (base != nullptr) {
        // If the scale requires explicitly using the index as the base, but a
        // base is already part of the match, then the (1 << N + 1) scale factor
        // can't be folded into the match and the entire index * scale
@ -97,7 +97,7 @@ class NodeProperties final {
Node* node);

// Replace all uses of {node} with the given replacement nodes. All occurring
// use kinds need to be replaced, {NULL} is only valid if a use kind is
// use kinds need to be replaced, {nullptr} is only valid if a use kind is
// guaranteed not to exist.
static void ReplaceUses(Node* node, Node* value, Node* effect = nullptr,
Node* success = nullptr, Node* exception = nullptr);

@ -60,8 +60,8 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
// Verify that none of the inputs are {nullptr}.
for (int i = 0; i < input_count; i++) {
if (inputs[i] == nullptr) {
V8_Fatal(__FILE__, __LINE__, "Node::New() Error: #%d:%s[%d] is NULL",
static_cast<int>(id), op->mnemonic(), i);
V8_Fatal(__FILE__, __LINE__, "Node::New() Error: #%d:%s[%d] is nullptr",
static_cast<int>(id), op->mnemonic(), i);
}
}
#endif

@ -36,7 +36,7 @@ void PipelineStatistics::CommonStats::End(
diff->max_allocated_bytes_ + allocated_bytes_at_start_;
diff->total_allocated_bytes_ =
outer_zone_diff + scope_->GetTotalAllocatedBytes();
scope_.Reset(NULL);
scope_.Reset(nullptr);
timer_.Stop();
}

@ -48,8 +48,8 @@ PipelineStatistics::PipelineStatistics(CompilationInfo* info,
zone_pool_(zone_pool),
compilation_stats_(isolate_->GetTurboStatistics()),
source_size_(0),
phase_kind_name_(NULL),
phase_name_(NULL) {
phase_kind_name_(nullptr),
phase_name_(nullptr) {
if (info->has_shared_info()) {
source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
base::SmartArrayPointer<char> name =

@ -76,10 +76,10 @@ class PhaseScope {
public:
PhaseScope(PipelineStatistics* pipeline_stats, const char* name)
: pipeline_stats_(pipeline_stats) {
if (pipeline_stats_ != NULL) pipeline_stats_->BeginPhase(name);
if (pipeline_stats_ != nullptr) pipeline_stats_->BeginPhase(name);
}
~PhaseScope() {
if (pipeline_stats_ != NULL) pipeline_stats_->EndPhase();
if (pipeline_stats_ != nullptr) pipeline_stats_->EndPhase();
}

private:

@ -304,7 +304,7 @@ class PipelineData {
Handle<Code> code_;

// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to NULL when the graph_zone_ is destroyed.
// They are all set to nullptr when the graph_zone_ is destroyed.
ZonePool::Scope graph_zone_scope_;
Zone* graph_zone_;
Graph* graph_;
@ -320,7 +320,8 @@ class PipelineData {
Schedule* schedule_;

// All objects in the following group of fields are allocated in
// instruction_zone_. They are all set to NULL when the instruction_zone_ is
// instruction_zone_. They are all set to nullptr when the instruction_zone_
// is
// destroyed.
ZonePool::Scope instruction_zone_scope_;
Zone* instruction_zone_;
@ -328,7 +329,7 @@ class PipelineData {
Frame* frame_;

// All objects in the following group of fields are allocated in
// register_allocation_zone_. They are all set to NULL when the zone is
// register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
ZonePool::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
@ -349,7 +350,7 @@ struct TurboCfgFile : public std::ofstream {

void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
if (FLAG_trace_turbo) {
FILE* json_file = OpenVisualizerLogFile(info, NULL, "json", "a+");
FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
@ -758,7 +759,7 @@ struct StressLoopPeelingPhase {
// Peel the first outer loop for testing.
// TODO(titzer): peel all loops? the N'th loop? Innermost loops?
LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
if (loop_tree != NULL && loop_tree->outer_loops().size() > 0) {
if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
loop_tree->outer_loops()[0], temp_zone);
}
@ -1006,7 +1007,7 @@ struct PrintGraphPhase {
Graph* graph = data->graph();

{ // Print JSON.
FILE* json_file = OpenVisualizerLogFile(info, NULL, "json", "a+");
FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
if (json_file == nullptr) return;
OFStream json_of(json_file);
json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
@ -1035,7 +1036,7 @@ struct VerifyGraphPhase {


void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
if (data_->pipeline_statistics() != NULL) {
if (data_->pipeline_statistics() != nullptr) {
data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
}
}
@ -1068,7 +1069,7 @@ Handle<Code> Pipeline::GenerateCode() {
}

if (FLAG_trace_turbo) {
FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "w+");
FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "w+");
if (json_file != nullptr) {
OFStream json_of(json_file);
Handle<Script> script = info()->script();
@ -1233,7 +1234,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
DCHECK_NOT_NULL(data.schedule());

if (FLAG_trace_turbo) {
FILE* json_file = OpenVisualizerLogFile(&info, NULL, "json", "w+");
FILE* json_file = OpenVisualizerLogFile(&info, nullptr, "json", "w+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of << "{\"function\":\"" << info.GetDebugName().get()
@ -1302,7 +1303,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());

BasicBlockProfiler::Data* profiler_data = NULL;
BasicBlockProfiler::Data* profiler_data = nullptr;
if (FLAG_turbo_profiling) {
profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
data->schedule());
@ -1351,10 +1352,10 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
Run<GenerateCodePhase>(&linkage);

Handle<Code> code = data->code();
if (profiler_data != NULL) {
if (profiler_data != nullptr) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
code->Disassemble(NULL, os);
code->Disassemble(nullptr, os);
profiler_data->SetCode(&os);
#endif
}
@ -1363,14 +1364,14 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
v8::internal::CodeGenerator::PrintCode(code, info());

if (FLAG_trace_turbo) {
FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "a+");
FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "a+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of
<< "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#if ENABLE_DISASSEMBLER
std::stringstream disassembly_stream;
code->Disassemble(NULL, disassembly_stream);
code->Disassemble(nullptr, disassembly_stream);
std::string disassembly_string(disassembly_stream.str());
for (const auto& c : disassembly_string) {
json_of << AsEscapedUC16ForJSON(c);

@ -101,7 +101,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
}

MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
FrameOffset offset = frame_access_state()->GetFrameOffset(
AllocatedOperand::cast(op)->index());
@ -1640,7 +1640,7 @@ void CodeGenerator::AssembleReturn() {

void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
PPCOperandConverter g(this, NULL);
PPCOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@ -1742,7 +1742,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,

void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
PPCOperandConverter g(this, NULL);
PPCOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {

@ -1431,12 +1431,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || selector->IsDefined(result)) {
if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);

@ -358,7 +358,7 @@ BasicBlock* RawMachineAssembler::CurrentBlock() {
Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
Node** inputs) {
DCHECK_NOT_NULL(schedule_);
DCHECK(current_block_ != nullptr);
DCHECK_NOT_NULL(current_block_);
Node* node = MakeNode(op, input_count, inputs);
schedule()->AddNode(CurrentBlock(), node);
return node;
@ -374,7 +374,7 @@ Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,


RawMachineLabel::RawMachineLabel()
: block_(NULL), used_(false), bound_(false) {}
: block_(nullptr), used_(false), bound_(false) {}


RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }

@ -287,10 +287,10 @@ void LiveRange::VerifyPositions() const {
for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
CHECK(Start() <= pos->pos());
CHECK(pos->pos() <= End());
CHECK(interval != nullptr);
CHECK_NOT_NULL(interval);
while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
interval = interval->next();
CHECK(interval != nullptr);
CHECK_NOT_NULL(interval);
}
}
}

@ -428,7 +428,7 @@ const Operator* RepresentationChanger::Int32OperatorFor(
return machine()->Int32LessThanOrEqual();
default:
UNREACHABLE();
return NULL;
return nullptr;
}
}

@ -454,7 +454,7 @@ const Operator* RepresentationChanger::Uint32OperatorFor(
return machine()->Uint32LessThanOrEqual();
default:
UNREACHABLE();
return NULL;
return nullptr;
}
}

@ -480,7 +480,7 @@ const Operator* RepresentationChanger::Float64OperatorFor(
return machine()->Float64LessThanOrEqual();
default:
UNREACHABLE();
return NULL;
return nullptr;
}
}

@ -34,7 +34,7 @@ bool BasicBlock::LoopContains(BasicBlock* block) const {
// RPO numbers must be initialized.
DCHECK(rpo_number_ >= 0);
DCHECK(block->rpo_number_ >= 0);
if (loop_end_ == NULL) return false; // This is not a loop.
if (loop_end_ == nullptr) return false; // This is not a loop.
return block->rpo_number_ >= rpo_number_ &&
block->rpo_number_ < loop_end_->rpo_number_;
}
@ -140,13 +140,13 @@ BasicBlock* Schedule::block(Node* node) const {
if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
return nodeid_to_block_[node->id()];
}
return NULL;
return nullptr;
}


bool Schedule::IsScheduled(Node* node) {
if (node->id() >= nodeid_to_block_.size()) return false;
return nodeid_to_block_[node->id()] != NULL;
return nodeid_to_block_[node->id()] != nullptr;
}


@ -158,7 +158,7 @@ BasicBlock* Schedule::GetBlockById(BasicBlock::Id block_id) {

bool Schedule::SameBasicBlock(Node* a, Node* b) const {
BasicBlock* block = this->block(a);
return block != NULL && block == this->block(b);
return block != nullptr && block == this->block(b);
}


@ -176,7 +176,7 @@ void Schedule::PlanNode(BasicBlock* block, Node* node) {
os << "Planning #" << node->id() << ":" << node->op()->mnemonic()
<< " for future add to B" << block->id() << "\n";
}
DCHECK(this->block(node) == NULL);
DCHECK(this->block(node) == nullptr);
SetBlockForNode(block, node);
}

@ -187,7 +187,7 @@ void Schedule::AddNode(BasicBlock* block, Node* node) {
os << "Adding #" << node->id() << ":" << node->op()->mnemonic() << " to B"
<< block->id() << "\n";
}
DCHECK(this->block(node) == NULL || this->block(node) == block);
DCHECK(this->block(node) == nullptr || this->block(node) == block);
block->AddNode(node);
SetBlockForNode(block, node);
}
@ -354,7 +354,7 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
BasicBlock::Control control = block->control();
if (control != BasicBlock::kNone) {
os << " ";
if (block->control_input() != NULL) {
if (block->control_input() != nullptr) {
os << *block->control_input();
} else {
os << "Goto";

@ -138,7 +138,7 @@ class BasicBlock final : public ZoneObject {
void set_rpo_number(int32_t rpo_number);

// Loop membership helpers.
inline bool IsLoopHeader() const { return loop_end_ != NULL; }
inline bool IsLoopHeader() const { return loop_end_ != nullptr; }
bool LoopContains(BasicBlock* block) const;

// Computes the immediate common dominator of {b1} and {b2}. The worst time
@ -153,8 +153,8 @@ class BasicBlock final : public ZoneObject {
BasicBlock* dominator_; // Immediate dominator of the block.
BasicBlock* rpo_next_; // Link to next block in special RPO order.
BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
// NULL if none. For loop headers, this points to
// enclosing loop header.
// nullptr if none. For loop headers, this points to
// enclosing loop header.
BasicBlock* loop_end_; // end of the loop, if this block is a loop header.
int32_t loop_depth_; // loop nesting, 0 is top-level

@ -221,9 +221,9 @@ class CFGBuilder : public ZoneObject {
queued_(scheduler->graph_, 2),
queue_(zone),
control_(zone),
component_entry_(NULL),
component_start_(NULL),
component_end_(NULL) {}
component_entry_(nullptr),
component_start_(nullptr),
component_end_(nullptr) {}

// Run the control flow graph construction algorithm by walking the graph
// backwards from end through control edges, building and connecting the
@ -253,7 +253,7 @@ class CFGBuilder : public ZoneObject {
ResetDataStructures();
Queue(exit);

component_entry_ = NULL;
component_entry_ = nullptr;
component_start_ = block;
component_end_ = schedule_->block(exit);
scheduler_->equivalence_->Run(exit);
@ -377,7 +377,7 @@ class CFGBuilder : public ZoneObject {

BasicBlock* BuildBlockForNode(Node* node) {
BasicBlock* block = schedule_->block(node);
if (block == NULL) {
if (block == nullptr) {
block = schedule_->NewBasicBlock();
TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
node->op()->mnemonic());
@ -501,34 +501,34 @@ class CFGBuilder : public ZoneObject {
void ConnectTailCall(Node* call) {
Node* call_control = NodeProperties::GetControlInput(call);
BasicBlock* call_block = FindPredecessorBlock(call_control);
TraceConnect(call, call_block, NULL);
TraceConnect(call, call_block, nullptr);
schedule_->AddTailCall(call_block, call);
}

void ConnectReturn(Node* ret) {
Node* return_control = NodeProperties::GetControlInput(ret);
BasicBlock* return_block = FindPredecessorBlock(return_control);
TraceConnect(ret, return_block, NULL);
TraceConnect(ret, return_block, nullptr);
schedule_->AddReturn(return_block, ret);
}

void ConnectDeoptimize(Node* deopt) {
Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
TraceConnect(deopt, deoptimize_block, NULL);
TraceConnect(deopt, deoptimize_block, nullptr);
schedule_->AddDeoptimize(deoptimize_block, deopt);
}

void ConnectThrow(Node* thr) {
Node* throw_control = NodeProperties::GetControlInput(thr);
BasicBlock* throw_block = FindPredecessorBlock(throw_control);
TraceConnect(thr, throw_block, NULL);
TraceConnect(thr, throw_block, nullptr);
schedule_->AddThrow(throw_block, thr);
}

void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
DCHECK_NOT_NULL(block);
if (succ == NULL) {
if (succ == nullptr) {
TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
node->op()->mnemonic(), block->id().ToInt());
} else {
@ -602,8 +602,8 @@ class SpecialRPONumberer : public ZoneObject {
SpecialRPONumberer(Zone* zone, Schedule* schedule)
: zone_(zone),
schedule_(schedule),
order_(NULL),
beyond_end_(NULL),
order_(nullptr),
beyond_end_(nullptr),
loops_(zone),
backedges_(zone),
stack_(zone),
@ -630,7 +630,7 @@ class SpecialRPONumberer : public ZoneObject {
// numbering for basic blocks into the final schedule.
void SerializeRPOIntoSchedule() {
int32_t number = 0;
for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
b->set_rpo_number(number++);
schedule_->rpo_order()->push_back(b);
}
@ -677,7 +677,7 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* start;

void AddOutgoing(Zone* zone, BasicBlock* block) {
if (outgoing == NULL) {
if (outgoing == nullptr) {
outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
ZoneVector<BasicBlock*>(zone);
}
@ -713,7 +713,7 @@ class SpecialRPONumberer : public ZoneObject {
// use the schedule's end block in actual control flow (e.g. with end having
// successors). Once this has been cleaned up we can use the end block here.
BasicBlock* BeyondEndSentinel() {
if (beyond_end_ == NULL) {
if (beyond_end_ == nullptr) {
BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
}
@ -777,7 +777,7 @@ class SpecialRPONumberer : public ZoneObject {

// Initialize the "loop stack". Note the entry could be a loop header.
LoopInfo* loop =
HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
order = insertion_point;

// Perform an iterative post-order traversal, visiting loop bodies before
@ -788,7 +788,7 @@ class SpecialRPONumberer : public ZoneObject {
while (stack_depth > 0) {
SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
BasicBlock* block = frame->block;
BasicBlock* succ = NULL;
BasicBlock* succ = nullptr;

if (block != end && frame->index < block->SuccessorCount()) {
// Process the next normal successor.
@ -798,7 +798,7 @@ class SpecialRPONumberer : public ZoneObject {
if (block->rpo_number() == kBlockOnStack) {
// Finish the loop body the first time the header is left on the
// stack.
DCHECK(loop != NULL && loop->header == block);
DCHECK(loop != nullptr && loop->header == block);
loop->start = PushFront(order, block);
order = loop->end;
block->set_rpo_number(kBlockVisited2);
@ -813,19 +813,19 @@ class SpecialRPONumberer : public ZoneObject {
size_t outgoing_index = frame->index - block->SuccessorCount();
LoopInfo* info = &loops_[GetLoopNumber(block)];
DCHECK(loop != info);
if (block != entry && info->outgoing != NULL &&
if (block != entry && info->outgoing != nullptr &&
outgoing_index < info->outgoing->size()) {
succ = info->outgoing->at(outgoing_index);
frame->index++;
}
}

if (succ != NULL) {
if (succ != nullptr) {
// Process the next successor.
if (succ->rpo_number() == kBlockOnStack) continue;
if (succ->rpo_number() == kBlockVisited2) continue;
DCHECK(succ->rpo_number() == kBlockUnvisited2);
if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
// The successor is not in the current loop or any nested loop.
// Add it to the outgoing edges of this loop and visit it later.
loop->AddOutgoing(zone_, succ);
@ -865,10 +865,10 @@ class SpecialRPONumberer : public ZoneObject {
}

// Publish new order the first time.
if (order_ == NULL) order_ = order;
if (order_ == nullptr) order_ = order;

// Compute the correct loop headers and set the correct loop ends.
LoopInfo* current_loop = NULL;
LoopInfo* current_loop = nullptr;
BasicBlock* current_header = entry->loop_header();
int32_t loop_depth = entry->loop_depth();
if (entry->IsLoopHeader()) --loop_depth; // Entry might be a loop header.
@ -879,11 +879,13 @@ class SpecialRPONumberer : public ZoneObject {
current->set_rpo_number(kBlockUnvisited1);

// Finish the previous loop(s) if we just exited them.
while (current_header != NULL && current == current_header->loop_end()) {
while (current_header != nullptr &&
current == current_header->loop_end()) {
DCHECK(current_header->IsLoopHeader());
DCHECK(current_loop != NULL);
DCHECK_NOT_NULL(current_loop);
current_loop = current_loop->prev;
current_header = current_loop == NULL ? NULL : current_loop->header;
current_header =
current_loop == nullptr ? nullptr : current_loop->header;
--loop_depth;
}
current->set_loop_header(current_header);
@ -893,7 +895,7 @@ class SpecialRPONumberer : public ZoneObject {
++loop_depth;
current_loop = &loops_[GetLoopNumber(current)];
BasicBlock* end = current_loop->end;
current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
current_header = current_loop->header;
TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@ -901,7 +903,7 @@ class SpecialRPONumberer : public ZoneObject {

current->set_loop_depth(loop_depth);

if (current->loop_header() == NULL) {
if (current->loop_header() == nullptr) {
TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
current->loop_depth());
} else {
@ -932,7 +934,7 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* member = backedges->at(i).first;
BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
size_t loop_num = GetLoopNumber(header);
if (loops_[loop_num].header == NULL) {
if (loops_[loop_num].header == nullptr) {
loops_[loop_num].header = header;
loops_[loop_num].members = new (zone_)
BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
@ -979,7 +981,8 @@ class SpecialRPONumberer : public ZoneObject {
}
os << ":\n";

for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
for (BasicBlock* block = order_; block != nullptr;
block = block->rpo_next()) {
os << std::setw(5) << "B" << block->rpo_number() << ":";
for (size_t i = 0; i < loops_.size(); i++) {
bool range = loops_[i].header->LoopContains(block);
@ -988,11 +991,11 @@ class SpecialRPONumberer : public ZoneObject {
os << (range ? "x" : " ");
}
os << " id:" << block->id() << ": ";
if (block->loop_end() != NULL) {
if (block->loop_end() != nullptr) {
os << " range: [B" << block->rpo_number() << ", B"
<< block->loop_end()->rpo_number() << ")";
}
if (block->loop_header() != NULL) {
if (block->loop_header() != nullptr) {
os << " header: id:" << block->loop_header()->id();
}
if (block->loop_depth() > 0) {
@ -1012,10 +1015,10 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* header = loop->header;
BasicBlock* end = header->loop_end();

DCHECK(header != NULL);
DCHECK_NOT_NULL(header);
DCHECK(header->rpo_number() >= 0);
DCHECK(header->rpo_number() < static_cast<int>(order->size()));
DCHECK(end != NULL);
DCHECK_NOT_NULL(end);
DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
DCHECK(end->rpo_number() > header->rpo_number());
DCHECK(header->loop_header() != header);
@ -1026,7 +1029,7 @@ class SpecialRPONumberer : public ZoneObject {
DCHECK_EQ(header, block);
bool end_found;
while (true) {
if (block == NULL || block == loop->end) {
if (block == nullptr || block == loop->end) {
end_found = (loop->end == block);
break;
}
@ -1042,7 +1045,7 @@ class SpecialRPONumberer : public ZoneObject {

// Check loop depth of the header.
int loop_depth = 0;
for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
loop_depth++;
}
DCHECK_EQ(loop_depth, header->loop_depth());
@ -1096,7 +1099,7 @@ void Scheduler::ComputeSpecialRPONumbering() {


void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
for (/*nop*/; block != NULL; block = block->rpo_next()) {
for (/*nop*/; block != nullptr; block = block->rpo_next()) {
auto pred = block->predecessors().begin();
auto end = block->predecessors().end();
DCHECK(pred != end); // All blocks except start have predecessors.
@ -1153,7 +1156,7 @@ class PrepareUsesVisitor {
opcode == IrOpcode::kParameter
? schedule_->start()
: schedule_->block(NodeProperties::GetControlInput(node));
DCHECK(block != NULL);
DCHECK_NOT_NULL(block);
schedule_->AddNode(block, node);
}
}
@ -1243,7 +1246,7 @@ class ScheduleEarlyNodeVisitor {
if (data->minimum_block_ == schedule_->start()) return;

// Propagate schedule early position.
DCHECK(data->minimum_block_ != NULL);
DCHECK_NOT_NULL(data->minimum_block_);
for (auto use : node->uses()) {
PropagateMinimumPositionToNode(data->minimum_block_, use);
}
@ -1521,10 +1524,11 @@ class ScheduleLateNodeVisitor {
BasicBlock* block = nullptr;
for (Edge edge : node->use_edges()) {
BasicBlock* use_block = GetBlockForUse(edge);
block = block == NULL ? use_block : use_block == NULL
? block
: BasicBlock::GetCommonDominator(
block, use_block);
block = block == nullptr
? use_block
: use_block == nullptr
? block
: BasicBlock::GetCommonDominator(block, use_block);
}
return block;
}
@ -1564,7 +1568,7 @@ class ScheduleLateNodeVisitor {
}
}
BasicBlock* result = schedule_->block(use);
if (result == NULL) return NULL;
if (result == nullptr) return nullptr;
TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
use->op()->mnemonic(), result->id().ToInt());
return result;
@ -1685,9 +1689,9 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// Iterate on phase 2: Compute special RPO and dominator tree.
special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
// TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
b->set_dominator_depth(-1);
b->set_dominator(NULL);
b->set_dominator(nullptr);
}
PropagateImmediateDominators(block->rpo_next());

@ -328,7 +328,7 @@ class RepresentationSelector {
queue_.pop();
info->set_queued(false);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
VisitNode(node, info->truncation(), NULL);
VisitNode(node, info->truncation(), nullptr);
TRACE(" ==> output ");
PrintInfo(info->output_type());
TRACE("\n");

@ -979,7 +979,7 @@ static bool HasDominatingDef(Schedule* schedule, Node* node,
use_pos--;
}
block = block->dominator();
if (block == NULL) break;
if (block == nullptr) break;
use_pos = static_cast<int>(block->NodeCount()) - 1;
if (node == block->control_input()) return true;
}
@ -990,7 +990,7 @@ static bool HasDominatingDef(Schedule* schedule, Node* node,
static bool Dominates(Schedule* schedule, Node* dominator, Node* dominatee) {
BasicBlock* dom = schedule->block(dominator);
BasicBlock* sub = schedule->block(dominatee);
while (sub != NULL) {
while (sub != nullptr) {
if (sub == dom) {
return true;
}
@ -1106,7 +1106,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
{
// Verify the dominance relation.
ZoneVector<BitVector*> dominators(zone);
dominators.resize(count, NULL);
dominators.resize(count, nullptr);

// Compute a set of all the nodes that dominate a given node by using
// a forward fixpoint. O(n^2).
@ -1119,7 +1119,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
queue.pop();
BitVector* block_doms = dominators[block->id().ToSize()];
BasicBlock* idom = block->dominator();
if (idom != NULL && !block_doms->Contains(idom->id().ToInt())) {
if (idom != nullptr && !block_doms->Contains(idom->id().ToInt())) {
V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
block->rpo_number(), idom->rpo_number());
}
@ -1127,7 +1127,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
BasicBlock* succ = block->SuccessorAt(s);
BitVector* succ_doms = dominators[succ->id().ToSize()];

if (succ_doms == NULL) {
if (succ_doms == nullptr) {
// First time visiting the node. S.doms = B U B.doms
succ_doms = new (zone) BitVector(static_cast<int>(count), zone);
succ_doms->CopyFrom(*block_doms);
@ -1149,7 +1149,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
b != rpo_order->end(); ++b) {
BasicBlock* block = *b;
BasicBlock* idom = block->dominator();
if (idom == NULL) continue;
if (idom == nullptr) continue;
BitVector* block_doms = dominators[block->id().ToSize()];

for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
@ -1189,7 +1189,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {

// Check inputs to control for this block.
Node* control = block->control_input();
if (control != NULL) {
if (control != nullptr) {
CHECK_EQ(block, schedule->block(control));
CheckInputsDominate(schedule, block, control,
static_cast<int>(block->NodeCount()) - 1);

@ -1921,7 +1921,7 @@ void CodeGenerator::AssembleReturn() {

void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, NULL);
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@ -2042,7 +2042,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,

void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, NULL);
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {

@ -41,12 +41,12 @@ class X64OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
if (base != NULL) {
if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
if (index != NULL) {
if (index != nullptr) {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != NULL) {
if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
@ -57,7 +57,7 @@ class X64OperandGenerator final : public OperandGenerator {
mode = kMRn_modes[scale_exponent];
}
} else {
if (displacement == NULL) {
if (displacement == nullptr) {
mode = kMode_MR;
} else {
inputs[(*input_count)++] = UseImmediate(displacement);
@ -65,10 +65,10 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
} else {
DCHECK(index != NULL);
DCHECK_NOT_NULL(index);
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != NULL) {
if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
@ -91,7 +91,7 @@ class X64OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement64Matcher m(operand, true);
DCHECK(m.matches());
if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@ -232,7 +232,8 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
inputs);
}
}

@ -531,8 +532,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitWord32Shift(this, node, kX64Shl32);
@ -639,7 +640,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
// Try to match the Add to a leal pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
(m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
(m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
m.displacement());
return;
@ -763,8 +764,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitMul(this, node, kX64Imul32);
@ -1504,12 +1505,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || IsDefined(result)) {
if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);

@ -662,7 +662,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
InstructionOperand* source = instr->InputAt(0);
InstructionOperand* destination = instr->Output();
DCHECK(source->IsConstant());
X87OperandConverter g(this, NULL);
X87OperandConverter g(this, nullptr);
Constant src_constant = g.ToConstant(source);

DCHECK_EQ(Constant::kFloat64, src_constant.type());
@ -1765,7 +1765,7 @@ void CodeGenerator::AssembleReturn() {

void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X87OperandConverter g(this, NULL);
X87OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@ -1909,7 +1909,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,

void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
X87OperandConverter g(this, NULL);
X87OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {

@ -54,18 +54,18 @@ class X87OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == NULL)
int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
if (base != NULL) {
if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
base = NULL;
base = nullptr;
}
}
if (base != NULL) {
if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
if (index != NULL) {
if (index != nullptr) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
@ -88,7 +88,7 @@ class X87OperandGenerator final : public OperandGenerator {
}
} else {
DCHECK(scale >= 0 && scale <= 3);
if (index != NULL) {
if (index != nullptr) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
@ -113,7 +113,7 @@ class X87OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@ -259,7 +259,8 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
inputs);
}
}

@ -516,8 +517,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, node, index, m.scale(), base, NULL);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
VisitShift(this, node, kX87Shl);
@ -560,7 +561,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
// Try to match the Add to a lea pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
(m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
(m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
@ -597,8 +598,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, node, index, m.scale(), base, NULL);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
X87OperandGenerator g(this);
@ -707,7 +708,7 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, NULL);
Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, nullptr);
}


@ -721,7 +722,7 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -729,7 +730,7 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -737,7 +738,7 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -745,7 +746,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -753,7 +754,7 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -761,7 +762,7 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -769,7 +770,7 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -777,7 +778,7 @@ void InstructionSelector::VisitFloat64Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -794,7 +795,7 @@ void InstructionSelector::VisitFloat32Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -802,7 +803,7 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -810,7 +811,7 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -818,35 +819,35 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


void InstructionSelector::VisitFloat32Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
}


@ -1121,12 +1122,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || selector->IsDefined(result)) {
if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);

@ -19,16 +19,17 @@ class ZonePool final {
public:
class Scope final {
public:
explicit Scope(ZonePool* zone_pool) : zone_pool_(zone_pool), zone_(NULL) {}
explicit Scope(ZonePool* zone_pool)
: zone_pool_(zone_pool), zone_(nullptr) {}
~Scope() { Destroy(); }

Zone* zone() {
if (zone_ == NULL) zone_ = zone_pool_->NewEmptyZone();
if (zone_ == nullptr) zone_ = zone_pool_->NewEmptyZone();
return zone_;
}
void Destroy() {
if (zone_ != NULL) zone_pool_->ReturnZone(zone_);
zone_ = NULL;
if (zone_ != nullptr) zone_pool_->ReturnZone(zone_);
zone_ = nullptr;
}

private: