Fix 64-bit VS2010 build

R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/420033003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22722 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
danno@chromium.org 2014-07-30 16:21:36 +00:00
parent 1e1a7a593b
commit 252cb8ba6e
22 changed files with 76 additions and 63 deletions
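
The change applies one pattern throughout: standard containers and count accessors return size_t, which is 64 bits wide on x64, while the surrounding V8 interfaces take int. 64-bit VS2010 warns about the implicit narrowing (typically C4267, "conversion from 'size_t' to 'int', possible loss of data"), and with warnings treated as errors that breaks the build, so each such conversion is made explicit with static_cast. A minimal sketch of the before/after shape, using a hypothetical helper that is not part of the V8 sources:

  #include <cstdio>
  #include <vector>

  // Hypothetical stand-in for any interface that takes an int where the
  // caller only has a size_t.
  static void UseIndex(int index) { std::printf("index = %d\n", index); }

  int main() {
    std::vector<int> instructions(3, 0);
    // Before: an implicit size_t -> int conversion, which 64-bit MSVC
    // flags as a possible loss of data and a warnings-as-errors build
    // rejects:
    //   int index = instructions.size();
    // After: the narrowing is spelled out, the pattern used throughout
    // this commit:
    int index = static_cast<int>(instructions.size());
    UseIndex(index - 1);
    return 0;
  }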

View File

@@ -196,7 +196,8 @@ class AstGraphBuilder::Environment
   int parameters_count() const { return parameters_count_; }
   int locals_count() const { return locals_count_; }
   int stack_height() {
-    return values()->size() - parameters_count_ - locals_count_;
+    return static_cast<int>(values()->size()) - parameters_count_ -
+           locals_count_;
   }
   // Operations on parameter or local variables. The parameter indices are
@@ -241,12 +242,12 @@ class AstGraphBuilder::Environment
   // Direct mutations of the operand stack.
   void Poke(int depth, Node* node) {
     ASSERT(depth >= 0 && depth < stack_height());
-    int index = values()->size() - depth - 1;
+    int index = static_cast<int>(values()->size()) - depth - 1;
     values()->at(index) = node;
   }
   Node* Peek(int depth) {
     ASSERT(depth >= 0 && depth < stack_height());
-    int index = values()->size() - depth - 1;
+    int index = static_cast<int>(values()->size()) - depth - 1;
     return values()->at(index);
   }
   void Drop(int depth) {

View File

@@ -174,7 +174,7 @@ void CodeGenerator::AssembleGap(GapInstruction* instr) {
 void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
   CompilationInfo* info = linkage()->info();
   int deopt_count = code()->GetDeoptimizationEntryCount();
-  int patch_count = lazy_deoptimization_entries_.size();
+  int patch_count = static_cast<int>(lazy_deoptimization_entries_.size());
   if (patch_count == 0 && deopt_count == 0) return;
   Handle<DeoptimizationInputData> data = DeoptimizationInputData::New(
       isolate(), deopt_count, patch_count, TENURED);
@@ -196,7 +196,7 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
   }
   Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
-      deoptimization_literals_.size(), TENURED);
+      static_cast<int>(deoptimization_literals_.size()), TENURED);
   {
     AllowDeferredHandleDereference copy_handles;
     for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
@@ -240,8 +240,10 @@ void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
   masm()->bind(&after_call);
   // The continuation and deoptimization are the last two inputs:
-  BasicBlock* cont_block = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* deopt_block = i.InputBlock(instr->InputCount() - 1);
+  BasicBlock* cont_block =
+      i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock* deopt_block =
+      i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
   Label* cont_label = code_->GetLabel(cont_block);
   Label* deopt_label = code_->GetLabel(deopt_block);
@@ -252,7 +254,7 @@ void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
 int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
-  int result = deoptimization_literals_.size();
+  int result = static_cast<int>(deoptimization_literals_.size());
   for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
     if (deoptimization_literals_[i].is_identical_to(literal)) return i;
   }

View File

@@ -217,7 +217,7 @@ S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
   size_t node_size = sizeof(GenericNode);
   size_t inputs_size = input_count * sizeof(Input);
   size_t uses_size = input_count * sizeof(Use);
-  size_t size = node_size + inputs_size + uses_size;
+  int size = static_cast<int>(node_size + inputs_size + uses_size);
   Zone* zone = graph->zone();
   void* buffer = zone->New(size);
   S* result = new (buffer) S(graph, input_count);

View File

@@ -343,7 +343,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
 void InstructionSelector::VisitBlock(BasicBlock* block) {
   ASSERT_EQ(NULL, current_block_);
   current_block_ = block;
-  size_t current_block_end = instructions_.size();
+  int current_block_end = static_cast<int>(instructions_.size());
   // Generate code for the block control "top down", but schedule the code
   // "bottom up".
@@ -366,7 +366,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
   // We're done with the block.
   // TODO(bmeurer): We should not mutate the schedule.
   block->code_end_ = current_block_end;
-  block->code_start_ = instructions_.size();
+  block->code_start_ = static_cast<int>(instructions_.size());
   current_block_ = NULL;
 }

View File

@@ -318,7 +318,7 @@ BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
 void InstructionSequence::StartBlock(BasicBlock* block) {
-  block->code_start_ = instructions_.size();
+  block->code_start_ = static_cast<int>(instructions_.size());
   BlockStartInstruction* block_start =
       BlockStartInstruction::New(zone(), block);
   AddInstruction(block_start, block);
@@ -326,7 +326,7 @@ void InstructionSequence::StartBlock(BasicBlock* block) {
 void InstructionSequence::EndBlock(BasicBlock* block) {
-  int end = instructions_.size();
+  int end = static_cast<int>(instructions_.size());
   ASSERT(block->code_start_ >= 0 && block->code_start_ < end);
   block->code_end_ = end;
 }
@@ -336,7 +336,7 @@ int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
   // TODO(titzer): the order of these gaps is a holdover from Lithium.
   GapInstruction* gap = GapInstruction::New(zone());
   if (instr->IsControl()) instructions_.push_back(gap);
-  int index = instructions_.size();
+  int index = static_cast<int>(instructions_.size());
   instructions_.push_back(instr);
   if (!instr->IsControl()) instructions_.push_back(gap);
   if (instr->NeedsPointerMap()) {
@@ -391,7 +391,7 @@ void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
 int InstructionSequence::AddDeoptimizationEntry(
     const FrameStateDescriptor& descriptor) {
-  int deoptimization_id = deoptimization_entries_.size();
+  int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
   deoptimization_entries_.push_back(descriptor);
   return deoptimization_id;
 }
@@ -403,7 +403,7 @@ FrameStateDescriptor InstructionSequence::GetDeoptimizationEntry(
 int InstructionSequence::GetDeoptimizationEntryCount() {
-  return deoptimization_entries_.size();
+  return static_cast<int>(deoptimization_entries_.size());
 }

View File

@@ -445,8 +445,9 @@ class Instruction : public ZoneObject {
     ASSERT(temp_count == 0 || temps != NULL);
     InstructionOperand* none = NULL;
     USE(none);
-    size_t size = RoundUp(sizeof(Instruction), kPointerSize) +
-                  (output_count + input_count + temp_count - 1) * sizeof(none);
+    int size = static_cast<int>(RoundUp(sizeof(Instruction), kPointerSize) +
+                                (output_count + input_count + temp_count - 1) *
+                                    sizeof(none));
     return new (zone->New(size)) Instruction(
         opcode, output_count, outputs, input_count, inputs, temp_count, temps);
   }
@@ -803,7 +804,7 @@ class InstructionSequence V8_FINAL {
   const Immediates& immediates() const { return immediates_; }
   int AddImmediate(Constant constant) {
-    int index = immediates_.size();
+    int index = static_cast<int>(immediates_.size());
     immediates_.push_back(constant);
     return index;
   }

View File

@@ -64,7 +64,8 @@ class MachineNodeFactory {
   }
   Node* IntPtrConstant(intptr_t value) {
     // TODO(dcarney): mark generated code as unserializable if value != 0.
-    return kPointerSize == 8 ? Int64Constant(value) : Int32Constant(value);
+    return kPointerSize == 8 ? Int64Constant(value)
+                             : Int32Constant(static_cast<int>(value));
   }
   Node* Int32Constant(int32_t value) {
     return NEW_NODE_0(COMMON()->Int32Constant(value));

View File

@@ -185,7 +185,9 @@ struct StaticParameterTraits<PrintableUnique<Object> > {
   static OStream& PrintTo(OStream& os, PrintableUnique<Object> val) {  // NOLINT
     return os << val.string();
   }
-  static int HashCode(PrintableUnique<Object> a) { return a.Hashcode(); }
+  static int HashCode(PrintableUnique<Object> a) {
+    return static_cast<int>(a.Hashcode());
+  }
   static bool Equals(PrintableUnique<Object> a, PrintableUnique<Object> b) {
     return a == b;
   }
@@ -197,7 +199,9 @@ struct StaticParameterTraits<PrintableUnique<Name> > {
   static OStream& PrintTo(OStream& os, PrintableUnique<Name> val) {  // NOLINT
     return os << val.string();
   }
-  static int HashCode(PrintableUnique<Name> a) { return a.Hashcode(); }
+  static int HashCode(PrintableUnique<Name> a) {
+    return static_cast<int>(a.Hashcode());
+  }
   static bool Equals(PrintableUnique<Name> a, PrintableUnique<Name> b) {
     return a == b;
   }

View File

@@ -45,7 +45,7 @@ class PhaseStats {
     base::TimeDelta delta = timer_.Elapsed();
     size_t bytes = info_->zone()->allocation_size() - size_;
     HStatistics* stats = info_->isolate()->GetTStatistics();
-    stats->SaveTiming(name_, delta, bytes);
+    stats->SaveTiming(name_, delta, static_cast<int>(bytes));
     switch (kind_) {
       case CREATE_GRAPH:

View File

@@ -194,7 +194,7 @@ class Schedule : public GenericGraph<BasicBlock> {
   BasicBlock* GetBlockById(int block_id) { return all_blocks_[block_id]; }
   int BasicBlockCount() const { return NodeCount(); }
-  int RpoBlockCount() const { return rpo_order_.size(); }
+  int RpoBlockCount() const { return static_cast<int>(rpo_order_.size()); }
   typedef ContainerPointerWrapper<BasicBlockVector> BasicBlocks;

View File

@@ -870,7 +870,7 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO() {
   }
   // RPO should not have been computed for this schedule yet.
   CHECK_EQ(kBlockUnvisited1, schedule_->entry()->rpo_number_);
-  CHECK_EQ(0, schedule_->rpo_order_.size());
+  CHECK_EQ(0, static_cast<int>(schedule_->rpo_order_.size()));
   // Perform an iterative RPO traversal using an explicit stack,
   // recording backedges that form cycles. O(|B|).

View File

@@ -182,14 +182,14 @@ StructuredMachineAssembler::CopyForLoopHeader(Environment* env) {
 void StructuredMachineAssembler::MergeBackEdgesToLoopHeader(
     Environment* header, EnvironmentVector* environments) {
   // Only merge as many variables are were declared before this loop.
-  size_t n = header->variables_.size();
+  int n = static_cast<int>(header->variables_.size());
   // TODO(dcarney): invert loop order and extend phis once.
   for (EnvironmentVector::iterator i = environments->begin();
        i != environments->end(); ++i) {
     Environment* from = *i;
     if (from->is_dead_) continue;
     AddGoto(from, header);
-    for (size_t i = 0; i < n; ++i) {
+    for (int i = 0; i < n; ++i) {
       Node* phi = header->variables_[i];
       if (phi == NULL) continue;
       phi->set_op(common()->Phi(phi->InputCount() + 1));
@@ -233,7 +233,7 @@ void StructuredMachineAssembler::Merge(EnvironmentVector* environments,
         // TODO(dcarney): record start position at time of split.
         // all variables after this should not be NULL.
         if (val != NULL) {
-          val = VariableAt(live_environments[i], j);
+          val = VariableAt(live_environments[i], static_cast<int>(j));
         }
       }
       if (val == resolved) continue;
@@ -254,7 +254,8 @@ void StructuredMachineAssembler::Merge(EnvironmentVector* environments,
       for (; i < n_envs; i++) {
         scratch[i] = live_environments[i]->variables_[j];
       }
-      resolved = graph()->NewNode(common()->Phi(n_envs), n_envs, scratch);
+      resolved = graph()->NewNode(common()->Phi(static_cast<int>(n_envs)),
+                                  static_cast<int>(n_envs), scratch);
       if (next->block_ != NULL) {
         schedule()->AddNode(next->block_, resolved);
       }
@@ -644,7 +645,7 @@ void StructuredMachineAssembler::LoopBuilder::End() {
   // Do loop header merges.
   smasm_->MergeBackEdgesToLoopHeader(header_environment_,
                                      &pending_header_merges_);
-  int initial_size = header_environment_->variables_.size();
+  int initial_size = static_cast<int>(header_environment_->variables_.size());
   // Do loop exit merges, truncating loop variables away.
   smasm_->Merge(&pending_exit_merges_, initial_size);
   for (EnvironmentVector::iterator i = pending_exit_merges_.begin();

View File

@@ -596,8 +596,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
   // Emit a branch. The true and false targets are always the last two inputs
   // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
   bool fallthru = IsNextInAssemblyOrder(fblock);
   Label* tlabel = code()->GetLabel(tblock);
   Label* flabel = fallthru ? &done : code()->GetLabel(fblock);

View File

@@ -468,7 +468,8 @@ void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) {
       JSFunction* function =
           static_cast<OptimizedFrame*>(it.frame())->function();
       Address* pc_address = it.frame()->pc_address();
-      int pc_offset = *pc_address - code->instruction_start();
+      int pc_offset =
+          static_cast<int>(*pc_address - code->instruction_start());
       int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset);
       if (FLAG_trace_deopt) {

View File

@@ -187,7 +187,7 @@ class PrintableUnique : public Unique<T> {
     this->handle_->ShortPrint(&stream);
     SmartArrayPointer<const char> desc_string = stream.ToCString();
     const char* desc_chars = desc_string.get();
-    int length = strlen(desc_chars);
+    int length = static_cast<int>(strlen(desc_chars));
     char* desc_copy = zone->NewArray<char>(length + 1);
     memcpy(desc_copy, desc_chars, length + 1);
     string_ = desc_copy;

View File

@@ -378,7 +378,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_inverted_materialized) {
 TEST(BranchCombineInt32CmpAllInputShapes_branch_true) {
-  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+  for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
     CmpBranchGen gen(int32cmp_opcodes[i], false, false, 995 + i, -1011 - i);
     Int32BinopInputShapeTester tester(&gen);
     tester.TestAllInputShapes();
@@ -387,7 +387,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_branch_true) {
 TEST(BranchCombineInt32CmpAllInputShapes_branch_false) {
-  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+  for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
     CmpBranchGen gen(int32cmp_opcodes[i], false, true, 795 + i, -2011 - i);
     Int32BinopInputShapeTester tester(&gen);
     tester.TestAllInputShapes();
@@ -396,7 +396,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_branch_false) {
 TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_true) {
-  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+  for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
     CmpBranchGen gen(int32cmp_opcodes[i], true, false, 695 + i, -3011 - i);
     Int32BinopInputShapeTester tester(&gen);
     tester.TestAllInputShapes();
@@ -405,7 +405,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_true) {
 TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_false) {
-  for (size_t i = 0; i < ARRAY_SIZE(int32cmp_opcodes); i++) {
+  for (int i = 0; i < static_cast<int>(ARRAY_SIZE(int32cmp_opcodes)); i++) {
     CmpBranchGen gen(int32cmp_opcodes[i], true, true, 595 + i, -4011 - i);
     Int32BinopInputShapeTester tester(&gen);
     tester.TestAllInputShapes();

View File

@@ -188,7 +188,9 @@ class ReducerRecorder : public Reducer {
     set.insert(node);
     return NoChange();
   }
-  void CheckContains(Node* node) { CHECK_EQ(1, set.count(node)); }
+  void CheckContains(Node* node) {
+    CHECK_EQ(1, static_cast<int>(set.count(node)));
+  }
   NodeSet set;
 };
@@ -628,7 +630,7 @@ class OneTimeReducer : public Reducer {
       : reducer_(reducer),
         nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
   virtual Reduction Reduce(Node* node) {
-    CHECK_EQ(0, nodes_.count(node));
+    CHECK_EQ(0, static_cast<int>(nodes_.count(node)));
     nodes_.insert(node);
     return reducer_->Reduce(node);
   }

View File

@@ -11,8 +11,8 @@ TEST(InstructionSelectionReturnZero) {
   InstructionSelectorTester m(InstructionSelectorTester::kInternalMode);
   m.Return(m.Int32Constant(0));
   m.SelectInstructions();
-  CHECK_EQ(2, m.code.size());
+  CHECK_EQ(2, static_cast<int>(m.code.size()));
   CHECK_EQ(kArchNop, m.code[0]->opcode());
   CHECK_EQ(kArchRet, m.code[1]->opcode());
-  CHECK_EQ(1, m.code[1]->InputCount());
+  CHECK_EQ(1, static_cast<int>(m.code[1]->InputCount()));
 }

View File

@@ -300,9 +300,9 @@ TEST(InstructionOperands) {
   {
     TestInstr* i = TestInstr::New(&zone, 101);
-    CHECK_EQ(0, i->OutputCount());
-    CHECK_EQ(0, i->InputCount());
-    CHECK_EQ(0, i->TempCount());
+    CHECK_EQ(0, static_cast<int>(i->OutputCount()));
+    CHECK_EQ(0, static_cast<int>(i->InputCount()));
+    CHECK_EQ(0, static_cast<int>(i->TempCount()));
   }
   InstructionOperand* outputs[] = {

View File

@@ -49,7 +49,7 @@ TEST(TestUseNodeVisitEmpty) {
   PreNodeVisitor node_visitor;
   graph.VisitNodeUsesFromStart(&node_visitor);
-  CHECK_EQ(1, node_visitor.nodes_.size());
+  CHECK_EQ(1, static_cast<int>(node_visitor.nodes_.size()));
 }
@@ -64,7 +64,7 @@ TEST(TestUseNodePreOrderVisitSimple) {
   PreNodeVisitor node_visitor;
   graph.VisitNodeUsesFromStart(&node_visitor);
-  CHECK_EQ(5, node_visitor.nodes_.size());
+  CHECK_EQ(5, static_cast<int>(node_visitor.nodes_.size()));
   CHECK(graph.start()->id() == node_visitor.nodes_[0]->id());
   CHECK(n2->id() == node_visitor.nodes_[1]->id());
   CHECK(n3->id() == node_visitor.nodes_[2]->id());
@@ -83,7 +83,7 @@ TEST(TestInputNodePreOrderVisitSimple) {
   PreNodeVisitor node_visitor;
   graph.VisitNodeInputsFromEnd(&node_visitor);
-  CHECK_EQ(5, node_visitor.nodes_.size());
+  CHECK_EQ(5, static_cast<int>(node_visitor.nodes_.size()));
   CHECK(n5->id() == node_visitor.nodes_[0]->id());
   CHECK(n4->id() == node_visitor.nodes_[1]->id());
   CHECK(n2->id() == node_visitor.nodes_[2]->id());
@@ -107,7 +107,7 @@ TEST(TestUseNodePostOrderVisitSimple) {
   PostNodeVisitor node_visitor;
   graph.VisitNodeUsesFromStart(&node_visitor);
-  CHECK_EQ(8, node_visitor.nodes_.size());
+  CHECK_EQ(8, static_cast<int>(node_visitor.nodes_.size()));
   CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
   CHECK(n4->id() == node_visitor.nodes_[1]->id());
   CHECK(n5->id() == node_visitor.nodes_[2]->id());
@@ -138,7 +138,7 @@ TEST(TestUseNodePostOrderVisitLong) {
   PostNodeVisitor node_visitor;
   graph.VisitNodeUsesFromStart(&node_visitor);
-  CHECK_EQ(12, node_visitor.nodes_.size());
+  CHECK_EQ(12, static_cast<int>(node_visitor.nodes_.size()));
   CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
   CHECK(n4->id() == node_visitor.nodes_[1]->id());
   CHECK(n8->id() == node_visitor.nodes_[2]->id());
@@ -166,7 +166,7 @@ TEST(TestUseNodePreOrderVisitCycle) {
   PreNodeVisitor node_visitor;
   graph.VisitNodeUsesFromStart(&node_visitor);
-  CHECK_EQ(3, node_visitor.nodes_.size());
+  CHECK_EQ(3, static_cast<int>(node_visitor.nodes_.size()));
   CHECK(n0->id() == node_visitor.nodes_[0]->id());
   CHECK(n1->id() == node_visitor.nodes_[1]->id());
   CHECK(n2->id() == node_visitor.nodes_[2]->id());
@@ -177,7 +177,7 @@ struct ReenterNodeVisitor : NullNodeVisitor {
   GenericGraphVisit::Control Pre(Node* node) {
     printf("[%d] PRE NODE: %d\n", static_cast<int>(nodes_.size()), node->id());
     nodes_.push_back(node->id());
-    int size = nodes_.size();
+    int size = static_cast<int>(nodes_.size());
     switch (node->id()) {
       case 0:
         return size < 6 ? GenericGraphVisit::REENTER : GenericGraphVisit::SKIP;
@@ -228,8 +228,8 @@ TEST(TestUseNodeReenterVisit) {
   ReenterNodeVisitor visitor;
   graph.VisitNodeUsesFromStart(&visitor);
-  CHECK_EQ(22, visitor.nodes_.size());
-  CHECK_EQ(24, visitor.edges_.size());
+  CHECK_EQ(22, static_cast<int>(visitor.nodes_.size()));
+  CHECK_EQ(24, static_cast<int>(visitor.edges_.size()));
   CHECK(n0->id() == visitor.nodes_[0]);
   CHECK(n0->id() == visitor.edges_[0].first);

View File

@@ -2500,7 +2500,7 @@ static void RunLoadImmIndex(MachineRepresentation rep) {
   // initialize the buffer with raw data.
   byte* raw = reinterpret_cast<byte*>(buffer);
   for (size_t i = 0; i < sizeof(buffer); i++) {
-    raw[i] = (i + sizeof(buffer)) ^ 0xAA;
+    raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
   }
   // Test with various large and small offsets.
@@ -2539,7 +2539,7 @@ static void RunLoadStore(MachineRepresentation rep) {
   // initialize the buffer with raw data.
   byte* raw = reinterpret_cast<byte*>(buffer);
   for (size_t i = 0; i < sizeof(buffer); i++) {
-    raw[i] = (i + sizeof(buffer)) ^ 0xAA;
+    raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
   }
   RawMachineAssemblerTester<int32_t> m;
@@ -3703,33 +3703,33 @@ TEST(RunSpillLotsOfThings) {
 TEST(RunSpillConstantsAndParameters) {
-  static const size_t kInputSize = 1000;
+  static const int kInputSize = 1000;
   static const int32_t kBase = 987;
   RawMachineAssemblerTester<int32_t> m(kMachineWord32, kMachineWord32);
   int32_t outputs[kInputSize];
   Node* csts[kInputSize];
   Node* accs[kInputSize];
   Node* acc = m.Int32Constant(0);
-  for (size_t i = 0; i < kInputSize; i++) {
+  for (int i = 0; i < kInputSize; i++) {
     csts[i] = m.Int32Constant(static_cast<int32_t>(kBase + i));
   }
-  for (size_t i = 0; i < kInputSize; i++) {
+  for (int i = 0; i < kInputSize; i++) {
     acc = m.Int32Add(acc, csts[i]);
     accs[i] = acc;
   }
-  for (size_t i = 0; i < kInputSize; i++) {
+  for (int i = 0; i < kInputSize; i++) {
     m.StoreToPointer(&outputs[i], kMachineWord32, accs[i]);
   }
   m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
   FOR_INT32_INPUTS(i) {
     FOR_INT32_INPUTS(j) {
       int32_t expected = *i + *j;
-      for (size_t k = 0; k < kInputSize; k++) {
+      for (int k = 0; k < kInputSize; k++) {
         expected += kBase + k;
       }
       CHECK_EQ(expected, m.Call(*i, *j));
       expected = 0;
-      for (size_t k = 0; k < kInputSize; k++) {
+      for (int k = 0; k < kInputSize; k++) {
         expected += kBase + k;
         CHECK_EQ(expected, outputs[k]);
       }

View File

@@ -1834,7 +1834,7 @@ TEST(BuildScheduleTrivialLazyDeoptCall) {
   CHECK(!cont_block->deferred_);
   // The lazy deopt block contains framestate + bailout (and nothing else).
   CHECK_EQ(deoptimization_node, deopt_block->control_input_);
-  CHECK_EQ(2, deopt_block->nodes_.size());
+  CHECK_EQ(2, static_cast<int>(deopt_block->nodes_.size()));
   CHECK_EQ(lazy_deopt_node, deopt_block->nodes_[0]);
   CHECK_EQ(state_node, deopt_block->nodes_[1]);
 }