diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index b472feb0af..e9a4a5aaf6 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -147,12 +147,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK(op != NULL);
-    DCHECK(!op->IsRegister());
-    DCHECK(!op->IsDoubleRegister());
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), 0);
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -992,6 +989,8 @@ void CodeGenerator::AssemblePrologue() {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
+  } else {
+    frame()->SetPCOnStack(false);
   }
 
   if (info()->is_osr()) {
@@ -1019,6 +1018,7 @@ void CodeGenerator::AssemblePrologue() {
 void CodeGenerator::AssembleReturn() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_slots = frame()->GetSpillSlotCount();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->kind() == CallDescriptor::kCallAddress) {
     if (frame()->GetRegisterSaveAreaSize() > 0) {
       // Remove this frame's spill slots first.
@@ -1041,23 +1041,17 @@ void CodeGenerator::AssembleReturn() {
       }
     }
     __ LeaveFrame(StackFrame::MANUAL);
-    __ Ret();
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ b(&return_label_);
+      return;
     } else {
       __ bind(&return_label_);
       __ LeaveFrame(StackFrame::MANUAL);
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      if (pop_count != 0) {
-        __ Drop(pop_count);
-      }
-      __ Ret();
     }
-  } else {
-    __ Ret();
   }
+  __ Ret(pop_count);
 }
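The arm hunk above sets the pattern every backend in this patch follows: hoist the pop count out of the per-path logic, delete the per-path Ret()s, return early once the canonical return site has been branched to, and fall through to a single final Ret. A minimal standalone sketch of that control flow, using a mock assembler rather than V8's MacroAssembler:

#include <cstdio>

// Mock stand-in for the real assembler; only the shape of the logic matters.
struct MockAssembler {
  void LeaveFrame() { std::puts("  LeaveFrame"); }
  void Branch(const char* label) { std::printf("  b %s\n", label); }
  void Bind(const char* label) { std::printf("%s:\n", label); }
  void Ret(int pop_count) { std::printf("  ret (pop %d slots)\n", pop_count); }
};

// Mirrors the rewritten AssembleReturn(): the early return on the bound-label
// path is what keeps the trailing Ret(pop_count) from being emitted twice.
void AssembleReturn(MockAssembler& masm, bool is_js_call, bool label_bound,
                    int pop_count) {
  if (is_js_call) {
    if (label_bound) {
      masm.Branch("return_label");
      return;  // the canonical return site already exists
    }
    masm.Bind("return_label");
    masm.LeaveFrame();
  }
  masm.Ret(pop_count);
}

int main() {
  MockAssembler masm;
  AssembleReturn(masm, true, false, 2);  // emits the canonical return site
  AssembleReturn(masm, true, true, 2);   // later return: just a branch
}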
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 58adc25098..27d4889c34 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -184,12 +184,9 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
 
   MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
     DCHECK(op != NULL);
-    DCHECK(!op->IsRegister());
-    DCHECK(!op->IsDoubleRegister());
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), 0);
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
     return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                       offset.offset());
   }
 
@@ -1118,6 +1115,8 @@ void CodeGenerator::AssemblePrologue() {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
+  } else {
+    frame()->SetPCOnStack(false);
   }
 
   if (info()->is_osr()) {
@@ -1149,6 +1148,7 @@ void CodeGenerator::AssemblePrologue() {
 void CodeGenerator::AssembleReturn() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_slots = frame()->GetSpillSlotCount();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->kind() == CallDescriptor::kCallAddress) {
     if (frame()->GetRegisterSaveAreaSize() > 0) {
       // Remove this frame's spill slots first.
@@ -1169,24 +1169,19 @@ void CodeGenerator::AssembleReturn() {
     __ Mov(csp, fp);
     __ Pop(fp, lr);
-    __ Ret();
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ B(&return_label_);
+      return;
     } else {
       __ Bind(&return_label_);
       __ Mov(jssp, fp);
       __ Pop(fp, lr);
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      if (pop_count != 0) {
-        __ Drop(pop_count);
-      }
-      __ Ret();
     }
-  } else {
-    __ Ret();
   }
+  __ Drop(pop_count);
+  __ Ret();
 }
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 2850a8c1a1..f901b1e2b3 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -22,7 +22,8 @@ class Frame : public ZoneObject {
         spill_slot_count_(0),
         osr_stack_slot_count_(0),
         allocated_registers_(NULL),
-        allocated_double_registers_(NULL) {}
+        allocated_double_registers_(NULL),
+        pc_on_stack_(true) {}
 
   inline int GetSpillSlotCount() { return spill_slot_count_; }
 
@@ -71,12 +72,17 @@ class Frame : public ZoneObject {
     spill_slot_count_ = static_cast<int>(slot_count);
   }
 
+  void SetPCOnStack(bool val) { pc_on_stack_ = val; }
+
+  int PCOnStackSize() { return pc_on_stack_ ? kRegisterSize : 0; }
+
  private:
  int register_save_area_size_;
  int spill_slot_count_;
  int osr_stack_slot_count_;
  BitVector* allocated_registers_;
  BitVector* allocated_double_registers_;
+  bool pc_on_stack_;
 
  DISALLOW_COPY_AND_ASSIGN(Frame);
 };
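frame.h carries the core idea of the patch: prologues that never push a return address (the new SetPCOnStack(false) calls above) make the frame report a zero-sized PC slot, and Linkage::GetFrameOffset consumes that in the next file. A minimal sketch of the accounting, assuming kRegisterSize equals the pointer size:

#include <cstdio>

const int kRegisterSize = sizeof(void*);  // assumption for this sketch

class Frame {
 public:
  void SetPCOnStack(bool val) { pc_on_stack_ = val; }
  int PCOnStackSize() const { return pc_on_stack_ ? kRegisterSize : 0; }

 private:
  bool pc_on_stack_ = true;  // default matches the patched constructor
};

int main() {
  Frame with_pc, without_pc;
  without_pc.SetPCOnStack(false);  // e.g. a frame entered without a call
  // Parameter offsets computed from fp shrink by one slot in the second case.
  std::printf("%d vs %d\n", with_pc.PCOnStackSize(), without_pc.PCOnStackSize());
}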
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 298627b55b..7b70f21bab 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -46,10 +46,10 @@ class IA32OperandConverter : public InstructionOperandConverter {
       return Operand(ToDoubleRegister(op));
     }
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), extra);
-    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+    return Operand(offset.from_stack_pointer() ? esp : ebp,
+                   offset.offset() + extra);
   }
 
   Operand HighOperand(InstructionOperand* op) {
@@ -1325,31 +1325,26 @@ void CodeGenerator::AssembleReturn() {
       }
     }
     __ pop(ebp);  // Pop caller's frame pointer.
-    __ ret(0);
   } else {
     // No saved registers.
     __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
     __ pop(ebp);       // Pop caller's frame pointer.
-    __ ret(0);
   }
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
+      return;
     } else {
       __ bind(&return_label_);
       __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
       __ pop(ebp);       // Pop caller's frame pointer.
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      if (pop_count == 0) {
-        __ ret(0);
-      } else {
-        __ Ret(pop_count * kPointerSize, ebx);
-      }
     }
-  } else {
-    __ ret(0);
   }
+  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+  // Might need ecx for scratch if pop_size is too big.
+  DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & ecx.bit());
+  __ Ret(static_cast<int>(pop_size), ecx);
 }
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 93b9a66ec2..2c6d2a8ae4 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -70,7 +70,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
 std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
   // TODO(svenpanne) Output properties etc. and be less cryptic.
   return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
-            << "j" << d.JSParameterCount() << "i" << d.InputCount() << "f"
+            << "s" << d.StackParameterCount() << "i" << d.InputCount() << "f"
             << d.FrameStateCount() << "t" << d.SupportsTailCalls();
 }
 
@@ -189,8 +189,7 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
 }
 
 
-FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
-                                    int extra) const {
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame) const {
   if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
       incoming_->kind() == CallDescriptor::kCallAddress) {
     int offset;
@@ -198,12 +197,11 @@ FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
     if (spill_slot >= 0) {
       // Local or spill slot. Skip the frame pointer, function, and
       // context in the fixed part of the frame.
-      offset =
-          -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
+      offset = -(spill_slot + 1) * kPointerSize - register_save_area_size;
     } else {
       // Incoming parameter. Skip the return address.
       offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
-               kPCOnStackSize + extra;
+               frame->PCOnStackSize();
     }
     return FrameOffset::FromFramePointer(offset);
   } else {
@@ -211,7 +209,7 @@ FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
     DCHECK(spill_slot < 0);  // Must be a parameter.
     int register_save_area_size = frame->GetRegisterSaveAreaSize();
     int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
-                 kPCOnStackSize + extra;
+                 frame->PCOnStackSize();
     return FrameOffset::FromStackPointer(offset);
   }
 }
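With {extra} gone, GetFrameOffset is pure slot arithmetic: negative slots are the caller's parameters above the saved fp (plus the PC slot when one was pushed), non-negative slots sit below the register save area. A standalone sketch of the frame-pointer-relative case, with illustrative constants:

#include <cstdio>

const int kPointerSize = sizeof(void*);
const int kFPOnStackSize = kPointerSize;

// spill_slot >= 0: a local/spill slot below the register save area.
// spill_slot < 0: an incoming parameter; skip the saved fp and, when the
// prologue pushed one, the return address (frame_pc_size, per Frame above).
int FrameOffsetFromFP(int spill_slot, int register_save_area_size,
                      int frame_pc_size) {
  if (spill_slot >= 0) {
    return -(spill_slot + 1) * kPointerSize - register_save_area_size;
  }
  return -(spill_slot + 1) * kPointerSize + kFPOnStackSize + frame_pc_size;
}

int main() {
  // First stack parameter (slot -1), with and without a PC on the stack.
  std::printf("%d vs %d\n", FrameOffsetFromFP(-1, 0, kPointerSize),
              FrameOffsetFromFP(-1, 0, 0));
}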
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 363261e088..9f8b2d76f3 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -313,9 +313,8 @@ class Linkage : public ZoneObject {
   // Get the frame offset for a given spill slot. The location depends on the
   // calling convention and the specific frame layout, and may thus be
   // architecture-specific. Negative spill slots indicate arguments on the
-  // caller's frame. The {extra} parameter indicates an additional offset from
-  // the frame offset, e.g. to index into part of a double slot.
-  FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0) const;
+  // caller's frame.
+  FrameOffset GetFrameOffset(int spill_slot, Frame* frame) const;
 
   static int FrameStateInputCount(Runtime::FunctionId function);
 
diff --git a/src/compiler/machine-type.h b/src/compiler/machine-type.h
index f152611a14..0cd2a84010 100644
--- a/src/compiler/machine-type.h
+++ b/src/compiler/machine-type.h
@@ -116,6 +116,11 @@ inline int ElementSizeOf(MachineType machine_type) {
   return 1 << shift;
 }
 
+inline bool IsFloatingPoint(MachineType type) {
+  MachineType rep = RepresentationOf(type);
+  return rep == kRepFloat32 || rep == kRepFloat64;
+}
+
 typedef Signature<MachineType> MachineSignature;
 
 }  // namespace compiler
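A runnable reduction of the new predicate; the enum here is a stand-in for V8's representation bits, kept only so the example compiles on its own:

#include <cstdio>

enum MachineType { kRepWord32, kRepFloat32, kRepFloat64, kRepTagged };

// In V8, RepresentationOf() masks off the representation bits; identity is
// enough for this sketch.
MachineType RepresentationOf(MachineType type) { return type; }

bool IsFloatingPoint(MachineType type) {
  MachineType rep = RepresentationOf(type);
  return rep == kRepFloat32 || rep == kRepFloat64;
}

int main() {
  std::printf("%d %d\n", IsFloatingPoint(kRepFloat64),
              IsFloatingPoint(kRepWord32));
}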
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index e2ae0bd05d..fd6885b755 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -106,12 +106,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK(op != NULL);
-    DCHECK(!op->IsRegister());
-    DCHECK(!op->IsDoubleRegister());
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), 0);
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -1082,13 +1079,14 @@ void CodeGenerator::AssemblePrologue() {
     // kNumCalleeSaved includes the fp register, but the fp register
     // is saved separately in TF.
     DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
-    int register_save_area_size = kNumCalleeSaved * kPointerSize;
+    int register_save_area_size =
+        base::bits::CountPopulation32(saves) * kPointerSize;
 
     const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
     // Save callee-saved FPU registers.
     __ MultiPushFPU(saves_fpu);
     DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
-    register_save_area_size += kNumCalleeSavedFPU * kDoubleSize * kPointerSize;
+    register_save_area_size += kNumCalleeSavedFPU * kDoubleSize;
 
     frame()->SetRegisterSaveAreaSize(register_save_area_size);
   } else if (descriptor->IsJSFunctionCall()) {
@@ -1100,6 +1098,8 @@ void CodeGenerator::AssemblePrologue() {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
+  } else {
+    frame()->SetPCOnStack(false);
   }
 
   if (info()->is_osr()) {
@@ -1127,6 +1127,7 @@ void CodeGenerator::AssemblePrologue() {
 void CodeGenerator::AssembleReturn() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_slots = frame()->GetSpillSlotCount();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->kind() == CallDescriptor::kCallAddress) {
     if (frame()->GetRegisterSaveAreaSize() > 0) {
       // Remove this frame's spill slots first.
@@ -1143,22 +1144,19 @@ void CodeGenerator::AssembleReturn() {
     }
     __ mov(sp, fp);
     __ Pop(ra, fp);
-    __ Ret();
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ Branch(&return_label_);
+      return;
     } else {
       __ bind(&return_label_);
       __ mov(sp, fp);
       __ Pop(ra, fp);
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      if (pop_count != 0) {
-        __ DropAndRet(pop_count);
-      } else {
-        __ Ret();
-      }
     }
+  }
+  if (pop_count != 0) {
+    __ DropAndRet(pop_count);
   } else {
     __ Ret();
   }
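The mips prologue hunk also repairs two sizing bugs in the register save area: the GP term counted fp (which TurboFan saves separately), and the FPU term carried a stray factor of kPointerSize. A standalone check of the corrected arithmetic with illustrative mips32 constants:

#include <cstdint>
#include <cstdio>

const int kPointerSize = 4;  // mips32 word size (illustrative)
const int kDoubleSize = 8;

int PopCount(uint32_t bits) {
  int n = 0;
  for (; bits != 0; bits &= bits - 1) n++;  // clear the lowest set bit
  return n;
}

// New formula: one word per register actually pushed, plus kDoubleSize
// (not kDoubleSize * kPointerSize) per callee-saved FPU register.
int SaveAreaSize(uint32_t gp_saves, int num_fpu_saves) {
  return PopCount(gp_saves) * kPointerSize + num_fpu_saves * kDoubleSize;
}

int main() {
  // 8 saved GP registers and 6 FPU registers: 8*4 + 6*8 = 80 bytes, where
  // the old formula would have reported 9*4 + 6*8*4 = 228.
  std::printf("%d bytes\n", SaveAreaSize(0x00FF0000u, 6));
}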
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 1c4a789229..c95522b4a9 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -106,12 +106,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK(op != NULL);
-    DCHECK(!op->IsRegister());
-    DCHECK(!op->IsDoubleRegister());
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), 0);
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -1158,13 +1155,14 @@ void CodeGenerator::AssemblePrologue() {
     // kNumCalleeSaved includes the fp register, but the fp register
     // is saved separately in TF.
     DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
-    int register_save_area_size = kNumCalleeSaved * kPointerSize;
+    int register_save_area_size =
+        base::bits::CountPopulation32(saves) * kPointerSize;
 
     const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
     // Save callee-saved FPU registers.
     __ MultiPushFPU(saves_fpu);
     DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
-    register_save_area_size += kNumCalleeSavedFPU * kDoubleSize * kPointerSize;
+    register_save_area_size += kNumCalleeSavedFPU * kDoubleSize;
 
     frame()->SetRegisterSaveAreaSize(register_save_area_size);
   } else if (descriptor->IsJSFunctionCall()) {
@@ -1176,6 +1174,8 @@ void CodeGenerator::AssemblePrologue() {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
+  } else {
+    frame()->SetPCOnStack(false);
   }
 
   if (info()->is_osr()) {
@@ -1203,6 +1203,7 @@ void CodeGenerator::AssemblePrologue() {
 void CodeGenerator::AssembleReturn() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_slots = frame()->GetSpillSlotCount();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->kind() == CallDescriptor::kCallAddress) {
     if (frame()->GetRegisterSaveAreaSize() > 0) {
       // Remove this frame's spill slots first.
@@ -1219,22 +1220,19 @@ void CodeGenerator::AssembleReturn() {
     }
     __ mov(sp, fp);
     __ Pop(ra, fp);
-    __ Ret();
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ Branch(&return_label_);
+      return;
     } else {
       __ bind(&return_label_);
       __ mov(sp, fp);
       __ Pop(ra, fp);
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      if (pop_count != 0) {
-        __ DropAndRet(pop_count);
-      } else {
-        __ Ret();
-      }
     }
+  }
+  if (pop_count != 0) {
+    __ DropAndRet(pop_count);
   } else {
     __ Ret();
   }
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 4e9f99fb9c..0cdbd5f6d0 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -99,12 +99,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK(op != NULL);
-    DCHECK(!op->IsRegister());
-    DCHECK(!op->IsDoubleRegister());
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), 0);
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -1360,6 +1357,7 @@ void CodeGenerator::AssemblePrologue() {
 void CodeGenerator::AssembleReturn() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_slots = frame()->GetSpillSlotCount();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->kind() == CallDescriptor::kCallAddress) {
     if (frame()->GetRegisterSaveAreaSize() > 0) {
       // Remove this frame's spill slots first.
@@ -1378,21 +1376,17 @@ void CodeGenerator::AssembleReturn() {
       const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
       __ MultiPop(saves);
     }
-    __ LeaveFrame(StackFrame::MANUAL);
-    __ Ret();
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ b(&return_label_);
+      return;
     } else {
       __ bind(&return_label_);
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
-      __ Ret();
     }
-  } else {
-    __ Ret();
   }
+  __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+  __ Ret();
 }
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 86d644addc..2ce9ef20cc 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -100,6 +100,22 @@ void RawMachineAssembler::Return(Node* value) {
 }
 
 
+Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
+                                 Node** args) {
+  int param_count =
+      static_cast<int>(desc->GetMachineSignature()->parameter_count());
+  Node** buffer = zone()->NewArray<Node*>(param_count + 1);
+  int index = 0;
+  buffer[index++] = function;
+  for (int i = 0; i < param_count; i++) {
+    buffer[index++] = args[i];
+  }
+  Node* call = graph()->NewNode(common()->Call(desc), param_count + 1, buffer);
+  schedule()->AddNode(CurrentBlock(), call);
+  return call;
+}
+
+
 Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
                                              Node* context, Node* frame_state,
                                              CallFunctionFlags flags) {
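The only subtlety in CallN is the input layout: the callee occupies slot 0 and the parameters follow, so the Call node receives param_count + 1 value inputs. The same buffer construction in plain C++, with ints standing in for Node*:

#include <cstdio>
#include <vector>

std::vector<int> BuildCallInputs(int callee, const std::vector<int>& args) {
  std::vector<int> buffer;
  buffer.reserve(args.size() + 1);
  buffer.push_back(callee);  // input 0: the call target
  buffer.insert(buffer.end(), args.begin(), args.end());
  return buffer;
}

int main() {
  std::vector<int> inputs = BuildCallInputs(42, {1, 2, 3});
  std::printf("%zu inputs\n", inputs.size());  // prints "4 inputs"
}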
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 5fc710772e..19ea2b0ad1 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -479,25 +479,28 @@ class RawMachineAssembler {
     return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
   }
 
+  // Call a given call descriptor and the given arguments.
+  Node* CallN(CallDescriptor* desc, Node* function, Node** args);
+
   // Call through CallFunctionStub with lazy deopt and frame-state.
   Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
                           Node* frame_state, CallFunctionFlags flags);
-  // Call to a JS function with zero parameters.
+  // Call to a JS function with zero arguments.
   Node* CallJS0(Node* function, Node* receiver, Node* context,
                 Node* frame_state);
-  // Call to a runtime function with zero parameters.
+  // Call to a runtime function with zero arguments.
   Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
                      Node* frame_state);
-  // Call to a C function with zero parameters.
+  // Call to a C function with zero arguments.
   Node* CallCFunction0(MachineType return_type, Node* function);
   // Call to a C function with one parameter.
   Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
                        Node* function, Node* arg0);
-  // Call to a C function with two parameters.
+  // Call to a C function with two arguments.
   Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, Node* function, Node* arg0,
                        Node* arg1);
-  // Call to a C function with eight parameters.
+  // Call to a C function with eight arguments.
   Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, MachineType arg2_type,
                        MachineType arg3_type, MachineType arg4_type,
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index f23d24433f..0b775d29e1 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -155,7 +155,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
     int vreg = unallocated->virtual_register();
     constraint->virtual_register_ = vreg;
     if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
-      constraint->type_ = kFixedSlot;
+      constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
       constraint->value_ = unallocated->fixed_slot_index();
     } else {
       switch (unallocated->extended_policy()) {
@@ -185,11 +185,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
           }
           break;
         case UnallocatedOperand::MUST_HAVE_SLOT:
-          if (sequence()->IsFloat(vreg)) {
-            constraint->type_ = kDoubleSlot;
-          } else {
-            constraint->type_ = kSlot;
-          }
+          constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
           break;
         case UnallocatedOperand::SAME_AS_FIRST_INPUT:
           constraint->type_ = kSameAsFirst;
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 666268aca0..4a222a43be 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -1242,8 +1242,11 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
     machine_type = data()->MachineTypeFor(virtual_register);
   }
   if (operand->HasFixedSlotPolicy()) {
-    allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, machine_type,
-                                 operand->fixed_slot_index());
+    AllocatedOperand::AllocatedKind kind =
+        IsFloatingPoint(machine_type) ? AllocatedOperand::DOUBLE_STACK_SLOT
+                                      : AllocatedOperand::STACK_SLOT;
+    allocated =
+        AllocatedOperand(kind, machine_type, operand->fixed_slot_index());
   } else if (operand->HasFixedRegisterPolicy()) {
     allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
                                  operand->fixed_register_index());
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 38c7f2a31c..413fe635ac 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -48,10 +48,10 @@ class X64OperandConverter : public InstructionOperandConverter {
 
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), extra);
-    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+    return Operand(offset.from_stack_pointer() ? rsp : rbp,
+                   offset.offset() + extra);
   }
 
   static size_t NextOffset(size_t* offset) {
@@ -1219,6 +1219,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       } else {
         if (instr->InputAt(0)->IsRegister()) {
           __ pushq(i.InputRegister(0));
+        } else if (instr->InputAt(0)->IsDoubleRegister()) {
+          // TODO(titzer): use another machine instruction?
+          __ subq(rsp, Immediate(kDoubleSize));
+          __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
         } else {
           __ pushq(i.InputOperand(0));
         }
@@ -1554,31 +1558,26 @@ void CodeGenerator::AssembleReturn() {
       }
     }
     __ popq(rbp);  // Pop caller's frame pointer.
-    __ ret(0);
   } else {
     // No saved registers.
     __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
     __ popq(rbp);       // Pop caller's frame pointer.
-    __ ret(0);
   }
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
+      return;
     } else {
       __ bind(&return_label_);
       __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
       __ popq(rbp);       // Pop caller's frame pointer.
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      if (pop_count == 0) {
-        __ Ret();
-      } else {
-        __ Ret(pop_count * kPointerSize, rbx);
-      }
     }
-  } else {
-    __ Ret();
   }
+  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+  // Might need rcx for scratch if pop_size is too big.
+  DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & rcx.bit());
+  __ Ret(static_cast<int>(pop_size), rcx);
 }
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index c274610226..bbde90e5c4 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -38,15 +38,12 @@ class X87OperandConverter : public InstructionOperandConverter {
     if (op->IsRegister()) {
       DCHECK(extra == 0);
       return Operand(ToRegister(op));
-    } else if (op->IsDoubleRegister()) {
-      DCHECK(extra == 0);
-      UNIMPLEMENTED();
     }
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    // The linkage computes where all spill slots are located.
-    FrameOffset offset = linkage()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index(), frame(), extra);
-    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+    FrameOffset offset =
+        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+    return Operand(offset.from_stack_pointer() ? esp : ebp,
+                   offset.offset() + extra);
   }
 
   Operand HighOperand(InstructionOperand* op) {
@@ -1568,6 +1565,7 @@ void CodeGenerator::AssemblePrologue() {
 void CodeGenerator::AssembleReturn() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_slots = frame()->GetSpillSlotCount();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->kind() == CallDescriptor::kCallAddress) {
     const RegList saves = descriptor->CalleeSavedRegisters();
     if (frame()->GetRegisterSaveAreaSize() > 0) {
@@ -1583,30 +1581,26 @@ void CodeGenerator::AssembleReturn() {
       }
     }
     __ pop(ebp);  // Pop caller's frame pointer.
-    __ ret(0);
   } else {
     // No saved registers.
     __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
     __ pop(ebp);       // Pop caller's frame pointer.
-    __ ret(0);
   }
   } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
+      return;
     } else {
       __ bind(&return_label_);
       __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
       __ pop(ebp);       // Pop caller's frame pointer.
-      int pop_count = static_cast<int>(descriptor->StackParameterCount());
-      if (pop_count == 0) {
-        __ ret(0);
-      } else {
-        __ Ret(pop_count * kPointerSize, ebx);
-      }
     }
-  } else {
+  }
+  if (pop_count == 0) {
     __ ret(0);
+  } else {
+    __ Ret(pop_count * kPointerSize, ebx);
   }
 }
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index ea933996ea..81ddb3ec2f 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -76,6 +76,7 @@
         'compiler/test-run-jsexceptions.cc',
        'compiler/test-run-jsops.cc',
        'compiler/test-run-machops.cc',
+        'compiler/test-run-native-calls.cc',
        'compiler/test-run-properties.cc',
        'compiler/test-run-stackcheck.cc',
        'compiler/test-run-stubs.cc',
diff --git a/test/cctest/compiler/c-signature.h b/test/cctest/compiler/c-signature.h
index 83b3328a3b..8eaf6325f2 100644
--- a/test/cctest/compiler/c-signature.h
+++ b/test/cctest/compiler/c-signature.h
@@ -69,6 +69,10 @@ class CSignature : public MachineSignature {
     }
   }
 
+  static CSignature* FromMachine(Zone* zone, MachineSignature* msig) {
+    return reinterpret_cast<CSignature*>(msig);
+  }
+
   static CSignature* New(Zone* zone, MachineType ret,
                          MachineType p1 = kMachNone, MachineType p2 = kMachNone,
                          MachineType p3 = kMachNone, MachineType p4 = kMachNone,
diff --git a/test/cctest/compiler/call-tester.h b/test/cctest/compiler/call-tester.h
index dc265ea5fa..31a6d0f93b 100644
--- a/test/cctest/compiler/call-tester.h
+++ b/test/cctest/compiler/call-tester.h
@@ -304,6 +304,21 @@ class CallHelper {
   Isolate* isolate_;
 };
 
+// A call helper that calls the given code object assuming C calling
+// convention.
+template <typename T>
+class CodeRunner : public CallHelper<T> {
+ public:
+  CodeRunner(Isolate* isolate, Handle<Code> code, CSignature* csig)
+      : CallHelper<T>(isolate, csig), code_(code) {}
+  virtual ~CodeRunner() {}
+
+  virtual byte* Generate() { return code_->entry(); }
+
+ private:
+  Handle<Code> code_;
+};
+
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/test/cctest/compiler/graph-builder-tester.h b/test/cctest/compiler/graph-builder-tester.h
index 7432957c0e..41c1e384be 100644
--- a/test/cctest/compiler/graph-builder-tester.h
+++ b/test/cctest/compiler/graph-builder-tester.h
@@ -28,6 +28,12 @@ class GraphAndBuilders {
         main_machine_(zone),
         main_simplified_(zone) {}
 
+  Graph* graph() const { return main_graph_; }
+  Zone* zone() const { return graph()->zone(); }
+  CommonOperatorBuilder* common() { return &main_common_; }
+  MachineOperatorBuilder* machine() { return &main_machine_; }
+  SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
+
  protected:
   // Prefixed with main_ to avoid naming conflicts.
   Graph* main_graph_;
@@ -39,7 +45,7 @@ class GraphAndBuilders {
 
 template <typename ReturnType>
 class GraphBuilderTester : public HandleAndZoneScope,
-                           private GraphAndBuilders,
+                           public GraphAndBuilders,
                            public CallHelper<ReturnType> {
  public:
   explicit GraphBuilderTester(MachineType p0 = kMachNone,
@@ -67,12 +73,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
   }
 
   Isolate* isolate() { return main_isolate(); }
-  Graph* graph() const { return main_graph_; }
-  Zone* zone() const { return graph()->zone(); }
   Factory* factory() { return isolate()->factory(); }
-  CommonOperatorBuilder* common() { return &main_common_; }
-  MachineOperatorBuilder* machine() { return &main_machine_; }
-  SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
 
   // Initialize graph and builder.
   void Begin(int num_parameters) {
diff --git a/test/cctest/compiler/test-run-native-calls.cc b/test/cctest/compiler/test-run-native-calls.cc
new file mode 100644
index 0000000000..942d699ae8
--- /dev/null
+++ b/test/cctest/compiler/test-run-native-calls.cc
@@ -0,0 +1,674 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-type.h"
+#include "src/compiler/raw-machine-assembler.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+#if !V8_TARGET_ARCH_ARM64
+// TODO(titzer): fix native stack parameters on arm64
+#define NATIVE_STACK_PARAMS_OK
+#endif
+
+namespace {
+// Picks a representative set of registers from the allocatable set.
+// If there are fewer than 100 possible pairs, do them all, otherwise try
+// to select a representative set.
+class RegisterPairs {
+ public:
+  RegisterPairs()
+      : max_(std::min(100, Register::kMaxNumAllocatableRegisters *
+                               Register::kMaxNumAllocatableRegisters)),
+        counter_(0) {}
+
+  bool More() { return counter_ < max_; }
+
+  void Next(int* r0, int* r1, bool same_is_ok) {
+    do {
+      // Find the next pair.
+      if (exhaustive()) {
+        *r0 = counter_ % Register::kMaxNumAllocatableRegisters;
+        *r1 = counter_ / Register::kMaxNumAllocatableRegisters;
+      } else {
+        // Try each register at least once for both r0 and r1.
+        int index = counter_ / 2;
+        if (counter_ & 1) {
+          *r0 = index % Register::kMaxNumAllocatableRegisters;
+          *r1 = index / Register::kMaxNumAllocatableRegisters;
+        } else {
+          *r1 = index % Register::kMaxNumAllocatableRegisters;
+          *r0 = index / Register::kMaxNumAllocatableRegisters;
+        }
+      }
+      counter_++;
+      if (same_is_ok) break;
+      if (*r0 == *r1) {
+        if (counter_ >= max_) {
+          // For the last hurrah, reg#0 with reg#n-1.
+          *r0 = 0;
+          *r1 = Register::kMaxNumAllocatableRegisters - 1;
+          break;
+        }
+      } else {
+        break;
+      }
+    } while (true);
+
+    DCHECK(*r0 >= 0 && *r0 < Register::kMaxNumAllocatableRegisters);
+    DCHECK(*r1 >= 0 && *r1 < Register::kMaxNumAllocatableRegisters);
+    printf("pair = %d, %d\n", *r0, *r1);
+  }
+
+ private:
+  int max_;
+  int counter_;
+  bool exhaustive() {
+    return max_ == (Register::kMaxNumAllocatableRegisters *
+                    Register::kMaxNumAllocatableRegisters);
+  }
+};
+
+
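Below the 100-combination cap RegisterPairs is exhaustive: the counter decodes into (r0, r1) as two digits base kMaxNumAllocatableRegisters. A standalone sketch of that case, with kMaxRegs as an illustrative stand-in:

#include <algorithm>
#include <cstdio>

const int kMaxRegs = 8;  // stand-in for Register::kMaxNumAllocatableRegisters

int main() {
  const int max = std::min(100, kMaxRegs * kMaxRegs);  // 64: exhaustive case
  for (int counter = 0; counter < max; counter++) {
    int r0 = counter % kMaxRegs;
    int r1 = counter / kMaxRegs;
    if (r0 != r1) std::printf("pair = %d, %d\n", r0, r1);  // skip equal pairs
  }
}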
+// Helper for allocating either a GP or FP reg, or the next stack slot.
+struct Allocator {
+  Allocator(int* gp, int gpc, int* fp, int fpc)
+      : gp_count(gpc),
+        gp_offset(0),
+        gp_regs(gp),
+        fp_count(fpc),
+        fp_offset(0),
+        fp_regs(fp),
+        stack_offset(0) {}
+
+  int gp_count;
+  int gp_offset;
+  int* gp_regs;
+
+  int fp_count;
+  int fp_offset;
+  int* fp_regs;
+
+  int stack_offset;
+
+  LinkageLocation Next(MachineType type) {
+    if (IsFloatingPoint(type)) {
+      // Allocate a floating point register/stack location.
+      if (fp_offset < fp_count) {
+        return LinkageLocation::ForRegister(fp_regs[fp_offset++]);
+      } else {
+        int offset = -1 - stack_offset;
+        stack_offset += Words(type);
+        return LinkageLocation::ForCallerFrameSlot(offset);
+      }
+    } else {
+      // Allocate a general purpose register/stack location.
+      if (gp_offset < gp_count) {
+        return LinkageLocation::ForRegister(gp_regs[gp_offset++]);
+      } else {
+        int offset = -1 - stack_offset;
+        stack_offset += Words(type);
+        return LinkageLocation::ForCallerFrameSlot(offset);
+      }
+    }
+  }
+  bool IsFloatingPoint(MachineType type) {
+    return RepresentationOf(type) == kRepFloat32 ||
+           RepresentationOf(type) == kRepFloat64;
+  }
+  int Words(MachineType type) {
+    int size = ElementSizeOf(type);
+    return size <= kPointerSize ? 1 : size / kPointerSize;
+  }
+  void Reset() {
+    fp_offset = 0;
+    gp_offset = 0;
+    stack_offset = 0;
+  }
+};
+
+
+class RegisterConfig {
+ public:
+  RegisterConfig(Allocator& p, Allocator& r) : params(p), rets(r) {}
+
+  CallDescriptor* Create(Zone* zone, MachineSignature* msig) {
+    rets.Reset();
+    params.Reset();
+
+    LocationSignature::Builder locations(zone, msig->return_count(),
+                                         msig->parameter_count());
+    // Add return location(s).
+    const int return_count = static_cast<int>(locations.return_count_);
+    for (int i = 0; i < return_count; i++) {
+      locations.AddReturn(rets.Next(msig->GetReturn(i)));
+    }
+
+    // Add register and/or stack parameter(s).
+    const int parameter_count = static_cast<int>(msig->parameter_count());
+    for (int i = 0; i < parameter_count; i++) {
+      locations.AddParam(params.Next(msig->GetParam(i)));
+    }
+
+    const RegList kCalleeSaveRegisters = 0;
+    const RegList kCalleeSaveFPRegisters = 0;
+
+    MachineType target_type = compiler::kMachAnyTagged;
+    LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+    int stack_param_count = params.stack_offset;
+    return new (zone) CallDescriptor(       // --
+        CallDescriptor::kCallCodeObject,    // kind
+        target_type,                        // target MachineType
+        target_loc,                         // target location
+        msig,                               // machine_sig
+        locations.Build(),                  // location_sig
+        stack_param_count,                  // stack_parameter_count
+        compiler::Operator::kNoProperties,  // properties
+        kCalleeSaveRegisters,               // callee-saved registers
+        kCalleeSaveFPRegisters,             // callee-saved fp regs
+        CallDescriptor::kNoFlags,           // flags
+        "c-call");
+  }
+
+ private:
+  Allocator& params;
+  Allocator& rets;
+};
+
+const int kMaxParamCount = 64;
+
+MachineType kIntTypes[kMaxParamCount + 1] = {
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+    kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32};
+
+
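The Allocator implements a plain C-like policy: parameters consume registers until they run out, then caller-frame slots at -1, -2, ..., and RegisterConfig later turns the high-water mark into stack_parameter_count. A reduced sketch of that policy for the GP-only case:

#include <cstdio>

struct MiniAllocator {
  int next_reg = 0;
  int reg_count;
  int stack_offset = 0;
  explicit MiniAllocator(int regs) : reg_count(regs) {}
  // Returns a register index when >= 0, a caller frame slot when < 0.
  int Next() {
    if (next_reg < reg_count) return next_reg++;
    return -1 - stack_offset++;
  }
};

int main() {
  MiniAllocator params(2);  // pretend only two parameter registers exist
  for (int i = 0; i < 4; i++) {
    std::printf("param %d -> %d\n", i, params.Next());  // 0, 1, -1, -2
  }
}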
+// For making uniform int32 signatures shorter.
+class Int32Signature : public MachineSignature {
+ public:
+  explicit Int32Signature(int param_count)
+      : MachineSignature(1, param_count, kIntTypes) {
+    CHECK(param_count <= kMaxParamCount);
+  }
+};
+
+
+Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
+                          Schedule* schedule = nullptr) {
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  Handle<Code> code =
+      Pipeline::GenerateCodeForTesting(isolate, desc, graph, schedule);
+  CHECK(!code.is_null());
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_opt_code) {
+    OFStream os(stdout);
+    code->Disassemble(name, os);
+  }
+#endif
+  return code;
+}
+
+
+Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
+  Zone zone;
+  MachineSignature* msig =
+      const_cast<MachineSignature*>(desc->GetMachineSignature());
+  int param_count = static_cast<int>(msig->parameter_count());
+  GraphAndBuilders caller(&zone);
+  {
+    GraphAndBuilders& b = caller;
+    Node* start = b.graph()->NewNode(b.common()->Start(param_count + 3));
+    b.graph()->SetStart(start);
+    Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
+    Node* target = b.graph()->NewNode(b.common()->HeapConstant(unique));
+
+    // Add arguments to the call.
+    Node** args = zone.NewArray<Node*>(param_count + 3);
+    int index = 0;
+    args[index++] = target;
+    for (int i = 0; i < param_count; i++) {
+      args[index] = b.graph()->NewNode(b.common()->Parameter(i), start);
+      index++;
+    }
+    args[index++] = start;  // effect.
+    args[index++] = start;  // control.
+
+    // Build the call and return nodes.
+    Node* call =
+        b.graph()->NewNode(b.common()->Call(desc), param_count + 3, args);
+    Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start);
+    b.graph()->SetEnd(ret);
+  }
+
+  CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig);
+
+  return CompileGraph("wrapper", cdesc, caller.graph());
+}
+
+
+}  // namespace
+
+
+static void TestInt32Sub(CallDescriptor* desc) {
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  HandleScope scope(isolate);
+  Zone zone;
+  GraphAndBuilders inner(&zone);
+  {
+    // Build the subtraction function.
+    GraphAndBuilders& b = inner;
+    Node* start = b.graph()->NewNode(b.common()->Start(5));
+    b.graph()->SetStart(start);
+    Node* p0 = b.graph()->NewNode(b.common()->Parameter(0), start);
+    Node* p1 = b.graph()->NewNode(b.common()->Parameter(1), start);
+    Node* add = b.graph()->NewNode(b.machine()->Int32Sub(), p0, p1);
+    Node* ret = b.graph()->NewNode(b.common()->Return(), add, start, start);
+    b.graph()->SetEnd(ret);
+  }
+
+  Handle<Code> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
+  Handle<Code> wrapper = WrapWithCFunction(inner_code, desc);
+  MachineSignature* msig =
+      const_cast<MachineSignature*>(desc->GetMachineSignature());
+  CodeRunner<int32_t> runnable(isolate, wrapper,
+                               CSignature::FromMachine(&zone, msig));
+
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) -
+                                              static_cast<uint32_t>(*j));
+      int32_t result = runnable.Call(*i, *j);
+      CHECK_EQ(expected, result);
+    }
+  }
+}
+
+
+#ifdef NATIVE_STACK_PARAMS_OK
+static void CopyTwentyInt32(CallDescriptor* desc) {
+  const int kNumParams = 20;
+  int32_t input[kNumParams];
+  int32_t output[kNumParams];
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  HandleScope scope(isolate);
+  Handle<Code> inner = Handle<Code>::null();
+  {
+    // Writes all parameters into the output buffer.
+    Zone zone;
+    Graph graph(&zone);
+    RawMachineAssembler raw(isolate, &graph, desc);
+    Node* base = raw.PointerConstant(output);
+    for (int i = 0; i < kNumParams; i++) {
+      Node* offset = raw.Int32Constant(i * sizeof(int32_t));
+      raw.Store(kMachInt32, base, offset, raw.Parameter(i));
+    }
+    raw.Return(raw.Int32Constant(42));
+    inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.Export());
+  }
+
+  CSignature0<int32_t> csig;
+  Handle<Code> wrapper = Handle<Code>::null();
+  {
+    // Loads parameters from the input buffer and calls the above code.
+    Zone zone;
+    Graph graph(&zone);
+    CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+    RawMachineAssembler raw(isolate, &graph, cdesc);
+    Node* base = raw.PointerConstant(input);
+    Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
+    Node* target = raw.HeapConstant(unique);
+    Node** args = zone.NewArray<Node*>(kNumParams);
+    for (int i = 0; i < kNumParams; i++) {
+      Node* offset = raw.Int32Constant(i * sizeof(int32_t));
+      args[i] = raw.Load(kMachInt32, base, offset);
+    }
+
+    Node* call = raw.CallN(desc, target, args);
+    raw.Return(call);
+    wrapper =
+        CompileGraph("CopyTwentyInt32-wrapper", cdesc, &graph, raw.Export());
+  }
+
+  CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
+
+  // Run the code, checking it correctly implements the memcpy.
+  for (int i = 0; i < 5; i++) {
+    uint32_t base = 1111111111u * i;
+    for (int j = 0; j < kNumParams; j++) {
+      input[j] = static_cast<int32_t>(base + 13 * j);
+    }
+
+    memset(output, 0, sizeof(output));
+    CHECK_EQ(42, runnable.Call());
+
+    for (int j = 0; j < kNumParams; j++) {
+      CHECK_EQ(input[j], output[j]);
+    }
+  }
+}
+#endif  // NATIVE_STACK_PARAMS_OK
+
+
+static void Test_RunInt32SubWithRet(int retreg) {
+  Int32Signature sig(2);
+  Zone zone;
+  RegisterPairs pairs;
+  while (pairs.More()) {
+    int parray[2];
+    int rarray[] = {retreg};
+    pairs.Next(&parray[0], &parray[1], false);
+    Allocator params(parray, 2, nullptr, 0);
+    Allocator rets(rarray, 1, nullptr, 0);
+    RegisterConfig config(params, rets);
+    CallDescriptor* desc = config.Create(&zone, &sig);
+    TestInt32Sub(desc);
+  }
+}
+
+
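Each TEST_INT32_SUB_WITH_RET(x) below expands to a cctest that guards on the target's allocatable register count, so return-register indices that do not exist on a given platform are skipped rather than failing. The guard pattern in isolation, with an illustrative register count:

#include <cstdio>

const int kMaxNumAllocatableRegisters = 8;  // illustrative

void Test_RunInt32SubWithRet(int retreg) {
  std::printf("testing return register %d\n", retreg);
}

int main() {
  for (int x = 0; x < 20; x++) {
    // Same guard the macro emits: only exercise registers that exist.
    if (kMaxNumAllocatableRegisters > x) Test_RunInt32SubWithRet(x);
  }
}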
+// Separate tests for parallelization.
+#define TEST_INT32_SUB_WITH_RET(x)                                             \
+  TEST(Run_Int32Sub_all_allocatable_pairs_##x) {                               \
+    if (Register::kMaxNumAllocatableRegisters > x) Test_RunInt32SubWithRet(x); \
+  }
+
+
+TEST_INT32_SUB_WITH_RET(0)
+TEST_INT32_SUB_WITH_RET(1)
+TEST_INT32_SUB_WITH_RET(2)
+TEST_INT32_SUB_WITH_RET(3)
+TEST_INT32_SUB_WITH_RET(4)
+TEST_INT32_SUB_WITH_RET(5)
+TEST_INT32_SUB_WITH_RET(6)
+TEST_INT32_SUB_WITH_RET(7)
+TEST_INT32_SUB_WITH_RET(8)
+TEST_INT32_SUB_WITH_RET(9)
+TEST_INT32_SUB_WITH_RET(10)
+TEST_INT32_SUB_WITH_RET(11)
+TEST_INT32_SUB_WITH_RET(12)
+TEST_INT32_SUB_WITH_RET(13)
+TEST_INT32_SUB_WITH_RET(14)
+TEST_INT32_SUB_WITH_RET(15)
+TEST_INT32_SUB_WITH_RET(16)
+TEST_INT32_SUB_WITH_RET(17)
+TEST_INT32_SUB_WITH_RET(18)
+TEST_INT32_SUB_WITH_RET(19)
+
+
+TEST(Run_Int32Sub_all_allocatable_single) {
+#ifdef NATIVE_STACK_PARAMS_OK
+  Int32Signature sig(2);
+  RegisterPairs pairs;
+  while (pairs.More()) {
+    Zone zone;
+    int parray[1];
+    int rarray[1];
+    pairs.Next(&rarray[0], &parray[0], true);
+    Allocator params(parray, 1, nullptr, 0);
+    Allocator rets(rarray, 1, nullptr, 0);
+    RegisterConfig config(params, rets);
+    CallDescriptor* desc = config.Create(&zone, &sig);
+    TestInt32Sub(desc);
+  }
+#endif  // NATIVE_STACK_PARAMS_OK
+}
+
+
+TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
+#ifdef NATIVE_STACK_PARAMS_OK
+  Int32Signature sig(20);
+  RegisterPairs pairs;
+  while (pairs.More()) {
+    Zone zone;
+    int parray[2];
+    int rarray[] = {0};
+    pairs.Next(&parray[0], &parray[1], false);
+    Allocator params(parray, 2, nullptr, 0);
+    Allocator rets(rarray, 1, nullptr, 0);
+    RegisterConfig config(params, rets);
+    CallDescriptor* desc = config.Create(&zone, &sig);
+    CopyTwentyInt32(desc);
+  }
+#endif  // NATIVE_STACK_PARAMS_OK
+}
+
+
+#ifdef NATIVE_STACK_PARAMS_OK
+int ParamCount(CallDescriptor* desc) {
+  return static_cast<int>(desc->GetMachineSignature()->parameter_count());
+}
+
+
+// Super mega helper routine to generate a computation with a given
+// call descriptor, compile the code, wrap the code, and pass various inputs,
+// comparing against a reference implementation.
+static void Run_Int32_Computation(
+    CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&),
+    int32_t (*compute)(CallDescriptor*, int32_t* inputs), int seed = 1) {
+  int num_params = ParamCount(desc);
+  CHECK_LE(num_params, kMaxParamCount);
+  int32_t input[kMaxParamCount];
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  HandleScope scope(isolate);
+  Handle<Code> inner = Handle<Code>::null();
+  {
+    // Build the graph for the computation.
+    Zone zone;
+    Graph graph(&zone);
+    RawMachineAssembler raw(isolate, &graph, desc);
+    build(desc, raw);
+    inner = CompileGraph("Compute", desc, &graph, raw.Export());
+  }
+
+  CSignature0<int32_t> csig;
+
+  if (false) {
+    // constant mode.
+    Handle<Code> wrapper = Handle<Code>::null();
+    {
+      // Wrap the above code with a callable function that passes constants.
+      Zone zone;
+      Graph graph(&zone);
+      CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+      RawMachineAssembler raw(isolate, &graph, cdesc);
+      Unique<HeapObject> unique =
+          Unique<HeapObject>::CreateUninitialized(inner);
+      Node* target = raw.HeapConstant(unique);
+      Node** args = zone.NewArray<Node*>(kMaxParamCount);
+      for (int i = 0; i < num_params; i++) {
+        args[i] = raw.Int32Constant(0x100 + i);
+      }
+
+      Node* call = raw.CallN(desc, target, args);
+      raw.Return(call);
+      wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export());
+    }
+
+    CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
+
+    // Run the code, checking it against the reference.
+    for (int j = 0; j < kMaxParamCount; j++) {
+      input[j] = 0x100 + j;
+    }
+    int32_t expected = compute(desc, input);
+    int32_t result = runnable.Call();
+
+    CHECK_EQ(expected, result);
+  }
+
+  {
+    // buffer mode.
+    Handle<Code> wrapper = Handle<Code>::null();
+    {
+      // Wrap the above code with a callable function that loads from {input}.
+      Zone zone;
+      Graph graph(&zone);
+      CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+      RawMachineAssembler raw(isolate, &graph, cdesc);
+      Node* base = raw.PointerConstant(input);
+      Unique<HeapObject> unique =
+          Unique<HeapObject>::CreateUninitialized(inner);
+      Node* target = raw.HeapConstant(unique);
+      Node** args = zone.NewArray<Node*>(kMaxParamCount);
+      for (int i = 0; i < num_params; i++) {
+        Node* offset = raw.Int32Constant(i * sizeof(int32_t));
+        args[i] = raw.Load(kMachInt32, base, offset);
+      }
+
+      Node* call = raw.CallN(desc, target, args);
+      raw.Return(call);
+      wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export());
+    }
+
+    CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
+
+    // Run the code, checking it against the reference.
+    for (int i = 0; i < 5; i++) {
+      // Use pseudo-random values for each run, but the first run gets args
+      // 100, 101, 102, 103... for easier diagnosis.
+      uint32_t base = 1111111111u * i * seed;
+      for (int j = 0; j < kMaxParamCount; j++) {
+        input[j] = static_cast<int32_t>(100 + base + j);
+      }
+      int32_t expected = compute(desc, input);
+      int32_t result = runnable.Call();
+
+      CHECK_EQ(expected, result);
+    }
+  }
+}
+
+
+static uint32_t coeff[] = {1,  2,  3,  5,  7,  11, 13,  17,  19,  23,  29,
+                           31, 37, 41, 43, 47, 53, 59,  61,  67,  71,  73,
+                           79, 83, 89, 97, 101, 103, 107, 109, 113};
+
+
+static void Build_Int32_WeightedSum(CallDescriptor* desc,
+                                    RawMachineAssembler& raw) {
+  Node* result = raw.Int32Constant(0);
+  for (int i = 0; i < ParamCount(desc); i++) {
+    Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i]));
+    result = raw.Int32Add(result, term);
+  }
+  raw.Return(result);
+}
+
+
+static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
+  uint32_t result = 0;
+  for (int i = 0; i < ParamCount(desc); i++) {
+    result += static_cast<uint32_t>(input[i]) * coeff[i];
+  }
+  return static_cast<int32_t>(result);
+}
+
+
+static void Test_Int32_WeightedSum_of_size(int count) {
+  Int32Signature sig(count);
+  for (int p0 = 0; p0 < Register::kMaxNumAllocatableRegisters; p0++) {
+    Zone zone;
+
+    int parray[] = {p0};
+    int rarray[] = {0};
+    Allocator params(parray, 1, nullptr, 0);
+    Allocator rets(rarray, 1, nullptr, 0);
+    RegisterConfig config(params, rets);
+    CallDescriptor* desc = config.Create(&zone, &sig);
+    Run_Int32_Computation(desc, Build_Int32_WeightedSum,
+                          Compute_Int32_WeightedSum, 257 + count);
+  }
+}
+
+
+// Separate tests for parallelization.
+#define TEST_INT32_WEIGHTEDSUM(x) \
+  TEST(Run_Int32_WeightedSum_##x) { Test_Int32_WeightedSum_of_size(x); }
+
+
+TEST_INT32_WEIGHTEDSUM(1)
+TEST_INT32_WEIGHTEDSUM(2)
+TEST_INT32_WEIGHTEDSUM(3)
+TEST_INT32_WEIGHTEDSUM(4)
+TEST_INT32_WEIGHTEDSUM(5)
+TEST_INT32_WEIGHTEDSUM(7)
+TEST_INT32_WEIGHTEDSUM(9)
+TEST_INT32_WEIGHTEDSUM(11)
+TEST_INT32_WEIGHTEDSUM(17)
+TEST_INT32_WEIGHTEDSUM(19)
+
+
+template <int which>
+static void Build_Int32_Select(CallDescriptor* desc, RawMachineAssembler& raw) {
+  raw.Return(raw.Parameter(which));
+}
+
+
+template <int which>
+static int32_t Compute_Int32_Select(CallDescriptor* desc, int32_t* inputs) {
+  return inputs[which];
+}
+
+
+template <int which>
+void Test_Int32_Select() {
+  int parray[] = {0};
+  int rarray[] = {0};
+  Allocator params(parray, 1, nullptr, 0);
+  Allocator rets(rarray, 1, nullptr, 0);
+  RegisterConfig config(params, rets);
+
+  Zone zone;
+
+  for (int i = which + 1; i <= 64; i++) {
+    Int32Signature sig(i);
+    CallDescriptor* desc = config.Create(&zone, &sig);
+    Run_Int32_Computation(desc, Build_Int32_Select<which>,
+                          Compute_Int32_Select<which>, 1025 + which);
+  }
+}
+
+
+// Separate tests for parallelization.
+#define TEST_INT32_SELECT(x) \
+  TEST(Run_Int32_Select_##x) { Test_Int32_Select<x>(); }
+
+
+TEST_INT32_SELECT(0)
+TEST_INT32_SELECT(1)
+TEST_INT32_SELECT(2)
+TEST_INT32_SELECT(3)
+TEST_INT32_SELECT(4)
+TEST_INT32_SELECT(5)
+TEST_INT32_SELECT(6)
+TEST_INT32_SELECT(11)
+TEST_INT32_SELECT(15)
+TEST_INT32_SELECT(19)
+TEST_INT32_SELECT(45)
+TEST_INT32_SELECT(62)
+TEST_INT32_SELECT(63)
+#endif  // NATIVE_STACK_PARAMS_OK
+
+
+TEST(TheLastTestForLint) {
+  // Yes, thank you.
+}