[turbofan] Add CallCFunctionWithCallerSavedRegisters node in turbofan compiler

The new node behaves the same as its counterpart, CallCFunction, except
that it additionally saves and restores the caller-saved registers before
and after the function call.
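For illustration, a minimal sketch of how a CodeStubAssembler-based stub
uses the new wrapper (adapted from the RecordWrite changes in this CL; the
surrounding builtin boilerplate is elided, so treat it as a usage sketch
rather than the exact code):

  // Inside a TF_BUILTIN body: call a C function while preserving the
  // caller-saved registers (except kReturnRegister0, which carries the
  // result back).
  Node* isolate = Parameter(Descriptor::kIsolate);
  Node* function = ExternalConstant(
      ExternalReference::store_buffer_overflow_function(this->isolate()));
  CallCFunction1WithCallerSavedRegisters(
      MachineType::Int32(), MachineType::Pointer(), function, isolate);

The call lowers to a kCallWithCallerSavedRegisters node; during instruction
selection it is bracketed by kArchSaveCallerRegisters and
kArchRestoreCallerRegisters, which the per-architecture code generators
expand to PushCallerSaved / PopCallerSaved.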

Bug: chromium:749486
Change-Id: I0a1dfb2e4e55f7720541a00e6d16fd20220f39ed
Reviewed-on: https://chromium-review.googlesource.com/620709
Commit-Queue: Albert Mingkun Yang <albertnetymk@google.com>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47479}
Authored by Albert Mingkun Yang on 2017-08-21 17:23:17 +02:00, committed by Commit Bot
parent f546ec1a5d
commit e58ae53132
44 changed files with 581 additions and 64 deletions

View File

@ -30,6 +30,47 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
stm(db_w, sp, (kCallerSaved | lr.bit()) & ~exclusions);
if (fp_mode == kSaveFPRegs) {
SaveFPRegs(sp, lr);
}
}
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
RestoreFPRegs(sp, lr);
}
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~exclusions);
}
void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
@ -2229,14 +2270,13 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
void MacroAssembler::CheckFor32DRegs(Register scratch) {
void TurboAssembler::CheckFor32DRegs(Register scratch) {
mov(scratch, Operand(ExternalReference::cpu_features()));
ldr(scratch, MemOperand(scratch));
tst(scratch, Operand(1u << VFP32DREGS));
}
void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vstm(db_w, location, d16, d31, ne);
@ -2244,8 +2284,7 @@ void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
vstm(db_w, location, d0, d15);
}
void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vldm(ia_w, location, d0, d15);

View File

@ -388,6 +388,24 @@ class TurboAssembler : public Assembler {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
// Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
// values to location, saving [d0..(d15|d31)].
void SaveFPRegs(Register location, Register scratch);
// Does a runtime check for 16/32 FP registers. Either way, pops 32 double
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@ -846,18 +864,6 @@ class MacroAssembler : public TurboAssembler {
DwVfpRegister double_input,
LowDwVfpRegister double_scratch);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
// Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
// values to location, saving [d0..(d15|d31)].
void SaveFPRegs(Register location, Register scratch);
// Does a runtime check for 16/32 FP registers. Either way, pops 32 double
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls

View File

@ -37,6 +37,29 @@ CPURegList TurboAssembler::DefaultFPTmpList() {
return CPURegList(fp_scratch1, fp_scratch2);
}
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
auto list = kCallerSaved;
list.Remove(exclusion1, exclusion2, exclusion3);
PushCPURegList(list);
if (fp_mode == kSaveFPRegs) {
PushCPURegList(kCallerSavedV);
}
}
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
PopCPURegList(kCallerSavedV);
}
auto list = kCallerSaved;
list.Remove(exclusion1, exclusion2, exclusion3);
PopCPURegList(list);
}
void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
const Operand& operand, LogicalOp op) {
UseScratchRegisterScope temps(this);

View File

@ -796,6 +796,13 @@ class TurboAssembler : public Assembler {
void PushCPURegList(CPURegList registers);
void PopCPURegList(CPURegList registers);
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Move an immediate into register dst, and return an Operand object for use
// with a subsequent instruction that accepts a shift. The value moved into
// dst is not necessarily equal to imm; it may have had a shifting operation

View File

@ -283,8 +283,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
{
Node* function = ExternalConstant(
ExternalReference::store_buffer_overflow_function(this->isolate()));
CallCFunction1(MachineType::Int32(), MachineType::Pointer(), function,
isolate);
CallCFunction1WithCallerSavedRegisters(
MachineType::Int32(), MachineType::Pointer(), function, isolate);
Goto(next);
}
}
@ -295,7 +295,6 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Node* slot = Parameter(Descriptor::kSlot);
Node* isolate = Parameter(Descriptor::kIsolate);
Node* value;
Node* function;
Label test_old_to_new_flags(this);
Label store_buffer_exit(this), store_buffer_incremental_wb(this);
@ -356,12 +355,12 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&call_incremental_wb);
{
function = ExternalConstant(
Node* function = ExternalConstant(
ExternalReference::incremental_marking_record_write_function(
this->isolate()));
CallCFunction3(MachineType::Int32(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::Pointer(), function,
object, slot, isolate);
CallCFunction3WithCallerSavedRegisters(
MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
MachineType::Pointer(), function, object, slot, isolate);
Goto(&exit);
}
}

View File

@ -744,6 +744,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToFP();
break;
}
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -765,6 +765,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// via the stack pointer.
UNREACHABLE();
break;
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -737,6 +737,14 @@ Node* CodeAssembler::CallCFunction1(MachineType return_type,
arg0);
}
Node* CodeAssembler::CallCFunction1WithCallerSavedRegisters(
MachineType return_type, MachineType arg0_type, Node* function,
Node* arg0) {
DCHECK(return_type.LessThanOrEqualPointerSize());
return raw_assembler()->CallCFunction1WithCallerSavedRegisters(
return_type, arg0_type, function, arg0);
}
Node* CodeAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type, Node* function,
@ -754,6 +762,14 @@ Node* CodeAssembler::CallCFunction3(MachineType return_type,
arg2_type, function, arg0, arg1, arg2);
}
Node* CodeAssembler::CallCFunction3WithCallerSavedRegisters(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2) {
DCHECK(return_type.LessThanOrEqualPointerSize());
return raw_assembler()->CallCFunction3WithCallerSavedRegisters(
return_type, arg0_type, arg1_type, arg2_type, function, arg0, arg1, arg2);
}
Node* CodeAssembler::CallCFunction6(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,

View File

@ -755,6 +755,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
Node* function, Node* arg0);
// Call to a C function with one argument, while saving/restoring caller
// registers except the register used for return value.
Node* CallCFunction1WithCallerSavedRegisters(MachineType return_type,
MachineType arg0_type,
Node* function, Node* arg0);
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
@ -765,6 +771,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
MachineType arg1_type, MachineType arg2_type,
Node* function, Node* arg0, Node* arg1, Node* arg2);
// Call to a C function with three arguments, while saving/restoring caller
// registers except the register used for return value.
Node* CallCFunction3WithCallerSavedRegisters(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type,
MachineType arg2_type,
Node* function, Node* arg0,
Node* arg1, Node* arg2);
// Call to a C function with six arguments.
Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,

View File

@ -95,6 +95,7 @@ SelectParameters const& SelectParametersOf(const Operator* const op) {
CallDescriptor const* CallDescriptorOf(const Operator* const op) {
DCHECK(op->opcode() == IrOpcode::kCall ||
op->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
op->opcode() == IrOpcode::kTailCall);
return OpParameter<CallDescriptor const*>(op);
}
@ -1317,6 +1318,27 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
return new (zone()) CallOperator(descriptor);
}
const Operator* CommonOperatorBuilder::CallWithCallerSavedRegisters(
const CallDescriptor* descriptor) {
class CallOperator final : public Operator1<const CallDescriptor*> {
public:
explicit CallOperator(const CallDescriptor* descriptor)
: Operator1<const CallDescriptor*>(
IrOpcode::kCallWithCallerSavedRegisters, descriptor->properties(),
"CallWithCallerSavedRegisters",
descriptor->InputCount() + descriptor->FrameStateCount(),
Operator::ZeroIfPure(descriptor->properties()),
Operator::ZeroIfEliminatable(descriptor->properties()),
descriptor->ReturnCount(),
Operator::ZeroIfPure(descriptor->properties()),
Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
return new (zone()) CallOperator(descriptor);
}
const Operator* CommonOperatorBuilder::TailCall(
const CallDescriptor* descriptor) {

View File

@ -392,6 +392,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info);
const Operator* Call(const CallDescriptor* descriptor);
const Operator* CallWithCallerSavedRegisters(
const CallDescriptor* descriptor);
const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
const Operator* Retain();

View File

@ -1122,7 +1122,9 @@ bool EscapeStatusAnalysis::IsEffectBranchPoint(Node* node) {
namespace {
bool HasFrameStateInput(const Operator* op) {
if (op->opcode() == IrOpcode::kCall || op->opcode() == IrOpcode::kTailCall) {
if (op->opcode() == IrOpcode::kCall ||
op->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
op->opcode() == IrOpcode::kTailCall) {
const CallDescriptor* d = CallDescriptorOf(op);
return d->NeedsFrameState();
} else {

View File

@ -968,6 +968,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
break;
}
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -47,6 +47,8 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchCallJSFunction) \
V(ArchTailCallAddress) \
V(ArchPrepareCallCFunction) \
V(ArchSaveCallerRegisters) \
V(ArchRestoreCallerRegisters) \
V(ArchCallCFunction) \
V(ArchPrepareTailCall) \
V(ArchJmp) \

View File

@ -290,6 +290,8 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
return kIsLoadOperation;
case kArchPrepareCallCFunction:
case kArchSaveCallerRegisters:
case kArchRestoreCallerRegisters:
case kArchPrepareTailCall:
case kArchCallCFunction:
case kArchCallCodeObject:

View File

@ -861,6 +861,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool InstructionSelector::IsSourcePositionUsed(Node* node) {
return (source_position_mode_ == kAllSourcePositions ||
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kTrapIf ||
node->opcode() == IrOpcode::kTrapUnless);
}
@ -881,6 +882,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kProtectedLoad ||
node->opcode() == IrOpcode::kProtectedStore) {
++effect_level;
@ -1095,6 +1097,8 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kCall:
return VisitCall(node);
case IrOpcode::kCallWithCallerSavedRegisters:
return VisitCallWithCallerSavedRegisters(node);
case IrOpcode::kDeoptimizeIf:
return VisitDeoptimizeIf(node);
case IrOpcode::kDeoptimizeUnless:
@ -2588,6 +2592,14 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
call_instr->MarkAsCall();
}
void InstructionSelector::VisitCallWithCallerSavedRegisters(
Node* node, BasicBlock* handler) {
OperandGenerator g(this);
Emit(kArchSaveCallerRegisters, g.NoOutput());
VisitCall(node, handler);
Emit(kArchRestoreCallerRegisters, g.NoOutput());
}
void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);

View File

@ -333,6 +333,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitProjection(Node* node);
void VisitConstant(Node* node);
void VisitCall(Node* call, BasicBlock* handler = nullptr);
void VisitCallWithCallerSavedRegisters(Node* call,
BasicBlock* handler = nullptr);
void VisitDeoptimizeIf(Node* node);
void VisitDeoptimizeUnless(Node* node);
void VisitTrapIf(Node* node, Runtime::FunctionId func_id);

View File

@ -60,7 +60,8 @@ class MachineRepresentationInferrer {
CHECK_LE(index, static_cast<size_t>(1));
return index == 0 ? MachineRepresentation::kWord64
: MachineRepresentation::kBit;
case IrOpcode::kCall: {
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters: {
CallDescriptor const* desc = CallDescriptorOf(input->op());
return desc->GetReturnType(index).representation();
}
@ -133,7 +134,8 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] =
PhiRepresentationOf(node->op());
break;
case IrOpcode::kCall: {
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters: {
CallDescriptor const* desc = CallDescriptorOf(node->op());
if (desc->ReturnCount() > 0) {
representation_vector_[node->id()] =
@ -310,6 +312,7 @@ class MachineRepresentationChecker {
}
switch (node->opcode()) {
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters:
case IrOpcode::kTailCall:
CheckCallInputs(node);
break;

View File

@ -78,6 +78,8 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitAllocate(node, state);
case IrOpcode::kCall:
return VisitCall(node, state);
case IrOpcode::kCallWithCallerSavedRegisters:
return VisitCallWithCallerSavedRegisters(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
@ -325,6 +327,16 @@ void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
// If the call can allocate, we start with a fresh state.
if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
state = empty_state();
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());

View File

@ -108,6 +108,7 @@ class MemoryOptimizer final {
void VisitNode(Node*, AllocationState const*);
void VisitAllocate(Node*, AllocationState const*);
void VisitCall(Node*, AllocationState const*);
void VisitCallWithCallerSavedRegisters(Node*, AllocationState const*);
void VisitLoadElement(Node*, AllocationState const*);
void VisitLoadField(Node*, AllocationState const*);
void VisitStoreElement(Node*, AllocationState const*);

View File

@ -701,6 +701,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToFP();
break;
}
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -742,6 +742,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToFP();
break;
}
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -48,31 +48,32 @@
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
#define INNER_OP_LIST(V) \
V(Select) \
V(Phi) \
V(EffectPhi) \
V(InductionVariablePhi) \
V(Checkpoint) \
V(BeginRegion) \
V(FinishRegion) \
V(FrameState) \
V(StateValues) \
V(TypedStateValues) \
V(ArgumentsElementsState) \
V(ArgumentsLengthState) \
V(ObjectState) \
V(ObjectId) \
V(TypedObjectState) \
V(Call) \
V(Parameter) \
V(OsrValue) \
V(LoopExit) \
V(LoopExitValue) \
V(LoopExitEffect) \
V(Projection) \
V(Retain) \
V(MapGuard) \
#define INNER_OP_LIST(V) \
V(Select) \
V(Phi) \
V(EffectPhi) \
V(InductionVariablePhi) \
V(Checkpoint) \
V(BeginRegion) \
V(FinishRegion) \
V(FrameState) \
V(StateValues) \
V(TypedStateValues) \
V(ArgumentsElementsState) \
V(ArgumentsLengthState) \
V(ObjectState) \
V(ObjectId) \
V(TypedObjectState) \
V(Call) \
V(CallWithCallerSavedRegisters) \
V(Parameter) \
V(OsrValue) \
V(LoopExit) \
V(LoopExitValue) \
V(LoopExitEffect) \
V(Projection) \
V(Retain) \
V(MapGuard) \
V(TypeGuard)
#define COMMON_OP_LIST(V) \

View File

@ -1012,6 +1012,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToFP();
break;
}
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -226,6 +226,18 @@ Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
return AddNode(common()->Call(descriptor), function, arg0);
}
Node* RawMachineAssembler::CallCFunction1WithCallerSavedRegisters(
MachineType return_type, MachineType arg0_type, Node* function,
Node* arg0) {
MachineSignature::Builder builder(zone(), 1, 1);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
arg0);
}
Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
@ -257,6 +269,21 @@ Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
}
Node* RawMachineAssembler::CallCFunction3WithCallerSavedRegisters(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2) {
MachineSignature::Builder builder(zone(), 1, 3);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
arg0, arg1, arg2);
}
Node* RawMachineAssembler::CallCFunction6(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,

View File

@ -764,6 +764,11 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Call to a C function with one parameter.
Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
Node* function, Node* arg0);
// Call to a C function with one argument, while saving/restoring caller
// registers.
Node* CallCFunction1WithCallerSavedRegisters(MachineType return_type,
MachineType arg0_type,
Node* function, Node* arg0);
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
@ -772,6 +777,14 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
Node* function, Node* arg0, Node* arg1, Node* arg2);
// Call to a C function with three arguments, while saving/restoring caller
// registers.
Node* CallCFunction3WithCallerSavedRegisters(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type,
MachineType arg2_type,
Node* function, Node* arg0,
Node* arg1, Node* arg2);
// Call to a C function with six arguments.
Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,

View File

@ -1217,6 +1217,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToFP();
break;
}
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -230,6 +230,7 @@ bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
JS_OP_LIST(BUILD_BLOCK_JS_CASE)
#undef BUILD_BLOCK_JS_CASE
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters:
return true;
default:
return false;

View File

@ -340,6 +340,7 @@ class CFGBuilder : public ZoneObject {
// JS opcodes are just like calls => fall through.
#undef BUILD_BLOCK_JS_CASE
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters:
if (NodeProperties::IsExceptionalCall(node)) {
BuildBlocksForSuccessors(node);
}
@ -384,6 +385,7 @@ class CFGBuilder : public ZoneObject {
// JS opcodes are just like calls => fall through.
#undef CONNECT_BLOCK_JS_CASE
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters:
if (NodeProperties::IsExceptionalCall(node)) {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectCall(node);

View File

@ -850,6 +850,9 @@ Type* Typer::Visitor::TypeTypedObjectState(Node* node) {
Type* Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeCallWithCallerSavedRegisters(Node* node) {
UNREACHABLE();
}
Type* Typer::Visitor::TypeProjection(Node* node) {
Type* const type = Operand(node, 0);

View File

@ -518,6 +518,7 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(jarin): what are the constraints on these?
break;
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters:
// TODO(rossberg): what are the constraints on these?
break;
case IrOpcode::kTailCall:

View File

@ -925,6 +925,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ PrepareCallCFunction(num_parameters);
break;
}
case kArchSaveCallerRegisters: {
// kReturnRegister0 should have been saved before entering the stub.
__ PushCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchRestoreCallerRegisters: {
// Don't overwrite the returned value.
__ PopCallerSaved(kSaveFPRegs, kReturnRegister0);
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;

View File

@ -99,7 +99,7 @@ static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
// We don't allow a GC during a store buffer overflow so there is no need to
@ -121,7 +121,7 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
}
}
void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
// Restore all XMM registers except XMM0.

View File

@ -292,6 +292,16 @@ class TurboAssembler : public Assembler {
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Immediate(smi)); }
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
private:
bool has_frame_ = false;
Isolate* const isolate_;
@ -351,16 +361,6 @@ class MacroAssembler : public TurboAssembler {
j(not_equal, if_not_equal, if_not_equal_distance);
}
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };

View File

@ -52,6 +52,8 @@ enum class MachineSemantic {
kAny
};
V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep);
class MachineType {
public:
MachineType()
@ -214,6 +216,10 @@ class MachineType {
}
}
bool LessThanOrEqualPointerSize() {
return ElementSizeLog2Of(this->representation()) <= kPointerSizeLog2;
}
private:
MachineRepresentation representation_;
MachineSemantic semantic_;

View File

@ -24,6 +24,47 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPush(kJSCallerSaved & ~exclusions);
if (fp_mode == kSaveFPRegs) {
MultiPushFPU(kCallerSavedFPU);
}
}
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
MultiPopFPU(kCallerSavedFPU);
}
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPop(kJSCallerSaved & ~exclusions);
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}

View File

@ -357,6 +357,13 @@ class TurboAssembler : public Assembler {
void MultiPush(RegList regs);
void MultiPushFPU(RegList regs);
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));

View File

@ -24,6 +24,47 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPush(kJSCallerSaved & ~exclusions);
if (fp_mode == kSaveFPRegs) {
MultiPushFPU(kCallerSavedFPU);
}
}
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
MultiPopFPU(kCallerSavedFPU);
}
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPop(kJSCallerSaved & ~exclusions);
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}

View File

@ -399,6 +399,13 @@ class TurboAssembler : public Assembler {
void MultiPush(RegList regs);
void MultiPushFPU(RegList regs);
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void pop(Register dst) {
Ld(dst, MemOperand(sp, 0));
Daddu(sp, sp, Operand(kPointerSize));

View File

@ -26,6 +26,46 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPush(kJSCallerSaved & ~exclusions);
if (fp_mode == kSaveFPRegs) {
MultiPushDoubles(kCallerSavedDoubles);
}
}
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
MultiPopDoubles(kCallerSavedDoubles);
}
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPop(kJSCallerSaved & ~exclusions);
}
void TurboAssembler::Jump(Register target) {
mtctr(target);
bctr();

View File

@ -315,6 +315,13 @@ class TurboAssembler : public Assembler {
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);

View File

@ -26,6 +26,47 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPush(kJSCallerSaved & ~exclusions);
if (fp_mode == kSaveFPRegs) {
MultiPushDoubles(kCallerSavedDoubles);
}
}
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
MultiPopDoubles(kCallerSavedDoubles);
}
RegList exclusions = 0;
if (!exclusion1.is(no_reg)) {
exclusions |= exclusion1.bit();
if (!exclusion2.is(no_reg)) {
exclusions |= exclusion2.bit();
if (!exclusion3.is(no_reg)) {
exclusions |= exclusion3.bit();
}
}
}
MultiPop(kJSCallerSaved & ~exclusions);
}
void TurboAssembler::Jump(Register target) { b(target); }
void MacroAssembler::JumpToJSEntry(Register target) {

View File

@ -228,6 +228,13 @@ class TurboAssembler : public Assembler {
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);

View File

@ -30,6 +30,8 @@ int sum9(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7,
return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
static int sum3(int a0, int a1, int a2) { return a0 + a1 + a2; }
} // namespace
TEST(CallCFunction9) {
@ -61,6 +63,31 @@ TEST(CallCFunction9) {
CHECK_EQ(36, Handle<Smi>::cast(result)->value());
}
TEST(CallCFunction3WithCallerSavedRegisters) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 0;
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
{
Node* const fun_constant = m.ExternalConstant(
ExternalReference(reinterpret_cast<Address>(sum3), isolate));
MachineType type_intptr = MachineType::IntPtr();
Node* const result = m.CallCFunction3WithCallerSavedRegisters(
type_intptr, type_intptr, type_intptr, type_intptr, fun_constant,
m.IntPtrConstant(0), m.IntPtrConstant(1), m.IntPtrConstant(2));
m.Return(m.SmiTag(result));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Object> result = ft.Call().ToHandleChecked();
CHECK_EQ(3, Handle<Smi>::cast(result)->value());
}
namespace {
void CheckToUint32Result(uint32_t expected, Handle<Object> result) {