[nojit] Add a kCallBuiltinPointer call kind

Currently, Torque's builtin pointers store a Code target underneath,
and callsites generate a kArchCallCodeObject opcode. When embedded
builtins are enabled, such a call thus first enters the on-heap
trampoline, which then jumps to the target off-heap builtin code.

Going through the trampoline will no longer be possible in jitless
mode, since on-heap code must not be executable.

As a step towards changing the way builtin pointers are called
(function pointers will hold the builtin index as a Smi, and callsites
will look up the off-heap target address and jump there), this CL adds
a dedicated opcode for builtin pointer calls to the compiler pipeline.

The calling mechanism itself is unchanged; changes there will happen
in a follow-up.
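
For orientation, a minimal standalone model (not V8 source; kCodeHeaderSize
is a stand-in value, while kHeapObjectTag really is 1 in V8) of the entry
address computation that each per-architecture CallBuiltinPointer below
performs on its tagged Code pointer:

#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;    // V8 tags heap pointers with 1.
constexpr uintptr_t kCodeHeaderSize = 64;  // Stand-in for Code::kHeaderSize.

// A tagged Code pointer is the object's address plus kHeapObjectTag, and
// the instruction stream starts kCodeHeaderSize bytes into the object.
// Every backend folds both adjustments into one add before the call.
uintptr_t EntryAddress(uintptr_t tagged_code_pointer) {
  return tagged_code_pointer + kCodeHeaderSize - kHeapObjectTag;
}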

Drive-by: rename 'FunctionPointer' in torque/ to 'BuiltinPointer'.

Bug: v8:7777
Change-Id: Ic999a1cd7c3172425dd4a1513ae2f50c774faddb
Reviewed-on: https://chromium-review.googlesource.com/c/1378175
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58281}
Author: Jakob Gruber <jgruber@chromium.org>, 2018-12-17 14:20:16 +01:00
Committed by: Commit Bot
Parent: e7cdc378de
Commit: f323a5f415
38 changed files with 253 additions and 97 deletions


@@ -291,6 +291,12 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, mode);
}
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
add(builtin_pointer, builtin_pointer,
Operand(Code::kHeaderSize - kHeapObjectTag));
Call(builtin_pointer);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.


@@ -327,6 +327,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool check_constant_pool = true);
void Call(Label* target);
void CallBuiltinPointer(Register builtin_pointer) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.


@@ -2029,6 +2029,11 @@ void TurboAssembler::Call(ExternalReference target) {
Call(temp);
}
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
Add(builtin_pointer, builtin_pointer, Code::kHeaderSize - kHeapObjectTag);
Call(builtin_pointer);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.


@@ -879,6 +879,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
void CallBuiltinPointer(Register builtin_pointer) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.


@@ -743,6 +743,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
Register builtin_pointer = i.InputRegister(0);
__ CallBuiltinPointer(builtin_pointer);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));


@@ -624,6 +624,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
Register builtin_pointer = i.InputRegister(0);
__ CallBuiltinPointer(builtin_pointer);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));


@@ -669,6 +669,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
Register builtin_pointer = i.InputRegister(0);
__ CallBuiltinPointer(builtin_pointer);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallWasmFunction: {
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));


@@ -52,6 +52,7 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchPrepareTailCall) \
V(ArchCallWasmFunction) \
V(ArchTailCallWasm) \
V(ArchCallBuiltinPointer) \
V(ArchJmp) \
V(ArchBinarySearchSwitch) \
V(ArchLookupSwitch) \


@@ -293,6 +293,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchCallCodeObject:
case kArchCallJSFunction:
case kArchCallWasmFunction:
case kArchCallBuiltinPointer:
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallAddress:


@@ -919,6 +919,15 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallBuiltinPointer:
// The common case for builtin pointers is to have the target in a
// register. If we have a constant, we use a register anyway to simplify
// related code.
buffer->instruction_args.push_back(
call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
@@ -2578,6 +2587,9 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
case CallDescriptor::kCallWasmImportWrapper:
opcode = kArchCallWasmFunction | MiscField::encode(flags);
break;
case CallDescriptor::kCallBuiltinPointer:
opcode = kArchCallBuiltinPointer | MiscField::encode(flags);
break;
}
// Emit the call instruction.


@@ -660,6 +660,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
Register builtin_pointer = i.InputRegister(0);
__ CallBuiltinPointer(builtin_pointer);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallWasmFunction: {
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));


@@ -632,6 +632,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
Register builtin_pointer = i.InputRegister(0);
__ CallBuiltinPointer(builtin_pointer);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallWasmFunction: {
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,


@@ -706,6 +706,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
Register builtin_pointer = i.InputRegister(0);
__ CallBuiltinPointer(builtin_pointer);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallWasmFunction: {
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));


@@ -1183,9 +1183,13 @@ void CodeAssembler::TailCallRuntimeWithCEntryImpl(
raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
Node* CodeAssembler::CallStubN(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
size_t result_size, int input_count,
Node* const* inputs) {
DCHECK(call_mode == StubCallMode::kCallOnHeapBuiltin ||
call_mode == StubCallMode::kCallBuiltinPointer);
// implicit nodes are target and optionally context.
int implicit_nodes = descriptor.HasContextParameter() ? 2 : 1;
DCHECK_LE(implicit_nodes, input_count);
@@ -1198,7 +1202,7 @@ Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
Operator::kNoProperties);
Operator::kNoProperties, call_mode);
CallPrologue();
Node* return_value =
@@ -1228,10 +1232,14 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
Node* CodeAssembler::CallStubRImpl(const CallInterfaceDescriptor& descriptor,
size_t result_size, SloppyTNode<Code> target,
Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
size_t result_size, Node* target,
SloppyTNode<Object> context,
std::initializer_list<Node*> args) {
DCHECK(call_mode == StubCallMode::kCallOnHeapBuiltin ||
call_mode == StubCallMode::kCallBuiltinPointer);
constexpr size_t kMaxNumArgs = 10;
DCHECK_GE(kMaxNumArgs, args.size());
@@ -1242,7 +1250,8 @@ Node* CodeAssembler::CallStubRImpl(const CallInterfaceDescriptor& descriptor,
inputs.Add(context);
}
return CallStubN(descriptor, result_size, inputs.size(), inputs.data());
return CallStubN(call_mode, descriptor, result_size, inputs.size(),
inputs.data());
}
Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl(


@@ -248,6 +248,10 @@ struct UnionT {
using Number = UnionT<Smi, HeapNumber>;
using Numeric = UnionT<Number, BigInt>;
// A pointer to a builtin function, used by Torque's function pointers.
// TODO(jgruber): Switch to a Smi representation.
using BuiltinPtr = Code;
class int31_t {
public:
int31_t() : value_(0) {}
@@ -1213,19 +1217,31 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
SloppyTNode<Code> target, SloppyTNode<Object> context,
TArgs... args) {
return UncheckedCast<T>(CallStubR(descriptor, 1, target, context, args...));
return UncheckedCast<T>(CallStubR(StubCallMode::kCallOnHeapBuiltin,
descriptor, 1, target, context, args...));
}
template <class... TArgs>
Node* CallStubR(const CallInterfaceDescriptor& descriptor, size_t result_size,
Node* CallStubR(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor, size_t result_size,
SloppyTNode<Code> target, SloppyTNode<Object> context,
TArgs... args) {
return CallStubRImpl(descriptor, result_size, target, context, {args...});
return CallStubRImpl(call_mode, descriptor, result_size, target, context,
{args...});
}
Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
Node* CallStubN(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor, size_t result_size,
int input_count, Node* const* inputs);
template <class T = Object, class... TArgs>
TNode<T> CallBuiltinPointer(const CallInterfaceDescriptor& descriptor,
TNode<BuiltinPtr> target, TNode<Object> context,
TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallBuiltinPointer,
descriptor, 1, target, context, args...));
}
template <class... TArgs>
void TailCallStub(Callable const& callable, SloppyTNode<Object> context,
TArgs... args) {
@@ -1403,8 +1419,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args);
Node* CallStubRImpl(const CallInterfaceDescriptor& descriptor,
size_t result_size, SloppyTNode<Code> target,
Node* CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
size_t result_size, Node* target,
SloppyTNode<Object> context,
std::initializer_list<Node*> args);
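
A hedged usage sketch of the new CSA-level entry point, written as it might
appear inside a CodeStubAssembler method (Builtins::kMyBuiltin, arg, and
context are hypothetical; the BuiltinPtr construction mirrors what Torque's
CSA generator emits further below). Since BuiltinPtr is still Code, the
target is built like any other code constant for now:

// Sketch only: kMyBuiltin, arg, and context are assumed to be in scope.
TNode<BuiltinPtr> fn = UncheckedCast<BuiltinPtr>(HeapConstant(
    Builtins::CallableFor(isolate(), Builtins::kMyBuiltin).code()));
TNode<Object> result = CallBuiltinPointer(
    Builtins::CallableFor(isolate(), Builtins::kMyBuiltin).descriptor(), fn,
    context, arg);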


@@ -43,6 +43,9 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallWasmImportWrapper:
os << "WasmImportWrapper";
break;
case CallDescriptor::kCallBuiltinPointer:
os << "BuiltinPointer";
break;
}
return os;
}
@@ -130,6 +133,7 @@ int CallDescriptor::CalculateFixedFrameSize() const {
return CommonFrameConstants::kFixedSlotCountAboveFp +
CommonFrameConstants::kCPSlotCount;
case kCallCodeObject:
case kCallBuiltinPointer:
return TypedFrameConstants::kFixedSlotCount;
case kCallWasmFunction:
case kCallWasmImportWrapper:
@@ -392,12 +396,23 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
}
// The target for stub calls depends on the requested mode.
CallDescriptor::Kind kind = stub_mode == StubCallMode::kCallWasmRuntimeStub
? CallDescriptor::kCallWasmFunction
: CallDescriptor::kCallCodeObject;
MachineType target_type = stub_mode == StubCallMode::kCallWasmRuntimeStub
? MachineType::Pointer()
: MachineType::AnyTagged();
CallDescriptor::Kind kind;
MachineType target_type;
switch (stub_mode) {
case StubCallMode::kCallOnHeapBuiltin:
kind = CallDescriptor::kCallCodeObject;
target_type = MachineType::AnyTagged();
break;
case StubCallMode::kCallWasmRuntimeStub:
kind = CallDescriptor::kCallWasmFunction;
target_type = MachineType::Pointer();
break;
case StubCallMode::kCallBuiltinPointer:
kind = CallDescriptor::kCallBuiltinPointer;
target_type = MachineType::AnyTagged();
break;
}
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
return new (zone) CallDescriptor( // --
kind, // kind


@@ -170,11 +170,12 @@ class V8_EXPORT_PRIVATE CallDescriptor final
public:
// Describes the kind of this call, which determines the target.
enum Kind {
kCallCodeObject, // target is a Code object
kCallJSFunction, // target is a JSFunction object
kCallAddress, // target is a machine pointer
kCallWasmFunction, // target is a wasm function
kCallWasmImportWrapper // target is a wasm import wrapper
kCallCodeObject, // target is a Code object
kCallJSFunction, // target is a JSFunction object
kCallAddress, // target is a machine pointer
kCallWasmFunction, // target is a wasm function
kCallWasmImportWrapper, // target is a wasm import wrapper
kCallBuiltinPointer, // target is a builtin pointer
};
enum Flag {


@@ -1710,9 +1710,17 @@ enum IcCheckType { ELEMENT, PROPERTY };
// Helper stubs can be called in different ways depending on where the target
// code is located and how the call sequence is expected to look like:
// - JavaScript: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
// - WebAssembly: Call native {WasmCode} stub via {RelocInfo::WASM_STUB_CALL}.
enum class StubCallMode { kCallOnHeapBuiltin, kCallWasmRuntimeStub };
// - OnHeapBuiltin: Call on-heap {Code} object via {RelocInfo::CODE_TARGET}.
// - WasmRuntimeStub: Call native {WasmCode} stub via
// {RelocInfo::WASM_STUB_CALL}.
// - BuiltinPointer: Call a builtin based on a builtin pointer with dynamic
// contents. If builtins are embedded, we call directly into off-heap code
// without going through the on-heap Code trampoline.
enum class StubCallMode {
kCallOnHeapBuiltin,
kCallWasmRuntimeStub,
kCallBuiltinPointer,
};
} // namespace internal
} // namespace v8
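
A hedged sketch of requesting a call descriptor under the new mode (zone,
descriptor, and stack_parameter_count are assumed to be in scope; the
signature matches the CallStubN call site above):

auto* call_descriptor = Linkage::GetStubCallDescriptor(
    zone, descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
    Operator::kNoProperties, StubCallMode::kCallBuiltinPointer);
// Per the linkage.cc switch above, this yields a descriptor of kind
// CallDescriptor::kCallBuiltinPointer with a MachineType::AnyTagged target.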


@@ -1871,6 +1871,11 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
add(builtin_pointer, Immediate(Code::kHeaderSize - kHeapObjectTag));
call(builtin_pointer);
}
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
if (root_array_available() && options().isolate_independent_code &&


@@ -131,11 +131,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target) { call(target); }
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void CallBuiltinPointer(Register builtin_pointer) override;
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void RetpolineJump(Register reg);
void CallForDeoptimization(Address target, int deopt_id,


@@ -1251,8 +1251,9 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Load(MachineType::Pointer(), function,
IntPtrConstant(offsetof(Runtime::Function, entry)));
return CallStubR(callable.descriptor(), result_size, code_target, context,
args.reg_count(), args.base_reg_location(), function_entry);
return CallStubR(StubCallMode::kCallOnHeapBuiltin, callable.descriptor(),
result_size, code_target, context, args.reg_count(),
args.base_reg_location(), function_entry);
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {


@@ -183,7 +183,8 @@ Node* IntrinsicsGenerator::IntrinsicAsStubCall(
stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
}
stub_args[index++] = context;
return __ CallStubN(callable.descriptor(), 1, input_count, stub_args);
return __ CallStubN(StubCallMode::kCallOnHeapBuiltin, callable.descriptor(),
1, input_count, stub_args);
}
Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(


@@ -3972,6 +3972,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
Call(builtin_pointer, builtin_pointer, Code::kHeaderSize - kHeapObjectTag);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.


@@ -257,6 +257,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
COND_ARGS);
void Call(Label* target);
void CallBuiltinPointer(Register builtin_pointer) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.


@@ -4299,6 +4299,11 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
daddiu(builtin_pointer, builtin_pointer, Code::kHeaderSize - kHeapObjectTag);
Call(builtin_pointer);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.


@@ -282,6 +282,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
COND_ARGS);
void Call(Label* target);
void CallBuiltinPointer(Register builtin_pointer) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.


@@ -92,9 +92,11 @@ void CSAGenerator::EmitInstruction(
}
void CSAGenerator::EmitInstruction(
const PushCodePointerInstruction& instruction, Stack<std::string>* stack) {
const PushBuiltinPointerInstruction& instruction,
Stack<std::string>* stack) {
stack->Push(
"ca_.UncheckedCast<Code>(ca_.HeapConstant(Builtins::CallableFor(ca_."
"ca_.UncheckedCast<BuiltinPtr>(ca_.HeapConstant(Builtins::CallableFor(ca_"
"."
"isolate(), Builtins::k" +
instruction.external_name + ").code()))");
}
@@ -451,29 +453,24 @@ void CSAGenerator::EmitInstruction(
ReportError("builtins must have exactly one result");
}
if (instruction.is_tailcall) {
out_ << " "
"CodeStubAssembler(state_).TailCallBuiltin(Builtins::CallableFor("
"ca_.isolate(), "
"ExampleBuiltinForTorqueFunctionPointerType("
<< instruction.type->function_pointer_type_id() << ")).descriptor(), ";
PrintCommaSeparatedList(out_, function_and_arguments);
out_ << ");\n";
} else {
stack->Push(FreshNodeName());
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
out_ << " compiler::TNode<" << generated_type << "> " << stack->Top()
<< " = ";
if (generated_type != "Object") out_ << "TORQUE_CAST(";
out_ << "CodeStubAssembler(state_).CallStub(Builtins::CallableFor(ca_."
"isolate(),"
"ExampleBuiltinForTorqueFunctionPointerType("
<< instruction.type->function_pointer_type_id() << ")).descriptor(), ";
PrintCommaSeparatedList(out_, function_and_arguments);
out_ << ")";
if (generated_type != "Object") out_ << ")";
out_ << "; \n";
out_ << " USE(" << stack->Top() << ");\n";
ReportError("tail-calls to builtin pointers are not supported");
}
stack->Push(FreshNodeName());
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
out_ << " compiler::TNode<" << generated_type << "> " << stack->Top()
<< " = ";
if (generated_type != "Object") out_ << "TORQUE_CAST(";
out_ << "CodeStubAssembler(state_).CallBuiltinPointer(Builtins::"
"CallableFor(ca_."
"isolate(),"
"ExampleBuiltinForTorqueFunctionPointerType("
<< instruction.type->function_pointer_type_id() << ")).descriptor(), ";
PrintCommaSeparatedList(out_, function_and_arguments);
out_ << ")";
if (generated_type != "Object") out_ << ")";
out_ << "; \n";
out_ << " USE(" << stack->Top() << ");\n";
}
std::string CSAGenerator::PreCallableExceptionPreparation(
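
For a non-tail builtin-pointer call, the generated C++ now has roughly this
shape (illustrative only: the tmp* names, the type id 0, and the argument
list are made up, and a TORQUE_CAST wrapper is added when the result type is
not Object):

compiler::TNode<Object> tmp0 = CodeStubAssembler(state_).CallBuiltinPointer(
    Builtins::CallableFor(
        ca_.isolate(), ExampleBuiltinForTorqueFunctionPointerType(0))
        .descriptor(),
    tmp_fn, tmp_context, tmp_arg);
USE(tmp0);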


@@ -89,13 +89,13 @@ const Type* Declarations::GetType(TypeExpression* type_expression) {
for (TypeExpression* type_exp : function_type_exp->parameters) {
argument_types.push_back(GetType(type_exp));
}
return TypeOracle::GetFunctionPointerType(
return TypeOracle::GetBuiltinPointerType(
argument_types, GetType(function_type_exp->return_type));
}
}
Builtin* Declarations::FindSomeInternalBuiltinWithType(
const FunctionPointerType* type) {
const BuiltinPointerType* type) {
for (auto& declarable : GlobalContext::AllDeclarables()) {
if (Builtin* builtin = Builtin::DynamicCast(declarable.get())) {
if (!builtin->IsExternal() && builtin->kind() == Builtin::kStub &&


@@ -62,7 +62,7 @@ class Declarations {
static const Type* GetType(TypeExpression* type_expression);
static Builtin* FindSomeInternalBuiltinWithType(
const FunctionPointerType* type);
const BuiltinPointerType* type);
static Value* LookupValue(const QualifiedName& name);


@@ -650,10 +650,11 @@ VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
"creating function pointers is only allowed for internal builtins with "
"stub linkage");
}
const Type* type = TypeOracle::GetFunctionPointerType(
const Type* type = TypeOracle::GetBuiltinPointerType(
builtin->signature().parameter_types.types,
builtin->signature().return_type);
assembler().Emit(PushCodePointerInstruction{builtin->ExternalName(), type});
assembler().Emit(
PushBuiltinPointerInstruction{builtin->ExternalName(), type});
return VisitResult(type, assembler().TopRange(1));
}
@@ -1559,14 +1560,14 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
StackScope scope(this);
TypeVector parameter_types(arguments.parameters.GetTypeVector());
VisitResult callee_result = Visit(callee);
if (!callee_result.type()->IsFunctionPointerType()) {
if (!callee_result.type()->IsBuiltinPointerType()) {
std::stringstream stream;
stream << "Expected a function pointer type but found "
<< *callee_result.type();
ReportError(stream.str());
}
const FunctionPointerType* type =
FunctionPointerType::cast(callee_result.type());
const BuiltinPointerType* type =
BuiltinPointerType::cast(callee_result.type());
if (type->parameter_types().size() != parameter_types.size()) {
std::stringstream stream;
@@ -2095,8 +2096,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitions(std::string& file_name) {
new_contents_stream
<< "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
for (const FunctionPointerType* type :
TypeOracle::AllFunctionPointerTypes()) {
for (const BuiltinPointerType* type : TypeOracle::AllBuiltinPointerTypes()) {
Builtin* example_builtin =
Declarations::FindSomeInternalBuiltinWithType(type);
if (!example_builtin) {


@@ -60,8 +60,8 @@ void PushUninitializedInstruction::TypeInstruction(
stack->Push(type);
}
void PushCodePointerInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
void PushBuiltinPointerInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
stack->Push(type);
}
@@ -202,7 +202,7 @@ void CallBuiltinInstruction::TypeInstruction(Stack<const Type*>* stack,
void CallBuiltinPointerInstruction::TypeInstruction(
Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
std::vector<const Type*> argument_types = stack->PopMany(argc);
const FunctionPointerType* f = FunctionPointerType::DynamicCast(stack->Pop());
const BuiltinPointerType* f = BuiltinPointerType::DynamicCast(stack->Pop());
if (!f) ReportError("expected function pointer type");
if (argument_types != LowerParameterTypes(f->parameter_types())) {
ReportError("wrong argument types");


@@ -29,7 +29,7 @@ class RuntimeFunction;
V(PokeInstruction) \
V(DeleteRangeInstruction) \
V(PushUninitializedInstruction) \
V(PushCodePointerInstruction) \
V(PushBuiltinPointerInstruction) \
V(CallCsaMacroInstruction) \
V(CallIntrinsicInstruction) \
V(NamespaceConstantInstruction) \
@@ -170,11 +170,11 @@ struct PushUninitializedInstruction : InstructionBase {
const Type* type;
};
struct PushCodePointerInstruction : InstructionBase {
struct PushBuiltinPointerInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
PushCodePointerInstruction(std::string external_name, const Type* type)
PushBuiltinPointerInstruction(std::string external_name, const Type* type)
: external_name(std::move(external_name)), type(type) {
DCHECK(type->IsFunctionPointerType());
DCHECK(type->IsBuiltinPointerType());
}
std::string external_name;
@@ -265,11 +265,11 @@ struct CallBuiltinPointerInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return is_tailcall; }
CallBuiltinPointerInstruction(bool is_tailcall,
const FunctionPointerType* type, size_t argc)
const BuiltinPointerType* type, size_t argc)
: is_tailcall(is_tailcall), type(type), argc(argc) {}
bool is_tailcall;
const FunctionPointerType* type;
const BuiltinPointerType* type;
size_t argc;
};


@@ -35,23 +35,23 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return result;
}
static const FunctionPointerType* GetFunctionPointerType(
static const BuiltinPointerType* GetBuiltinPointerType(
TypeVector argument_types, const Type* return_type) {
TypeOracle& self = Get();
const Type* code_type = self.GetBuiltinType(CODE_TYPE_STRING);
const FunctionPointerType* result = self.function_pointer_types_.Add(
FunctionPointerType(code_type, argument_types, return_type,
self.all_function_pointer_types_.size()));
const BuiltinPointerType* result = self.function_pointer_types_.Add(
BuiltinPointerType(code_type, argument_types, return_type,
self.all_builtin_pointer_types_.size()));
if (result->function_pointer_type_id() ==
self.all_function_pointer_types_.size()) {
self.all_function_pointer_types_.push_back(result);
self.all_builtin_pointer_types_.size()) {
self.all_builtin_pointer_types_.push_back(result);
}
return result;
}
static const std::vector<const FunctionPointerType*>&
AllFunctionPointerTypes() {
return Get().all_function_pointer_types_;
static const std::vector<const BuiltinPointerType*>&
AllBuiltinPointerTypes() {
return Get().all_builtin_pointer_types_;
}
static const Type* GetUnionType(UnionType type) {
@@ -159,8 +159,8 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Declarations::LookupGlobalType(name);
}
Deduplicator<FunctionPointerType> function_pointer_types_;
std::vector<const FunctionPointerType*> all_function_pointer_types_;
Deduplicator<BuiltinPointerType> function_pointer_types_;
std::vector<const BuiltinPointerType*> all_builtin_pointer_types_;
Deduplicator<UnionType> union_types_;
std::vector<std::unique_ptr<Type>> nominal_types_;
std::vector<std::unique_ptr<Type>> struct_types_;


@@ -78,7 +78,7 @@ std::string AbstractType::GetGeneratedTNodeTypeName() const {
return generated_type_;
}
std::string FunctionPointerType::ToExplicitString() const {
std::string BuiltinPointerType::ToExplicitString() const {
std::stringstream result;
result << "builtin (";
PrintCommaSeparatedList(result, parameter_types_);
@@ -86,7 +86,7 @@ std::string FunctionPointerType::ToExplicitString() const {
return result.str();
}
std::string FunctionPointerType::MangledName() const {
std::string BuiltinPointerType::MangledName() const {
std::stringstream result;
result << "FT";
for (const Type* t : parameter_types_) {


@@ -48,15 +48,15 @@ class TypeBase {
enum class Kind {
kTopType,
kAbstractType,
kFunctionPointerType,
kBuiltinPointerType,
kUnionType,
kStructType
};
virtual ~TypeBase() = default;
bool IsTopType() const { return kind() == Kind::kTopType; }
bool IsAbstractType() const { return kind() == Kind::kAbstractType; }
bool IsFunctionPointerType() const {
return kind() == Kind::kFunctionPointerType;
bool IsBuiltinPointerType() const {
return kind() == Kind::kBuiltinPointerType;
}
bool IsUnionType() const { return kind() == Kind::kUnionType; }
bool IsStructType() const { return kind() == Kind::kStructType; }
@@ -218,11 +218,10 @@ class AbstractType final : public Type {
base::Optional<const AbstractType*> non_constexpr_version_;
};
// For now, function pointers are restricted to Code objects of Torque-defined
// builtins.
class FunctionPointerType final : public Type {
// For now, builtin pointers are restricted to Torque-defined builtins.
class BuiltinPointerType final : public Type {
public:
DECLARE_TYPE_BOILERPLATE(FunctionPointerType);
DECLARE_TYPE_BOILERPLATE(BuiltinPointerType);
std::string ToExplicitString() const override;
std::string MangledName() const override;
std::string GetGeneratedTypeName() const override {
@@ -240,14 +239,14 @@ class FunctionPointerType final : public Type {
const TypeVector& parameter_types() const { return parameter_types_; }
const Type* return_type() const { return return_type_; }
friend size_t hash_value(const FunctionPointerType& p) {
friend size_t hash_value(const BuiltinPointerType& p) {
size_t result = base::hash_value(p.return_type_);
for (const Type* parameter : p.parameter_types_) {
result = base::hash_combine(result, parameter);
}
return result;
}
bool operator==(const FunctionPointerType& other) const {
bool operator==(const BuiltinPointerType& other) const {
return parameter_types_ == other.parameter_types_ &&
return_type_ == other.return_type_;
}
@@ -255,9 +254,9 @@
private:
friend class TypeOracle;
FunctionPointerType(const Type* parent, TypeVector parameter_types,
const Type* return_type, size_t function_pointer_type_id)
: Type(Kind::kFunctionPointerType, parent),
BuiltinPointerType(const Type* parent, TypeVector parameter_types,
const Type* return_type, size_t function_pointer_type_id)
: Type(Kind::kBuiltinPointerType, parent),
parameter_types_(parameter_types),
return_type_(return_type),
function_pointer_type_id_(function_pointer_type_id) {}


@@ -39,6 +39,10 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
void set_has_frame(bool v) { has_frame_ = v; }
bool has_frame() const { return has_frame_; }
// Calls the given builtin. If builtins are embedded, the trampoline Code
// object on the heap is not used.
virtual void CallBuiltinPointer(Register builtin_pointer) = 0;
// Loads the given constant or external reference without embedding its direct
// pointer. The produced code is isolate-independent.
void IndirectLoadConstant(Register destination, Handle<HeapObject> object);


@@ -1637,6 +1637,11 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
addp(builtin_pointer, Immediate(Code::kHeaderSize - kHeapObjectTag));
call(builtin_pointer);
}
void TurboAssembler::RetpolineCall(Register reg) {
Label setup_return, setup_target, inner_indirect_branch, capture_spec;


@@ -391,6 +391,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
void CallBuiltinPointer(Register builtin_pointer) override;
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);