[Interpreter] Make dispatch table point to code entry instead of code objects.
Modifies Ignition to store code entry addresses in the dispatch table rather than code objects. This allows the interpreter to avoid calculating the code entry address from the code object on every dispatch and provides a ~5-7% performance improvement on Octane with Ignition. This change adds ArchOpcode::kArchTailCallAddress to TurboFan to enable tail call dispatch using these code addresses. It also adds a Dispatch linkage creator (distinct from the stub linkage type used previously) to allow targeting a code address target (which will diverge further from the stub linkage type when we remove the context machine register in Ignition). BUG=v8:4280 LOG=N Review URL: https://codereview.chromium.org/1882073002 Cr-Commit-Position: refs/heads/master@{#35480}
This commit is contained in:
parent
1b63d124ad
commit
0c05e02f25
@ -1040,9 +1040,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
kInterpreterBytecodeOffsetRegister));
|
||||
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
|
||||
kPointerSizeLog2));
|
||||
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
|
||||
// and header removal.
|
||||
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Call(ip);
|
||||
|
||||
// Even though the first bytecode handler was called, we will never return.
|
||||
@ -1174,7 +1171,6 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
|
||||
kInterpreterBytecodeOffsetRegister));
|
||||
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
|
||||
kPointerSizeLog2));
|
||||
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ mov(pc, ip);
|
||||
}
|
||||
|
||||
|
@ -1047,9 +1047,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
kInterpreterBytecodeOffsetRegister));
|
||||
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
|
||||
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
|
||||
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
|
||||
// and header removal.
|
||||
__ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Call(ip0);
|
||||
|
||||
// Even though the first bytecode handler was called, we will never return.
|
||||
@ -1123,7 +1120,6 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
|
||||
kInterpreterBytecodeOffsetRegister));
|
||||
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
|
||||
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
|
||||
__ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Jump(ip0);
|
||||
}
|
||||
|
||||
|
@ -488,6 +488,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchTailCallAddress: {
|
||||
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
|
||||
AssembleDeconstructActivationRecord(stack_param_delta);
|
||||
CHECK(!instr->InputAt(0)->IsImmediate());
|
||||
__ Jump(i.InputRegister(0));
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchCallJSFunction: {
|
||||
EnsureSpaceForLazyDeopt();
|
||||
Register func = i.InputRegister(0);
|
||||
|
@ -581,6 +581,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchTailCallAddress: {
|
||||
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
|
||||
AssembleDeconstructActivationRecord(stack_param_delta);
|
||||
CHECK(!instr->InputAt(0)->IsImmediate());
|
||||
__ Jump(i.InputRegister(0));
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchCallJSFunction: {
|
||||
EnsureSpaceForLazyDeopt();
|
||||
Register func = i.InputRegister(0);
|
||||
|
@ -1547,15 +1547,13 @@ Node* CodeStubAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
|
||||
return raw_assembler_->TailCallN(call_descriptor, target, args);
|
||||
}
|
||||
|
||||
Node* CodeStubAssembler::TailCall(
|
||||
const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
|
||||
Node** args, size_t result_size) {
|
||||
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
|
||||
Node* CodeStubAssembler::TailCallBytecodeDispatch(
|
||||
const CallInterfaceDescriptor& interface_descriptor,
|
||||
Node* code_target_address, Node** args) {
|
||||
CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
|
||||
isolate(), zone(), interface_descriptor,
|
||||
interface_descriptor.GetStackParameterCount(),
|
||||
CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
|
||||
MachineType::AnyTagged(), result_size);
|
||||
return raw_assembler_->TailCallN(descriptor, code_target, args);
|
||||
interface_descriptor.GetStackParameterCount());
|
||||
return raw_assembler_->TailCallN(descriptor, code_target_address, args);
|
||||
}
|
||||
|
||||
void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
|
||||
|
@ -263,13 +263,12 @@ class CodeStubAssembler {
|
||||
|
||||
Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
|
||||
Node* arg2, size_t result_size = 1);
|
||||
|
||||
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
|
||||
Node* context, Node* arg1, Node* arg2,
|
||||
size_t result_size = 1);
|
||||
|
||||
Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
|
||||
Node** args, size_t result_size = 1);
|
||||
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
|
||||
Node* code_target_address, Node** args);
|
||||
|
||||
// ===========================================================================
|
||||
// Macros
|
||||
|
@ -467,6 +467,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchTailCallAddress: {
|
||||
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
|
||||
AssembleDeconstructActivationRecord(stack_param_delta);
|
||||
CHECK(!HasImmediateInput(instr, 0));
|
||||
Register reg = i.InputRegister(0);
|
||||
__ jmp(reg);
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchCallJSFunction: {
|
||||
EnsureSpaceForLazyDeopt();
|
||||
Register func = i.InputRegister(0);
|
||||
|
@ -48,6 +48,7 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
|
||||
V(ArchCallJSFunction) \
|
||||
V(ArchTailCallJSFunctionFromJSFunction) \
|
||||
V(ArchTailCallJSFunction) \
|
||||
V(ArchTailCallAddress) \
|
||||
V(ArchPrepareCallCFunction) \
|
||||
V(ArchCallCFunction) \
|
||||
V(ArchPrepareTailCall) \
|
||||
|
@ -224,6 +224,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
|
||||
case kArchTailCallCodeObject:
|
||||
case kArchTailCallJSFunctionFromJSFunction:
|
||||
case kArchTailCallJSFunction:
|
||||
case kArchTailCallAddress:
|
||||
return kHasSideEffect | kIsBlockTerminator;
|
||||
|
||||
case kArchDeoptimize:
|
||||
|
@ -1637,6 +1637,9 @@ void InstructionSelector::VisitTailCall(Node* node) {
|
||||
case CallDescriptor::kCallJSFunction:
|
||||
opcode = kArchTailCallJSFunction;
|
||||
break;
|
||||
case CallDescriptor::kCallAddress:
|
||||
opcode = kArchTailCallAddress;
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return;
|
||||
|
@ -823,7 +823,8 @@ class Instruction final {
|
||||
return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
|
||||
arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
|
||||
arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
|
||||
arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction;
|
||||
arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction ||
|
||||
arch_opcode() == ArchOpcode::kArchTailCallAddress;
|
||||
}
|
||||
bool IsThrow() const {
|
||||
return arch_opcode() == ArchOpcode::kArchThrowTerminator;
|
||||
|
@ -404,6 +404,55 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
|
||||
descriptor.DebugName(isolate));
|
||||
}
|
||||
|
||||
CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
|
||||
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
|
||||
int stack_parameter_count) {
|
||||
const int register_parameter_count = descriptor.GetRegisterParameterCount();
|
||||
const int parameter_count = register_parameter_count + stack_parameter_count;
|
||||
const int context_count = 1;
|
||||
const size_t parameter_and_context_count =
|
||||
static_cast<size_t>(parameter_count + context_count);
|
||||
|
||||
LocationSignature::Builder locations(zone, 0, parameter_and_context_count);
|
||||
MachineSignature::Builder types(zone, 0, parameter_and_context_count);
|
||||
|
||||
// Add parameters in registers and on the stack.
|
||||
for (int i = 0; i < parameter_count; i++) {
|
||||
if (i < register_parameter_count) {
|
||||
// The first parameters go in registers.
|
||||
Register reg = descriptor.GetRegisterParameter(i);
|
||||
Representation rep =
|
||||
RepresentationFromType(descriptor.GetParameterType(i));
|
||||
locations.AddParam(regloc(reg));
|
||||
types.AddParam(reptyp(rep));
|
||||
} else {
|
||||
// The rest of the parameters go on the stack.
|
||||
int stack_slot = i - register_parameter_count - stack_parameter_count;
|
||||
locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
|
||||
types.AddParam(MachineType::AnyTagged());
|
||||
}
|
||||
}
|
||||
// Add context.
|
||||
locations.AddParam(regloc(kContextRegister));
|
||||
types.AddParam(MachineType::AnyTagged());
|
||||
|
||||
// The target for interpreter dispatches is a code entry address.
|
||||
MachineType target_type = MachineType::Pointer();
|
||||
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
|
||||
return new (zone) CallDescriptor( // --
|
||||
CallDescriptor::kCallAddress, // kind
|
||||
target_type, // target MachineType
|
||||
target_loc, // target location
|
||||
types.Build(), // machine_sig
|
||||
locations.Build(), // location_sig
|
||||
stack_parameter_count, // stack_parameter_count
|
||||
Operator::kNoProperties, // properties
|
||||
kNoCalleeSaved, // callee-saved registers
|
||||
kNoCalleeSaved, // callee-saved fp
|
||||
CallDescriptor::kCanUseRoots | // flags
|
||||
CallDescriptor::kSupportsTailCalls, // flags
|
||||
descriptor.DebugName(isolate));
|
||||
}
|
||||
|
||||
LinkageLocation Linkage::GetOsrValueLocation(int index) const {
|
||||
CHECK(incoming_->IsJSFunctionCall());
|
||||
|
@ -304,10 +304,11 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
|
||||
// representing the architecture-specific location. The following call node
|
||||
// layouts are supported (where {n} is the number of value inputs):
|
||||
//
|
||||
// #0 #1 #2 #3 [...] #n
|
||||
// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
|
||||
// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], new, #arg, context
|
||||
// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
|
||||
// #0 #1 #2 [...] #n
|
||||
// Call[CodeStub] code, arg 1, arg 2, [...], context
|
||||
// Call[JSFunction] function, rcvr, arg 1, [...], new, #arg, context
|
||||
// Call[Runtime] CEntryStub, arg 1, arg 2, [...], fun, #arg, context
|
||||
// Call[BytecodeDispatch] address, arg 1, arg 2, [...], context
|
||||
class Linkage : public ZoneObject {
|
||||
public:
|
||||
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
|
||||
@ -332,6 +333,10 @@ class Linkage : public ZoneObject {
|
||||
MachineType return_type = MachineType::AnyTagged(),
|
||||
size_t return_count = 1);
|
||||
|
||||
static CallDescriptor* GetBytecodeDispatchCallDescriptor(
|
||||
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
|
||||
int stack_parameter_count);
|
||||
|
||||
// Creates a call descriptor for simplified C calls that is appropriate
|
||||
// for the host platform. This simplified calling convention only supports
|
||||
// integers and pointers of one word size each, i.e. no floating point,
|
||||
|
@ -564,6 +564,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchTailCallAddress: {
|
||||
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
|
||||
AssembleDeconstructActivationRecord(stack_param_delta);
|
||||
CHECK(!instr->InputAt(0)->IsImmediate());
|
||||
__ Jump(i.InputRegister(0));
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchCallJSFunction: {
|
||||
EnsureSpaceForLazyDeopt();
|
||||
Register func = i.InputRegister(0);
|
||||
|
@ -576,6 +576,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchTailCallAddress: {
|
||||
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
|
||||
AssembleDeconstructActivationRecord(stack_param_delta);
|
||||
CHECK(!instr->InputAt(0)->IsImmediate());
|
||||
__ Jump(i.InputRegister(0));
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchCallJSFunction: {
|
||||
EnsureSpaceForLazyDeopt();
|
||||
Register func = i.InputRegister(0);
|
||||
|
@ -695,6 +695,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchTailCallAddress: {
|
||||
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
|
||||
AssembleDeconstructActivationRecord(stack_param_delta);
|
||||
CHECK(!HasImmediateInput(instr, 0));
|
||||
Register reg = i.InputRegister(0);
|
||||
__ jmp(reg);
|
||||
frame_access_state()->ClearSPDelta();
|
||||
break;
|
||||
}
|
||||
case kArchCallJSFunction: {
|
||||
EnsureSpaceForLazyDeopt();
|
||||
Register func = i.InputRegister(0);
|
||||
|
@ -612,9 +612,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
// TODO(rmcilroy): Remove this once we move the dispatch table back into a
|
||||
// register.
|
||||
__ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
|
||||
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
|
||||
// and header removal.
|
||||
__ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ call(ebx);
|
||||
|
||||
// Even though the first bytecode handler was called, we will never return.
|
||||
@ -782,10 +779,6 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
|
||||
__ mov(kContextRegister,
|
||||
Operand(kInterpreterRegisterFileRegister,
|
||||
InterpreterFrameConstants::kContextFromRegisterPointer));
|
||||
|
||||
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
|
||||
// and header removal.
|
||||
__ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ jmp(ebx);
|
||||
}
|
||||
|
||||
|
@ -559,17 +559,22 @@ void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
|
||||
TraceBytecodeDispatch(target_bytecode);
|
||||
}
|
||||
|
||||
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion
|
||||
// from code object on every dispatch.
|
||||
Node* target_code_object =
|
||||
Node* target_code_entry =
|
||||
Load(MachineType::Pointer(), DispatchTableRawPointer(),
|
||||
WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
|
||||
|
||||
DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
|
||||
DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
|
||||
}
|
||||
|
||||
void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
|
||||
Node* bytecode_offset) {
|
||||
Node* handler_entry =
|
||||
IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
|
||||
DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
|
||||
}
|
||||
|
||||
void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
|
||||
Node* handler_entry, Node* bytecode_offset) {
|
||||
if (FLAG_trace_ignition) {
|
||||
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
|
||||
}
|
||||
@ -578,7 +583,7 @@ void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
|
||||
Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
|
||||
bytecode_offset, BytecodeArrayTaggedPointer(),
|
||||
DispatchTableRawPointer(), GetContext()};
|
||||
TailCall(descriptor, handler, args, 0);
|
||||
TailCallBytecodeDispatch(descriptor, handler_entry, args);
|
||||
}
|
||||
|
||||
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
|
||||
@ -613,11 +618,11 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
|
||||
base_index = nullptr;
|
||||
}
|
||||
Node* target_index = IntPtrAdd(base_index, next_bytecode);
|
||||
Node* target_code_object =
|
||||
Node* target_code_entry =
|
||||
Load(MachineType::Pointer(), DispatchTableRawPointer(),
|
||||
WordShl(target_index, kPointerSizeLog2));
|
||||
|
||||
DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
|
||||
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
|
||||
}
|
||||
|
||||
void InterpreterAssembler::InterpreterReturn() {
|
||||
|
@ -137,8 +137,6 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
|
||||
void Dispatch();
|
||||
|
||||
// Dispatch to bytecode handler.
|
||||
void DispatchToBytecodeHandler(compiler::Node* handler,
|
||||
compiler::Node* bytecode_offset);
|
||||
void DispatchToBytecodeHandler(compiler::Node* handler) {
|
||||
DispatchToBytecodeHandler(handler, BytecodeOffset());
|
||||
}
|
||||
@ -217,6 +215,14 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
|
||||
// Starts next instruction dispatch at |new_bytecode_offset|.
|
||||
void DispatchTo(compiler::Node* new_bytecode_offset);
|
||||
|
||||
// Dispatch to the bytecode handler with code offset |handler|.
|
||||
void DispatchToBytecodeHandler(compiler::Node* handler,
|
||||
compiler::Node* bytecode_offset);
|
||||
|
||||
// Dispatch to the bytecode handler with code entry point |handler_entry|.
|
||||
void DispatchToBytecodeHandlerEntry(compiler::Node* handler_entry,
|
||||
compiler::Node* bytecode_offset);
|
||||
|
||||
// Abort operations for debug code.
|
||||
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
|
||||
BailoutReason bailout_reason);
|
||||
|
@ -54,7 +54,7 @@ void Interpreter::Initialize() {
|
||||
Do##Name(&assembler); \
|
||||
Handle<Code> code = assembler.GenerateCode(); \
|
||||
size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale); \
|
||||
dispatch_table_[index] = *code; \
|
||||
dispatch_table_[index] = code->entry(); \
|
||||
TraceCodegen(code); \
|
||||
LOG_CODE_EVENT( \
|
||||
isolate_, \
|
||||
@ -82,7 +82,8 @@ Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
|
||||
DCHECK(IsDispatchTableInitialized());
|
||||
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
|
||||
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
|
||||
return dispatch_table_[index];
|
||||
Address code_entry = dispatch_table_[index];
|
||||
return Code::GetCodeFromTargetAddress(code_entry);
|
||||
}
|
||||
|
||||
// static
|
||||
@ -99,9 +100,17 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
|
||||
}
|
||||
|
||||
void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
|
||||
v->VisitPointers(
|
||||
reinterpret_cast<Object**>(&dispatch_table_[0]),
|
||||
reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
|
||||
for (int i = 0; i < kDispatchTableSize; i++) {
|
||||
Address code_entry = dispatch_table_[i];
|
||||
Object* code = code_entry == nullptr
|
||||
? nullptr
|
||||
: Code::GetCodeFromTargetAddress(code_entry);
|
||||
Object* old_code = code;
|
||||
v->VisitPointer(&code);
|
||||
if (code != old_code) {
|
||||
dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
@ -179,9 +188,10 @@ void Interpreter::TraceCodegen(Handle<Code> code) {
|
||||
|
||||
const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
|
||||
#ifdef ENABLE_DISASSEMBLER
|
||||
#define RETURN_NAME(Name, ...) \
|
||||
if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code) { \
|
||||
return #Name; \
|
||||
#define RETURN_NAME(Name, ...) \
|
||||
if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
|
||||
code->entry()) { \
|
||||
return #Name; \
|
||||
}
|
||||
BYTECODE_LIST(RETURN_NAME)
|
||||
#undef RETURN_NAME
|
||||
|
@ -146,7 +146,7 @@ class Interpreter {
|
||||
static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
|
||||
|
||||
Isolate* isolate_;
|
||||
Code* dispatch_table_[kDispatchTableSize];
|
||||
Address dispatch_table_[kDispatchTableSize];
|
||||
v8::base::SmartArrayPointer<uintptr_t> bytecode_dispatch_count_table_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(Interpreter);
|
||||
|
@ -1030,9 +1030,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
__ lbu(a0, MemOperand(a0));
|
||||
__ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
|
||||
__ lw(at, MemOperand(at));
|
||||
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
|
||||
// and header removal.
|
||||
__ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Call(at);
|
||||
|
||||
// Even though the first bytecode handler was called, we will never return.
|
||||
@ -1172,7 +1169,6 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
|
||||
__ lbu(a1, MemOperand(a1));
|
||||
__ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
|
||||
__ lw(a1, MemOperand(a1));
|
||||
__ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Jump(a1);
|
||||
}
|
||||
|
||||
|
@ -1019,9 +1019,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
__ lbu(a0, MemOperand(a0));
|
||||
__ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
|
||||
__ ld(at, MemOperand(at));
|
||||
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
|
||||
// and header removal.
|
||||
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Call(at);
|
||||
|
||||
// Even though the first bytecode handler was called, we will never return.
|
||||
@ -1161,7 +1158,6 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
|
||||
__ lbu(a1, MemOperand(a1));
|
||||
__ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
|
||||
__ ld(a1, MemOperand(a1));
|
||||
__ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Jump(a1);
|
||||
}
|
||||
|
||||
|
@ -683,9 +683,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
kInterpreterBytecodeOffsetRegister, times_1, 0));
|
||||
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
|
||||
times_pointer_size, 0));
|
||||
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
|
||||
// and header removal.
|
||||
__ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ call(rbx);
|
||||
|
||||
// Even though the first bytecode handler was called, we will never return.
|
||||
@ -847,7 +844,6 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
|
||||
kInterpreterBytecodeOffsetRegister, times_1, 0));
|
||||
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
|
||||
times_pointer_size, 0));
|
||||
__ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ jmp(rbx);
|
||||
}
|
||||
|
||||
|
@ -464,10 +464,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
|
||||
|
||||
Handle<HeapObject> exit_trampoline =
|
||||
isolate()->builtins()->InterpreterExitTrampoline();
|
||||
Matcher<Node*> exit_trampoline_entry_matcher =
|
||||
IsIntPtrAdd(IsHeapConstant(exit_trampoline),
|
||||
IsIntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
|
||||
EXPECT_THAT(
|
||||
tail_call_node,
|
||||
IsTailCall(
|
||||
_, IsHeapConstant(exit_trampoline),
|
||||
_, exit_trampoline_entry_matcher,
|
||||
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
|
||||
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
|
||||
IsParameter(
|
||||
|
Loading…
Reference in New Issue
Block a user