[Interpreter] Make InterpreterAssembler a subclass of CodeStubAssembler.

Moves InterpreterAssembler out of the compiler directory and into the
interpreter directory. Makes InterpreterAssembler a subclass of
CodeStubAssembler.

As part of this change, the special bytecode dispatch linkage type
is removed; instead we use an InterpreterDispatchDescriptor and
a normal CodeStub linkage type.
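
For illustration only (a hedged sketch, not part of the diff): under the new
linkage a bytecode handler reads its fixed dispatch inputs as ordinary stub
parameters of the InterpreterDispatchDescriptor added further down in
src/interface-descriptors.h, and dispatches onward with a plain stub tail
call. The descriptor constants below come from this change; the handler body
and variable names are hypothetical.

  // Sketch: reading the dispatch state inside a handler generated by the new
  // InterpreterAssembler, which now inherits Parameter() and friends from
  // CodeStubAssembler instead of using a special dispatch call descriptor.
  using compiler::Node;
  Node* accumulator =
      Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter);
  Node* register_file =
      Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
  Node* bytecode_offset =
      Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
  Node* bytecode_array =
      Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter);
  Node* dispatch_table =
      Parameter(InterpreterDispatchDescriptor::kDispatchTableParameter);
  Node* context = Parameter(InterpreterDispatchDescriptor::kContextParameter);
  // Dispatching to the next handler is then roughly an ordinary code-stub
  // tail call (TailCall with this descriptor) rather than a special linkage.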

Removes a bunch of logic in InterpreterAssembler that duplicated
CodeStubAssembler and uses the CodeStubAssembler logic instead.
Refactors Interpreter to match these changes.

Modifies CodeStubAssembler to add the extra operations required
by the Interpreter (extra call types, raw memory access and some extra
binary ops). Also adds the ability for subclasses to add extra
prologue and epilogue operations around calls, which is required
for the Interpreter.
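
As a hedged sketch of the new hook (not itself part of the diff): every call
helper in CodeStubAssembler is now bracketed by virtual CallPrologue() /
CallEpilogue() methods that default to no-ops, and InterpreterAssembler
overrides CallPrologue() to spill the current bytecode offset into the
interpreter frame before making a call. The base-class code mirrors the
code-stub-assembler.cc hunk below; the override is inferred from the old
src/compiler/interpreter-assembler.cc deleted here, since the new override
itself is not visible in the hunks shown.

  // Base class: no-op hooks wrapped around every CallN/CallRuntime/CallStub.
  void CodeStubAssembler::CallPrologue() {}
  void CodeStubAssembler::CallEpilogue() {}

  Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
                                 Node** args) {
    CallPrologue();
    Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
    CallEpilogue();
    return return_value;
  }

  // Subclass (sketch): save the bytecode offset so the callee sees an
  // up-to-date interpreter frame, as the old implementation did.
  void InterpreterAssembler::CallPrologue() {
    StoreRegister(SmiTag(BytecodeOffset()),
                  InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
  }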

BUG=v8:4280
LOG=N

Review URL: https://codereview.chromium.org/1673333004

Cr-Commit-Position: refs/heads/master@{#33873}
rmcilroy 2016-02-10 08:38:49 -08:00 committed by Commit bot
parent e0d0c96a53
commit d1c28849c7
28 changed files with 1523 additions and 1704 deletions

View File

@ -785,8 +785,6 @@ source_set("v8_base") {
"src/compiler/instruction.h",
"src/compiler/int64-lowering.cc",
"src/compiler/int64-lowering.h",
"src/compiler/interpreter-assembler.cc",
"src/compiler/interpreter-assembler.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
"src/compiler/js-call-reducer.cc",
@ -1123,6 +1121,8 @@ source_set("v8_base") {
"src/interpreter/handler-table-builder.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
"src/interpreter/interpreter-assembler.cc",
"src/interpreter/interpreter-assembler.h",
"src/interpreter/register-translator.cc",
"src/interpreter/register-translator.h",
"src/interpreter/source-position-table.cc",

View File

@ -44,7 +44,6 @@
},
'interpreter': {
'filepath': 'src/interpreter/' \
'|src/compiler/interpreter' \
'|src/compiler/bytecode' \
'|test/cctest/interpreter/' \
'|test/unittests/interpreter/',

View File

@ -431,6 +431,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -442,7 +450,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -454,7 +461,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -465,6 +465,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -476,7 +484,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -488,7 +495,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -26,11 +26,14 @@ namespace compiler {
CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor,
Code::Flags flags, const char* name)
Code::Flags flags, const char* name,
size_t result_size)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone),
Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
CallDescriptor::kNoFlags))),
Linkage::GetStubCallDescriptor(
isolate, zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size))),
flags_(flags),
name_(name),
code_generated_(false),
@ -38,6 +41,9 @@ CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
CodeStubAssembler::~CodeStubAssembler() {}
void CodeStubAssembler::CallPrologue() {}
void CodeStubAssembler::CallEpilogue() {}
Handle<Code> CodeStubAssembler::GenerateCode() {
DCHECK(!code_generated_);
@ -97,6 +103,10 @@ Node* CodeStubAssembler::LoadFramePointer() {
return raw_assembler_->LoadFramePointer();
}
Node* CodeStubAssembler::LoadStackPointer() {
return raw_assembler_->LoadStackPointer();
}
Node* CodeStubAssembler::SmiShiftBitsConstant() {
return Int32Constant(kSmiShiftSize + kSmiTagSize);
}
@ -118,6 +128,10 @@ Node* CodeStubAssembler::SmiUntag(Node* value) {
CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
#undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
return raw_assembler_->ChangeInt32ToInt64(value);
}
Node* CodeStubAssembler::WordShl(Node* value, int shift) {
return raw_assembler_->WordShl(value, Int32Constant(shift));
}
@ -179,9 +193,45 @@ Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
return nullptr;
}
Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
return raw_assembler_->Load(rep, base);
}
Node* CodeStubAssembler::Load(MachineType rep, Node* base, Node* index) {
return raw_assembler_->Load(rep, base, index);
}
Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
Node* value) {
return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
}
Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
Node* index, Node* value) {
return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
}
Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
Node* base, Node* value) {
return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
}
Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
Node* base, Node* index,
Node* value) {
return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
}
Node* CodeStubAssembler::Projection(int index, Node* value) {
return raw_assembler_->Projection(index, value);
}
Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
Node** args) {
return raw_assembler_->CallN(descriptor, code_target, args);
CallPrologue();
Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
CallEpilogue();
return return_value;
}
@ -190,16 +240,49 @@ Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
return raw_assembler_->TailCallN(descriptor, code_target, args);
}
Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* context) {
CallPrologue();
Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
CallEpilogue();
return return_value;
}
Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1) {
return raw_assembler_->CallRuntime1(function_id, arg1, context);
CallPrologue();
Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
CallEpilogue();
return return_value;
}
Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1, Node* arg2) {
return raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
CallPrologue();
Node* return_value =
raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
CallEpilogue();
return return_value;
}
Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1, Node* arg2,
Node* arg3) {
CallPrologue();
Node* return_value =
raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
CallEpilogue();
return return_value;
}
Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1, Node* arg2,
Node* arg3, Node* arg4) {
CallPrologue();
Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
arg3, arg4, context);
CallEpilogue();
return return_value;
}
Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
@ -227,6 +310,93 @@ Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
context);
}
Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
size_t result_size) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node** args = zone()->NewArray<Node*>(2);
args[0] = arg1;
args[1] = context;
return CallN(call_descriptor, target, args);
}
Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
Node* arg2, size_t result_size) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node** args = zone()->NewArray<Node*>(3);
args[0] = arg1;
args[1] = arg2;
args[2] = context;
return CallN(call_descriptor, target, args);
}
Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
Node* arg2, Node* arg3, size_t result_size) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node** args = zone()->NewArray<Node*>(4);
args[0] = arg1;
args[1] = arg2;
args[2] = arg3;
args[3] = context;
return CallN(call_descriptor, target, args);
}
Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4,
size_t result_size) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node** args = zone()->NewArray<Node*>(5);
args[0] = arg1;
args[1] = arg2;
args[2] = arg3;
args[3] = arg4;
args[4] = context;
return CallN(call_descriptor, target, args);
}
Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4,
Node* arg5, size_t result_size) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node** args = zone()->NewArray<Node*>(6);
args[0] = arg1;
args[1] = arg2;
args[2] = arg3;
args[3] = arg4;
args[4] = arg5;
args[5] = context;
return CallN(call_descriptor, target, args);
}
Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
Node* code_target = HeapConstant(stub.GetCode());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
@ -237,11 +407,12 @@ Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
Node* CodeStubAssembler::TailCall(
const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
Node** args) {
Node** args, size_t result_size) {
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(),
CallDescriptor::kSupportsTailCalls);
CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
return raw_assembler_->TailCallN(descriptor, code_target, args);
}
@ -277,10 +448,8 @@ void CodeStubAssembler::Switch(Node* index, Label* default_label,
// RawMachineAssembler delegate helpers:
Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
// The core implementation of Variable is stored through an indirection so

View File

@ -53,6 +53,7 @@ class Schedule;
V(Word32Or) \
V(Word32And) \
V(Word32Xor) \
V(Word32Shl) \
V(Word32Shr) \
V(Word32Sar) \
V(Word32Ror) \
@ -63,13 +64,17 @@ class Schedule;
V(Word64Xor) \
V(Word64Shr) \
V(Word64Sar) \
V(Word64Ror)
V(Word64Ror) \
V(UintPtrGreaterThanOrEqual)
class CodeStubAssembler {
public:
// |result_size| specifies the number of results returned by the stub.
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeStubAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor,
Code::Flags flags, const char* name);
Code::Flags flags, const char* name,
size_t result_size = 1);
virtual ~CodeStubAssembler();
Handle<Code> GenerateCode();
@ -115,6 +120,20 @@ class CodeStubAssembler {
Node* LoadFramePointer();
Node* LoadParentFramePointer();
// Access to the stack pointer
Node* LoadStackPointer();
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
Node* Load(MachineType rep, Node* base, Node* index);
// Store value to raw memory location.
Node* Store(MachineRepresentation rep, Node* base, Node* value);
Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
Node* value);
// Basic arithmetic operations.
#define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b);
CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP)
@ -122,10 +141,23 @@ class CodeStubAssembler {
Node* WordShl(Node* value, int shift);
// Conversions
Node* ChangeInt32ToInt64(Node* value);
// Projections
Node* Projection(int index, Node* value);
// Calls
Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1);
@ -136,9 +168,23 @@ class CodeStubAssembler {
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2, Node* arg3, Node* arg4);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, size_t result_size = 1);
Node* TailCallStub(CodeStub& stub, Node** args);
Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
Node** args);
Node** args, size_t result_size = 1);
// ===========================================================================
// Macros
@ -164,6 +210,16 @@ class CodeStubAssembler {
int additional_offset = 0);
Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
protected:
// Protected helpers which delegate to RawMachineAssembler.
Graph* graph();
Isolate* isolate();
Zone* zone();
// Enables subclasses to perform operations before and after a call.
virtual void CallPrologue();
virtual void CallEpilogue();
private:
friend class CodeStubAssemblerTester;
@ -172,11 +228,6 @@ class CodeStubAssembler {
Node* SmiShiftBitsConstant();
// Private helpers which delegate to RawMachineAssembler.
Graph* graph();
Isolate* isolate();
Zone* zone();
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Code::Flags flags_;
const char* name_;

View File

@ -1,788 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/interpreter-assembler.h"
#include <ostream>
#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
interpreter::Bytecode bytecode)
: bytecode_(bytecode),
raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone),
Linkage::GetInterpreterDispatchDescriptor(zone),
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags())),
accumulator_(
raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
context_(
raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
code_generated_(false) {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
}
InterpreterAssembler::~InterpreterAssembler() {}
Handle<Code> InterpreterAssembler::GenerateCode() {
DCHECK(!code_generated_);
// Disallow empty handlers that never return.
DCHECK_NE(0, graph()->end()->InputCount());
const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
Schedule* schedule = raw_assembler_->Export();
Code::Flags flags = Code::ComputeFlags(Code::STUB);
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags,
bytecode_name);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
OFStream os(stdout);
code->Disassemble(bytecode_name, os);
os << std::flush;
}
#endif
code_generated_ = true;
return code;
}
Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
Node* InterpreterAssembler::GetContext() { return context_; }
void InterpreterAssembler::SetContext(Node* value) {
StoreRegister(value, interpreter::Register::current_context());
context_ = value;
}
Node* InterpreterAssembler::BytecodeOffset() {
return raw_assembler_->Parameter(
Linkage::kInterpreterBytecodeOffsetParameter);
}
Node* InterpreterAssembler::RegisterFileRawPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
}
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeArrayParameter);
}
Node* InterpreterAssembler::DispatchTableRawPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::LoadRegister(int offset) {
return raw_assembler_->Load(MachineType::AnyTagged(),
RegisterFileRawPointer(), Int32Constant(offset));
}
Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
return WordShl(index, kPointerSizeLog2);
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
return raw_assembler_->Load(MachineType::AnyTagged(),
RegisterFileRawPointer(),
RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
return raw_assembler_->Store(MachineRepresentation::kTagged,
RegisterFileRawPointer(), Int32Constant(offset),
value, kNoWriteBarrier);
}
Node* InterpreterAssembler::StoreRegister(Node* value,
interpreter::Register reg) {
return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
return raw_assembler_->Store(
MachineRepresentation::kTagged, RegisterFileRawPointer(),
RegisterFrameOffset(reg_index), value, kNoWriteBarrier);
}
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
// Register indexes are negative, so the next index is minus one.
return IntPtrAdd(reg_index, Int32Constant(-1));
}
Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(interpreter::OperandSize::kByte,
interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
return raw_assembler_->Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(),
Int32Constant(interpreter::Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
}
Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(interpreter::OperandSize::kByte,
interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
Node* load = raw_assembler_->Load(
MachineType::Int8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(),
Int32Constant(interpreter::Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
// Ensure that we sign extend to full pointer size
if (kPointerSize == 8) {
load = raw_assembler_->ChangeInt32ToInt64(load);
}
return load;
}
Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(interpreter::OperandSize::kShort,
interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
if (TargetSupportsUnalignedAccess()) {
return raw_assembler_->Load(
MachineType::Uint16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(),
Int32Constant(interpreter::Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
} else {
int offset =
interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
Node* first_byte = raw_assembler_->Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
Node* second_byte = raw_assembler_->Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN
return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
first_byte);
#elif V8_TARGET_BIG_ENDIAN
return raw_assembler_->WordOr(WordShl(first_byte, kBitsPerByte),
second_byte);
#else
#error "Unknown Architecture"
#endif
}
}
Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(interpreter::OperandSize::kShort,
interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
int operand_offset =
interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
Node* load;
if (TargetSupportsUnalignedAccess()) {
load = raw_assembler_->Load(
MachineType::Int16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
Node* hi_byte_offset = Int32Constant(operand_offset + 1);
Node* lo_byte_offset = Int32Constant(operand_offset);
#elif V8_TARGET_BIG_ENDIAN
Node* hi_byte_offset = Int32Constant(operand_offset);
Node* lo_byte_offset = Int32Constant(operand_offset + 1);
#else
#error "Unknown Architecture"
#endif
Node* hi_byte =
raw_assembler_->Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), hi_byte_offset));
Node* lo_byte =
raw_assembler_->Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), lo_byte_offset));
hi_byte = raw_assembler_->Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
load = raw_assembler_->Word32Or(hi_byte, lo_byte);
}
// Ensure that we sign extend to full pointer size
if (kPointerSize == 8) {
load = raw_assembler_->ChangeInt32ToInt64(load);
}
return load;
}
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
case interpreter::OperandSize::kByte:
DCHECK_EQ(
interpreter::OperandType::kRegCount8,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperand(operand_index);
case interpreter::OperandSize::kShort:
DCHECK_EQ(
interpreter::OperandType::kRegCount16,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandShort(operand_index);
case interpreter::OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(interpreter::OperandType::kImm8,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandSignExtended(operand_index);
}
Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
case interpreter::OperandSize::kByte:
DCHECK_EQ(
interpreter::OperandType::kIdx8,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperand(operand_index);
case interpreter::OperandSize::kShort:
DCHECK_EQ(
interpreter::OperandType::kIdx16,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandShort(operand_index);
case interpreter::OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
interpreter::OperandType operand_type =
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index);
if (interpreter::Bytecodes::IsRegisterOperandType(operand_type)) {
interpreter::OperandSize operand_size =
interpreter::Bytecodes::SizeOfOperand(operand_type);
if (operand_size == interpreter::OperandSize::kByte) {
return BytecodeOperandSignExtended(operand_index);
} else if (operand_size == interpreter::OperandSize::kShort) {
return BytecodeOperandShortSignExtended(operand_index);
}
}
UNREACHABLE();
return nullptr;
}
Node* InterpreterAssembler::Int32Constant(int value) {
return raw_assembler_->Int32Constant(value);
}
Node* InterpreterAssembler::IntPtrConstant(intptr_t value) {
return raw_assembler_->IntPtrConstant(value);
}
Node* InterpreterAssembler::NumberConstant(double value) {
return raw_assembler_->NumberConstant(value);
}
Node* InterpreterAssembler::HeapConstant(Handle<HeapObject> object) {
return raw_assembler_->HeapConstant(object);
}
Node* InterpreterAssembler::BooleanConstant(bool value) {
return raw_assembler_->BooleanConstant(value);
}
Node* InterpreterAssembler::SmiShiftBitsConstant() {
return Int32Constant(kSmiShiftSize + kSmiTagSize);
}
Node* InterpreterAssembler::SmiTag(Node* value) {
return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
}
Node* InterpreterAssembler::SmiUntag(Node* value) {
return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
}
Node* InterpreterAssembler::IntPtrAdd(Node* a, Node* b) {
return raw_assembler_->IntPtrAdd(a, b);
}
Node* InterpreterAssembler::IntPtrSub(Node* a, Node* b) {
return raw_assembler_->IntPtrSub(a, b);
}
Node* InterpreterAssembler::Int32Sub(Node* a, Node* b) {
return raw_assembler_->Int32Sub(a, b);
}
Node* InterpreterAssembler::WordShl(Node* value, int shift) {
return raw_assembler_->WordShl(value, Int32Constant(shift));
}
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(index, kPointerSizeLog2));
return raw_assembler_->Load(MachineType::AnyTagged(), constant_pool,
entry_offset);
}
Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
int index) {
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(Int32Constant(index), kPointerSizeLog2));
return raw_assembler_->Load(MachineType::AnyTagged(), fixed_array,
entry_offset);
}
Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
return raw_assembler_->Load(MachineType::AnyTagged(), object,
IntPtrConstant(offset - kHeapObjectTag));
}
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
return raw_assembler_->Load(MachineType::AnyTagged(), context,
IntPtrConstant(Context::SlotOffset(slot_index)));
}
Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag));
return raw_assembler_->Load(MachineType::AnyTagged(), context, offset);
}
Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
Node* value) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag));
return raw_assembler_->Store(MachineRepresentation::kTagged, context, offset,
value, kFullWriteBarrier);
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
Node* function = raw_assembler_->Load(
MachineType::AnyTagged(), RegisterFileRawPointer(),
IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
Node* shared_info =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
Node* vector =
LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
return vector;
}
Node* InterpreterAssembler::Projection(int index, Node* node) {
return raw_assembler_->Projection(index, node);
}
Node* InterpreterAssembler::CallConstruct(Node* new_target, Node* constructor,
Node* first_arg, Node* arg_count) {
Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
Node* code_target = HeapConstant(callable.code());
Node** args = zone()->NewArray<Node*>(5);
args[0] = arg_count;
args[1] = new_target;
args[2] = constructor;
args[3] = first_arg;
args[4] = GetContext();
return CallN(descriptor, code_target, args);
}
void InterpreterAssembler::CallPrologue() {
StoreRegister(SmiTag(BytecodeOffset()),
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
}
Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
Node** args) {
CallPrologue();
Node* stack_pointer_before_call = nullptr;
if (FLAG_debug_code) {
stack_pointer_before_call = raw_assembler_->LoadStackPointer();
}
Node* return_val = raw_assembler_->CallN(descriptor, code_target, args);
if (FLAG_debug_code) {
Node* stack_pointer_after_call = raw_assembler_->LoadStackPointer();
AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
kUnexpectedStackPointer);
}
return return_val;
}
Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
Node* arg_count) {
Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
Node* code_target = HeapConstant(callable.code());
Node** args = zone()->NewArray<Node*>(4);
args[0] = arg_count;
args[1] = first_arg;
args[2] = function;
args[3] = GetContext();
return CallN(descriptor, code_target, args);
}
Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
Node* target, Node** args) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
return CallN(call_descriptor, target, args);
}
Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
Node* target, Node* arg1, Node* arg2,
Node* arg3) {
Node** args = zone()->NewArray<Node*>(4);
args[0] = arg1;
args[1] = arg2;
args[2] = arg3;
args[3] = GetContext();
return CallIC(descriptor, target, args);
}
Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
Node* target, Node* arg1, Node* arg2,
Node* arg3, Node* arg4) {
Node** args = zone()->NewArray<Node*>(5);
args[0] = arg1;
args[1] = arg2;
args[2] = arg3;
args[3] = arg4;
args[4] = GetContext();
return CallIC(descriptor, target, args);
}
Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
Node* target, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* arg5) {
Node** args = zone()->NewArray<Node*>(6);
args[0] = arg1;
args[1] = arg2;
args[2] = arg3;
args[3] = arg4;
args[4] = arg5;
args[5] = GetContext();
return CallIC(descriptor, target, args);
}
Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
Node* arg_count, int result_size) {
Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties, MachineType::AnyTagged(), result_size);
Node* code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
Node* function_table = raw_assembler_->ExternalConstant(
ExternalReference::runtime_function_table_address(isolate()));
Node* function_offset = raw_assembler_->Int32Mul(
function_id, Int32Constant(sizeof(Runtime::Function)));
Node* function = IntPtrAdd(function_table, function_offset);
Node* function_entry =
raw_assembler_->Load(MachineType::Pointer(), function,
Int32Constant(offsetof(Runtime::Function, entry)));
Node** args = zone()->NewArray<Node*>(4);
args[0] = arg_count;
args[1] = first_arg;
args[2] = function_entry;
args[3] = GetContext();
return CallN(descriptor, code_target, args);
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id) {
CallPrologue();
Node* return_val = raw_assembler_->CallRuntime0(function_id, GetContext());
return return_val;
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1) {
CallPrologue();
Node* return_val =
raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
return return_val;
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1, Node* arg2) {
CallPrologue();
Node* return_val =
raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
return return_val;
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1, Node* arg2, Node* arg3) {
CallPrologue();
Node* return_val =
raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, GetContext());
return return_val;
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1, Node* arg2, Node* arg3,
Node* arg4) {
CallPrologue();
Node* return_val = raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3,
arg4, GetContext());
return return_val;
}
void InterpreterAssembler::Return() {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
}
Node* exit_trampoline_code_object =
HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
// If the order of the parameters changes, you need to change the call signature below.
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
Node* args[] = { GetAccumulator(),
RegisterFileRawPointer(),
BytecodeOffset(),
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
GetContext() };
raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
args);
}
Node* InterpreterAssembler::Advance(int delta) {
return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
}
Node* InterpreterAssembler::Advance(Node* delta) {
return raw_assembler_->IntPtrAdd(BytecodeOffset(), delta);
}
void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
RawMachineLabel match, no_match;
raw_assembler_->Branch(condition, &match, &no_match);
raw_assembler_->Bind(&match);
DispatchTo(Advance(delta));
raw_assembler_->Bind(&no_match);
Dispatch();
}
void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
JumpConditional(raw_assembler_->WordEqual(lhs, rhs), delta);
}
void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
Node* delta) {
JumpConditional(raw_assembler_->WordNotEqual(lhs, rhs), delta);
}
void InterpreterAssembler::Dispatch() {
DispatchTo(Advance(interpreter::Bytecodes::Size(bytecode_)));
}
void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
}
Node* target_bytecode = raw_assembler_->Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion
// from code object on every dispatch.
Node* target_code_object = raw_assembler_->Load(
MachineType::Pointer(), DispatchTableRawPointer(),
raw_assembler_->Word32Shl(target_bytecode,
Int32Constant(kPointerSizeLog2)));
// If the order of the parameters changes, you need to change the call signature below.
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
Node* args[] = { GetAccumulator(),
RegisterFileRawPointer(),
new_bytecode_offset,
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
GetContext() };
raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
}
void InterpreterAssembler::StackCheck() {
RawMachineLabel end, ok, stack_guard;
Node* sp = raw_assembler_->LoadStackPointer();
Node* stack_limit = raw_assembler_->Load(
MachineType::Pointer(),
raw_assembler_->ExternalConstant(
ExternalReference::address_of_stack_limit(isolate())));
Node* condition = raw_assembler_->UintPtrGreaterThanOrEqual(sp, stack_limit);
raw_assembler_->Branch(condition, &ok, &stack_guard);
raw_assembler_->Bind(&stack_guard);
CallRuntime(Runtime::kStackGuard);
raw_assembler_->Goto(&end);
raw_assembler_->Bind(&ok);
raw_assembler_->Goto(&end);
raw_assembler_->Bind(&end);
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
Node* abort_id = SmiTag(Int32Constant(bailout_reason));
Node* ret_value = CallRuntime(Runtime::kAbort, abort_id);
// Unreached, but keeps turbofan happy.
raw_assembler_->Return(ret_value);
}
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
RawMachineLabel match, no_match;
Node* condition = raw_assembler_->WordEqual(lhs, rhs);
raw_assembler_->Branch(condition, &match, &no_match);
raw_assembler_->Bind(&no_match);
Abort(bailout_reason);
raw_assembler_->Bind(&match);
}
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
CallRuntime(function_id, BytecodeArrayTaggedPointer(),
SmiTag(BytecodeOffset()), GetAccumulator());
}
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
return true;
#else
#error "Unknown Architecture"
#endif
}
// RawMachineAssembler delegate helpers:
Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
Graph* InterpreterAssembler::graph() { return raw_assembler_->graph(); }
CallDescriptor* InterpreterAssembler::call_descriptor() const {
return raw_assembler_->call_descriptor();
}
Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
} // namespace compiler
} // namespace internal
} // namespace v8

View File

@ -1,241 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_INTERPRETER_ASSEMBLER_H_
#define V8_COMPILER_INTERPRETER_ASSEMBLER_H_
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/base/smart-pointers.h"
#include "src/builtins.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
class Isolate;
class Zone;
namespace compiler {
class CallDescriptor;
class Graph;
class Node;
class Operator;
class RawMachineAssembler;
class Schedule;
class InterpreterAssembler {
public:
InterpreterAssembler(Isolate* isolate, Zone* zone,
interpreter::Bytecode bytecode);
virtual ~InterpreterAssembler();
Handle<Code> GenerateCode();
// Returns the count immediate for bytecode operand |operand_index| in the
// current bytecode.
Node* BytecodeOperandCount(int operand_index);
// Returns the index immediate for bytecode operand |operand_index| in the
// current bytecode.
Node* BytecodeOperandIdx(int operand_index);
// Returns the Imm8 immediate for bytecode operand |operand_index| in the
// current bytecode.
Node* BytecodeOperandImm(int operand_index);
// Returns the register index for bytecode operand |operand_index| in the
// current bytecode.
Node* BytecodeOperandReg(int operand_index);
// Accumulator.
Node* GetAccumulator();
void SetAccumulator(Node* value);
// Context.
Node* GetContext();
void SetContext(Node* value);
// Loads from and stores to the interpreter register file.
Node* LoadRegister(int offset);
Node* LoadRegister(interpreter::Register reg);
Node* LoadRegister(Node* reg_index);
Node* StoreRegister(Node* value, int offset);
Node* StoreRegister(Node* value, interpreter::Register reg);
Node* StoreRegister(Node* value, Node* reg_index);
// Returns the next consecutive register.
Node* NextRegister(Node* reg_index);
// Returns the location in memory of the register |reg_index| in the
// interpreter register file.
Node* RegisterLocation(Node* reg_index);
// Constants.
Node* Int32Constant(int value);
Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
Node* HeapConstant(Handle<HeapObject> object);
Node* BooleanConstant(bool value);
// Tag and untag Smi values.
Node* SmiTag(Node* value);
Node* SmiUntag(Node* value);
// Basic arithmetic operations.
Node* IntPtrAdd(Node* a, Node* b);
Node* IntPtrSub(Node* a, Node* b);
Node* Int32Sub(Node* a, Node* b);
Node* WordShl(Node* value, int shift);
// Load constant at |index| in the constant pool.
Node* LoadConstantPoolEntry(Node* index);
// Load an element from a fixed array on the heap.
Node* LoadFixedArrayElement(Node* fixed_array, int index);
// Load a field from an object on the heap.
Node* LoadObjectField(Node* object, int offset);
// Load |slot_index| from |context|.
Node* LoadContextSlot(Node* context, int slot_index);
Node* LoadContextSlot(Node* context, Node* slot_index);
// Stores |value| into |slot_index| of |context|.
Node* StoreContextSlot(Node* context, Node* slot_index, Node* value);
// Load the TypeFeedbackVector for the current function.
Node* LoadTypeFeedbackVector();
// Project the output value at index |index|
Node* Projection(int index, Node* node);
// Call constructor |constructor| with |arg_count| arguments (not
// including receiver) and the first argument located at
// |first_arg|. The |new_target| is the same as the
// |constructor| for the new keyword, but differs for the super
// keyword.
Node* CallConstruct(Node* new_target, Node* constructor, Node* first_arg,
Node* arg_count);
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
// located at |first_arg|.
Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
// Call an IC code stub.
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
Node* arg2, Node* arg3);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
Node* arg2, Node* arg3, Node* arg4);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
// Call runtime function.
Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count,
int return_size = 1);
Node* CallRuntime(Runtime::FunctionId function_id);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
Node* arg3);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
Node* arg3, Node* arg4);
// Jump relative to the current bytecode by |jump_offset|.
void Jump(Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// |condition| is true. Helper function for JumpIfWordEqual and
// JumpIfWordNotEqual.
void JumpConditional(Node* condition, Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are equal.
void JumpIfWordEqual(Node* lhs, Node* rhs, Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are not equal.
void JumpIfWordNotEqual(Node* lhs, Node* rhs, Node* jump_offset);
// Perform a stack guard check.
void StackCheck();
// Returns from the function.
void Return();
// Dispatch to the bytecode.
void Dispatch();
// Abort with the given bailout reason.
void Abort(BailoutReason bailout_reason);
protected:
static bool TargetSupportsUnalignedAccess();
// Protected helpers (for testing) which delegate to RawMachineAssembler.
CallDescriptor* call_descriptor() const;
Graph* graph();
private:
// Returns a raw pointer to start of the register file on the stack.
Node* RegisterFileRawPointer();
// Returns a tagged pointer to the current function's BytecodeArray object.
Node* BytecodeArrayTaggedPointer();
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
Node* BytecodeOffset();
// Returns a raw pointer to first entry in the interpreter dispatch table.
Node* DispatchTableRawPointer();
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue();
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
// Returns the offset of register |index| relative to RegisterFilePointer().
Node* RegisterFrameOffset(Node* index);
Node* SmiShiftBitsConstant();
Node* BytecodeOperand(int operand_index);
Node* BytecodeOperandSignExtended(int operand_index);
Node* BytecodeOperandShort(int operand_index);
Node* BytecodeOperandShortSignExtended(int operand_index);
Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
// update BytecodeOffset() itself.
Node* Advance(int delta);
Node* Advance(Node* delta);
// Starts next instruction dispatch at |new_bytecode_offset|.
void DispatchTo(Node* new_bytecode_offset);
// Abort operations for debug code.
void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
// Private helpers which delegate to RawMachineAssembler.
Isolate* isolate();
Zone* zone();
interpreter::Bytecode bytecode_;
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Node* accumulator_;
Node* context_;
bool code_generated_;
DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_INTERPRETER_ASSEMBLER_H_

View File

@ -366,60 +366,6 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
"js-call");
}
CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
MachineSignature::Builder types(zone, 0, 6);
LocationSignature::Builder locations(zone, 0, 6);
// Add registers for fixed parameters passed via interpreter dispatch.
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kInterpreterAccumulatorRegister));
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
types.AddParam(MachineType::Pointer());
locations.AddParam(regloc(kInterpreterRegisterFileRegister));
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
types.AddParam(MachineType::IntPtr());
locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
types.AddParam(MachineType::Pointer());
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
// TODO(rmcilroy): Make the context param the one spilled to the stack once
// Turbofan supports modified stack arguments in tail calls.
locations.AddParam(
LinkageLocation::ForCallerFrameSlot(kInterpreterDispatchTableSpillSlot));
#else
locations.AddParam(regloc(kInterpreterDispatchTableRegister));
#endif
STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kContextRegister));
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
MachineType::None(), // target MachineType
target_loc, // target location
types.Build(), // machine_sig
locations.Build(), // location_sig
0, // stack_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp regs
CallDescriptor::kSupportsTailCalls | // flags
CallDescriptor::kCanUseRoots, // flags
"interpreter-dispatch");
}
// TODO(all): Add support for return representations/locations to
// CallInterfaceDescriptor.
// TODO(turbofan): cache call descriptors for code stub calls.

View File

@ -336,11 +336,6 @@ class Linkage : public ZoneObject {
Zone* zone, const MachineSignature* sig,
bool set_initialize_root_flag = false);
// Creates a call descriptor for interpreter handler code stubs. These are not
// intended to be called directly but are instead dispatched to by the
// interpreter.
static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone);
// Get the location of an (incoming) parameter to this function.
LinkageLocation GetParameterLocation(int index) const {
return incoming_->GetInputLocation(index + 1); // + 1 to skip target.
@ -390,15 +385,6 @@ class Linkage : public ZoneObject {
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
// Special parameter indices used to pass fixed register data through
// interpreter dispatches.
static const int kInterpreterAccumulatorParameter = 0;
static const int kInterpreterRegisterFileParameter = 1;
static const int kInterpreterBytecodeOffsetParameter = 2;
static const int kInterpreterBytecodeArrayParameter = 3;
static const int kInterpreterDispatchTableParameter = 4;
static const int kInterpreterContextParameter = 5;
private:
CallDescriptor* const incoming_;

View File

@ -412,6 +412,13 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister };
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -423,7 +430,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -435,7 +441,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -545,6 +545,19 @@ FunctionType* ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
FunctionType*
InterpreterDispatchDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
function->InitParameter(kAccumulatorParameter, AnyTagged(zone));
function->InitParameter(kRegisterFileParameter, ExternalPointer(zone));
function->InitParameter(kBytecodeOffsetParameter, UntaggedIntegral32(zone));
function->InitParameter(kBytecodeArrayParameter, AnyTagged(zone));
function->InitParameter(kDispatchTableParameter, AnyTagged(zone));
return function;
}
} // namespace internal
} // namespace v8

View File

@ -76,6 +76,7 @@ class PlatformInterfaceDescriptor;
V(MathPowInteger) \
V(ContextOnly) \
V(GrowArrayElements) \
V(InterpreterDispatch) \
V(InterpreterPushArgsAndCall) \
V(InterpreterPushArgsAndConstruct) \
V(InterpreterCEntry)
@ -752,6 +753,18 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
static const Register KeyRegister();
};
class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
CallInterfaceDescriptor)
static const int kAccumulatorParameter = 0;
static const int kRegisterFileParameter = 1;
static const int kBytecodeOffsetParameter = 2;
static const int kBytecodeArrayParameter = 3;
static const int kDispatchTableParameter = 4;
static const int kContextParameter = 5;
};
class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
public:
@ -773,7 +786,6 @@ class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
};
#undef DECLARE_DESCRIPTOR

View File

@ -1,3 +0,0 @@
include_rules = [
"+src/compiler/interpreter-assembler.h",
]

View File

@ -0,0 +1,487 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/interpreter/interpreter-assembler.h"
#include <ostream>
#include "src/code-factory.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace interpreter {
using compiler::Node;
InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
Bytecode bytecode)
: compiler::CodeStubAssembler(
isolate, zone, InterpreterDispatchDescriptor(isolate),
Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
bytecode_(bytecode),
accumulator_(
Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter)),
context_(Parameter(InterpreterDispatchDescriptor::kContextParameter)),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
}
InterpreterAssembler::~InterpreterAssembler() {}
Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
Node* InterpreterAssembler::GetContext() { return context_; }
void InterpreterAssembler::SetContext(Node* value) {
StoreRegister(value, Register::current_context());
context_ = value;
}
Node* InterpreterAssembler::BytecodeOffset() {
return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
}
Node* InterpreterAssembler::RegisterFileRawPointer() {
return Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
}
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
return Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter);
}
Node* InterpreterAssembler::DispatchTableRawPointer() {
return Parameter(InterpreterDispatchDescriptor::kDispatchTableParameter);
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::LoadRegister(int offset) {
return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
Int32Constant(offset));
}
Node* InterpreterAssembler::LoadRegister(Register reg) {
return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
return WordShl(index, kPointerSizeLog2);
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged,
RegisterFileRawPointer(), Int32Constant(offset),
value);
}
Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged,
RegisterFileRawPointer(),
RegisterFrameOffset(reg_index), value);
}
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
// Register indexes are negative, so the next index is minus one.
return IntPtrAdd(reg_index, Int32Constant(-1));
}
Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte,
Bytecodes::GetOperandSize(bytecode_, operand_index));
return Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
}
Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte,
Bytecodes::GetOperandSize(bytecode_, operand_index));
Node* load = Load(
MachineType::Int8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
// Ensure that we sign extend to full pointer size
if (kPointerSize == 8) {
load = ChangeInt32ToInt64(load);
}
return load;
}
Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kShort,
Bytecodes::GetOperandSize(bytecode_, operand_index));
if (TargetSupportsUnalignedAccess()) {
return Load(
MachineType::Uint16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
} else {
int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
Node* second_byte =
Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN
return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
#elif V8_TARGET_BIG_ENDIAN
return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
#else
#error "Unknown Architecture"
#endif
}
}
Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kShort,
Bytecodes::GetOperandSize(bytecode_, operand_index));
int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
Node* load;
if (TargetSupportsUnalignedAccess()) {
load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
Node* hi_byte_offset = Int32Constant(operand_offset + 1);
Node* lo_byte_offset = Int32Constant(operand_offset);
#elif V8_TARGET_BIG_ENDIAN
Node* hi_byte_offset = Int32Constant(operand_offset);
Node* lo_byte_offset = Int32Constant(operand_offset + 1);
#else
#error "Unknown Architecture"
#endif
Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), hi_byte_offset));
Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), lo_byte_offset));
hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
load = Word32Or(hi_byte, lo_byte);
}
// Ensure that we sign extend to full pointer size
if (kPointerSize == 8) {
load = ChangeInt32ToInt64(load);
}
return load;
}
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
case OperandSize::kByte:
DCHECK_EQ(OperandType::kRegCount8,
Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperand(operand_index);
case OperandSize::kShort:
DCHECK_EQ(OperandType::kRegCount16,
Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandShort(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(OperandType::kImm8,
Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandSignExtended(operand_index);
}
Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
case OperandSize::kByte:
DCHECK_EQ(OperandType::kIdx8,
Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperand(operand_index);
case OperandSize::kShort:
DCHECK_EQ(OperandType::kIdx16,
Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandShort(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
OperandType operand_type =
Bytecodes::GetOperandType(bytecode_, operand_index);
if (Bytecodes::IsRegisterOperandType(operand_type)) {
OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
if (operand_size == OperandSize::kByte) {
return BytecodeOperandSignExtended(operand_index);
} else if (operand_size == OperandSize::kShort) {
return BytecodeOperandShortSignExtended(operand_index);
}
}
UNREACHABLE();
return nullptr;
}
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(index, kPointerSizeLog2));
return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
}
Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
int index) {
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(Int32Constant(index), kPointerSizeLog2));
return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
}
Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
return Load(MachineType::AnyTagged(), object,
IntPtrConstant(offset - kHeapObjectTag));
}
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
return Load(MachineType::AnyTagged(), context,
IntPtrConstant(Context::SlotOffset(slot_index)));
}
Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag));
return Load(MachineType::AnyTagged(), context, offset);
}
Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
Node* value) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag));
return Store(MachineRepresentation::kTagged, context, offset, value);
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
Node* function = Load(
MachineType::AnyTagged(), RegisterFileRawPointer(),
IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
Node* shared_info =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
Node* vector =
LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
return vector;
}
void InterpreterAssembler::CallPrologue() {
StoreRegister(SmiTag(BytecodeOffset()),
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
if (FLAG_debug_code && !disable_stack_check_across_call_) {
DCHECK(stack_pointer_before_call_ == nullptr);
stack_pointer_before_call_ = LoadStackPointer();
}
}
void InterpreterAssembler::CallEpilogue() {
if (FLAG_debug_code && !disable_stack_check_across_call_) {
Node* stack_pointer_after_call = LoadStackPointer();
Node* stack_pointer_before_call = stack_pointer_before_call_;
stack_pointer_before_call_ = nullptr;
AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
kUnexpectedStackPointer);
}
}
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count) {
Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
}
Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* new_target, Node* first_arg,
Node* arg_count) {
Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
new_target, constructor, first_arg);
}
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Node* first_arg, Node* arg_count,
int result_size) {
Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
Node* code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
Node* function_table = ExternalConstant(
ExternalReference::runtime_function_table_address(isolate()));
Node* function_offset =
Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
Node* function = IntPtrAdd(function_table, function_offset);
Node* function_entry =
Load(MachineType::Pointer(), function,
Int32Constant(offsetof(Runtime::Function, entry)));
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function_entry, result_size);
}
Node* InterpreterAssembler::Advance(int delta) {
return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
}
Node* InterpreterAssembler::Advance(Node* delta) {
return IntPtrAdd(BytecodeOffset(), delta);
}
void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
CodeStubAssembler::Label match(this);
CodeStubAssembler::Label no_match(this);
Branch(condition, &match, &no_match);
Bind(&match);
DispatchTo(Advance(delta));
Bind(&no_match);
Dispatch();
}
void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
JumpConditional(WordEqual(lhs, rhs), delta);
}
void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
Node* delta) {
JumpConditional(WordNotEqual(lhs, rhs), delta);
}
void InterpreterAssembler::Dispatch() {
DispatchTo(Advance(Bytecodes::Size(bytecode_)));
}
void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
}
Node* target_bytecode = Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion
// from code object on every dispatch.
Node* target_code_object =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
InterpreterDispatchDescriptor descriptor(isolate());
Node* args[] = {GetAccumulator(), RegisterFileRawPointer(),
new_bytecode_offset, BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(), GetContext()};
TailCall(descriptor, target_code_object, args, 0);
}
void InterpreterAssembler::InterpreterReturn() {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
}
InterpreterDispatchDescriptor descriptor(isolate());
Node* exit_trampoline_code_object =
HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
Node* args[] = {GetAccumulator(), RegisterFileRawPointer(),
BytecodeOffset(), BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(), GetContext()};
TailCall(descriptor, exit_trampoline_code_object, args, 0);
}
void InterpreterAssembler::StackCheck() {
CodeStubAssembler::Label end(this);
CodeStubAssembler::Label ok(this);
CodeStubAssembler::Label stack_guard(this);
Node* sp = LoadStackPointer();
Node* stack_limit = Load(
MachineType::Pointer(),
ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
Branch(condition, &ok, &stack_guard);
Bind(&stack_guard);
CallRuntime(Runtime::kStackGuard, GetContext());
Goto(&end);
Bind(&ok);
Goto(&end);
Bind(&end);
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
disable_stack_check_across_call_ = true;
Node* abort_id = SmiTag(Int32Constant(bailout_reason));
Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
disable_stack_check_across_call_ = false;
// Unreached, but keeps turbofan happy.
Return(ret_value);
}
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
CodeStubAssembler::Label match(this);
CodeStubAssembler::Label no_match(this);
Node* condition = WordEqual(lhs, rhs);
Branch(condition, &match, &no_match);
Bind(&no_match);
Abort(bailout_reason);
Bind(&match);
}
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
SmiTag(BytecodeOffset()), GetAccumulator());
}
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
return true;
#else
#error "Unknown Architecture"
#endif
}
} // namespace interpreter
} // namespace internal
} // namespace v8
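For orientation, a minimal sketch of how a bytecode handler generator drives this assembler — decode an operand, touch the register file and the accumulator, then dispatch. The handler body below is illustrative and is not copied from the interpreter.cc diff (collapsed further down):

void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  // Decode operand 0 of the current bytecode as a register index.
  compiler::Node* reg_index = assembler->BytecodeOperandReg(0);
  // Load that register from the register file and make it the accumulator.
  compiler::Node* value = assembler->LoadRegister(reg_index);
  assembler->SetAccumulator(value);
  // Advance past the current bytecode and tail-call the next handler.
  assembler->Dispatch();
}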

View File

@ -0,0 +1,192 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#include "src/allocation.h"
#include "src/base/smart-pointers.h"
#include "src/builtins.h"
#include "src/compiler/code-stub-assembler.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
namespace interpreter {
class InterpreterAssembler : public compiler::CodeStubAssembler {
public:
InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode);
virtual ~InterpreterAssembler();
// Returns the count immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandCount(int operand_index);
// Returns the index immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandIdx(int operand_index);
// Returns the Imm8 immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandImm(int operand_index);
// Returns the register index for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandReg(int operand_index);
// Accumulator.
compiler::Node* GetAccumulator();
void SetAccumulator(compiler::Node* value);
// Context.
compiler::Node* GetContext();
void SetContext(compiler::Node* value);
// Loads from and stores to the interpreter register file.
compiler::Node* LoadRegister(int offset);
compiler::Node* LoadRegister(Register reg);
compiler::Node* LoadRegister(compiler::Node* reg_index);
compiler::Node* StoreRegister(compiler::Node* value, int offset);
compiler::Node* StoreRegister(compiler::Node* value, Register reg);
compiler::Node* StoreRegister(compiler::Node* value,
compiler::Node* reg_index);
// Returns the next consecutive register.
compiler::Node* NextRegister(compiler::Node* reg_index);
// Returns the location in memory of the register |reg_index| in the
// interpreter register file.
compiler::Node* RegisterLocation(compiler::Node* reg_index);
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
// Load an element from a fixed array on the heap.
compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
// Load a field from an object on the heap.
compiler::Node* LoadObjectField(compiler::Node* object, int offset);
// Load |slot_index| from |context|.
compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
compiler::Node* LoadContextSlot(compiler::Node* context,
compiler::Node* slot_index);
// Stores |value| into |slot_index| of |context|.
compiler::Node* StoreContextSlot(compiler::Node* context,
compiler::Node* slot_index,
compiler::Node* value);
// Load the TypeFeedbackVector for the current function.
compiler::Node* LoadTypeFeedbackVector();
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
// located at |first_arg|.
compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count);
// Call constructor |constructor| with |arg_count| arguments (not
// including receiver) and the first argument located at
// |first_arg|. The |new_target| is the same as the
// |constructor| for the new keyword, but differs for the super
// keyword.
compiler::Node* CallConstruct(compiler::Node* constructor,
compiler::Node* context,
compiler::Node* new_target,
compiler::Node* first_arg,
compiler::Node* arg_count);
// Call runtime function with |arg_count| arguments and the first argument
// located at |first_arg|.
compiler::Node* CallRuntimeN(compiler::Node* function_id,
compiler::Node* context,
compiler::Node* first_arg,
compiler::Node* arg_count, int result_size = 1);
// Jump relative to the current bytecode by |jump_offset|.
void Jump(compiler::Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// |condition| is true. Helper function for JumpIfWordEqual and
// JumpIfWordNotEqual.
void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are equal.
void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are not equal.
void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* jump_offset);
// Perform a stack guard check.
void StackCheck();
// Returns from the function.
void InterpreterReturn();
// Dispatch to the bytecode.
void Dispatch();
// Abort with the given bailout reason.
void Abort(BailoutReason bailout_reason);
protected:
static bool TargetSupportsUnalignedAccess();
private:
// Returns a raw pointer to start of the register file on the stack.
compiler::Node* RegisterFileRawPointer();
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
compiler::Node* BytecodeOffset();
// Returns a raw pointer to first entry in the interpreter dispatch table.
compiler::Node* DispatchTableRawPointer();
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue() override;
void CallEpilogue() override;
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
// Returns the offset of register |index| relative to RegisterFilePointer().
compiler::Node* RegisterFrameOffset(compiler::Node* index);
compiler::Node* BytecodeOperand(int operand_index);
compiler::Node* BytecodeOperandSignExtended(int operand_index);
compiler::Node* BytecodeOperandShort(int operand_index);
compiler::Node* BytecodeOperandShortSignExtended(int operand_index);
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
// update BytecodeOffset() itself.
compiler::Node* Advance(int delta);
compiler::Node* Advance(compiler::Node* delta);
// Starts next instruction dispatch at |new_bytecode_offset|.
void DispatchTo(compiler::Node* new_bytecode_offset);
// Abort operations for debug code.
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
BailoutReason bailout_reason);
Bytecode bytecode_;
compiler::Node* accumulator_;
compiler::Node* context_;
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};
} // namespace interpreter
} // namespace internal
} // namespace v8
#endif // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
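CallPrologue() and CallEpilogue() above override virtual hooks on CodeStubAssembler, which is assumed to invoke them around each call it emits; that is how InterpreterAssembler spills the bytecode offset and checks the stack pointer across calls. A minimal sketch of a subclass using that contract (the class name and the counter are hypothetical; only the override points and the base-class constructor arguments come from this change):

class CountingAssembler : public compiler::CodeStubAssembler {
 public:
  CountingAssembler(Isolate* isolate, Zone* zone,
                    const CallInterfaceDescriptor& descriptor)
      : compiler::CodeStubAssembler(isolate, zone, descriptor,
                                    Code::ComputeFlags(Code::STUB),
                                    "CountingAssembler", 1) {}

 protected:
  // Invoked by CodeStubAssembler just before it emits a call.
  void CallPrologue() override { calls_emitted_++; }
  // Invoked just after the call has been emitted.
  void CallEpilogue() override {}

 private:
  // Counts calls emitted while building this stub's graph.
  int calls_emitted_ = 0;
};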

File diff suppressed because it is too large


View File

@ -21,12 +21,10 @@ class Isolate;
class Callable;
class CompilationInfo;
namespace compiler {
class InterpreterAssembler;
}
namespace interpreter {
class InterpreterAssembler;
class Interpreter {
public:
explicit Interpreter(Isolate* isolate);
@ -41,6 +39,8 @@ class Interpreter {
// GC support.
void IterateDispatchTable(ObjectVisitor* v);
void TraceCodegen(Handle<Code> code, const char* name);
Address dispatch_table_address() {
return reinterpret_cast<Address>(&dispatch_table_[0]);
}
@ -48,74 +48,73 @@ class Interpreter {
private:
// Bytecode handler generator functions.
#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
void Do##Name(compiler::InterpreterAssembler* assembler);
void Do##Name(InterpreterAssembler* assembler);
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
// Generates code to perform the binary operations via |function_id|.
void DoBinaryOp(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
InterpreterAssembler* assembler);
// Generates code to perform the count operations via |function_id|.
void DoCountOp(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
InterpreterAssembler* assembler);
// Generates code to perform the comparison operation associated with
// |compare_op|.
void DoCompareOp(Token::Value compare_op,
compiler::InterpreterAssembler* assembler);
void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
// Generates code to load a constant from the constant pool.
void DoLoadConstant(compiler::InterpreterAssembler* assembler);
void DoLoadConstant(InterpreterAssembler* assembler);
// Generates code to perform a global load via |ic|.
void DoLoadGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
void DoLoadGlobal(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a global store via |ic|.
void DoStoreGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
void DoStoreGlobal(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a named property load via |ic|.
void DoLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
void DoLoadIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a keyed property load via |ic|.
void DoKeyedLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
void DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a named property store via |ic|.
void DoStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
void DoStoreIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a keyed property store via |ic|.
void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
void DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a JS call.
void DoJSCall(compiler::InterpreterAssembler* assembler);
void DoJSCall(InterpreterAssembler* assembler);
// Generates code to perform a runtime call.
void DoCallRuntimeCommon(compiler::InterpreterAssembler* assembler);
void DoCallRuntimeCommon(InterpreterAssembler* assembler);
// Generates code to perform a runtime call returning a pair.
void DoCallRuntimeForPairCommon(compiler::InterpreterAssembler* assembler);
void DoCallRuntimeForPairCommon(InterpreterAssembler* assembler);
// Generates code to perform a JS runtime call.
void DoCallJSRuntimeCommon(compiler::InterpreterAssembler* assembler);
void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
// Generates code to perform a constructor call.
void DoCallConstruct(compiler::InterpreterAssembler* assembler);
void DoCallConstruct(InterpreterAssembler* assembler);
// Generates code to create a literal via |function_id|.
void DoCreateLiteral(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
InterpreterAssembler* assembler);
// Generates code to perform delete via |function_id|.
void DoDelete(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
InterpreterAssembler* assembler);
// Generates code to perform a lookup slot load via |function_id|.
void DoLoadLookupSlot(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
InterpreterAssembler* assembler);
// Generates code to perform a lookup slot store depending on |language_mode|.
void DoStoreLookupSlot(LanguageMode language_mode,
compiler::InterpreterAssembler* assembler);
InterpreterAssembler* assembler);
bool IsDispatchTableInitialized();
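Roughly, the Do* generators above are driven once per bytecode when the dispatch table is populated; a sketch of that loop follows (the real wiring is in the interpreter.cc diff collapsed above, and GenerateCode() is assumed to be the CodeStubAssembler entry point that assembles the handler):

void Interpreter::Initialize() {
  Zone zone;
  HandleScope scope(isolate_);

#define GENERATE_CODE(Name, ...)                                         \
  {                                                                      \
    /* One assembler per bytecode; Do##Name builds the handler graph. */ \
    InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name);  \
    Do##Name(&assembler);                                                \
    Handle<Code> code = assembler.GenerateCode();                        \
    dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code;       \
  }
  BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
}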

View File

@ -406,6 +406,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -417,7 +425,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -429,7 +436,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -406,6 +406,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -417,7 +425,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -429,7 +436,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -404,6 +404,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -415,7 +423,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -427,7 +434,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -405,6 +405,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -416,7 +424,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -428,7 +435,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -410,6 +410,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -421,7 +429,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -433,7 +440,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -1,57 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
#define V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
#include "src/compiler/interpreter-assembler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock-support.h"
namespace v8 {
namespace internal {
namespace compiler {
using ::testing::Matcher;
class InterpreterAssemblerTest : public TestWithIsolateAndZone {
public:
InterpreterAssemblerTest() {}
~InterpreterAssemblerTest() override {}
class InterpreterAssemblerForTest final : public InterpreterAssembler {
public:
InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
interpreter::Bytecode bytecode)
: InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
~InterpreterAssemblerForTest() override {}
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher);
Matcher<Node*> IsBytecodeOperand(int offset);
Matcher<Node*> IsBytecodeOperandSignExtended(int offset);
Matcher<Node*> IsBytecodeOperandShort(int offset);
Matcher<Node*> IsBytecodeOperandShortSignExtended(int offset);
using InterpreterAssembler::call_descriptor;
using InterpreterAssembler::graph;
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
};
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/unittests/compiler/interpreter-assembler-unittest.h"
#include "test/unittests/interpreter/interpreter-assembler-unittest.h"
#include "src/code-factory.h"
#include "src/compiler/graph.h"
@ -16,7 +16,10 @@ using ::testing::_;
namespace v8 {
namespace internal {
namespace compiler {
using namespace compiler;
namespace interpreter {
const interpreter::Bytecode kBytecodes[] = {
#define DEFINE_BYTECODE(Name, ...) interpreter::Bytecode::k##Name,
@ -24,55 +27,47 @@ const interpreter::Bytecode kBytecodes[] = {
#undef DEFINE_BYTECODE
};
Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
: IsInt32Constant(static_cast<int32_t>(value));
}
Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
: IsInt32Add(lhs_matcher, rhs_matcher);
}
Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
: IsInt32Sub(lhs_matcher, rhs_matcher);
}
Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
: IsWord32Shl(lhs_matcher, rhs_matcher);
}
Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
: IsWord32Sar(lhs_matcher, rhs_matcher);
}
Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
: IsWord32Or(lhs_matcher, rhs_matcher);
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
@ -81,52 +76,57 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
value_matcher, _, _);
}
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
int offset) {
return IsLoad(
MachineType::Uint8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(offset)));
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsBytecodeOperandSignExtended(int offset) {
Matcher<Node*> load_matcher = IsLoad(
MachineType::Int8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(offset)));
if (kPointerSize == 8) {
load_matcher = IsChangeInt32ToInt64(load_matcher);
}
return load_matcher;
}
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint16(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(offset)));
} else {
Matcher<Node*> first_byte = IsLoad(
MachineType::Uint8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(offset)));
Matcher<Node*> second_byte = IsLoad(
MachineType::Uint8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset + 1)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN
return IsWordOr(IsWordShl(second_byte, IsInt32Constant(kBitsPerByte)),
first_byte);
@ -139,16 +139,17 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
}
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsBytecodeOperandShortSignExtended(int offset) {
Matcher<Node*> load_matcher;
if (TargetSupportsUnalignedAccess()) {
load_matcher = IsLoad(
MachineType::Int16(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
int hi_byte_offset = offset + 1;
@ -162,15 +163,19 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
#endif
Matcher<Node*> hi_byte = IsLoad(
MachineType::Int8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(hi_byte_offset)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(hi_byte_offset)));
hi_byte = IsWord32Shl(hi_byte, IsInt32Constant(kBitsPerByte));
Matcher<Node*> lo_byte = IsLoad(
MachineType::Uint8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(lo_byte_offset)));
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrAdd(
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(lo_byte_offset)));
load_matcher = IsWord32Or(hi_byte, lo_byte);
}
@ -180,7 +185,6 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
return load_matcher;
}
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -191,34 +195,33 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
Matcher<Node*> next_bytecode_offset_matcher =
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher =
m.IsLoad(MachineType::Pointer(),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
IsWord32Shl(target_bytecode_matcher,
IsInt32Constant(kPointerSizeLog2)));
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
Matcher<Node*> target_bytecode_matcher = m.IsLoad(
MachineType::Uint8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
IsWord32Shl(target_bytecode_matcher,
IsInt32Constant(kPointerSizeLog2)));
EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
EXPECT_THAT(
tail_call_node,
IsTailCall(m.call_descriptor(), code_target_matcher,
IsParameter(Linkage::kInterpreterAccumulatorParameter),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
IsParameter(Linkage::kInterpreterContextParameter), _, _));
IsTailCall(
_, code_target_matcher,
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
_));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
int jump_offsets[] = {-9710, -77, 0, +3, +97109};
TRACED_FOREACH(int, jump_offset, jump_offsets) {
@ -230,35 +233,37 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
Matcher<Node*> next_bytecode_offset_matcher =
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(jump_offset));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher =
m.IsLoad(MachineType::Pointer(),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
IsWord32Shl(target_bytecode_matcher,
IsInt32Constant(kPointerSizeLog2)));
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(jump_offset));
Matcher<Node*> target_bytecode_matcher = m.IsLoad(
MachineType::Uint8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
IsWord32Shl(target_bytecode_matcher,
IsInt32Constant(kPointerSizeLog2)));
EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
EXPECT_THAT(
tail_call_node,
IsTailCall(m.call_descriptor(), code_target_matcher,
IsParameter(Linkage::kInterpreterAccumulatorParameter),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
IsParameter(Linkage::kInterpreterContextParameter), _, _));
IsTailCall(
_, code_target_matcher,
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
IsParameter(
InterpreterDispatchDescriptor::kRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(
InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsParameter(
InterpreterDispatchDescriptor::kDispatchTableParameter),
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
_));
}
}
}
TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
static const int kJumpIfTrueOffset = 73;
@ -276,61 +281,65 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
int jump_offsets[] = {kJumpIfTrueOffset,
interpreter::Bytecodes::Size(bytecode)};
for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
Matcher<Node*> next_bytecode_offset_matcher =
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(jump_offsets[i]));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher =
m.IsLoad(MachineType::Pointer(),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
IsWord32Shl(target_bytecode_matcher,
IsInt32Constant(kPointerSizeLog2)));
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(jump_offsets[i]));
Matcher<Node*> target_bytecode_matcher = m.IsLoad(
MachineType::Uint8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
IsWord32Shl(target_bytecode_matcher,
IsInt32Constant(kPointerSizeLog2)));
EXPECT_THAT(
end->InputAt(i),
IsTailCall(m.call_descriptor(), code_target_matcher,
IsParameter(Linkage::kInterpreterAccumulatorParameter),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
IsParameter(Linkage::kInterpreterContextParameter), _, _));
IsTailCall(
_, code_target_matcher,
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
IsParameter(
InterpreterDispatchDescriptor::kRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(
InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsParameter(
InterpreterDispatchDescriptor::kDispatchTableParameter),
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
_));
}
// TODO(oth): test control flow paths.
}
}
TARGET_TEST_F(InterpreterAssemblerTest, Return) {
TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Return();
m.InterpreterReturn();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
Handle<HeapObject> exit_trampoline =
isolate()->builtins()->InterpreterExitTrampoline();
EXPECT_THAT(
tail_call_node,
IsTailCall(m.call_descriptor(), IsHeapConstant(exit_trampoline),
IsParameter(Linkage::kInterpreterAccumulatorParameter),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
IsParameter(Linkage::kInterpreterContextParameter), _, _));
IsTailCall(
_, IsHeapConstant(exit_trampoline),
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
_));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -382,15 +391,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
// Should be incoming accumulator if not set.
EXPECT_THAT(m.GetAccumulator(),
IsParameter(Linkage::kInterpreterAccumulatorParameter));
EXPECT_THAT(
m.GetAccumulator(),
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
// Should be set by SedtAccumulator.
// Should be set by SetAccumulator.
Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
m.SetAccumulator(accumulator_value_1);
EXPECT_THAT(m.GetAccumulator(), accumulator_value_1);
@ -407,12 +416,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
Node* tail_call_node = end->InputAt(0);
EXPECT_THAT(tail_call_node,
IsTailCall(m.call_descriptor(), _, accumulator_value_2, _, _, _,
_, _, _));
IsTailCall(_, _, accumulator_value_2, _, _, _, _, _, _));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, GetSetContext) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -422,7 +429,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetContext) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -431,12 +437,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
EXPECT_THAT(
reg_location_node,
IsIntPtrAdd(
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -444,13 +449,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
Node* load_reg_node = m.LoadRegister(reg_index_node);
EXPECT_THAT(
load_reg_node,
m.IsLoad(MachineType::AnyTagged(),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
m.IsLoad(
MachineType::AnyTagged(),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -459,15 +464,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(
store_reg_node,
m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
kNoWriteBarrier),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
store_value));
m.IsStore(
StoreRepresentation(MachineRepresentation::kTagged,
kNoWriteBarrier),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
store_value));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -479,7 +484,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, IntPtrAdd) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -490,7 +494,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, IntPtrAdd) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -501,7 +504,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -511,7 +513,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -519,7 +520,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
Matcher<Node*> constant_pool_matcher = m.IsLoad(
MachineType::AnyTagged(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrConstant(BytecodeArray::kConstantPoolOffset - kHeapObjectTag));
EXPECT_THAT(
load_constant,
@ -530,7 +531,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadFixedArrayElement) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -547,7 +547,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFixedArrayElement) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -560,7 +559,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -576,7 +574,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -595,21 +592,22 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* arg1 = m.Int32Constant(2);
Node* arg2 = m.Int32Constant(3);
Node* call_runtime = m.CallRuntime(Runtime::kAdd, arg1, arg2);
Node* context =
m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
Node* call_runtime = m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
EXPECT_THAT(
call_runtime,
IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
IsParameter(Linkage::kInterpreterContextParameter), _, _));
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
_));
}
}
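The updated expectation reflects the new CallRuntime signature: the context is passed as an explicit node (typically obtained from GetContext()) rather than read from a dedicated dispatch-linkage parameter. A minimal sketch of a binary-op handler against the new interface, assuming the helper names used in src/interpreter/interpreter.cc:

// Sketch: Add combines a register value with the accumulator via Runtime::kAdd.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  compiler::Node* reg_index = assembler->BytecodeOperandReg(0);
  compiler::Node* lhs = assembler->LoadRegister(reg_index);
  compiler::Node* rhs = assembler->GetAccumulator();
  compiler::Node* context = assembler->GetContext();
  compiler::Node* result =
      assembler->CallRuntime(Runtime::kAdd, context, lhs, rhs);
  assembler->SetAccumulator(result);
  assembler->Dispatch();
}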
TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
const int kResultSizes[] = {1, 2};
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
@ -620,6 +618,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Node* function_id = m.Int32Constant(0);
Node* first_arg = m.Int32Constant(1);
Node* arg_count = m.Int32Constant(2);
Node* context =
m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
Matcher<Node*> function_table = IsExternalConstant(
ExternalReference::runtime_function_table_address(isolate()));
@ -630,36 +630,18 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
m.IsLoad(MachineType::Pointer(), function,
IsInt32Constant(offsetof(Runtime::Function, entry)));
Node* call_runtime =
m.CallRuntime(function_id, first_arg, arg_count, result_size);
Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
arg_count, result_size);
EXPECT_THAT(
call_runtime,
IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
function_entry,
IsParameter(Linkage::kInterpreterContextParameter), _, _));
IsParameter(InterpreterDispatchDescriptor::kContextParameter),
_, _));
}
}
}
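CallRuntimeN is the variable-argument counterpart, used when the runtime function id and argument range come from bytecode operands; it now also takes the context explicitly. A hedged sketch of the CallRuntime bytecode handler, with helper names assumed from src/interpreter/interpreter.cc:

// Sketch: call a runtime function whose id and argument registers are
// given by the bytecode operands.
void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
  compiler::Node* function_id = assembler->BytecodeOperandIdx(0);
  compiler::Node* first_arg_reg = assembler->BytecodeOperandReg(1);
  compiler::Node* first_arg = assembler->RegisterLocation(first_arg_reg);
  compiler::Node* args_count = assembler->BytecodeOperandCount(2);
  compiler::Node* context = assembler->GetContext();
  compiler::Node* result =
      assembler->CallRuntimeN(function_id, context, first_arg, args_count);
  assembler->SetAccumulator(result);
  assembler->Dispatch();
}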
TARGET_TEST_F(InterpreterAssemblerTest, CallIC) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
LoadWithVectorDescriptor descriptor(isolate());
Node* target = m.Int32Constant(1);
Node* arg1 = m.Int32Constant(2);
Node* arg2 = m.Int32Constant(3);
Node* arg3 = m.Int32Constant(4);
Node* arg4 = m.Int32Constant(5);
Node* call_ic = m.CallIC(descriptor, target, arg1, arg2, arg3, arg4);
EXPECT_THAT(
call_ic,
IsCall(_, target, arg1, arg2, arg3, arg4,
IsParameter(Linkage::kInterpreterContextParameter), _, _));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -667,26 +649,28 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
Node* function = m.Int32Constant(0);
Node* first_arg = m.Int32Constant(1);
Node* arg_count = m.Int32Constant(2);
Node* call_js = m.CallJS(function, first_arg, arg_count);
Node* context =
m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
Node* call_js = m.CallJS(function, context, first_arg, arg_count);
EXPECT_THAT(
call_js,
IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
function, IsParameter(Linkage::kInterpreterContextParameter), _,
function,
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
_));
}
}
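CallJS follows the same pattern, threading the context through as an explicit argument. Illustration only (not part of this diff), with helper names assumed from src/interpreter/interpreter.cc:

// Sketch: call a JS function held in a register, with the receiver and
// arguments in consecutive registers.
void Interpreter::DoCall(InterpreterAssembler* assembler) {
  compiler::Node* function_reg = assembler->BytecodeOperandReg(0);
  compiler::Node* function = assembler->LoadRegister(function_reg);
  compiler::Node* receiver_reg = assembler->BytecodeOperandReg(1);
  compiler::Node* first_arg = assembler->RegisterLocation(receiver_reg);
  compiler::Node* args_count = assembler->BytecodeOperandCount(2);
  compiler::Node* context = assembler->GetContext();
  compiler::Node* result =
      assembler->CallJS(function, context, first_arg, args_count);
  assembler->SetAccumulator(result);
  assembler->Dispatch();
}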
TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* feedback_vector = m.LoadTypeFeedbackVector();
Matcher<Node*> load_function_matcher =
m.IsLoad(MachineType::AnyTagged(),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsIntPtrConstant(
InterpreterFrameConstants::kFunctionFromRegisterPointer));
Matcher<Node*> load_function_matcher = m.IsLoad(
MachineType::AnyTagged(),
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
IsIntPtrConstant(
InterpreterFrameConstants::kFunctionFromRegisterPointer));
Matcher<Node*> load_shared_function_info_matcher =
m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
@ -700,6 +684,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
}
}
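The chained matchers here mirror what LoadTypeFeedbackVector is expected to emit: the function from the register file, the SharedFunctionInfo from the function, and the feedback vector from the SharedFunctionInfo. The following is only an illustrative reconstruction of that implementation; helper names are assumed from src/interpreter/interpreter-assembler.cc:

// Sketch: the load chain behind LoadTypeFeedbackVector.
compiler::Node* InterpreterAssembler::LoadTypeFeedbackVector() {
  compiler::Node* function = Load(
      MachineType::AnyTagged(), RegisterFileRawPointer(),
      IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
  compiler::Node* shared_info =
      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
  return LoadObjectField(shared_info,
                         SharedFunctionInfo::kFeedbackVectorOffset);
}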
} // namespace compiler
} // namespace interpreter
} // namespace internal
} // namespace v8

View File

@ -0,0 +1,57 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_UNITTESTS_INTERPRETER_INTERPRETER_ASSEMBLER_UNITTEST_H_
#define V8_UNITTESTS_INTERPRETER_INTERPRETER_ASSEMBLER_UNITTEST_H_
#include "src/compiler/machine-operator.h"
#include "src/interpreter/interpreter-assembler.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock-support.h"
namespace v8 {
namespace internal {
namespace interpreter {
using ::testing::Matcher;
class InterpreterAssemblerTest : public TestWithIsolateAndZone {
public:
InterpreterAssemblerTest() {}
~InterpreterAssemblerTest() override {}
class InterpreterAssemblerForTest final : public InterpreterAssembler {
public:
InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
Bytecode bytecode)
: InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
~InterpreterAssemblerForTest() override {}
Matcher<compiler::Node*> IsLoad(
const Matcher<compiler::LoadRepresentation>& rep_matcher,
const Matcher<compiler::Node*>& base_matcher,
const Matcher<compiler::Node*>& index_matcher);
Matcher<compiler::Node*> IsStore(
const Matcher<compiler::StoreRepresentation>& rep_matcher,
const Matcher<compiler::Node*>& base_matcher,
const Matcher<compiler::Node*>& index_matcher,
const Matcher<compiler::Node*>& value_matcher);
Matcher<compiler::Node*> IsBytecodeOperand(int offset);
Matcher<compiler::Node*> IsBytecodeOperandSignExtended(int offset);
Matcher<compiler::Node*> IsBytecodeOperandShort(int offset);
Matcher<compiler::Node*> IsBytecodeOperandShortSignExtended(int offset);
using InterpreterAssembler::graph;
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
};
};
} // namespace interpreter
} // namespace internal
} // namespace v8
#endif // V8_UNITTESTS_INTERPRETER_INTERPRETER_ASSEMBLER_UNITTEST_H_
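For reference (not part of this diff), tests use this fixture by instantiating InterpreterAssemblerForTest once per bytecode and matching the generated graph with the IsLoad/IsStore helpers. The sketch below mirrors the shape of the existing LoadObjectField test and is illustrative only:

// Sketch: typical shape of a test written against the fixture above
// (kBytecodes and the gmock matchers come from the accompanying .cc file).
TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
  TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
    InterpreterAssemblerForTest m(this, bytecode);
    int offset = 16;
    compiler::Node* object = m.IntPtrConstant(0xdeadbeef);
    compiler::Node* load_field = m.LoadObjectField(object, offset);
    EXPECT_THAT(load_field,
                m.IsLoad(MachineType::AnyTagged(), object,
                         IsIntPtrConstant(offset - kHeapObjectTag)));
  }
}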

View File

@ -60,8 +60,6 @@
'compiler/instruction-selector-unittest.h',
'compiler/instruction-sequence-unittest.cc',
'compiler/instruction-sequence-unittest.h',
'compiler/interpreter-assembler-unittest.cc',
'compiler/interpreter-assembler-unittest.h',
'compiler/js-builtin-reducer-unittest.cc',
'compiler/js-context-relaxation-unittest.cc',
'compiler/js-create-lowering-unittest.cc',
@ -100,6 +98,8 @@
'interpreter/bytecode-array-iterator-unittest.cc',
'interpreter/bytecode-register-allocator-unittest.cc',
'interpreter/constant-array-builder-unittest.cc',
'interpreter/interpreter-assembler-unittest.cc',
'interpreter/interpreter-assembler-unittest.h',
'interpreter/register-translator-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',

View File

@ -606,8 +606,6 @@
'../../src/compiler/instruction.h',
'../../src/compiler/int64-lowering.cc',
'../../src/compiler/int64-lowering.h',
'../../src/compiler/interpreter-assembler.cc',
'../../src/compiler/interpreter-assembler.h',
'../../src/compiler/js-builtin-reducer.cc',
'../../src/compiler/js-builtin-reducer.h',
'../../src/compiler/js-call-reducer.cc',
@ -945,6 +943,8 @@
'../../src/interpreter/handler-table-builder.h',
'../../src/interpreter/interpreter.cc',
'../../src/interpreter/interpreter.h',
'../../src/interpreter/interpreter-assembler.cc',
'../../src/interpreter/interpreter-assembler.h',
'../../src/interpreter/register-translator.cc',
'../../src/interpreter/register-translator.h',
'../../src/interpreter/source-position-table.cc',