[interpreter]: Changes to interpreter builtins for accumulator and register file registers.

Makes the following modifications to the interpreter builtins and
InterpreterAssembler:
 - Adds an accumulator register and initializes it to undefined()
 - Adds a register file pointer register and uses it instead of FramePointer
   to access registers
 - Modifies the builtin to support functions with 0 registers in the register
   file
 - Modifies the builtin to Call rather than TailCall to the first bytecode
   handler.

BUG=v8:4280
LOG=N

Review URL: https://codereview.chromium.org/1289863003

Cr-Commit-Position: refs/heads/master@{#30219}
Author: rmcilroy, 2015-08-18 05:41:41 -07:00 (committed by Commit bot)
Parent: 8aef442917
Commit: 00df60d1c6
24 changed files with 346 additions and 211 deletions
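
To make the new InterpreterAssembler surface concrete, here is a minimal sketch of a bytecode handler body using the accessors this change introduces. The handler name DoLoadRegister and its wiring are illustrative assumptions, not part of this CL; the accessors themselves (BytecodeOperand, LoadRegister, SetAccumulator, Dispatch) are taken from the diff below.

// Hypothetical handler body written against the accessors added in this
// change (Node and InterpreterAssembler live in v8::internal::compiler).
void DoLoadRegister(InterpreterAssembler* assembler) {
  // Operand 0 of the current bytecode holds the source register index.
  Node* reg_index = assembler->BytecodeOperand(0);
  // Registers are now addressed relative to the register file pointer
  // (kInterpreterRegisterFileRegister) rather than the frame pointer.
  Node* value = assembler->LoadRegister(reg_index);
  // The loaded value becomes the accumulator; Dispatch() threads it to the
  // next handler in kInterpreterAccumulatorRegister.
  assembler->SetAccumulator(value);
  assembler->Dispatch();
}

The accumulator and register file pointer become the first two fixed parameters of the interpreter dispatch linkage, which is why the Linkage::kInterpreter*Parameter indices in the diff below shift by two.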


@ -8,7 +8,6 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -925,16 +924,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
Label loop_check;
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
__ b(&loop_check, al);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ push(r9);
// Continue loop if not done.
__ bind(&loop_check);
__ sub(r4, r4, Operand(kPointerSize), SetCC);
__ b(&loop_header, ne);
__ b(&loop_header, ge);
}
// TODO(rmcilroy): List of things not currently dealt with here but done in
@ -968,7 +968,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ sub(kInterpreterRegisterFileRegister, fp,
Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@ -977,14 +981,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Dispatch to the first bytecode handler for the function.
__ ldrb(r0, MemOperand(kInterpreterBytecodeArrayRegister,
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r0, LSL,
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
__ Call(ip);
}
@ -995,9 +999,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// Load return value into r0.
__ ldr(r0, MemOperand(fp, -kPointerSize -
StandardFrameConstants::kFixedFrameSizeFromFp));
// The return value is in accumulator, which is already in r0.
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
// Drop receiver + arguments.


@ -18,6 +18,8 @@ const Register kReturnRegister0 = {kRegister_r0_Code};
const Register kReturnRegister1 = {kRegister_r1_Code};
const Register kJSFunctionRegister = {kRegister_r1_Code};
const Register kContextRegister = {kRegister_r7_Code};
const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};


@ -990,7 +990,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Sub(kInterpreterRegisterFileRegister, fp,
Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ Mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@ -999,14 +1003,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Dispatch to the first bytecode handler for the function.
__ Ldrb(x0, MemOperand(kInterpreterBytecodeArrayRegister,
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x0, Operand(x0, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x0));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip0);
__ Call(ip0);
}
@ -1017,9 +1021,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// Load return value into x0.
__ ldr(x0, MemOperand(fp, -kPointerSize -
StandardFrameConstants::kFixedFrameSizeFromFp));
// The return value is in accumulator, which is already in x0.
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
// Drop receiver + arguments.


@ -39,6 +39,8 @@ namespace internal {
#define kReturnRegister1 x1
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kInterpreterAccumulatorRegister x0
#define kInterpreterRegisterFileRegister x18
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21


@ -31,6 +31,8 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
Linkage::GetInterpreterDispatchDescriptor(zone), kMachPtr,
InstructionSelector::SupportedMachineOperatorFlags())),
end_node_(nullptr),
accumulator_(
raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
code_generated_(false) {}
@ -60,7 +62,22 @@ Handle<Code> InterpreterAssembler::GenerateCode() {
}
Node* InterpreterAssembler::BytecodeArrayPointer() {
Node* InterpreterAssembler::GetAccumulator() {
return accumulator_;
}
void InterpreterAssembler::SetAccumulator(Node* value) {
accumulator_ = value;
}
Node* InterpreterAssembler::RegisterFileRawPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
}
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeArrayParameter);
}
@ -71,59 +88,46 @@ Node* InterpreterAssembler::BytecodeOffset() {
}
Node* InterpreterAssembler::DispatchTablePointer() {
Node* InterpreterAssembler::DispatchTableRawPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
}
Node* InterpreterAssembler::FramePointer() {
return raw_assembler_->LoadFramePointer();
}
Node* InterpreterAssembler::RegisterFrameOffset(int index) {
DCHECK_LE(index, kMaxRegisterIndex);
return Int32Constant(kFirstRegisterOffsetFromFp -
(index << kPointerSizeLog2));
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
return raw_assembler_->IntPtrSub(
Int32Constant(kFirstRegisterOffsetFromFp),
raw_assembler_->WordShl(index, Int32Constant(kPointerSizeLog2)));
return raw_assembler_->WordShl(index, Int32Constant(kPointerSizeLog2));
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
return raw_assembler_->Load(kMachPtr, RegisterFileRawPointer(),
RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
return raw_assembler_->Store(kMachPtr, RegisterFileRawPointer(),
RegisterFrameOffset(reg_index), value);
}
Node* InterpreterAssembler::BytecodeOperand(int delta) {
DCHECK_LT(delta, interpreter::Bytecodes::NumberOfOperands(bytecode_));
return raw_assembler_->Load(
kMachUint8, BytecodeArrayPointer(),
kMachUint8, BytecodeArrayTaggedPointer(),
raw_assembler_->IntPtrAdd(BytecodeOffset(), Int32Constant(1 + delta)));
}
Node* InterpreterAssembler::LoadRegister(int index) {
return raw_assembler_->Load(kMachPtr, FramePointer(),
RegisterFrameOffset(index));
}
Node* InterpreterAssembler::LoadRegister(Node* index) {
return raw_assembler_->Load(kMachPtr, FramePointer(),
RegisterFrameOffset(index));
}
Node* InterpreterAssembler::StoreRegister(Node* value, int index) {
return raw_assembler_->Store(kMachPtr, FramePointer(),
RegisterFrameOffset(index), value);
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* index) {
return raw_assembler_->Store(kMachPtr, FramePointer(),
RegisterFrameOffset(index), value);
Node* InterpreterAssembler::BytecodeOperandSignExtended(int delta) {
DCHECK_LT(delta, interpreter::Bytecodes::NumberOfOperands(bytecode_));
Node* load = raw_assembler_->Load(
kMachInt8, BytecodeArrayTaggedPointer(),
raw_assembler_->IntPtrAdd(BytecodeOffset(), Int32Constant(1 + delta)));
// Ensure that we sign extend to full pointer size
if (kPointerSize == 8) {
load = raw_assembler_->ChangeInt32ToInt64(load);
}
return load;
}
@ -132,14 +136,15 @@ void InterpreterAssembler::Return() {
HeapConstant(Unique<HeapObject>::CreateImmovable(
isolate()->builtins()->InterpreterExitTrampoline()));
// If the order of the parameters you need to change the call signature below.
STATIC_ASSERT(0 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(1 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(2 == Linkage::kInterpreterDispatchTableParameter);
Node* tail_call = graph()->NewNode(
common()->TailCall(call_descriptor()), exit_trampoline_code_object,
BytecodeOffset(), BytecodeArrayPointer(), DispatchTablePointer(),
graph()->start(), graph()->start());
schedule()->AddTailCall(raw_assembler_->CurrentBlock(), tail_call);
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
Node* tail_call = raw_assembler_->TailCallInterpreterDispatch(
call_descriptor(), exit_trampoline_code_object, GetAccumulator(),
RegisterFileRawPointer(), BytecodeOffset(), BytecodeArrayTaggedPointer(),
DispatchTableRawPointer());
// This should always be the end node.
SetEndInput(tail_call);
}
@ -153,24 +158,25 @@ Node* InterpreterAssembler::Advance(int delta) {
void InterpreterAssembler::Dispatch() {
Node* new_bytecode_offset = Advance(interpreter::Bytecodes::Size(bytecode_));
Node* target_bytecode = raw_assembler_->Load(
kMachUint8, BytecodeArrayPointer(), new_bytecode_offset);
kMachUint8, BytecodeArrayTaggedPointer(), new_bytecode_offset);
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion
// from code object on every dispatch.
Node* target_code_object = raw_assembler_->Load(
kMachPtr, DispatchTablePointer(),
kMachPtr, DispatchTableRawPointer(),
raw_assembler_->Word32Shl(target_bytecode,
Int32Constant(kPointerSizeLog2)));
// If the order of the parameters you need to change the call signature below.
STATIC_ASSERT(0 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(1 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(2 == Linkage::kInterpreterDispatchTableParameter);
Node* tail_call = graph()->NewNode(
common()->TailCall(call_descriptor()), target_code_object,
new_bytecode_offset, BytecodeArrayPointer(), DispatchTablePointer(),
graph()->start(), graph()->start());
schedule()->AddTailCall(raw_assembler_->CurrentBlock(), tail_call);
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
Node* tail_call = raw_assembler_->TailCallInterpreterDispatch(
call_descriptor(), target_code_object, GetAccumulator(),
RegisterFileRawPointer(), new_bytecode_offset,
BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
// This should always be the end node.
SetEndInput(tail_call);
}
@ -185,7 +191,7 @@ void InterpreterAssembler::SetEndInput(Node* input) {
void InterpreterAssembler::End() {
DCHECK(end_node_);
// TODO(rmcilroy): Support more than 1 end input.
Node* end = graph()->NewNode(common()->End(1), end_node_);
Node* end = graph()->NewNode(raw_assembler_->common()->End(1), end_node_);
graph()->SetEnd(end);
}
@ -207,16 +213,6 @@ Schedule* InterpreterAssembler::schedule() {
}
MachineOperatorBuilder* InterpreterAssembler::machine() {
return raw_assembler_->machine();
}
CommonOperatorBuilder* InterpreterAssembler::common() {
return raw_assembler_->common();
}
Node* InterpreterAssembler::Int32Constant(int value) {
return raw_assembler_->Int32Constant(value);
}
@ -231,7 +227,6 @@ Node* InterpreterAssembler::HeapConstant(Unique<HeapObject> object) {
return raw_assembler_->HeapConstant(object);
}
} // namespace interpreter
} // namespace internal
} // namespace v8


@ -22,9 +22,7 @@ class Zone;
namespace compiler {
class CallDescriptor;
class CommonOperatorBuilder;
class Graph;
class MachineOperatorBuilder;
class Node;
class Operator;
class RawMachineAssembler;
@ -38,33 +36,29 @@ class InterpreterAssembler {
Handle<Code> GenerateCode();
// Accumulator.
Node* GetAccumulator();
void SetAccumulator(Node* value);
// Loads from and stores to the interpreter register file.
Node* LoadRegister(Node* reg_index);
Node* StoreRegister(Node* value, Node* reg_index);
// Constants.
Node* Int32Constant(int value);
Node* NumberConstant(double value);
Node* HeapConstant(Unique<HeapObject> object);
// Returns the bytecode operand |index| for the current bytecode.
Node* BytecodeOperand(int index);
// Loads from and stores to the interpreter register file.
Node* LoadRegister(int index);
Node* LoadRegister(Node* index);
Node* StoreRegister(Node* value, int index);
Node* StoreRegister(Node* value, Node* index);
// Returns from the function.
void Return();
// Dispatch to the bytecode.
void Dispatch();
Node* BytecodeOperand(int index);
Node* BytecodeOperandSignExtended(int index);
protected:
static const int kFirstRegisterOffsetFromFp =
-kPointerSize - StandardFrameConstants::kFixedFrameSizeFromFp;
// TODO(rmcilroy): Increase this when required.
static const int kMaxRegisterIndex = 255;
// Close the graph.
void End();
@ -73,17 +67,16 @@ class InterpreterAssembler {
Graph* graph();
private:
// Returns a raw pointer to start of the register file on the stack.
Node* RegisterFileRawPointer();
// Returns a tagged pointer to the current function's BytecodeArray object.
Node* BytecodeArrayPointer();
Node* BytecodeArrayTaggedPointer();
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
Node* BytecodeOffset();
// Returns a pointer to first entry in the interpreter dispatch table.
Node* DispatchTablePointer();
// Returns the frame pointer for the current function.
Node* FramePointer();
Node* DispatchTableRawPointer();
// Returns the offset of register |index|.
Node* RegisterFrameOffset(int index);
// Returns the offset of register |index| relative to RegisterFilePointer().
Node* RegisterFrameOffset(Node* index);
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
@ -96,12 +89,11 @@ class InterpreterAssembler {
// Private helpers which delegate to RawMachineAssembler.
Isolate* isolate();
Schedule* schedule();
MachineOperatorBuilder* machine();
CommonOperatorBuilder* common();
interpreter::Bytecode bytecode_;
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Node* end_node_;
Node* accumulator_;
bool code_generated_;
DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);


@ -392,19 +392,27 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
MachineSignature::Builder types(zone, 0, 3);
LocationSignature::Builder locations(zone, 0, 3);
MachineSignature::Builder types(zone, 0, 5);
LocationSignature::Builder locations(zone, 0, 5);
// Add registers for fixed parameters passed via interpreter dispatch.
STATIC_ASSERT(0 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
types.AddParam(kMachAnyTagged);
locations.AddParam(regloc(kInterpreterAccumulatorRegister));
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
types.AddParam(kMachPtr);
locations.AddParam(regloc(kInterpreterRegisterFileRegister));
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
types.AddParam(kMachIntPtr);
locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
STATIC_ASSERT(1 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
types.AddParam(kMachAnyTagged);
locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
STATIC_ASSERT(2 == Linkage::kInterpreterDispatchTableParameter);
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
types.AddParam(kMachPtr);
locations.AddParam(regloc(kInterpreterDispatchTableRegister));


@ -330,9 +330,11 @@ class Linkage : public ZoneObject {
// Special parameter indices used to pass fixed register data through
// interpreter dispatches.
static const int kInterpreterBytecodeOffsetParameter = 0;
static const int kInterpreterBytecodeArrayParameter = 1;
static const int kInterpreterDispatchTableParameter = 2;
static const int kInterpreterAccumulatorParameter = 0;
static const int kInterpreterRegisterFileParameter = 1;
static const int kInterpreterBytecodeOffsetParameter = 2;
static const int kInterpreterBytecodeArrayParameter = 3;
static const int kInterpreterDispatchTableParameter = 4;
private:
CallDescriptor* const incoming_;


@ -232,6 +232,17 @@ Node* RawMachineAssembler::CallCFunction8(
}
Node* RawMachineAssembler::TailCallInterpreterDispatch(
const CallDescriptor* call_descriptor, Node* target, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* arg5) {
Node* tail_call =
graph()->NewNode(common()->TailCall(call_descriptor), target, arg1, arg2,
arg3, arg4, arg5, graph()->start(), graph()->start());
schedule()->AddTailCall(CurrentBlock(), tail_call);
return tail_call;
}
void RawMachineAssembler::Bind(Label* label) {
DCHECK(current_block_ == nullptr);
DCHECK(!label->bound_);


@ -508,6 +508,9 @@ class RawMachineAssembler {
MachineType arg7_type, Node* function, Node* arg0,
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
Node* TailCallInterpreterDispatch(const CallDescriptor* call_descriptor,
Node* target, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* arg5);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch


@ -634,20 +634,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
__ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFunctionDataOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(edi);
__ CmpObjectType(edi, BYTECODE_ARRAY_TYPE, eax);
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
eax);
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
__ mov(ebx, FieldOperand(edi, BytecodeArray::kFrameSizeOffset));
__ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
Label ok;
@ -656,21 +659,22 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
__ j(above_equal, &ok);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
Label loop_check;
__ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
__ jmp(&loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ push(eax);
// Continue loop if not done.
__ bind(&loop_check);
__ sub(ebx, Immediate(kPointerSize));
__ j(not_equal, &loop_header, Label::kNear);
__ j(greater_equal, &loop_header);
}
// TODO(rmcilroy): List of things not currently dealt with here but done in
@ -700,25 +704,39 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
__ j(above_equal, &ok);
__ CallRuntime(Runtime::kStackGuard, 0);
__ bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
__ mov(ecx, Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ mov(kInterpreterRegisterFileRegister, ebp);
__ sub(
kInterpreterRegisterFileRegister,
Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Since the dispatch table root might be set after builtins are generated,
// load directly from the roots table.
__ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
__ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ add(kInterpreterDispatchTableRegister,
Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
// TODO(rmcilroy) Push our context as a stack located parameter of the
// bytecode handler.
// Dispatch to the first bytecode handler for the function.
__ movzx_b(eax, Operand(edi, ecx, times_1, 0));
__ mov(eax, Operand(ebx, eax, times_pointer_size, 0));
__ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(esi, Operand(kInterpreterDispatchTableRegister, esi,
times_pointer_size, 0));
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ add(eax, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(eax);
__ add(esi, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(esi);
}
@ -729,9 +747,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// Load return value into r0.
__ mov(eax, Operand(ebp, -kPointerSize -
StandardFrameConstants::kFixedFrameSizeFromFp));
// The return value is in accumulator, which is already in rax.
// Leave the frame (also dropping the register file).
__ leave();
// Return droping receiver + arguments.


@ -18,6 +18,8 @@ const Register kReturnRegister0 = {kRegister_eax_Code};
const Register kReturnRegister1 = {kRegister_edx_Code};
const Register kJSFunctionRegister = {kRegister_edi_Code};
const Register kContextRegister = {kRegister_esi_Code};
const Register kInterpreterAccumulatorRegister = {kRegister_eax_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_edx_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_ecx_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_edi_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};


@ -916,14 +916,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
Label loop_check;
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ Branch(&loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ push(t1);
// Continue loop if not done.
__ bind(&loop_check);
__ Subu(t0, t0, Operand(kPointerSize));
__ Branch(&loop_header, ge, t0, Operand(zero_reg));
}
@ -959,6 +960,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
}
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Subu(
kInterpreterRegisterFileRegister, fp,
Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@ -976,7 +981,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Call(at);
}
@ -987,9 +992,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// Load return value into v0.
__ lw(v0, MemOperand(fp, -kPointerSize -
StandardFrameConstants::kFixedFrameSizeFromFp));
// The return value is in accumulator, which is already in v0.
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
// Drop receiver + arguments.


@ -17,6 +17,8 @@ const Register kReturnRegister0 = {kRegister_v0_Code};
const Register kReturnRegister1 = {kRegister_v1_Code};
const Register kJSFunctionRegister = {kRegister_a1_Code};
const Register kContextRegister = {Register::kCpRegister};
const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_t3_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_t4_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_t5_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_t6_Code};


@ -75,7 +75,7 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp.
static const int kMaxNumAllocatableRegisters = 14; // v0 through t2 and cp.
static const int kSizeInBytes = 8;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.


@ -913,14 +913,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
Label loop_check;
__ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
__ Branch(&loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ push(a5);
// Continue loop if not done.
__ bind(&loop_check);
__ Dsubu(a4, a4, Operand(kPointerSize));
__ Branch(&loop_header, ge, a4, Operand(zero_reg));
}
@ -956,6 +957,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
}
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Dsubu(
kInterpreterRegisterFileRegister, fp,
Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@ -973,7 +978,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
__ Call(at);
}
@ -984,9 +989,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// Load return value into v0.
__ ld(v0, MemOperand(fp, -kPointerSize -
StandardFrameConstants::kFixedFrameSizeFromFp));
// The return value is in accumulator, which is already in v0.
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
// Drop receiver + arguments.


@ -17,6 +17,8 @@ const Register kReturnRegister0 = {kRegister_v0_Code};
const Register kReturnRegister1 = {kRegister_v1_Code};
const Register kJSFunctionRegister = {kRegister_a1_Code};
const Register kContextRegister = {kRegister_s7_Code};
const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_a7_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_t0_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_t1_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_t2_Code};


@ -694,20 +694,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
__ movp(r14, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(r14, FieldOperand(r14, SharedFunctionInfo::kFunctionDataOffset));
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(r14);
__ CmpObjectType(r14, BYTECODE_ARRAY_TYPE, rax);
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rax);
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
__ movl(rcx, FieldOperand(r14, BytecodeArray::kFrameSizeOffset));
__ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
Label ok;
@ -719,16 +722,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
Label loop_check;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(always, &loop_check);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ Push(rdx);
// Continue loop if not done.
__ bind(&loop_check);
__ subp(rcx, Immediate(kPointerSize));
__ j(not_equal, &loop_header, Label::kNear);
__ j(greater_equal, &loop_header, Label::kNear);
}
// TODO(rmcilroy): List of things not currently dealt with here but done in
@ -761,18 +765,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
__ movp(r12, Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(r15, Heap::kInterpreterTableRootIndex);
__ addp(r15, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ movp(kInterpreterRegisterFileRegister, rbp);
__ subp(
kInterpreterRegisterFileRegister,
Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ addp(kInterpreterDispatchTableRegister,
Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
// Dispatch to the first bytecode handler for the function.
__ movzxbp(rax, Operand(r14, r12, times_1, 0));
__ movp(rax, Operand(r15, rax, times_pointer_size, 0));
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ addp(rax, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rax);
__ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rbx);
}
@ -783,9 +798,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// Load return value into r0.
__ movp(rax, Operand(rbp, -kPointerSize -
StandardFrameConstants::kFixedFrameSizeFromFp));
// The return value is in accumulator, which is already in rax.
// Leave the frame (also dropping the register file).
__ leave();
// Return droping receiver + arguments.


@ -20,6 +20,8 @@ const Register kReturnRegister0 = {kRegister_rax_Code};
const Register kReturnRegister1 = {kRegister_rdx_Code};
const Register kJSFunctionRegister = {kRegister_rdi_Code};
const Register kContextRegister = {kRegister_rsi_Code};
const Register kInterpreterAccumulatorRegister = {kRegister_rax_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_r11_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_r12_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_r14_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_r15_Code};


@ -72,9 +72,7 @@ TEST(TestInterpreterReturn) {
handles.main_isolate()->factory()->undefined_value();
BytecodeArrayBuilder builder(handles.main_isolate());
// TODO(rmcilroy) set to 0 once BytecodeArray update to allow zero size
// register file.
builder.set_locals_count(1);
builder.set_locals_count(0);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();


@ -10,6 +10,8 @@
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/node-test-utils.h"
using ::testing::_;
namespace v8 {
namespace internal {
namespace compiler {
@ -93,6 +95,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
EXPECT_THAT(
tail_call_node,
IsTailCall(m.call_descriptor(), code_target_matcher,
IsParameter(Linkage::kInterpreterAccumulatorParameter),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
@ -119,6 +123,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, Return) {
EXPECT_THAT(
tail_call_node,
IsTailCall(m.call_descriptor(), IsHeapConstant(exit_trampoline),
IsParameter(Linkage::kInterpreterAccumulatorParameter),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
@ -146,20 +152,55 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadRegisterFixed) {
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperandSignExtended) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
for (int i = 0; i < m.kMaxRegisterIndex; i++) {
Node* load_reg_node = m.LoadRegister(i);
EXPECT_THAT(load_reg_node,
m.IsLoad(kMachPtr, IsLoadFramePointer(),
IsInt32Constant(m.kFirstRegisterOffsetFromFp -
(i << kPointerSizeLog2))));
int number_of_operands = interpreter::Bytecodes::NumberOfOperands(bytecode);
for (int i = 0; i < number_of_operands; i++) {
Node* load_arg_node = m.BytecodeOperandSignExtended(i);
Matcher<Node*> load_matcher = m.IsLoad(
kMachInt8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(1 + i)));
if (kPointerSize == 8) {
load_matcher = IsChangeInt32ToInt64(load_matcher);
}
EXPECT_THAT(load_arg_node, load_matcher);
}
}
}
TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
// Should be incoming accumulator if not set.
EXPECT_THAT(m.GetAccumulator(),
IsParameter(Linkage::kInterpreterAccumulatorParameter));
// Should be set by SetAccumulator.
Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
m.SetAccumulator(accumulator_value_1);
EXPECT_THAT(m.GetAccumulator(), accumulator_value_1);
Node* accumulator_value_2 = m.Int32Constant(42);
m.SetAccumulator(accumulator_value_2);
EXPECT_THAT(m.GetAccumulator(), accumulator_value_2);
// Should be passed to next bytecode handler on dispatch.
m.Dispatch();
Graph* graph = m.GetCompletedGraph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
EXPECT_THAT(tail_call_node,
IsTailCall(m.call_descriptor(), _, accumulator_value_2, _, _, _,
_, graph->start(), graph->start()));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@ -167,27 +208,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
Node* load_reg_node = m.LoadRegister(reg_index_node);
EXPECT_THAT(
load_reg_node,
m.IsLoad(kMachPtr, IsLoadFramePointer(),
IsIntPtrSub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
IsWordShl(reg_index_node,
IsInt32Constant(kPointerSizeLog2)))));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, StoreRegisterFixed) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* store_value = m.Int32Constant(0xdeadbeef);
for (int i = 0; i < m.kMaxRegisterIndex; i++) {
Node* store_reg_node = m.StoreRegister(store_value, i);
EXPECT_THAT(store_reg_node,
m.IsStore(StoreRepresentation(kMachPtr, kNoWriteBarrier),
IsLoadFramePointer(),
IsInt32Constant(m.kFirstRegisterOffsetFromFp -
(i << kPointerSizeLog2)),
store_value));
}
m.IsLoad(kMachPtr,
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
}
}
@ -201,10 +224,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
EXPECT_THAT(
store_reg_node,
m.IsStore(StoreRepresentation(kMachPtr, kNoWriteBarrier),
IsLoadFramePointer(),
IsIntPtrSub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
IsWordShl(reg_index_node,
IsInt32Constant(kPointerSizeLog2))),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
store_value));
}
}


@ -41,8 +41,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
using InterpreterAssembler::call_descriptor;
using InterpreterAssembler::graph;
using InterpreterAssembler::kMaxRegisterIndex;
using InterpreterAssembler::kFirstRegisterOffsetFromFp;
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);


@ -1730,6 +1730,42 @@ Matcher<Node*> IsTailCall(
}
Matcher<Node*> IsTailCall(
const Matcher<CallDescriptor const*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
std::vector<Matcher<Node*>> value_matchers;
value_matchers.push_back(value0_matcher);
value_matchers.push_back(value1_matcher);
value_matchers.push_back(value2_matcher);
value_matchers.push_back(value3_matcher);
value_matchers.push_back(value4_matcher);
return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
effect_matcher, control_matcher));
}
Matcher<Node*> IsTailCall(
const Matcher<CallDescriptor const*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
std::vector<Matcher<Node*>> value_matchers;
value_matchers.push_back(value0_matcher);
value_matchers.push_back(value1_matcher);
value_matchers.push_back(value2_matcher);
value_matchers.push_back(value3_matcher);
value_matchers.push_back(value4_matcher);
value_matchers.push_back(value5_matcher);
return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
effect_matcher, control_matcher));
}
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {


@ -145,6 +145,20 @@ Matcher<Node*> IsTailCall(
const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsTailCall(
const Matcher<CallDescriptor const*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsTailCall(
const Matcher<CallDescriptor const*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,