[Interpreter] Do a couple of cleanups on interpreter assembler.

Does a couple of cleanups on interpreter assembler:
 - Adding naming to the variable fields to improve debuggability
 - Grouping functions which deal with loading the state passed between
   bytecode handlers (e.g. bytecode array / offset / etc.).
 - Fix some comments in interpreter-generator.cc

Change-Id: I9decefebbdf7830a7ce75dd46e8a69a1db3c4cc8
Reviewed-on: https://chromium-review.googlesource.com/625797
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47499}
This commit is contained in:
Ross McIlroy 2017-08-22 11:26:42 +01:00 committed by Commit Bot
parent 7dda7e8003
commit e2eb208a03
4 changed files with 68 additions and 70 deletions

View File

@ -1889,16 +1889,18 @@ class ToDirectStringAssembler : public CodeStubAssembler {
{ #name, __FILE__, __LINE__ }
#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
#define VARIABLE(name, ...) \
Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__);
Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
#define VARIABLE_CONSTRUCTOR(name, ...) \
name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) \
TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__);
TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else // DEBUG
#define CSA_ASSERT(csa, ...) ((void)0)
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label);
#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__);
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__);
#define BIND(label) Bind(label)
#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
#endif // DEBUG
#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))

View File

@ -30,34 +30,36 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
: CodeStubAssembler(state),
bytecode_(bytecode),
operand_scale_(operand_scale),
bytecode_offset_(this, MachineType::PointerRepresentation()),
interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
bytecode_array_(this, MachineRepresentation::kTagged),
bytecode_array_valid_(true),
dispatch_table_(this, MachineType::PointerRepresentation()),
accumulator_(this, MachineRepresentation::kTagged),
VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
MachineType::PointerRepresentation()),
VARIABLE_CONSTRUCTOR(
bytecode_array_, MachineRepresentation::kTagged,
Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
VARIABLE_CONSTRUCTOR(
bytecode_offset_, MachineType::PointerRepresentation(),
Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
VARIABLE_CONSTRUCTOR(
dispatch_table_, MachineType::PointerRepresentation(),
Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
VARIABLE_CONSTRUCTOR(
accumulator_, MachineRepresentation::kTagged,
Parameter(InterpreterDispatchDescriptor::kAccumulator)),
accumulator_use_(AccumulatorUse::kNone),
made_call_(false),
reloaded_frame_ptr_(false),
saved_bytecode_offset_(false),
bytecode_array_valid_(true),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
bytecode_offset_.Bind(
Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
bytecode_array_.Bind(
Parameter(InterpreterDispatchDescriptor::kBytecodeArray));
dispatch_table_.Bind(
Parameter(InterpreterDispatchDescriptor::kDispatchTable));
#ifdef V8_TRACE_IGNITION
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
RegisterCallGenerationCallbacks([this] { CallPrologue(); },
[this] { CallEpilogue(); });
// Save the bytecode offset immediately if bytecode will make a call along the
// critical path.
if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
SaveBytecodeOffset();
StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
}
}
@ -80,6 +82,35 @@ Node* InterpreterAssembler::GetInterpretedFramePointer() {
return interpreted_frame_pointer_.value();
}
// Returns the current bytecode offset node. If this bytecode makes a call
// along the critical path and a call has already been emitted, and the
// variable still holds the raw entry parameter, reload the offset from the
// interpreter's bytecode_offset register (where it was spilled before the
// call) instead of using the stale parameter value.
Node* InterpreterAssembler::BytecodeOffset() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(bytecode_offset_.value() ==
Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
// Still bound to the entry parameter; re-materialize from the register.
bytecode_offset_.Bind(LoadAndUntagRegister(Register::bytecode_offset()));
}
return bytecode_offset_.value();
}
// Returns the tagged pointer to the current bytecode array, reloading it
// from the interpreter's bytecode_array register when the cached value has
// been invalidated (bytecode_array_valid_ cleared, e.g. after a call).
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
// Force a re-load of the bytecode array after every call in case the debugger
// has been activated.
if (!bytecode_array_valid_) {
bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
bytecode_array_valid_ = true;
}
return bytecode_array_.value();
}
// Returns the raw pointer to the interpreter dispatch table. Mirrors
// BytecodeOffset(): if a call has been made along the critical path and the
// variable still holds the raw entry parameter, re-derive the table address
// from its external reference rather than trusting the stale parameter.
Node* InterpreterAssembler::DispatchTableRawPointer() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(dispatch_table_.value() ==
Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
// Rebind to the canonical dispatch-table address for this isolate.
dispatch_table_.Bind(ExternalConstant(
ExternalReference::interpreter_dispatch_table_address(isolate())));
}
return dispatch_table_.value();
}
// Returns the current accumulator value directly from the variable,
// without any additional bookkeeping ("Unchecked" — presumably bypassing
// the accumulator-use tracking done by other accessors; confirm in header).
Node* InterpreterAssembler::GetAccumulatorUnchecked() {
return accumulator_.value();
}
@ -169,35 +200,6 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
}
}
// Returns the current bytecode offset node, reloading it from the spilled
// bytecode_offset register when this bytecode makes a call along the
// critical path, a call has been emitted, and the variable still holds the
// raw entry parameter (which would then be stale).
Node* InterpreterAssembler::BytecodeOffset() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(bytecode_offset_.value() ==
Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
// Still bound to the entry parameter; re-materialize from the register.
bytecode_offset_.Bind(LoadAndUntagRegister(Register::bytecode_offset()));
}
return bytecode_offset_.value();
}
// Returns the tagged pointer to the current bytecode array, reloading it
// from the interpreter's bytecode_array register when the cached value has
// been invalidated (bytecode_array_valid_ cleared).
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
// Force a re-load of the bytecode array after every call in case the debugger
// has been activated.
if (!bytecode_array_valid_) {
bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
bytecode_array_valid_ = true;
}
return bytecode_array_.value();
}
// Returns the raw pointer to the interpreter dispatch table, re-deriving it
// from its external reference when a call has been made along the critical
// path and the variable still holds the (now stale) raw entry parameter.
Node* InterpreterAssembler::DispatchTableRawPointer() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(dispatch_table_.value() ==
Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
// Rebind to the canonical dispatch-table address for this isolate.
dispatch_table_.Bind(ExternalConstant(
ExternalReference::interpreter_dispatch_table_address(isolate())));
}
return dispatch_table_.value();
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
return IntPtrAdd(GetInterpretedFramePointer(),
RegisterFrameOffset(reg_index));
@ -520,18 +522,13 @@ Node* InterpreterAssembler::LoadFeedbackVector() {
return vector;
}
// Spills the current bytecode offset into the interpreter's bytecode_offset
// register (tagged) and records that it has been saved, so CallPrologue can
// skip the per-call spill. Only valid for bytecodes that make a call along
// the critical path (enforced by the DCHECK).
void InterpreterAssembler::SaveBytecodeOffset() {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
saved_bytecode_offset_ = true;
}
void InterpreterAssembler::CallPrologue() {
if (!saved_bytecode_offset_) {
// If there are multiple calls in the bytecode handler, you need to spill
if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
// Bytecodes that make a call along the critical path save the bytecode
// offset in the bytecode handler's prologue. For other bytecodes, if
// there are multiple calls in the bytecode handler, you need to spill
// before each of them, unless SaveBytecodeOffset has explicitly been called
// in a path that dominates _all_ of those calls. Therefore don't set
// saved_bytecode_offset_ to true or call SaveBytecodeOffset.
// in a path that dominates _all_ of those calls (which we don't track).
StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
}

View File

@ -345,16 +345,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
Bytecode bytecode_;
OperandScale operand_scale_;
CodeStubAssembler::Variable bytecode_offset_;
CodeStubAssembler::Variable interpreted_frame_pointer_;
CodeStubAssembler::Variable bytecode_array_;
bool bytecode_array_valid_;
CodeStubAssembler::Variable bytecode_offset_;
CodeStubAssembler::Variable dispatch_table_;
CodeStubAssembler::Variable accumulator_;
AccumulatorUse accumulator_use_;
bool made_call_;
bool reloaded_frame_ptr_;
bool saved_bytecode_offset_;
bool bytecode_array_valid_;
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;

View File

@ -1092,9 +1092,9 @@ IGNITION_HANDLER(ShiftRightLogical, InterpreterBitwiseBinaryOpAssembler) {
BitwiseBinaryOpWithFeedback(Token::SHR);
}
// BitwiseOr <imm>
// BitwiseOrSmi <imm>
//
// BitwiseOr accumulator with <imm>.
// BitwiseOrSmi accumulator with <imm>.
IGNITION_HANDLER(BitwiseOrSmi, InterpreterAssembler) {
Node* left = GetAccumulator();
Node* right = BytecodeOperandImmSmi(0);
@ -1118,9 +1118,9 @@ IGNITION_HANDLER(BitwiseOrSmi, InterpreterAssembler) {
Dispatch();
}
// BitwiseXor <imm>
// BitwiseXorSmi <imm>
//
// BitwiseXor accumulator with <imm>.
// BitwiseXorSmi accumulator with <imm>.
IGNITION_HANDLER(BitwiseXorSmi, InterpreterAssembler) {
Node* left = GetAccumulator();
Node* right = BytecodeOperandImmSmi(0);
@ -1144,9 +1144,9 @@ IGNITION_HANDLER(BitwiseXorSmi, InterpreterAssembler) {
Dispatch();
}
// BitwiseAnd <imm>
// BitwiseAndSmi <imm>
//
// BitwiseAnd accumulator with <imm>.
// BitwiseAndSmi accumulator with <imm>.
IGNITION_HANDLER(BitwiseAndSmi, InterpreterAssembler) {
Node* left = GetAccumulator();
Node* right = BytecodeOperandImmSmi(0);