[Interpreter] Optimize the Register Optimizer.

Modify the Bytecode Register Optimizer to be an independent component
rather than part of the BytecodePipeline. This means the BytecodeArrayBuilder
can explicitly call it with register operands when outputting a bytecode,
and the Bytecode Register Optimizer no longer needs to work out which
operands are register operands. It also means we don't need to build
BytecodeNodes for Ldar / Star / Mov bytecodes unless they are actually
emitted by the optimizer.
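
As an illustration, here is a hedged sketch of how a builder method for a
register transfer can now drive the optimizer directly. The actual
implementation is in the bytecode-array-builder.cc diff (suppressed below),
so the body is illustrative; the names DoLdar, OutputLdar,
CurrentSourcePosition and register_optimizer_ all come from the headers in
this CL.

  BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
      Register reg) {
    if (register_optimizer_) {
      // The optimizer records the transfer and only materializes an Ldar
      // later if it turns out to be needed; no BytecodeNode is built here.
      register_optimizer_->DoLdar(reg, CurrentSourcePosition(Bytecode::kLdar));
    } else {
      OutputLdar(reg);
    }
    return *this;
  }

The point is that Ldar / Star / Mov only reach the pipeline as BytecodeNodes
if the optimizer decides to materialize them.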

This change also modifies the way the BytecodeArrayBuilder converts
operands so that it makes use of the OperandTypes specified in bytecodes.h.
This avoids having to convert each operand individually to its raw output
value before calling Output(...).
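
A minimal sketch of what per-OperandType conversion could look like. The
OperandHelper name and exact structure are assumptions (the .cc diff is
suppressed); only GetInputRegisterOperand / GetOutputRegisterOperand appear
in the header diff below.

  // One specialization per OperandType from bytecodes.h, so the generated
  // Output<Name>(...) methods can take typed values (Register, int, size_t)
  // and convert them to raw uint32_t operands in a single place.
  template <OperandType>
  class OperandHelper {};

  template <>
  class OperandHelper<OperandType::kReg> {
   public:
    static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg) {
      return builder->GetInputRegisterOperand(reg);  // consults the optimizer
    }
  };

  template <>
  class OperandHelper<OperandType::kRegOut> {
   public:
    static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg) {
      return builder->GetOutputRegisterOperand(reg);
    }
  };

  template <>
  class OperandHelper<OperandType::kIdx> {
   public:
    static uint32_t Convert(BytecodeArrayBuilder* builder, size_t value) {
      DCHECK_LE(value, kMaxUInt32);
      return static_cast<uint32_t>(value);
    }
  };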

BUG=v8:4280

Review-Url: https://codereview.chromium.org/2393683004
Cr-Commit-Position: refs/heads/master@{#40543}
rmcilroy 2016-10-24 13:47:41 -07:00 committed by Commit bot
parent caba112d03
commit ed7bef5b91
14 changed files with 615 additions and 900 deletions

File diff suppressed because it is too large.


@ -26,6 +26,7 @@ namespace interpreter {
class BytecodeLabel;
class BytecodeNode;
class BytecodePipelineStage;
class BytecodeRegisterOptimizer;
class Register;
class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
@ -320,6 +321,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
// Returns the raw operand value for the given register or register list.
uint32_t GetInputRegisterOperand(Register reg);
uint32_t GetOutputRegisterOperand(Register reg);
uint32_t GetInputRegisterListOperand(RegisterList reg_list);
uint32_t GetOutputRegisterListOperand(RegisterList reg_list);
// Accessors
BytecodeRegisterAllocator* register_allocator() {
return &register_allocator_;
@ -331,41 +338,22 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
private:
friend class BytecodeRegisterAllocator;
template <OperandType... operand_types>
friend class BytecodeNodeBuilder;
INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3));
INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2));
INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1));
INLINE(void Output(Bytecode bytecode, uint32_t operand0));
INLINE(void Output(Bytecode bytecode));
// Returns the current source position for the given |bytecode|.
INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
INLINE(void OutputJump(Bytecode bytecode, BytecodeLabel* label));
INLINE(void OutputJump(Bytecode bytecode, uint32_t operand0,
BytecodeLabel* label));
#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
template <typename... Operands> \
INLINE(void Output##Name(Operands... operands)); \
template <typename... Operands> \
INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
#undef DECLARE_BYTECODE_OUTPUT
bool RegisterIsValid(Register reg) const;
bool OperandsAreValid(Bytecode bytecode, int operand_count,
uint32_t operand0 = 0, uint32_t operand1 = 0,
uint32_t operand2 = 0, uint32_t operand3 = 0) const;
static uint32_t RegisterOperand(Register reg) {
return static_cast<uint32_t>(reg.ToOperand());
}
static uint32_t SignedOperand(int value) {
return static_cast<uint32_t>(value);
}
static uint32_t UnsignedOperand(int value) {
DCHECK_GE(value, 0);
return static_cast<uint32_t>(value);
}
static uint32_t UnsignedOperand(size_t value) {
DCHECK_LE(value, kMaxUInt32);
return static_cast<uint32_t>(value);
}
bool RegisterListIsValid(RegisterList reg_list) const;
// Set position for return.
void SetReturnPosition();
@ -378,6 +366,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// during bytecode generation.
BytecodeArrayBuilder& Illegal();
void PrepareToOutputBytecode(Bytecode bytecode);
void LeaveBasicBlock() { return_seen_in_block_ = false; }
BytecodeArrayWriter* bytecode_array_writer() {
@ -406,6 +396,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
BytecodePipelineStage* pipeline_;
BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_;
static int const kNoFeedbackSlot = 0;


@ -14,8 +14,8 @@ namespace interpreter {
#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
V(RegList, OperandTypeInfo::kScalableSignedByte) \
V(Reg, OperandTypeInfo::kScalableSignedByte) \
V(RegList, OperandTypeInfo::kScalableSignedByte) \
V(RegPair, OperandTypeInfo::kScalableSignedByte)
#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
@ -23,22 +23,25 @@ namespace interpreter {
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
#define SCALAR_OPERAND_TYPE_LIST(V) \
#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
V(Imm, OperandTypeInfo::kScalableSignedByte) \
V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Imm, OperandTypeInfo::kScalableSignedByte)
#define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
INVALID_OPERAND_TYPE_LIST(V) \
SCALAR_OPERAND_TYPE_LIST(V)
UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
// The list of operand types used by bytecodes.
#define OPERAND_TYPE_LIST(V) \


@ -13,7 +13,7 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage)
: next_stage_(next_stage), last_(Bytecode::kIllegal) {
: next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
InvalidateLast();
}
@ -77,8 +77,7 @@ void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
// source position information. NOP without source information can
// always be elided.
DCHECK(node->bytecode() != Bytecode::kNop || node->source_info().is_valid());
last_.Clone(node);
last_ = *node;
}
bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
@ -142,7 +141,7 @@ void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
current->source_info_ptr()->Clone(last->source_info());
current->set_source_info(last->source_info());
}
}
@ -153,7 +152,7 @@ void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, 0, current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
current->source_info_ptr()->Clone(last->source_info());
current->set_source_info(last->source_info());
}
}
@ -223,7 +222,7 @@ void BytecodePeepholeOptimizer::ElideLastAction(
// |node| can not have a valid source position if the source
// position of last() is valid (per rules in
// CanElideLastBasedOnSourcePosition()).
node->source_info_ptr()->Clone(last()->source_info());
node->set_source_info(last()->source_info());
}
SetLast(node);
} else {
@ -314,7 +313,7 @@ void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
if (!CanElideLastBasedOnSourcePosition(node)) {
next_stage()->Write(last());
} else if (!node->source_info().is_valid()) {
node->source_info_ptr()->Clone(last()->source_info());
node->set_source_info(last()->source_info());
}
InvalidateLast();
}


@ -11,19 +11,6 @@ namespace v8 {
namespace internal {
namespace interpreter {
BytecodeNode::BytecodeNode(const BytecodeNode& other) {
memcpy(this, &other, sizeof(other));
}
BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
memcpy(this, &other, sizeof(other));
return *this;
}
void BytecodeNode::Clone(const BytecodeNode* const other) {
memcpy(this, other, sizeof(*other));
}
void BytecodeNode::Print(std::ostream& os) const {
#ifdef DEBUG
std::ios saved_state(nullptr);


@ -95,14 +95,6 @@ class BytecodeSourceInfo final {
source_position_ = source_position;
}
// Clones a source position. The current instance is expected to be
// invalid.
void Clone(const BytecodeSourceInfo& other) {
DCHECK(!is_valid());
position_type_ = other.position_type_;
source_position_ = other.source_position_;
}
int source_position() const {
DCHECK(is_valid());
return source_position_;
@ -142,79 +134,77 @@ class BytecodeSourceInfo final {
// These must be allocated by a BytecodeNodeAllocator instance.
class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
public:
INLINE(BytecodeNode(const Bytecode bytecode,
BytecodeSourceInfo* source_info = nullptr))
INLINE(BytecodeNode(Bytecode bytecode,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(0),
operand_scale_(OperandScale::kSingle) {
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
AttachSourceInfo(source_info);
}
INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
BytecodeSourceInfo* source_info = nullptr))
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(1),
operand_scale_(OperandScale::kSingle) {
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
AttachSourceInfo(source_info);
}
INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
uint32_t operand1,
BytecodeSourceInfo* source_info = nullptr))
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(2),
operand_scale_(OperandScale::kSingle) {
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
AttachSourceInfo(source_info);
}
INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
BytecodeSourceInfo* source_info = nullptr))
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(3),
operand_scale_(OperandScale::kSingle) {
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
AttachSourceInfo(source_info);
}
INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2, uint32_t operand3,
BytecodeSourceInfo* source_info = nullptr))
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(4),
operand_scale_(OperandScale::kSingle) {
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
SetOperand(3, operand3);
AttachSourceInfo(source_info);
}
BytecodeNode(const BytecodeNode& other);
BytecodeNode& operator=(const BytecodeNode& other);
// Replace the bytecode of this node with |bytecode| and keep the operands.
void replace_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
Bytecodes::NumberOfOperands(bytecode));
bytecode_ = bytecode;
}
void set_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
operand_count_ = 0;
operand_scale_ = OperandScale::kSingle;
}
void set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
@ -222,6 +212,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
bytecode_ = bytecode;
@ -230,6 +221,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
SetOperand(0, operand0);
SetOperand(1, operand1);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
@ -241,9 +233,6 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
SetOperand(2, operand2);
}
// Clone |other|.
void Clone(const BytecodeNode* const other);
// Print to stream |os|.
void Print(std::ostream& os) const;
@ -268,18 +257,6 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
SetOperand(operand_count() - 1, extra_operand);
}
// Updates the operand at |operand_index| to |operand|.
void UpdateOperand(int operand_index, uint32_t operand) {
DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(bytecode()));
operands_[operand_index] = operand;
if ((Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index) &&
Bytecodes::ScaleForSignedOperand(operand) != operand_scale_) ||
(Bytecodes::OperandIsScalableUnsignedByte(bytecode(), operand_index) &&
Bytecodes::ScaleForUnsignedOperand(operand) != operand_scale_)) {
UpdateScale();
}
}
Bytecode bytecode() const { return bytecode_; }
uint32_t operand(int i) const {
@ -292,27 +269,14 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
OperandScale operand_scale() const { return operand_scale_; }
const BytecodeSourceInfo& source_info() const { return source_info_; }
BytecodeSourceInfo* source_info_ptr() { return &source_info_; }
void set_source_info(BytecodeSourceInfo source_info) {
source_info_ = source_info;
}
bool operator==(const BytecodeNode& other) const;
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
INLINE(void AttachSourceInfo(BytecodeSourceInfo* source_info)) {
if (source_info && source_info->is_valid()) {
// Statement positions need to be emitted immediately. Expression
// positions can be pushed back until a bytecode is found that can
// throw (if expression position filtering is turned on). We only
// invalidate the existing source position information if it is used.
if (source_info->is_statement() ||
!FLAG_ignition_filter_expression_positions ||
!Bytecodes::IsWithoutExternalSideEffects(bytecode())) {
source_info_.Clone(*source_info);
source_info->set_invalid();
}
}
}
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
@ -329,13 +293,6 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
UpdateScaleForOperand(operand_index, operand);
}
void UpdateScale() {
operand_scale_ = OperandScale::kSingle;
for (int i = 0; i < operand_count(); i++) {
UpdateScaleForOperand(i, operands_[i]);
}
}
Bytecode bytecode_;
uint32_t operands_[Bytecodes::kMaxOperands];
int operand_count_;


@ -230,81 +230,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
DCHECK(accumulator_info_->register_value() == accumulator_);
}
// override
Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
FlushState();
return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1,
parameter_count, handler_table);
}
// override
void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
// Jumps are handled by WriteJump.
DCHECK(!Bytecodes::IsJump(node->bytecode()));
//
// Transfers with observable registers as the destination will be
// immediately materialized so the source position information will
// be ordered correctly.
//
// Transfers without observable destination registers will initially
// be emitted as Nop's with the source position. They may, or may
// not, be materialized by the optimizer. However, the source
// position is not lost and being attached to a Nop is fine as the
// destination register is not observable in the debugger.
//
switch (node->bytecode()) {
case Bytecode::kLdar: {
DoLdar(node);
return;
}
case Bytecode::kStar: {
DoStar(node);
return;
}
case Bytecode::kMov: {
DoMov(node);
return;
}
default:
break;
}
if (node->bytecode() == Bytecode::kDebugger ||
node->bytecode() == Bytecode::kSuspendGenerator) {
// All state must be flushed before emitting
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
FlushState();
}
PrepareOperands(node);
next_stage_->Write(node);
}
// override
void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
BytecodeLabel* label) {
FlushState();
next_stage_->WriteJump(node, label);
}
// override
void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
FlushState();
next_stage_->BindLabel(label);
}
// override
void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
BytecodeLabel* label) {
// There is no need to flush here, it will have been flushed when |target|
// was bound.
next_stage_->BindLabel(target, label);
}
void BytecodeRegisterOptimizer::FlushState() {
void BytecodeRegisterOptimizer::Flush() {
if (!flush_required_) {
return;
}
@ -332,7 +258,7 @@ void BytecodeRegisterOptimizer::FlushState() {
void BytecodeRegisterOptimizer::OutputRegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
BytecodeSourceInfo* source_info) {
BytecodeSourceInfo source_info) {
Register input = input_info->register_value();
Register output = output_info->register_value();
DCHECK_NE(input.index(), output.index());
@ -404,7 +330,7 @@ void BytecodeRegisterOptimizer::AddToEquivalenceSet(
void BytecodeRegisterOptimizer::RegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
BytecodeSourceInfo* source_info) {
BytecodeSourceInfo source_info) {
// Materialize an alternate in the equivalence set that
// |output_info| is leaving.
if (output_info->materialized()) {
@ -423,7 +349,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
output_info->set_materialized(false);
RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
OutputRegisterTransfer(materialized_info, output_info, source_info);
} else if (source_info->is_valid()) {
} else if (source_info.is_valid()) {
// Emit a placeholder nop to maintain source position info.
EmitNopForSourceInfo(source_info);
}
@ -437,60 +363,32 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
}
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
BytecodeSourceInfo* source_info) const {
DCHECK(source_info->is_valid());
BytecodeSourceInfo source_info) const {
DCHECK(source_info.is_valid());
BytecodeNode nop(Bytecode::kNop, source_info);
next_stage_->Write(&nop);
}
void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) {
Register input = GetRegisterInputOperand(
0, node->bytecode(), node->operands(), node->operand_count());
RegisterInfo* input_info = GetRegisterInfo(input);
RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr());
}
void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) {
Register input = GetRegisterInputOperand(
0, node->bytecode(), node->operands(), node->operand_count());
RegisterInfo* input_info = GetRegisterInfo(input);
Register output = GetRegisterOutputOperand(
1, node->bytecode(), node->operands(), node->operand_count());
RegisterInfo* output_info = GetRegisterInfo(output);
RegisterTransfer(input_info, output_info, node->source_info_ptr());
}
void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) {
Register output = GetRegisterOutputOperand(
0, node->bytecode(), node->operands(), node->operand_count());
RegisterInfo* output_info = GetRegisterInfo(output);
RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr());
}
void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
RegisterInfo* reg_info) {
void BytecodeRegisterOptimizer::PrepareOutputRegister(Register reg) {
RegisterInfo* reg_info = GetRegisterInfo(reg);
if (reg_info->materialized()) {
CreateMaterializedEquivalent(reg_info);
}
reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
max_register_index_ =
std::max(max_register_index_, reg_info->register_value().index());
reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
Register start, int count) {
for (int i = 0; i < count; ++i) {
Register reg(start.index() + i);
RegisterInfo* reg_info = GetRegisterInfo(reg);
PrepareRegisterOutputOperand(reg_info);
void BytecodeRegisterOptimizer::PrepareOutputRegisterList(
RegisterList reg_list) {
int start_index = reg_list.first_register().index();
for (int i = 0; i < reg_list.register_count(); ++i) {
Register current(start_index + i);
PrepareOutputRegister(current);
}
}
Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
Register reg) {
// For a temporary register, RegInfo state may need to be created. For
// locals and parameters, the RegInfo state is created in the
// BytecodeRegisterOptimizer constructor.
Register BytecodeRegisterOptimizer::GetInputRegister(Register reg) {
RegisterInfo* reg_info = GetRegisterInfo(reg);
if (reg_info->materialized()) {
return reg;
@ -501,124 +399,49 @@ Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
}
}
void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
BytecodeNode* const node, Register reg, int operand_index) {
Register equivalent = GetEquivalentRegisterForInputOperand(reg);
node->UpdateOperand(operand_index,
static_cast<uint32_t>(equivalent.ToOperand()));
}
void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
int count) {
for (int i = 0; i < count; ++i) {
Register current(start.index() + i);
RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
RegisterList reg_list) {
if (reg_list.register_count() == 1) {
// If there is only a single register, treat it as a normal input register.
Register reg(GetInputRegister(reg_list.first_register()));
return RegisterList(reg.index(), 1);
} else {
int start_index = reg_list.first_register().index();
for (int i = 0; i < reg_list.register_count(); ++i) {
Register current(start_index + i);
RegisterInfo* input_info = GetRegisterInfo(current);
Materialize(input_info);
}
}
void BytecodeRegisterOptimizer::PrepareRegisterOperands(
BytecodeNode* const node) {
//
// For each input operand, get a materialized equivalent if it is
// just a single register, otherwise materialize register range.
// Update operand_scale if necessary.
//
// For each output register about to be clobbered, materialize an
// equivalent if it exists. Put each register in its own equivalence set.
//
const uint32_t* operands = node->operands();
int operand_count = node->operand_count();
const OperandType* operand_types =
Bytecodes::GetOperandTypes(node->bytecode());
for (int i = 0; i < operand_count; ++i) {
int count;
if (operand_types[i] == OperandType::kRegList) {
DCHECK_LT(i, operand_count - 1);
DCHECK(operand_types[i + 1] == OperandType::kRegCount);
count = static_cast<int>(operands[i + 1]);
} else {
count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
}
if (count == 0) {
continue;
}
Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
if (Bytecodes::IsRegisterInputOperandType(operand_types[i])) {
if (count == 1) {
PrepareRegisterInputOperand(node, reg, i);
} else if (count > 1) {
PrepareRegisterRangeInputOperand(reg, count);
}
} else if (Bytecodes::IsRegisterOutputOperandType(operand_types[i])) {
PrepareRegisterRangeOutputOperand(reg, count);
}
return reg_list;
}
}
void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target aren't
// known).
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
Flush();
}
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in its place.
if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
if (Bytecodes::ReadsAccumulator(bytecode) &&
!accumulator_info_->materialized()) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
if (Bytecodes::WritesAccumulator(node->bytecode())) {
PrepareRegisterOutputOperand(accumulator_info_);
if (Bytecodes::WritesAccumulator(bytecode)) {
PrepareOutputRegister(accumulator_);
}
}
void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
PrepareAccumulator(node);
PrepareRegisterOperands(node);
}
// static
Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
DCHECK_LT(index, operand_count);
DCHECK(Bytecodes::IsRegisterInputOperandType(
Bytecodes::GetOperandType(bytecode, index)));
return OperandToRegister(operands[index]);
}
// static
Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
DCHECK_LT(index, operand_count);
DCHECK(Bytecodes::IsRegisterOutputOperandType(
Bytecodes::GetOperandType(bytecode, index)));
return OperandToRegister(operands[index]);
}
BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
DCHECK_LT(index, register_info_table_.size());
return register_info_table_[index];
}
BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
return index < register_info_table_.size() ? register_info_table_[index]
: NewRegisterInfo(reg);
}
BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
DCHECK_GE(index, register_info_table_.size());
GrowRegisterMap(reg);
return register_info_table_[index];
}
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg);


@ -18,8 +18,7 @@ namespace interpreter {
// liberally for correctness and convenience and this stage removes
// transfers that are not required and preserves correctness.
class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
: public NON_EXPORTED_BASE(BytecodePipelineStage),
public NON_EXPORTED_BASE(BytecodeRegisterAllocator::Observer),
: public NON_EXPORTED_BASE(BytecodeRegisterAllocator::Observer),
public NON_EXPORTED_BASE(ZoneObject) {
public:
BytecodeRegisterOptimizer(Zone* zone,
@ -28,14 +27,41 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
BytecodePipelineStage* next_stage);
virtual ~BytecodeRegisterOptimizer() {}
// BytecodePipelineStage interface.
void Write(BytecodeNode* node) override;
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
// Perform explicit register transfer operations.
void DoLdar(Register input, BytecodeSourceInfo source_info) {
RegisterInfo* input_info = GetRegisterInfo(input);
RegisterTransfer(input_info, accumulator_info_, source_info);
}
void DoStar(Register output, BytecodeSourceInfo source_info) {
RegisterInfo* output_info = GetRegisterInfo(output);
RegisterTransfer(accumulator_info_, output_info, source_info);
}
void DoMov(Register input, Register output, BytecodeSourceInfo source_info) {
RegisterInfo* input_info = GetRegisterInfo(input);
RegisterInfo* output_info = GetRegisterInfo(output);
RegisterTransfer(input_info, output_info, source_info);
}
// Materialize all live registers and flush equivalence sets.
void Flush();
// Prepares for |bytecode|.
void PrepareForBytecode(Bytecode bytecode);
// Prepares |reg| for being used as an output operand.
void PrepareOutputRegister(Register reg);
// Prepares registers in |reg_list| for being used as an output operand.
void PrepareOutputRegisterList(RegisterList reg_list);
// Returns an equivalent register to |reg| to be used as an input operand.
Register GetInputRegister(Register reg);
// Returns an equivalent register list to |reg_list| to be used as an input
// operand.
RegisterList GetInputRegisterList(RegisterList reg_list);
int maxiumum_register_index() const { return max_register_index_; }
private:
static const uint32_t kInvalidEquivalenceId;
@ -47,48 +73,20 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
void RegisterListAllocateEvent(RegisterList reg_list) override;
void RegisterListFreeEvent(RegisterList reg) override;
// Helpers for BytecodePipelineStage interface.
void FlushState();
// Update internal state for register transfer from |input| to
// |output| using |source_info| as source position information if
// any bytecodes are emitted due to transfer.
void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
BytecodeSourceInfo* source_info);
BytecodeSourceInfo source_info);
// Emit a register transfer bytecode from |input| to |output|.
void OutputRegisterTransfer(RegisterInfo* input, RegisterInfo* output,
BytecodeSourceInfo* source_info = nullptr);
void OutputRegisterTransfer(
RegisterInfo* input, RegisterInfo* output,
BytecodeSourceInfo source_info = BytecodeSourceInfo());
// Emits a Nop to preserve source position information in the
// bytecode pipeline.
void EmitNopForSourceInfo(BytecodeSourceInfo* source_info) const;
// Handlers for bytecode nodes for register to register transfers.
void DoLdar(BytecodeNode* node);
void DoMov(BytecodeNode* node);
void DoStar(BytecodeNode* node);
// Operand processing methods for bytecodes other than those
// performing register to register transfers.
void PrepareOperands(BytecodeNode* const node);
void PrepareAccumulator(BytecodeNode* const node);
void PrepareRegisterOperands(BytecodeNode* const node);
void PrepareRegisterOutputOperand(RegisterInfo* reg_info);
void PrepareRegisterRangeOutputOperand(Register start, int count);
void PrepareRegisterInputOperand(BytecodeNode* const node, Register reg,
int operand_index);
void PrepareRegisterRangeInputOperand(Register start, int count);
Register GetEquivalentRegisterForInputOperand(Register reg);
static Register GetRegisterInputOperand(int index, Bytecode bytecode,
const uint32_t* operands,
int operand_count);
static Register GetRegisterOutputOperand(int index, Bytecode bytecode,
const uint32_t* operands,
int operand_count);
void EmitNopForSourceInfo(BytecodeSourceInfo source_info) const;
void CreateMaterializedEquivalent(RegisterInfo* info);
RegisterInfo* GetMaterializedEquivalent(RegisterInfo* info);
@ -98,9 +96,23 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
RegisterInfo* non_set_member);
// Methods for finding and creating metadata for each register.
RegisterInfo* GetOrCreateRegisterInfo(Register reg);
RegisterInfo* GetRegisterInfo(Register reg);
RegisterInfo* NewRegisterInfo(Register reg);
RegisterInfo* GetRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
DCHECK_LT(index, register_info_table_.size());
return register_info_table_[index];
}
RegisterInfo* GetOrCreateRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
return index < register_info_table_.size() ? register_info_table_[index]
: NewRegisterInfo(reg);
}
RegisterInfo* NewRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
DCHECK_GE(index, register_info_table_.size());
GrowRegisterMap(reg);
return register_info_table_[index];
}
void GrowRegisterMap(Register reg);
bool RegisterIsTemporary(Register reg) const {

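Taken together, the interface above gives the BytecodeArrayBuilder an
explicit protocol to follow when emitting a bytecode. A hypothetical
driver snippet, mirroring what the updated unit tests below exercise (not
code from this CL):

  // Emit CallJSRuntime through the standalone optimizer: prepare state for
  // the bytecode, then resolve the input register range.
  RegisterList PrepareCallJSRuntimeOperands(
      BytecodeRegisterOptimizer* optimizer, RegisterList args) {
    // Flush/materialize whatever this bytecode needs (accumulator, etc.).
    optimizer->PrepareForBytecode(Bytecode::kCallJSRuntime);
    // Resolve the input range; a multi-register list is materialized so the
    // emitted operand refers to contiguous, observable registers.
    return optimizer->GetInputRegisterList(args);
  }
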

@ -57,27 +57,27 @@ class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, &info);
BytecodeNode node(bytecode, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, &info);
BytecodeNode node(bytecode, operand0, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, operand1, &info);
BytecodeNode node(bytecode, operand0, operand1, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, operand1, operand2, &info);
BytecodeNode node(bytecode, operand0, operand1, operand2, info);
writer()->Write(&node);
}
@ -85,21 +85,21 @@ void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
uint32_t operand3,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, operand1, operand2, operand3, &info);
BytecodeNode node(bytecode, operand0, operand1, operand2, operand3, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
BytecodeLabel* label,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, 0, &info);
BytecodeNode node(bytecode, 0, info);
writer()->WriteJump(&node, label);
}
void BytecodeArrayWriterUnittest::WriteJumpLoop(Bytecode bytecode,
BytecodeLabel* label, int depth,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, 0, depth, &info);
BytecodeNode node(bytecode, 0, depth, info);
writer()->WriteJump(&node, label);
}


@ -22,12 +22,12 @@ class BytecodeDeadCodeOptimizerTest : public BytecodePipelineStage,
void Write(BytecodeNode* node) override {
write_count_++;
last_written_.Clone(node);
last_written_ = *node;
}
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
write_count_++;
last_written_.Clone(node);
last_written_ = *node;
}
void BindLabel(BytecodeLabel* label) override {}
@ -57,7 +57,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, LiveCodeKept) {
CHECK_EQ(add, last_written());
BytecodeLabel target;
BytecodeNode jump(Bytecode::kJump, 0, nullptr);
BytecodeNode jump(Bytecode::kJump, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 2);
CHECK_EQ(jump, last_written());
@ -101,7 +101,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReThrowEliminated) {
TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterJumpEliminated) {
BytecodeLabel target;
BytecodeNode jump(Bytecode::kJump, 0, nullptr);
BytecodeNode jump(Bytecode::kJump, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 1);
CHECK_EQ(jump, last_written());
@ -119,7 +119,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeStillDeadAfterConditinalJump) {
CHECK_EQ(ret, last_written());
BytecodeLabel target;
BytecodeNode jump(Bytecode::kJumpIfTrue, 0, nullptr);
BytecodeNode jump(Bytecode::kJumpIfTrue, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 1);
CHECK_EQ(ret, last_written());


@ -29,12 +29,12 @@ class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
void Write(BytecodeNode* node) override {
write_count_++;
last_written_.Clone(node);
last_written_ = *node;
}
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
write_count_++;
last_written_.Clone(node);
last_written_ = *node;
}
void BindLabel(BytecodeLabel* label) override {}
@ -72,7 +72,7 @@ TEST_F(BytecodePeepholeOptimizerTest, FlushOnJump) {
CHECK_EQ(write_count(), 0);
BytecodeLabel target;
BytecodeNode jump(Bytecode::kJump, 0, nullptr);
BytecodeNode jump(Bytecode::kJump, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 2);
CHECK_EQ(jump, last_written());
@ -105,7 +105,7 @@ TEST_F(BytecodePeepholeOptimizerTest, ElideEmptyNop) {
TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
BytecodeSourceInfo source_info(3, false);
BytecodeNode nop(Bytecode::kNop, &source_info);
BytecodeNode nop(Bytecode::kNop, source_info);
optimizer()->Write(&nop);
BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
optimizer()->Write(&add);
@ -115,11 +115,12 @@ TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
}
TEST_F(BytecodePeepholeOptimizerTest, KeepStatementNop) {
BytecodeSourceInfo source_info(3, true);
BytecodeNode nop(Bytecode::kNop, &source_info);
BytecodeSourceInfo source_info_statement(3, true);
BytecodeNode nop(Bytecode::kNop, source_info_statement);
optimizer()->Write(&nop);
source_info.MakeExpressionPosition(3);
BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1, &source_info);
BytecodeSourceInfo source_info_expression(3, false);
BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1,
source_info_expression);
optimizer()->Write(&add);
Flush();
CHECK_EQ(write_count(), 2);
@ -206,7 +207,7 @@ TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRx) {
TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
BytecodeSourceInfo source_info(3, true);
BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), &source_info);
BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), source_info);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
@ -215,14 +216,14 @@ TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written().bytecode(), Bytecode::kNop);
CHECK_EQ(last_written().source_info(), second.source_info());
CHECK_EQ(last_written().source_info(), source_info);
}
TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatementStarRy) {
BytecodeLabel label;
BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
BytecodeSourceInfo source_info(0, true);
BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), &source_info);
BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), source_info);
BytecodeNode third(Bytecode::kStar, Register(3).ToOperand());
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@ -279,7 +280,7 @@ TEST_F(BytecodePeepholeOptimizerTest, LdaTrueLdaFalse) {
TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
BytecodeSourceInfo source_info(3, true);
BytecodeNode first(Bytecode::kLdaTrue, &source_info);
BytecodeNode first(Bytecode::kLdaTrue, source_info);
BytecodeNode second(Bytecode::kLdaFalse);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@ -294,7 +295,7 @@ TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
BytecodeNode first(Bytecode::kNop);
BytecodeNode second(Bytecode::kStackCheck, nullptr);
BytecodeNode second(Bytecode::kStackCheck);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
@ -306,7 +307,7 @@ TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
BytecodeSourceInfo source_info(3, true);
BytecodeNode first(Bytecode::kNop, &source_info);
BytecodeNode first(Bytecode::kNop, source_info);
BytecodeNode second(Bytecode::kStackCheck);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@ -314,8 +315,7 @@ TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
CHECK_EQ(write_count(), 0);
Flush();
CHECK_EQ(write_count(), 1);
BytecodeSourceInfo expected_source_info(3, true);
BytecodeNode expected(Bytecode::kStackCheck, &expected_source_info);
BytecodeNode expected(Bytecode::kStackCheck, source_info);
CHECK_EQ(last_written(), expected);
}
@ -353,8 +353,7 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaKeyedPropertyStar) {
static_cast<uint32_t>(Register(1).ToOperand())};
const int expected_operand_count = static_cast<int>(arraysize(operands));
BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1],
nullptr);
BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1]);
BytecodeNode second(Bytecode::kStar, operands[2]);
BytecodeNode third(Bytecode::kReturn);
optimizer()->Write(&first);
@ -460,7 +459,7 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
BytecodeSourceInfo source_info(3, true);
BytecodeNode first(Bytecode::kLdaSmi, imm_operand, &source_info);
BytecodeNode first(Bytecode::kLdaSmi, imm_operand, source_info);
uint32_t reg_operand = Register(0).ToOperand();
uint32_t idx_operand = 1;
BytecodeNode second(operator_replacement[0], reg_operand, idx_operand);
@ -473,7 +472,7 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
CHECK_EQ(last_written().operand(0), imm_operand);
CHECK_EQ(last_written().operand(1), reg_operand);
CHECK_EQ(last_written().operand(2), idx_operand);
CHECK_EQ(last_written().source_info(), first.source_info());
CHECK_EQ(last_written().source_info(), source_info);
Reset();
}
}
@ -490,10 +489,10 @@ TEST_F(BytecodePeepholeOptimizerTest, NotMergingLdaSmiWithBinaryOp) {
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
BytecodeSourceInfo source_info(3, true);
BytecodeNode first(Bytecode::kLdaSmi, imm_operand, &source_info);
BytecodeNode first(Bytecode::kLdaSmi, imm_operand, source_info);
uint32_t reg_operand = Register(0).ToOperand();
source_info.MakeStatementPosition(4);
BytecodeNode second(operator_replacement[0], reg_operand, 1, &source_info);
BytecodeNode second(operator_replacement[0], reg_operand, 1, source_info);
optimizer()->Write(&first);
optimizer()->Write(&second);
CHECK_EQ(last_written(), first);


@ -115,11 +115,11 @@ TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo first_source_info(3, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], &first_source_info);
operands[3], first_source_info);
CHECK_EQ(node, node);
BytecodeSourceInfo second_source_info(3, true);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
operands[2], operands[3], &second_source_info);
operands[2], operands[3], second_source_info);
CHECK_EQ(node, other);
}
@ -127,49 +127,40 @@ TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], &source_info);
operands[3], source_info);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
operands[2], operands[3]);
CHECK_NE(node, other);
}
TEST_F(BytecodeNodeTest, Clone) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3]);
BytecodeNode clone(Bytecode::kIllegal);
clone.Clone(&node);
CHECK_EQ(clone, node);
}
TEST_F(BytecodeNodeTest, SetBytecode0) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], &source_info);
CHECK_EQ(node.source_info(), BytecodeSourceInfo(77, false));
operands[3], source_info);
CHECK_EQ(node.source_info(), source_info);
BytecodeNode clone(Bytecode::kIllegal);
clone.Clone(&node);
clone = node;
clone.set_bytecode(Bytecode::kNop);
CHECK_EQ(clone.bytecode(), Bytecode::kNop);
CHECK_EQ(clone.operand_count(), 0);
CHECK_EQ(clone.source_info(), BytecodeSourceInfo(77, false));
CHECK_EQ(clone.source_info(), source_info);
}
TEST_F(BytecodeNodeTest, SetBytecode1) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], &source_info);
operands[3], source_info);
BytecodeNode clone(Bytecode::kIllegal);
clone.Clone(&node);
clone = node;
clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
CHECK_EQ(clone.bytecode(), Bytecode::kJump);
CHECK_EQ(clone.operand_count(), 1);
CHECK_EQ(clone.operand(0), 0x01aabbcc);
CHECK_EQ(clone.source_info(), BytecodeSourceInfo(77, false));
CHECK_EQ(clone.source_info(), source_info);
}
} // namespace interpreter


@ -62,55 +62,23 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
// Sanity tests.
TEST_F(BytecodeRegisterOptimizerTest, WriteNop) {
TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForFlush) {
Initialize(1, 1);
BytecodeNode node(Bytecode::kNop);
optimizer()->Write(&node);
Register temp = NewTemporary();
optimizer()->DoStar(temp, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
optimizer()->Flush();
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
}
TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
Initialize(1, 1);
BytecodeSourceInfo source_info(3, false);
BytecodeNode node(Bytecode::kNop, &source_info);
optimizer()->Write(&node);
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
}
TEST_F(BytecodeRegisterOptimizerTest, WriteNopStatement) {
Initialize(1, 1);
BytecodeSourceInfo source_info(3, true);
BytecodeNode node(Bytecode::kNop);
optimizer()->Write(&node);
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
}
TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
Initialize(1, 1);
Register temp = NewTemporary();
BytecodeNode node(Bytecode::kStar, temp.ToOperand());
optimizer()->Write(&node);
optimizer()->DoStar(temp, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
BytecodeLabel label;
BytecodeNode jump(Bytecode::kJump, 0, nullptr);
optimizer()->WriteJump(&jump, &label);
CHECK_EQ(write_count(), 2);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kJump);
}
TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
Initialize(1, 1);
Register temp = NewTemporary();
BytecodeNode node(Bytecode::kStar, temp.ToOperand());
optimizer()->Write(&node);
CHECK_EQ(write_count(), 0);
BytecodeLabel label;
optimizer()->BindLabel(&label);
optimizer()->PrepareForBytecode(Bytecode::kJump);
CHECK_EQ(write_count(), 1);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
@ -121,117 +89,91 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
Initialize(3, 1);
Register parameter = Register::FromParameterIndex(1, 3);
BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
optimizer()->Write(&node0);
optimizer()->DoLdar(parameter, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
Register temp = NewTemporary();
optimizer()->DoStar(temp, BytecodeSourceInfo());
BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
optimizer()->Write(&node1);
CHECK_EQ(write_count(), 0);
ReleaseTemporaries(temp);
CHECK_EQ(write_count(), 0);
BytecodeNode node2(Bytecode::kReturn);
optimizer()->Write(&node2);
CHECK_EQ(write_count(), 2);
optimizer()->PrepareForBytecode(Bytecode::kReturn);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kReturn);
}
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
Initialize(3, 1);
BytecodeNode node0(Bytecode::kLdaSmi, 3);
optimizer()->Write(&node0);
CHECK_EQ(write_count(), 1);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
BytecodeNode node1(Bytecode::kStar, temp1.ToOperand());
optimizer()->Write(&node1);
optimizer()->DoStar(temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
CHECK_EQ(write_count(), 1);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp1.ToOperand());
optimizer()->DoMov(temp1, temp0, BytecodeSourceInfo());
CHECK_EQ(write_count(), 1);
BytecodeNode node2(Bytecode::kLdaSmi, 1);
optimizer()->Write(&node2);
CHECK_EQ(write_count(), 3);
BytecodeNode node3(Bytecode::kMov, temp1.ToOperand(), temp0.ToOperand());
optimizer()->Write(&node3);
CHECK_EQ(write_count(), 3);
ReleaseTemporaries(temp1);
CHECK_EQ(write_count(), 3);
BytecodeNode node4(Bytecode::kLdar, temp0.ToOperand());
optimizer()->Write(&node4);
CHECK_EQ(write_count(), 3);
BytecodeNode node5(Bytecode::kReturn);
optimizer()->Write(&node5);
CHECK_EQ(write_count(), 5);
CHECK_EQ(output()->at(3).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(3).operand(0), temp1.ToOperand());
CHECK_EQ(output()->at(4).bytecode(), Bytecode::kReturn);
CHECK_EQ(write_count(), 1);
optimizer()->DoLdar(temp0, BytecodeSourceInfo());
CHECK_EQ(write_count(), 1);
optimizer()->PrepareForBytecode(Bytecode::kReturn);
CHECK_EQ(write_count(), 2);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(1).operand(0), temp1.ToOperand());
}
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) {
Initialize(3, 1);
BytecodeNode node0(Bytecode::kLdaSmi, 3);
optimizer()->Write(&node0);
CHECK_EQ(write_count(), 1);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
optimizer()->Write(&node1);
CHECK_EQ(write_count(), 1);
BytecodeNode node2(Bytecode::kStar, temp1.ToOperand());
optimizer()->Write(&node2);
CHECK_EQ(write_count(), 1);
optimizer()->DoStar(temp0, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
optimizer()->DoStar(temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
ReleaseTemporaries(temp1);
BytecodeLabel label;
BytecodeNode jump(Bytecode::kJump, 0, nullptr);
optimizer()->WriteJump(&jump, &label);
BytecodeNode node3(Bytecode::kReturn);
optimizer()->Write(&node3);
CHECK_EQ(write_count(), 4);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
CHECK_EQ(output()->at(2).bytecode(), Bytecode::kJump);
CHECK_EQ(output()->at(3).bytecode(), Bytecode::kReturn);
optimizer()->Flush();
CHECK_EQ(write_count(), 1);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp0.ToOperand());
}
TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
Initialize(3, 1);
Register parameter = Register::FromParameterIndex(1, 3);
BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
optimizer()->Write(&node0);
optimizer()->DoLdar(parameter, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
Register local = Register(0);
BytecodeNode node1(Bytecode::kStar, local.ToOperand());
optimizer()->Write(&node1);
optimizer()->DoStar(local, BytecodeSourceInfo());
CHECK_EQ(write_count(), 1);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kMov);
CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
CHECK_EQ(output()->at(0).operand(1), local.ToOperand());
BytecodeNode node2(Bytecode::kReturn);
optimizer()->Write(&node2);
CHECK_EQ(write_count(), 3);
optimizer()->PrepareForBytecode(Bytecode::kReturn);
CHECK_EQ(write_count(), 2);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(1).operand(0), local.ToOperand());
CHECK_EQ(output()->at(2).bytecode(), Bytecode::kReturn);
}
TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotMaterializedForInput) {
TEST_F(BytecodeRegisterOptimizerTest, SingleTemporaryNotMaterializedForInput) {
Initialize(3, 1);
Register parameter = Register::FromParameterIndex(1, 3);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
BytecodeNode node0(Bytecode::kMov, parameter.ToOperand(), temp0.ToOperand());
optimizer()->Write(&node0);
BytecodeNode node1(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
optimizer()->Write(&node1);
optimizer()->DoMov(parameter, temp0, BytecodeSourceInfo());
optimizer()->DoMov(parameter, temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
BytecodeNode node2(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 1);
optimizer()->Write(&node2);
CHECK_EQ(write_count(), 1);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kCallJSRuntime);
CHECK_EQ(output()->at(0).operand(0), 0);
CHECK_EQ(output()->at(0).operand(1), parameter.ToOperand());
CHECK_EQ(output()->at(0).operand(2), 1);
Register reg = optimizer()->GetInputRegister(temp0);
RegisterList reg_list =
optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 1));
CHECK_EQ(write_count(), 0);
CHECK_EQ(parameter.index(), reg.index());
CHECK_EQ(parameter.index(), reg_list.first_register().index());
CHECK_EQ(1, reg_list.register_count());
}
TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
@ -239,32 +181,22 @@ TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
Register parameter = Register::FromParameterIndex(1, 3);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
BytecodeNode node0(Bytecode::kLdaSmi, 3);
optimizer()->Write(&node0);
CHECK_EQ(write_count(), 1);
BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
optimizer()->Write(&node1);
BytecodeNode node2(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
optimizer()->Write(&node2);
CHECK_EQ(write_count(), 1);
BytecodeNode node3(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 2);
optimizer()->Write(&node3);
CHECK_EQ(write_count(), 4);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
optimizer()->DoStar(temp0, BytecodeSourceInfo());
optimizer()->DoMov(parameter, temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdaSmi);
CHECK_EQ(output()->at(0).operand(0), 3);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
CHECK_EQ(output()->at(2).bytecode(), Bytecode::kMov);
CHECK_EQ(output()->at(2).operand(0), parameter.ToOperand());
CHECK_EQ(output()->at(2).operand(1), temp1.ToOperand());
CHECK_EQ(output()->at(3).bytecode(), Bytecode::kCallJSRuntime);
CHECK_EQ(output()->at(3).operand(0), 0);
CHECK_EQ(output()->at(3).operand(1), temp0.ToOperand());
CHECK_EQ(output()->at(3).operand(2), 2);
optimizer()->PrepareForBytecode(Bytecode::kCallJSRuntime);
RegisterList reg_list =
optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 2));
CHECK_EQ(temp0.index(), reg_list.first_register().index());
CHECK_EQ(2, reg_list.register_count());
CHECK_EQ(write_count(), 2);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp0.ToOperand());
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kMov);
CHECK_EQ(output()->at(1).operand(0), parameter.ToOperand());
CHECK_EQ(output()->at(1).operand(1), temp1.ToOperand());
}
} // namespace interpreter


@ -95,42 +95,14 @@ TEST(OperandScaling, ScalableAndNonScalable) {
TEST(Bytecodes, RegisterOperands) {
CHECK(Bytecodes::IsRegisterOperandType(OperandType::kReg));
CHECK(Bytecodes::IsRegisterOperandType(OperandType::kRegPair));
CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kReg));
CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kRegPair));
CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kRegList));
CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::kReg));
CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::kRegOut));
CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::kRegOut));
#define IS_REGISTER_OPERAND_TYPE(Name, _) \
CHECK(Bytecodes::IsRegisterOperandType(OperandType::k##Name));
REGISTER_OPERAND_TYPE_LIST(IS_REGISTER_OPERAND_TYPE)
#undef IS_REGISTER_OPERAND_TYPE
#define IS_NOT_REGISTER_OPERAND_TYPE(Name, _) \
CHECK(!Bytecodes::IsRegisterOperandType(OperandType::k##Name));
NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OPERAND_TYPE)
#undef IS_NOT_REGISTER_OPERAND_TYPE
#define IS_REGISTER_INPUT_OPERAND_TYPE(Name, _) \
CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::k##Name));
REGISTER_INPUT_OPERAND_TYPE_LIST(IS_REGISTER_INPUT_OPERAND_TYPE)
#undef IS_REGISTER_INPUT_OPERAND_TYPE
#define IS_NOT_REGISTER_INPUT_OPERAND_TYPE(Name, _) \
CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::k##Name));
NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_INPUT_OPERAND_TYPE);
REGISTER_OUTPUT_OPERAND_TYPE_LIST(IS_NOT_REGISTER_INPUT_OPERAND_TYPE)
#undef IS_NOT_REGISTER_INPUT_OPERAND_TYPE
#define IS_REGISTER_OUTPUT_OPERAND_TYPE(Name, _) \
CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::k##Name));
REGISTER_OUTPUT_OPERAND_TYPE_LIST(IS_REGISTER_OUTPUT_OPERAND_TYPE)
#undef IS_REGISTER_OUTPUT_OPERAND_TYPE
#define IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE(Name, _) \
CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::k##Name));
NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE)
REGISTER_INPUT_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE)
#undef IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE
CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::kRegOutPair));
}
TEST(Bytecodes, DebugBreakExistForEachBytecode) {