// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-assembler.h"

#include <ostream>

#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/utils.h"
#include "src/zone/zone.h"
#define REPEAT_1_TO_2(V, T) V(T) V(T, T)
#define REPEAT_1_TO_3(V, T) REPEAT_1_TO_2(V, T) V(T, T, T)
#define REPEAT_1_TO_4(V, T) REPEAT_1_TO_3(V, T) V(T, T, T, T)
#define REPEAT_1_TO_5(V, T) REPEAT_1_TO_4(V, T) V(T, T, T, T, T)
#define REPEAT_1_TO_6(V, T) REPEAT_1_TO_5(V, T) V(T, T, T, T, T, T)
#define REPEAT_1_TO_7(V, T) REPEAT_1_TO_6(V, T) V(T, T, T, T, T, T, T)
#define REPEAT_1_TO_8(V, T) REPEAT_1_TO_7(V, T) V(T, T, T, T, T, T, T, T)
#define REPEAT_1_TO_9(V, T) REPEAT_1_TO_8(V, T) V(T, T, T, T, T, T, T, T, T)
#define REPEAT_1_TO_10(V, T) REPEAT_1_TO_9(V, T) V(T, T, T, T, T, T, T, T, T, T)
#define REPEAT_1_TO_11(V, T) \
  REPEAT_1_TO_10(V, T) V(T, T, T, T, T, T, T, T, T, T, T)
#define REPEAT_1_TO_12(V, T) \
  REPEAT_1_TO_11(V, T) V(T, T, T, T, T, T, T, T, T, T, T, T)
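
// Illustrative expansion (not part of the build): applying one of these
// macros to a hypothetical macro F yields one invocation per arity, e.g.
//   REPEAT_1_TO_3(F, Node*)
// expands to
//   F(Node*) F(Node*, Node*) F(Node*, Node*, Node*)
// which is how the explicit template instantiations below cover every
// argument count from 1 up to the given bound.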

namespace v8 {
namespace internal {
namespace compiler {

CodeAssemblerState::CodeAssemblerState(
    Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
    Code::Flags flags, const char* name, size_t result_size)
    : CodeAssemblerState(
          isolate, zone,
          Linkage::GetStubCallDescriptor(
              isolate, zone, descriptor, descriptor.GetStackParameterCount(),
              CallDescriptor::kNoFlags, Operator::kNoProperties,
              MachineType::AnyTagged(), result_size),
          flags, name) {}

CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
                                       int parameter_count, Code::Flags flags,
                                       const char* name)
    : CodeAssemblerState(isolate, zone,
                         Linkage::GetJSCallDescriptor(
                             zone, false, parameter_count,
                             Code::ExtractKindFromFlags(flags) == Code::BUILTIN
                                 ? CallDescriptor::kPushArgumentCount
                                 : CallDescriptor::kNoFlags),
                         flags, name) {}

CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
                                       CallDescriptor* call_descriptor,
                                       Code::Flags flags, const char* name)
    : raw_assembler_(new RawMachineAssembler(
          isolate, new (zone) Graph(zone), call_descriptor,
          MachineType::PointerRepresentation(),
          InstructionSelector::SupportedMachineOperatorFlags(),
          InstructionSelector::AlignmentRequirements())),
      flags_(flags),
      name_(name),
      code_generated_(false),
      variables_(zone) {}

CodeAssemblerState::~CodeAssemblerState() {}

int CodeAssemblerState::parameter_count() const {
  return static_cast<int>(raw_assembler_->call_descriptor()->ParameterCount());
}

CodeAssembler::~CodeAssembler() {}

#if DEBUG
void CodeAssemblerState::PrintCurrentBlock(std::ostream& os) {
  raw_assembler_->PrintCurrentBlock(os);
}
#endif

void CodeAssemblerState::SetInitialDebugInformation(const char* msg,
                                                    const char* file,
                                                    int line) {
#if DEBUG
  AssemblerDebugInfo debug_info = {msg, file, line};
  raw_assembler_->SetInitialDebugInformation(debug_info);
#endif  // DEBUG
}

class BreakOnNodeDecorator final : public GraphDecorator {
 public:
  explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {}

  void Decorate(Node* node) final {
    if (node->id() == node_id_) {
      base::OS::DebugBreak();
    }
  }

 private:
  NodeId node_id_;
};

void CodeAssembler::BreakOnNode(int node_id) {
  Graph* graph = raw_assembler()->graph();
  Zone* zone = graph->zone();
  GraphDecorator* decorator =
      new (zone) BreakOnNodeDecorator(static_cast<NodeId>(node_id));
  graph->AddDecorator(decorator);
}

void CodeAssembler::RegisterCallGenerationCallbacks(
    const CodeAssemblerCallback& call_prologue,
    const CodeAssemblerCallback& call_epilogue) {
  // The callback can be registered only once.
  DCHECK(!state_->call_prologue_);
  DCHECK(!state_->call_epilogue_);
  state_->call_prologue_ = call_prologue;
  state_->call_epilogue_ = call_epilogue;
}

void CodeAssembler::UnregisterCallGenerationCallbacks() {
  state_->call_prologue_ = nullptr;
  state_->call_epilogue_ = nullptr;
}

void CodeAssembler::CallPrologue() {
  if (state_->call_prologue_) {
    state_->call_prologue_();
  }
}

void CodeAssembler::CallEpilogue() {
  if (state_->call_epilogue_) {
    state_->call_epilogue_();
  }
}

// static
Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
  DCHECK(!state->code_generated_);

  RawMachineAssembler* rasm = state->raw_assembler_.get();
  Schedule* schedule = rasm->Export();
  Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
      rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
      state->flags_, state->name_);

  state->code_generated_ = true;
  return code;
}

bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); }

bool CodeAssembler::IsFloat64RoundUpSupported() const {
  return raw_assembler()->machine()->Float64RoundUp().IsSupported();
}

bool CodeAssembler::IsFloat64RoundDownSupported() const {
  return raw_assembler()->machine()->Float64RoundDown().IsSupported();
}

bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
  return raw_assembler()->machine()->Float64RoundTiesEven().IsSupported();
}

bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
  return raw_assembler()->machine()->Float64RoundTruncate().IsSupported();
}

bool CodeAssembler::IsInt32AbsWithOverflowSupported() const {
  return raw_assembler()->machine()->Int32AbsWithOverflow().IsSupported();
}

bool CodeAssembler::IsInt64AbsWithOverflowSupported() const {
  return raw_assembler()->machine()->Int64AbsWithOverflow().IsSupported();
}

bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
  return Is64() ? IsInt64AbsWithOverflowSupported()
                : IsInt32AbsWithOverflowSupported();
}

Node* CodeAssembler::Int32Constant(int32_t value) {
  return raw_assembler()->Int32Constant(value);
}

Node* CodeAssembler::Int64Constant(int64_t value) {
  return raw_assembler()->Int64Constant(value);
}

Node* CodeAssembler::IntPtrConstant(intptr_t value) {
  return raw_assembler()->IntPtrConstant(value);
}

Node* CodeAssembler::NumberConstant(double value) {
  return raw_assembler()->NumberConstant(value);
}

Node* CodeAssembler::SmiConstant(Smi* value) {
  return BitcastWordToTaggedSigned(IntPtrConstant(bit_cast<intptr_t>(value)));
}

Node* CodeAssembler::SmiConstant(int value) {
  return SmiConstant(Smi::FromInt(value));
}

Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
  return raw_assembler()->HeapConstant(object);
}

Node* CodeAssembler::CStringConstant(const char* str) {
  return HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED));
}

Node* CodeAssembler::BooleanConstant(bool value) {
  return raw_assembler()->BooleanConstant(value);
}

Node* CodeAssembler::ExternalConstant(ExternalReference address) {
  return raw_assembler()->ExternalConstant(address);
}

Node* CodeAssembler::Float64Constant(double value) {
  return raw_assembler()->Float64Constant(value);
}

Node* CodeAssembler::NaNConstant() {
  return LoadRoot(Heap::kNanValueRootIndex);
}

bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
  Int64Matcher m(node);
  if (m.HasValue() &&
      m.IsInRange(std::numeric_limits<int32_t>::min(),
                  std::numeric_limits<int32_t>::max())) {
    out_value = static_cast<int32_t>(m.Value());
    return true;
  }

  return false;
}

bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
  Int64Matcher m(node);
  if (m.HasValue()) out_value = m.Value();
  return m.HasValue();
}

bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
  if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
    node = node->InputAt(0);
  } else {
    return false;
  }
  IntPtrMatcher m(node);
  if (m.HasValue()) {
    out_value = Smi::cast(bit_cast<Object*>(m.Value()));
    return true;
  }
  return false;
}

bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
  if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned ||
      node->opcode() == IrOpcode::kBitcastWordToTagged) {
    node = node->InputAt(0);
  }
  IntPtrMatcher m(node);
  if (m.HasValue()) out_value = m.Value();
  return m.HasValue();
}

Node* CodeAssembler::Parameter(int value) {
  return raw_assembler()->Parameter(value);
}

Node* CodeAssembler::GetJSContextParameter() {
  CallDescriptor* desc = raw_assembler()->call_descriptor();
  DCHECK(desc->IsJSFunctionCall());
  return Parameter(Linkage::GetJSCallContextParamIndex(
      static_cast<int>(desc->JSParameterCount())));
}

void CodeAssembler::Return(Node* value) {
  return raw_assembler()->Return(value);
}

void CodeAssembler::Return(Node* value1, Node* value2) {
  return raw_assembler()->Return(value1, value2);
}

void CodeAssembler::Return(Node* value1, Node* value2, Node* value3) {
  return raw_assembler()->Return(value1, value2, value3);
}

void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
  return raw_assembler()->PopAndReturn(pop, value);
}

void CodeAssembler::ReturnIf(Node* condition, Node* value) {
  Label if_return(this), if_continue(this);
  Branch(condition, &if_return, &if_continue);
  Bind(&if_return);
  Return(value);
  Bind(&if_continue);
}

void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }

void CodeAssembler::Unreachable() {
  DebugBreak();
  raw_assembler()->Unreachable();
}

void CodeAssembler::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, arraysize(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  const int prefix_len = 2;
  int length = builder.position() + 1;
  char* copy = reinterpret_cast<char*>(malloc(length + prefix_len));
  MemCopy(copy + prefix_len, builder.Finalize(), length);
  copy[0] = ';';
  copy[1] = ' ';
  raw_assembler()->Comment(copy);
}

void CodeAssembler::Bind(Label* label) { return label->Bind(); }

#if DEBUG
void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
  return label->Bind(debug_info);
}
#endif  // DEBUG

Node* CodeAssembler::LoadFramePointer() {
  return raw_assembler()->LoadFramePointer();
}

Node* CodeAssembler::LoadParentFramePointer() {
  return raw_assembler()->LoadParentFramePointer();
}

Node* CodeAssembler::LoadStackPointer() {
  return raw_assembler()->LoadStackPointer();
}

#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name)   \
  Node* CodeAssembler::name(Node* a, Node* b) { \
    return raw_assembler()->name(a, b);         \
  }
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP

Node* CodeAssembler::IntPtrAdd(Node* left, Node* right) {
  intptr_t left_constant;
  bool is_left_constant = ToIntPtrConstant(left, left_constant);
  intptr_t right_constant;
  bool is_right_constant = ToIntPtrConstant(right, right_constant);
  if (is_left_constant) {
    if (is_right_constant) {
      return IntPtrConstant(left_constant + right_constant);
    }
    if (left_constant == 0) {
      return right;
    }
  } else if (is_right_constant) {
    if (right_constant == 0) {
      return left;
    }
  }
  return raw_assembler()->IntPtrAdd(left, right);
}
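
// A sketch of the folding above (illustrative, not executed here): with both
// operands constant the addition is evaluated at graph-building time, and a
// zero operand is elided entirely:
//   IntPtrAdd(IntPtrConstant(2), IntPtrConstant(3));  // == IntPtrConstant(5)
//   IntPtrAdd(IntPtrConstant(0), x);                  // == x, no add node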

Node* CodeAssembler::IntPtrSub(Node* left, Node* right) {
  intptr_t left_constant;
  bool is_left_constant = ToIntPtrConstant(left, left_constant);
  intptr_t right_constant;
  bool is_right_constant = ToIntPtrConstant(right, right_constant);
  if (is_left_constant) {
    if (is_right_constant) {
      return IntPtrConstant(left_constant - right_constant);
    }
  } else if (is_right_constant) {
    if (right_constant == 0) {
      return left;
    }
  }
  return raw_assembler()->IntPtrSub(left, right);
}

Node* CodeAssembler::WordShl(Node* value, int shift) {
  return (shift != 0) ? raw_assembler()->WordShl(value, IntPtrConstant(shift))
                      : value;
}

Node* CodeAssembler::WordShr(Node* value, int shift) {
  return (shift != 0) ? raw_assembler()->WordShr(value, IntPtrConstant(shift))
                      : value;
}

Node* CodeAssembler::Word32Shr(Node* value, int shift) {
  return (shift != 0) ? raw_assembler()->Word32Shr(value, Int32Constant(shift))
                      : value;
}

Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
  if (raw_assembler()->machine()->Is64()) {
    value = raw_assembler()->ChangeUint32ToUint64(value);
  }
  return value;
}

Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
  if (raw_assembler()->machine()->Is64()) {
    value = raw_assembler()->ChangeInt32ToInt64(value);
  }
  return value;
}

Node* CodeAssembler::ChangeFloat64ToUintPtr(Node* value) {
  if (raw_assembler()->machine()->Is64()) {
    return raw_assembler()->ChangeFloat64ToUint64(value);
  }
  return raw_assembler()->ChangeFloat64ToUint32(value);
}

Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
  if (raw_assembler()->machine()->Is64()) {
    return raw_assembler()->RoundInt64ToFloat64(value);
  }
  return raw_assembler()->ChangeInt32ToFloat64(value);
}

#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
  Node* CodeAssembler::name(Node* a) { return raw_assembler()->name(a); }
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP

Node* CodeAssembler::Load(MachineType rep, Node* base) {
  return raw_assembler()->Load(rep, base);
}

Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset) {
  return raw_assembler()->Load(rep, base, offset);
}

Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
  return raw_assembler()->AtomicLoad(rep, base, offset);
}

Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
    Handle<Object> root = isolate()->heap()->root_handle(root_index);
    if (root->IsSmi()) {
      return SmiConstant(Smi::cast(*root));
    } else {
      return HeapConstant(Handle<HeapObject>::cast(root));
    }
  }

  Node* roots_array_start =
      ExternalConstant(ExternalReference::roots_array_start(isolate()));
  return Load(MachineType::AnyTagged(), roots_array_start,
              IntPtrConstant(root_index * kPointerSize));
}

Node* CodeAssembler::Store(Node* base, Node* value) {
  return raw_assembler()->Store(MachineRepresentation::kTagged, base, value,
                                kFullWriteBarrier);
}

Node* CodeAssembler::Store(Node* base, Node* offset, Node* value) {
  return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
                                value, kFullWriteBarrier);
}

Node* CodeAssembler::StoreWithMapWriteBarrier(Node* base, Node* offset,
                                              Node* value) {
  return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
                                value, kMapWriteBarrier);
}

Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                         Node* value) {
  return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
}

Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                         Node* offset, Node* value) {
  return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}

Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
                                 Node* offset, Node* value) {
  return raw_assembler()->AtomicStore(rep, base, offset, value);
}

#define ATOMIC_FUNCTION(name)                                         \
  Node* CodeAssembler::Atomic##name(MachineType type, Node* base,     \
                                    Node* offset, Node* value) {      \
    return raw_assembler()->Atomic##name(type, base, offset, value);  \
  }
ATOMIC_FUNCTION(Exchange);
ATOMIC_FUNCTION(Add);
ATOMIC_FUNCTION(Sub);
ATOMIC_FUNCTION(And);
ATOMIC_FUNCTION(Or);
ATOMIC_FUNCTION(Xor);
#undef ATOMIC_FUNCTION

Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
                                           Node* offset, Node* old_value,
                                           Node* new_value) {
  return raw_assembler()->AtomicCompareExchange(type, base, offset, old_value,
                                                new_value);
}

Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
  Node* roots_array_start =
      ExternalConstant(ExternalReference::roots_array_start(isolate()));
  return StoreNoWriteBarrier(MachineRepresentation::kTagged, roots_array_start,
                             IntPtrConstant(root_index * kPointerSize), value);
}

Node* CodeAssembler::Retain(Node* value) {
  return raw_assembler()->Retain(value);
}

Node* CodeAssembler::Projection(int index, Node* value) {
  return raw_assembler()->Projection(index, value);
}

void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
                                    Variable* exception_var) {
  Label success(this), exception(this, Label::kDeferred);
  success.MergeVariables();
  exception.MergeVariables();
  DCHECK(!node->op()->HasProperty(Operator::kNoThrow));

  raw_assembler()->Continuations(node, success.label_, exception.label_);

  Bind(&exception);
  const Operator* op = raw_assembler()->common()->IfException();
  Node* exception_value = raw_assembler()->AddNode(op, node, node);
  if (exception_var != nullptr) {
    exception_var->Bind(exception_value);
  }
  Goto(if_exception);

  Bind(&success);
}
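
// Usage sketch (illustrative; assumes a CSA subclass context, and the
// runtime function name is hypothetical): divert the exceptional
// continuation of a throwing call into a deferred handler while capturing
// the thrown value:
//   Variable var_exception(this, MachineRepresentation::kTagged);
//   Label if_exception(this, Label::kDeferred);
//   Node* result = CallRuntime(Runtime::kFoo, context, arg);  // hypothetical
//   GotoIfException(result, &if_exception, &var_exception);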

template <class... TArgs>
Node* CodeAssembler::CallRuntime(Runtime::FunctionId function, Node* context,
                                 TArgs... args) {
  int argc = static_cast<int>(sizeof...(args));
  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
      zone(), function, argc, Operator::kNoProperties,
      CallDescriptor::kNoFlags);
  int return_count = static_cast<int>(desc->ReturnCount());

  Node* centry =
      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
  Node* ref = ExternalConstant(ExternalReference(function, isolate()));
  Node* arity = Int32Constant(argc);

  Node* nodes[] = {centry, args..., ref, arity, context};

  CallPrologue();
  Node* return_value = raw_assembler()->CallN(desc, arraysize(nodes), nodes);
  CallEpilogue();
  return return_value;
}

// Instantiate CallRuntime() for argument counts used by CSA-generated code.
#define INSTANTIATE(...)                                       \
  template V8_EXPORT_PRIVATE Node* CodeAssembler::CallRuntime( \
      Runtime::FunctionId, __VA_ARGS__);
REPEAT_1_TO_7(INSTANTIATE, Node*)
#undef INSTANTIATE
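
// For illustration: the REPEAT_1_TO_7 application above emits seven explicit
// instantiations, from CallRuntime(Runtime::FunctionId, Node*) up to one with
// seven Node* arguments, i.e. the context plus up to six runtime-call
// arguments.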

template <class... TArgs>
Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
                                     Node* context, TArgs... args) {
  int argc = static_cast<int>(sizeof...(args));
  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
      zone(), function, argc, Operator::kNoProperties,
      CallDescriptor::kSupportsTailCalls);
  int return_count = static_cast<int>(desc->ReturnCount());

  Node* centry =
      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
  Node* ref = ExternalConstant(ExternalReference(function, isolate()));
  Node* arity = Int32Constant(argc);

  Node* nodes[] = {centry, args..., ref, arity, context};

  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}

Node* CodeAssembler::TailCallRuntimeN(Runtime::FunctionId function,
                                      Node* context, Node* argc) {
  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
      zone(), function, 0, Operator::kNoProperties,
      CallDescriptor::kSupportsTailCalls);
  int return_count = static_cast<int>(desc->ReturnCount());

  Node* centry =
      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
  Node* ref = ExternalConstant(ExternalReference(function, isolate()));

  Node* nodes[] = {centry, ref, argc, context};

  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}

// Instantiate TailCallRuntime() for argument counts used by CSA-generated
// code.
#define INSTANTIATE(...)                                           \
  template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
      Runtime::FunctionId, __VA_ARGS__);
REPEAT_1_TO_7(INSTANTIATE, Node*)
#undef INSTANTIATE

template <class... TArgs>
Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
                               size_t result_size, Node* target, Node* context,
                               TArgs... args) {
  Node* nodes[] = {target, args..., context};
  return CallStubN(descriptor, result_size, arraysize(nodes), nodes);
}

// Instantiate CallStubR() for argument counts used by CSA-generated code.
#define INSTANTIATE(...)                                     \
  template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
      const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
REPEAT_1_TO_11(INSTANTIATE, Node*)
#undef INSTANTIATE

Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
                               size_t result_size, int input_count,
                               Node* const* inputs) {
  // 2 is for target and context.
  DCHECK_LE(2, input_count);
  int argc = input_count - 2;
  DCHECK_LE(descriptor.GetParameterCount(), argc);
  // Extra arguments not mentioned in the descriptor are passed on the stack.
  int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
  DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, stack_parameter_count,
      CallDescriptor::kNoFlags, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  CallPrologue();
  Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
  CallEpilogue();
  return return_value;
}

template <class... TArgs>
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
                                  Node* target, Node* context, TArgs... args) {
  DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
  size_t result_size = 1;
  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  Node* nodes[] = {target, args..., context};
  CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}

// Instantiate TailCallStub() for argument counts used by CSA-generated code.
#define INSTANTIATE(...)                                        \
  template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallStub( \
      const CallInterfaceDescriptor& descriptor, Node*, __VA_ARGS__);
REPEAT_1_TO_12(INSTANTIATE, Node*)
#undef INSTANTIATE

template <class... TArgs>
Node* CodeAssembler::TailCallBytecodeDispatch(
    const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
  DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
  CallDescriptor* desc = Linkage::GetBytecodeDispatchCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount());

  Node* nodes[] = {target, args...};
  CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}

// Instantiate TailCallBytecodeDispatch() for argument counts used by
// CSA-generated code.
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
    const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
    Node*, Node*);

Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
                                    int input_count, Node* const* inputs) {
  CallDescriptor* desc = Linkage::GetSimplifiedCDescriptor(zone(), signature);
  return raw_assembler()->CallN(desc, input_count, inputs);
}

Node* CodeAssembler::CallCFunction1(MachineType return_type,
                                    MachineType arg0_type, Node* function,
                                    Node* arg0) {
  return raw_assembler()->CallCFunction1(return_type, arg0_type, function,
                                         arg0);
}

Node* CodeAssembler::CallCFunction2(MachineType return_type,
                                    MachineType arg0_type,
                                    MachineType arg1_type, Node* function,
                                    Node* arg0, Node* arg1) {
  return raw_assembler()->CallCFunction2(return_type, arg0_type, arg1_type,
                                         function, arg0, arg1);
}

Node* CodeAssembler::CallCFunction3(MachineType return_type,
                                    MachineType arg0_type,
                                    MachineType arg1_type,
                                    MachineType arg2_type, Node* function,
                                    Node* arg0, Node* arg1, Node* arg2) {
  return raw_assembler()->CallCFunction3(return_type, arg0_type, arg1_type,
                                         arg2_type, function, arg0, arg1, arg2);
}

Node* CodeAssembler::CallCFunction6(
    MachineType return_type, MachineType arg0_type, MachineType arg1_type,
    MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
    MachineType arg5_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
    Node* arg3, Node* arg4, Node* arg5) {
  return raw_assembler()->CallCFunction6(
      return_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
      arg5_type, function, arg0, arg1, arg2, arg3, arg4, arg5);
}

Node* CodeAssembler::CallCFunction9(
    MachineType return_type, MachineType arg0_type, MachineType arg1_type,
    MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
    MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
    MachineType arg8_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
    Node* arg3, Node* arg4, Node* arg5, Node* arg6, Node* arg7, Node* arg8) {
  return raw_assembler()->CallCFunction9(
      return_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
      arg5_type, arg6_type, arg7_type, arg8_type, function, arg0, arg1, arg2,
      arg3, arg4, arg5, arg6, arg7, arg8);
}

void CodeAssembler::Goto(Label* label) {
  label->MergeVariables();
  raw_assembler()->Goto(label->label_);
}

void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
  Label false_label(this);
  Branch(condition, true_label, &false_label);
  Bind(&false_label);
}

void CodeAssembler::GotoIfNot(Node* condition, Label* false_label) {
  Label true_label(this);
  Branch(condition, &true_label, false_label);
  Bind(&true_label);
}

void CodeAssembler::Branch(Node* condition, Label* true_label,
                           Label* false_label) {
  true_label->MergeVariables();
  false_label->MergeVariables();
  return raw_assembler()->Branch(condition, true_label->label_,
                                 false_label->label_);
}

void CodeAssembler::Switch(Node* index, Label* default_label,
                           const int32_t* case_values, Label** case_labels,
                           size_t case_count) {
  RawMachineLabel** labels =
      new (zone()->New(sizeof(RawMachineLabel*) * case_count))
          RawMachineLabel*[case_count];
  for (size_t i = 0; i < case_count; ++i) {
    labels[i] = case_labels[i]->label_;
    case_labels[i]->MergeVariables();
    default_label->MergeVariables();
  }
  return raw_assembler()->Switch(index, default_label->label_, case_values,
                                 labels, case_count);
}

bool CodeAssembler::UnalignedLoadSupported(const MachineType& machineType,
                                           uint8_t alignment) const {
  return raw_assembler()->machine()->UnalignedLoadSupported(machineType,
                                                            alignment);
}

bool CodeAssembler::UnalignedStoreSupported(const MachineType& machineType,
                                            uint8_t alignment) const {
  return raw_assembler()->machine()->UnalignedStoreSupported(machineType,
                                                             alignment);
}

// RawMachineAssembler delegate helpers:
Isolate* CodeAssembler::isolate() const { return raw_assembler()->isolate(); }

Factory* CodeAssembler::factory() const { return isolate()->factory(); }

Zone* CodeAssembler::zone() const { return raw_assembler()->zone(); }

RawMachineAssembler* CodeAssembler::raw_assembler() const {
  return state_->raw_assembler_.get();
}

// The core implementation of Variable is stored through an indirection so
// that it can outlive the often block-scoped Variable declarations. This is
// needed to ensure that variable binding and merging through phis can
// properly be verified.
class CodeAssemblerVariable::Impl : public ZoneObject {
 public:
  explicit Impl(MachineRepresentation rep)
      :
#if DEBUG
        debug_info_(AssemblerDebugInfo(nullptr, nullptr, -1)),
#endif
        value_(nullptr),
        rep_(rep) {
  }

#if DEBUG
  AssemblerDebugInfo debug_info() const { return debug_info_; }
  void set_debug_info(AssemblerDebugInfo debug_info) {
    debug_info_ = debug_info;
  }

  AssemblerDebugInfo debug_info_;
#endif  // DEBUG
  Node* value_;
  MachineRepresentation rep_;
};
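
// Usage sketch (illustrative; assumes code running inside a CodeAssembler
// subclass): a variable is bound to a node along each control-flow path, and
// labels that list the variable merge those bindings into a phi:
//   CodeAssemblerVariable var(this, MachineRepresentation::kTagged);
//   var.Bind(SmiConstant(0));
//   ...                        // rebind on other paths, merge at a label
//   Node* merged = var.value();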

CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             MachineRepresentation rep)
    : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
  state_->variables_.insert(impl_);
}

CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             MachineRepresentation rep,
                                             Node* initial_value)
    : CodeAssemblerVariable(assembler, rep) {
  Bind(initial_value);
}

#if DEBUG
CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             AssemblerDebugInfo debug_info,
                                             MachineRepresentation rep)
    : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
  impl_->set_debug_info(debug_info);
  state_->variables_.insert(impl_);
}

CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
                                             AssemblerDebugInfo debug_info,
                                             MachineRepresentation rep,
                                             Node* initial_value)
    : CodeAssemblerVariable(assembler, debug_info, rep) {
  impl_->set_debug_info(debug_info);
  Bind(initial_value);
}
#endif  // DEBUG

CodeAssemblerVariable::~CodeAssemblerVariable() {
  state_->variables_.erase(impl_);
}

void CodeAssemblerVariable::Bind(Node* value) { impl_->value_ = value; }

Node* CodeAssemblerVariable::value() const {
#if DEBUG
  if (!IsBound()) {
    std::stringstream str;
    str << "#Use of unbound variable:"
        << "#\n Variable: " << *this;
    if (state_) {
      str << "#\n Current Block: ";
      state_->PrintCurrentBlock(str);
    }
    FATAL(str.str().c_str());
  }
#endif  // DEBUG
  return impl_->value_;
}

MachineRepresentation CodeAssemblerVariable::rep() const { return impl_->rep_; }

bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }

std::ostream& operator<<(std::ostream& os,
                         const CodeAssemblerVariable::Impl& impl) {
#if DEBUG
  AssemblerDebugInfo info = impl.debug_info();
  if (info.name) os << "V" << info;
#endif  // DEBUG
  return os;
}

std::ostream& operator<<(std::ostream& os,
                         const CodeAssemblerVariable& variable) {
  os << *variable.impl_;
  return os;
}

CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler,
                                       size_t vars_count,
                                       CodeAssemblerVariable* const* vars,
                                       CodeAssemblerLabel::Type type)
    : bound_(false),
      merge_count_(0),
      state_(assembler->state()),
      label_(nullptr) {
  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
  label_ = new (buffer)
      RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
                                        : RawMachineLabel::kNonDeferred);
  for (size_t i = 0; i < vars_count; ++i) {
    variable_phis_[vars[i]->impl_] = nullptr;
  }
}

CodeAssemblerLabel::~CodeAssemblerLabel() { label_->~RawMachineLabel(); }
void CodeAssemblerLabel::MergeVariables() {
  ++merge_count_;
  for (CodeAssemblerVariable::Impl* var : state_->variables_) {
    size_t count = 0;
    Node* node = var->value_;
    if (node != nullptr) {
      auto i = variable_merges_.find(var);
      if (i != variable_merges_.end()) {
        i->second.push_back(node);
        count = i->second.size();
      } else {
        count = 1;
        variable_merges_[var] = std::vector<Node*>(1, node);
      }
    }
    // If the following assert fires, then you've jumped to a label without a
    // bound variable along that path that expects to merge its value into a
    // phi.
    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
           count == merge_count_);
    USE(count);

    // If the label is already bound, we already know the set of variables to
    // merge and phi nodes have already been created.
    if (bound_) {
      auto phi = variable_phis_.find(var);
      if (phi != variable_phis_.end()) {
        DCHECK_NOT_NULL(phi->second);
        state_->raw_assembler_->AppendPhiInput(phi->second, node);
      } else {
        auto i = variable_merges_.find(var);
        if (i != variable_merges_.end()) {
          // If the following assert fires, then you've declared a variable
          // that has the same bound value along all paths up until the point
          // you bound this label, but then later merged a path with a new
          // value for the variable after the label bind (it's not possible
          // to add phis to the bound label after the fact, just make sure to
          // list the variable in the label's constructor's list of merged
          // variables).
#if DEBUG
          if (find_if(i->second.begin(), i->second.end(),
                      [node](Node* e) -> bool { return node != e; }) !=
              i->second.end()) {
            std::stringstream str;
            str << "Unmerged variable found when jumping to block. \n"
                << "# Variable: " << *var;
            if (bound_) {
              str << "\n# Target block: " << *label_->block();
            }
            str << "\n# Current Block: ";
            state_->PrintCurrentBlock(str);
            FATAL(str.str().c_str());
          }
#endif  // DEBUG
        }
      }
    }
  }
}
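
// Merge sketch (illustrative; the single-variable Label constructor and the
// elided control flow are assumed): two paths bind different values to the
// same variable, so binding the label creates a phi of the recorded inputs:
//   CodeAssemblerVariable var(this, MachineRepresentation::kTagged);
//   Label merge(this, &var);
//   var.Bind(SmiConstant(1));
//   Goto(&merge);             // first merge: records SmiConstant(1)
//   ...
//   var.Bind(SmiConstant(2));
//   Goto(&merge);             // second merge: records SmiConstant(2)
//   Bind(&merge);             // var.value() is now a phi of the two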

#if DEBUG
void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
  if (bound_) {
    std::stringstream str;
    str << "Cannot bind the same label twice:"
        << "\n# current: " << debug_info
        << "\n# previous: " << *label_->block();
    FATAL(str.str().c_str());
  }
  state_->raw_assembler_->Bind(label_, debug_info);
  UpdateVariablesAfterBind();
}
#endif  // DEBUG

void CodeAssemblerLabel::Bind() {
  DCHECK(!bound_);
  state_->raw_assembler_->Bind(label_);
  UpdateVariablesAfterBind();
}

void CodeAssemblerLabel::UpdateVariablesAfterBind() {
  // Make sure that all variables that have changed along any path up to this
  // point are marked as merge variables.
  for (auto var : state_->variables_) {
    Node* shared_value = nullptr;
    auto i = variable_merges_.find(var);
    if (i != variable_merges_.end()) {
      for (auto value : i->second) {
        DCHECK(value != nullptr);
        if (value != shared_value) {
          if (shared_value == nullptr) {
            shared_value = value;
          } else {
            variable_phis_[var] = nullptr;
          }
        }
      }
    }
  }

  for (auto var : variable_phis_) {
    CodeAssemblerVariable::Impl* var_impl = var.first;
    auto i = variable_merges_.find(var_impl);
#if DEBUG
    bool not_found = i == variable_merges_.end();
    if (not_found || i->second.size() != merge_count_) {
      std::stringstream str;
      str << "A variable that has been marked as being merged at the label"
          << "\n# doesn't have a bound value along all of the paths that "
          << "\n# have been merged into the label up to this point."
          << "\n#"
          << "\n# This can happen in the following cases:"
          << "\n# - By explicitly marking it so in the label constructor"
          << "\n# - By having seen different bound values at branches"
          << "\n#"
          << "\n# Merge count: expected=" << merge_count_
          << " vs. found=" << (not_found ? 0 : i->second.size())
          << "\n# Variable: " << *var_impl
          << "\n# Current Block: " << *label_->block();
      FATAL(str.str().c_str());
    }
#endif  // DEBUG
    Node* phi = state_->raw_assembler_->Phi(
        var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
    variable_phis_[var_impl] = phi;
  }

  // Bind all variables to a merge phi, the common value along all paths or
  // null.
  for (auto var : state_->variables_) {
    auto i = variable_phis_.find(var);
    if (i != variable_phis_.end()) {
      var->value_ = i->second;
    } else {
      auto j = variable_merges_.find(var);
      if (j != variable_merges_.end() && j->second.size() == merge_count_) {
        var->value_ = j->second.back();
      } else {
        var->value_ = nullptr;
      }
    }
  }

  bound_ = true;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8