Separate CodeAssembler and CodeStubAssembler

This separation is needed to make two goals possible simultaneously:
* it should be possible to offer V8 components a simple, clean
  interface to TurboFan's low-level code generation that doesn't
  expose internal details of TurboFan.
* it should be possible to easily create new CodeAssembler "macros"
  that don't require a review from an OWNER of the compiler directory.

Review URL: https://codereview.chromium.org/1875583003

Cr-Commit-Position: refs/heads/master@{#35576}
This commit is contained in:
danno 2016-04-18 04:57:06 -07:00 committed by Commit bot
parent 19b58454fa
commit 973fc5b9b0
16 changed files with 1764 additions and 1712 deletions

View File

@ -755,6 +755,8 @@ source_set("v8_base") {
"src/checks.h",
"src/code-factory.cc",
"src/code-factory.h",
"src/code-stub-assembler.cc",
"src/code-stub-assembler.h",
"src/code-stubs-hydrogen.cc",
"src/code-stubs.cc",
"src/code-stubs.h",
@ -792,11 +794,11 @@ source_set("v8_base") {
"src/compiler/change-lowering.h",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-assembler.cc",
"src/compiler/code-assembler.h",
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
"src/compiler/code-stub-assembler.cc",
"src/compiler/code-stub-assembler.h",
"src/compiler/common-node-cache.cc",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator-reducer.cc",

View File

@ -3,7 +3,7 @@ include_rules = [
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-stub-assembler.h",
"+src/compiler/code-assembler.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/heap.h",

View File

@ -4,13 +4,13 @@
#include "src/builtins.h"
#include "src/api.h"
#include "src/api-arguments.h"
#include "src/api-natives.h"
#include "src/api.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
#include "src/code-factory.h"
#include "src/compiler/code-stub-assembler.h"
#include "src/code-stub-assembler.h"
#include "src/dateparser-inl.h"
#include "src/elements.h"
#include "src/frames-inl.h"
@ -352,11 +352,10 @@ BUILTIN(Illegal) {
BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
void Builtins::Generate_ObjectHasOwnProperty(
compiler::CodeStubAssembler* assembler) {
void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Label Label;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
Node* object = assembler->Parameter(0);
Node* key = assembler->Parameter(1);
@ -2134,12 +2133,11 @@ BUILTIN(MathAtan) {
namespace {
void Generate_MathRoundingOperation(
compiler::CodeStubAssembler* assembler,
compiler::Node* (compiler::CodeStubAssembler::*float64op)(
compiler::Node*)) {
typedef compiler::CodeStubAssembler::Label Label;
CodeStubAssembler* assembler,
compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(4);
@ -2196,16 +2194,15 @@ void Generate_MathRoundingOperation(
} // namespace
// ES6 section 20.2.2.10 Math.ceil ( x )
void Builtins::Generate_MathCeil(compiler::CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler,
&compiler::CodeStubAssembler::Float64Ceil);
void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
}
// ES6 section 20.2.2.11 Math.clz32 ( x )
void Builtins::Generate_MathClz32(compiler::CodeStubAssembler* assembler) {
typedef compiler::CodeStubAssembler::Label Label;
void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(4);
@ -2270,9 +2267,8 @@ void Builtins::Generate_MathClz32(compiler::CodeStubAssembler* assembler) {
}
// ES6 section 20.2.2.16 Math.floor ( x )
void Builtins::Generate_MathFloor(compiler::CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler,
&compiler::CodeStubAssembler::Float64Floor);
void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
}
// ES6 section 20.2.2.17 Math.fround ( x )
@ -2298,13 +2294,12 @@ BUILTIN(MathImul) {
}
// ES6 section 20.2.2.28 Math.round ( x )
void Builtins::Generate_MathRound(compiler::CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler,
&compiler::CodeStubAssembler::Float64Round);
void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
}
// ES6 section 20.2.2.32 Math.sqrt ( x )
void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
using compiler::Node;
Node* x = assembler->Parameter(1);
@ -2316,9 +2311,8 @@ void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
}
// ES6 section 20.2.2.35 Math.trunc ( x )
void Builtins::Generate_MathTrunc(compiler::CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler,
&compiler::CodeStubAssembler::Float64Trunc);
void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
}
// -----------------------------------------------------------------------------
@ -2327,9 +2321,9 @@ void Builtins::Generate_MathTrunc(compiler::CodeStubAssembler* assembler) {
namespace {
void Generate_GeneratorPrototypeResume(
compiler::CodeStubAssembler* assembler,
JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
typedef compiler::CodeStubAssembler::Label Label;
CodeStubAssembler* assembler, JSGeneratorObject::ResumeMode resume_mode,
char const* const method_name) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
Node* receiver = assembler->Parameter(0);
@ -2406,22 +2400,19 @@ void Generate_GeneratorPrototypeResume(
} // namespace
// ES6 section 25.3.1.2 Generator.prototype.next ( value )
void Builtins::Generate_GeneratorPrototypeNext(
compiler::CodeStubAssembler* assembler) {
void Builtins::Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler) {
Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kNext,
"[Generator].prototype.next");
}
// ES6 section 25.3.1.3 Generator.prototype.return ( value )
void Builtins::Generate_GeneratorPrototypeReturn(
compiler::CodeStubAssembler* assembler) {
void Builtins::Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler) {
Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kReturn,
"[Generator].prototype.return");
}
// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
void Builtins::Generate_GeneratorPrototypeThrow(
compiler::CodeStubAssembler* assembler) {
void Builtins::Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler) {
Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kThrow,
"[Generator].prototype.throw");
}
@ -4191,11 +4182,10 @@ BUILTIN(ObjectProtoToString) {
// ES6 section 21.1 String Objects
// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
void Builtins::Generate_StringPrototypeCharAt(
compiler::CodeStubAssembler* assembler) {
typedef compiler::CodeStubAssembler::Label Label;
void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* receiver = assembler->Parameter(0);
Node* position = assembler->Parameter(1);
@ -4287,10 +4277,10 @@ void Builtins::Generate_StringPrototypeCharAt(
// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
void Builtins::Generate_StringPrototypeCharCodeAt(
compiler::CodeStubAssembler* assembler) {
typedef compiler::CodeStubAssembler::Label Label;
CodeStubAssembler* assembler) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* receiver = assembler->Parameter(0);
Node* position = assembler->Parameter(1);
@ -4987,11 +4977,10 @@ Handle<Code> MacroAssemblerBuilder(Isolate* isolate,
Handle<Code> CodeStubAssemblerBuilder(Isolate* isolate,
BuiltinDesc const* builtin_desc) {
Zone zone(isolate->allocator());
compiler::CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
builtin_desc->flags,
builtin_desc->s_name);
CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
builtin_desc->flags, builtin_desc->s_name);
// Generate the code/adaptor.
typedef void (*Generator)(compiler::CodeStubAssembler*);
typedef void (*Generator)(CodeStubAssembler*);
Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
g(&assembler);
return assembler.GenerateCode();
@ -5147,8 +5136,8 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
namespace {
void ValidateSharedTypedArray(compiler::CodeStubAssembler* a,
compiler::Node* tagged, compiler::Node* context) {
void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
compiler::Node* context) {
using namespace compiler;
CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
@ -5202,7 +5191,7 @@ void ValidateSharedTypedArray(compiler::CodeStubAssembler* a,
}
// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
compiler::Node* ConvertTaggedAtomicIndexToWord32(compiler::CodeStubAssembler* a,
compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
compiler::Node* tagged,
compiler::Node* context) {
using namespace compiler;
@ -5246,8 +5235,7 @@ compiler::Node* ConvertTaggedAtomicIndexToWord32(compiler::CodeStubAssembler* a,
return var_result.value();
}
void ValidateAtomicIndex(compiler::CodeStubAssembler* a,
compiler::Node* index_word,
void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
compiler::Node* array_length_word,
compiler::Node* context) {
using namespace compiler;
@ -5265,7 +5253,7 @@ void ValidateAtomicIndex(compiler::CodeStubAssembler* a,
} // anonymous namespace
void Builtins::Generate_AtomicsLoadCheck(compiler::CodeStubAssembler* a) {
void Builtins::Generate_AtomicsLoadCheck(CodeStubAssembler* a) {
using namespace compiler;
Isolate* isolate = a->isolate();
Node* array = a->Parameter(1);

View File

@ -11,13 +11,9 @@
namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class CodeStubAssembler;
} // namespace compiler
// Specifies extra arguments required by a C++ builtin.
enum class BuiltinExtraArguments : uint8_t {
kNone = 0u,
@ -591,11 +587,11 @@ class Builtins {
static void Generate_ArrayCode(MacroAssembler* masm);
// ES6 section 20.2.2.10 Math.ceil ( x )
static void Generate_MathCeil(compiler::CodeStubAssembler* assembler);
static void Generate_MathCeil(CodeStubAssembler* assembler);
// ES6 section 20.2.2.11 Math.clz32 ( x )
static void Generate_MathClz32(compiler::CodeStubAssembler* assembler);
static void Generate_MathClz32(CodeStubAssembler* assembler);
// ES6 section 20.2.2.16 Math.floor ( x )
static void Generate_MathFloor(compiler::CodeStubAssembler* assembler);
static void Generate_MathFloor(CodeStubAssembler* assembler);
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
// ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
@ -607,11 +603,11 @@ class Builtins {
Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
}
// ES6 section 20.2.2.28 Math.round ( x )
static void Generate_MathRound(compiler::CodeStubAssembler* assembler);
static void Generate_MathRound(CodeStubAssembler* assembler);
// ES6 section 20.2.2.32 Math.sqrt ( x )
static void Generate_MathSqrt(compiler::CodeStubAssembler* assembler);
static void Generate_MathSqrt(CodeStubAssembler* assembler);
// ES6 section 20.2.2.35 Math.trunc ( x )
static void Generate_MathTrunc(compiler::CodeStubAssembler* assembler);
static void Generate_MathTrunc(CodeStubAssembler* assembler);
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
static void Generate_NumberConstructor(MacroAssembler* masm);
@ -619,25 +615,19 @@ class Builtins {
static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
// ES6 section 25.3.1.2 Generator.prototype.next ( value )
static void Generate_GeneratorPrototypeNext(
compiler::CodeStubAssembler* assembler);
static void Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler);
// ES6 section 25.3.1.3 Generator.prototype.return ( value )
static void Generate_GeneratorPrototypeReturn(
compiler::CodeStubAssembler* assembler);
static void Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler);
// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
static void Generate_GeneratorPrototypeThrow(
compiler::CodeStubAssembler* assembler);
static void Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler);
// ES6 section 19.1.3.2 Object.prototype.hasOwnProperty
static void Generate_ObjectHasOwnProperty(
compiler::CodeStubAssembler* assembler);
static void Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler);
// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
static void Generate_StringPrototypeCharAt(
compiler::CodeStubAssembler* assembler);
static void Generate_StringPrototypeCharAt(CodeStubAssembler* assembler);
// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
static void Generate_StringPrototypeCharCodeAt(
compiler::CodeStubAssembler* assembler);
static void Generate_StringPrototypeCharCodeAt(CodeStubAssembler* assembler);
static void Generate_StringConstructor(MacroAssembler* masm);
static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
@ -674,7 +664,7 @@ class Builtins {
static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
static void Generate_AtomicsLoadCheck(compiler::CodeStubAssembler* assembler);
static void Generate_AtomicsLoadCheck(CodeStubAssembler* assembler);
static void InitBuiltinFunctionTable();

187
src/code-stub-assembler.h Normal file
View File

@ -0,0 +1,187 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODE_STUB_ASSEMBLER_H_
#define V8_CODE_STUB_ASSEMBLER_H_
#include "src/compiler/code-assembler.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
// without modifying files in the compiler directory (and requiring a review
// from a compiler directory OWNER).
class CodeStubAssembler : public compiler::CodeAssembler {
public:
// Create with CallStub linkage.
// |result_size| specifies the number of results returned by the stub.
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeStubAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor,
Code::Flags flags, const char* name,
size_t result_size = 1);
// Create with JSCall linkage.
CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
Code::Flags flags, const char* name);
// Float64 operations.
compiler::Node* Float64Ceil(compiler::Node* x);
compiler::Node* Float64Floor(compiler::Node* x);
compiler::Node* Float64Round(compiler::Node* x);
compiler::Node* Float64Trunc(compiler::Node* x);
// Smi conversions.
compiler::Node* SmiToFloat64(compiler::Node* value);
compiler::Node* SmiFromWord32(compiler::Node* value);
compiler::Node* SmiToWord(compiler::Node* value) { return SmiUntag(value); }
compiler::Node* SmiToWord32(compiler::Node* value);
// Smi operations.
compiler::Node* SmiAdd(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiAddWithOverflow(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiSubWithOverflow(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
// Check a value for smi-ness
compiler::Node* WordIsSmi(compiler::Node* a);
// Check that the value is a positive smi.
compiler::Node* WordIsPositiveSmi(compiler::Node* a);
// Branch to {if_true} if a < b (Smi comparison), otherwise to {if_false}.
void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
Label* if_false) {
BranchIf(SmiLessThan(a, b), if_true, if_false);
}
// Branch to {if_true} if a <= b (Smi comparison), otherwise to {if_false}.
void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
Label* if_true, Label* if_false) {
BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
}
// Branch to {if_true} if {value} is NaN. Implemented via self-comparison:
// a float64 is NaN iff it compares unequal to itself, so the equal/unequal
// targets are deliberately swapped here.
void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
Label* if_false) {
BranchIfFloat64Equal(value, value, if_false, if_true);
}
// Load an object pointer from a buffer that isn't in the heap.
compiler::Node* LoadBufferObject(compiler::Node* buffer, int offset,
MachineType rep = MachineType::AnyTagged());
// Load a field from an object on the heap.
compiler::Node* LoadObjectField(compiler::Node* object, int offset,
MachineType rep = MachineType::AnyTagged());
// Load the floating point value of a HeapNumber.
compiler::Node* LoadHeapNumberValue(compiler::Node* object);
// Load the Map of an HeapObject.
compiler::Node* LoadMap(compiler::Node* object);
// Load the instance type of an HeapObject.
compiler::Node* LoadInstanceType(compiler::Node* object);
// Load the elements backing store of a JSObject.
compiler::Node* LoadElements(compiler::Node* object);
// Load the length of a fixed array base instance.
compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
// Load the bit field of a Map.
compiler::Node* LoadMapBitField(compiler::Node* map);
// Load bit field 2 of a map.
compiler::Node* LoadMapBitField2(compiler::Node* map);
// Load bit field 3 of a map.
compiler::Node* LoadMapBitField3(compiler::Node* map);
// Load the instance type of a map.
compiler::Node* LoadMapInstanceType(compiler::Node* map);
// Load the instance descriptors of a map.
compiler::Node* LoadMapDescriptors(compiler::Node* map);
// Load the hash field of a name.
compiler::Node* LoadNameHash(compiler::Node* name);
// Load the instance size of a Map.
compiler::Node* LoadMapInstanceSize(compiler::Node* map);
// Load an array element from a FixedArray.
compiler::Node* LoadFixedArrayElementInt32Index(compiler::Node* object,
compiler::Node* int32_index,
int additional_offset = 0);
compiler::Node* LoadFixedArrayElementSmiIndex(compiler::Node* object,
compiler::Node* smi_index,
int additional_offset = 0);
compiler::Node* LoadFixedArrayElementConstantIndex(compiler::Node* object,
int index);
// Store the floating point value of a HeapNumber.
compiler::Node* StoreHeapNumberValue(compiler::Node* object,
compiler::Node* value);
// Store a field to an object on the heap.
compiler::Node* StoreObjectFieldNoWriteBarrier(
compiler::Node* object, int offset, compiler::Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
// Store the Map of an HeapObject.
compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
compiler::Node* map);
// Store an array element to a FixedArray.
compiler::Node* StoreFixedArrayElementInt32Index(compiler::Node* object,
compiler::Node* index,
compiler::Node* value);
compiler::Node* StoreFixedArrayElementNoWriteBarrier(compiler::Node* object,
compiler::Node* index,
compiler::Node* value);
// Allocate a HeapNumber without initializing its value.
compiler::Node* AllocateHeapNumber();
// Allocate a HeapNumber with a specific value.
compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value);
// Allocate a SeqOneByteString with the given length.
compiler::Node* AllocateSeqOneByteString(int length);
// Allocate a SeqTwoByteString with the given length.
compiler::Node* AllocateSeqTwoByteString(int length);
// Truncate a tagged value to an untagged float64 / word32. The {context}
// parameter suggests the non-number cases may call out (e.g. to runtime
// conversion) — confirm against the .cc implementation.
compiler::Node* TruncateTaggedToFloat64(compiler::Node* context,
compiler::Node* value);
compiler::Node* TruncateTaggedToWord32(compiler::Node* context,
compiler::Node* value);
// Truncate to int32 using JavaScript truncation mode.
compiler::Node* TruncateFloat64ToInt32(compiler::Node* value);
// Truncate the floating point value of a HeapNumber to an Int32.
compiler::Node* TruncateHeapNumberValueToWord32(compiler::Node* object);
// Conversions.
compiler::Node* ChangeFloat64ToTagged(compiler::Node* value);
compiler::Node* ChangeInt32ToTagged(compiler::Node* value);
compiler::Node* ChangeUint32ToTagged(compiler::Node* value);
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
// or returns the {value} converted to a String otherwise.
compiler::Node* ToThisString(compiler::Node* context, compiler::Node* value,
char const* method_name);
// String helpers.
// Load a character from a String (might flatten a ConsString).
compiler::Node* StringCharCodeAt(compiler::Node* string,
compiler::Node* smi_index);
// Return the single character string with only {code}.
compiler::Node* StringFromCharCode(compiler::Node* code);
// Returns a node that is true if the given bit is set in |word32|.
// Decode a bit field described by the BitField-style class T (which supplies
// kShift and kMask) out of |word32|.
template <typename T>
compiler::Node* BitFieldDecode(compiler::Node* word32) {
return BitFieldDecode(word32, T::kShift, T::kMask);
}
compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
uint32_t mask);
};
} // namespace internal
} // namespace v8
#endif  // V8_CODE_STUB_ASSEMBLER_H_

View File

@ -8,7 +8,7 @@
#include "src/bootstrapper.h"
#include "src/code-factory.h"
#include "src/compiler/code-stub-assembler.h"
#include "src/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
#include "src/ic/handler-compiler.h"
@ -448,14 +448,14 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator());
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
GetCodeFlags(), name);
CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
name);
GenerateAssembly(&assembler);
return assembler.GenerateCode();
}
void AllocateHeapNumberStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
Node* result = assembler->AllocateHeapNumber();
@ -463,7 +463,7 @@ void AllocateHeapNumberStub::GenerateAssembly(
}
void AllocateMutableHeapNumberStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
Node* result = assembler->Allocate(HeapNumber::kSize);
@ -474,10 +474,10 @@ void AllocateMutableHeapNumberStub::GenerateAssembly(
}
#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Stub::GenerateAssembly( \
compiler::CodeStubAssembler* assembler) const { \
compiler::Node* result = assembler->Allocate( \
Simd128Value::kSize, compiler::CodeStubAssembler::kNone); \
void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
const { \
compiler::Node* result = \
assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
compiler::Node* map_offset = \
assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag); \
compiler::Node* map = assembler->IntPtrAdd(result, map_offset); \
@ -489,8 +489,7 @@ void AllocateMutableHeapNumberStub::GenerateAssembly(
SIMD128_TYPES(SIMD128_GEN_ASM)
#undef SIMD128_GEN_ASM
void StringLengthStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
compiler::Node* value = assembler->Parameter(0);
compiler::Node* string =
assembler->LoadObjectField(value, JSValue::kValueOffset);
@ -499,10 +498,10 @@ void StringLengthStub::GenerateAssembly(
assembler->Return(result);
}
void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
typedef compiler::CodeStubAssembler::Label Label;
void AddStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(2);
@ -855,11 +854,10 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
}
}
void SubtractStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
typedef compiler::CodeStubAssembler::Label Label;
void SubtractStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(2);
@ -1022,11 +1020,10 @@ void SubtractStub::GenerateAssembly(
}
}
void MultiplyStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void MultiplyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
typedef compiler::CodeStubAssembler::Label Label;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(2);
@ -1169,11 +1166,10 @@ void MultiplyStub::GenerateAssembly(
}
}
void DivideStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void DivideStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
typedef compiler::CodeStubAssembler::Label Label;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(2);
@ -1379,8 +1375,7 @@ void DivideStub::GenerateAssembly(
}
}
void BitwiseAndStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void BitwiseAndStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
Node* lhs = assembler->Parameter(0);
@ -1393,11 +1388,10 @@ void BitwiseAndStub::GenerateAssembly(
assembler->Return(result);
}
void ModulusStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void ModulusStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
typedef compiler::CodeStubAssembler::Label Label;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(2);
@ -1542,8 +1536,7 @@ void ModulusStub::GenerateAssembly(
}
}
void ShiftLeftStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void ShiftLeftStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
Node* lhs = assembler->Parameter(0);
@ -1558,8 +1551,7 @@ void ShiftLeftStub::GenerateAssembly(
assembler->Return(result);
}
void ShiftRightStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void ShiftRightStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
Node* lhs = assembler->Parameter(0);
@ -1575,7 +1567,7 @@ void ShiftRightStub::GenerateAssembly(
}
void ShiftRightLogicalStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
using compiler::Node;
Node* lhs = assembler->Parameter(0);
@ -1590,8 +1582,7 @@ void ShiftRightLogicalStub::GenerateAssembly(
assembler->Return(result);
}
void BitwiseOrStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void BitwiseOrStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
Node* lhs = assembler->Parameter(0);
@ -1604,8 +1595,7 @@ void BitwiseOrStub::GenerateAssembly(
assembler->Return(result);
}
void BitwiseXorStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void BitwiseXorStub::GenerateAssembly(CodeStubAssembler* assembler) const {
using compiler::Node;
Node* lhs = assembler->Parameter(0);
@ -1627,11 +1617,11 @@ enum RelationalComparisonMode {
kGreaterThanOrEqual
};
void GenerateAbstractRelationalComparison(
compiler::CodeStubAssembler* assembler, RelationalComparisonMode mode) {
typedef compiler::CodeStubAssembler::Label Label;
void GenerateAbstractRelationalComparison(CodeStubAssembler* assembler,
RelationalComparisonMode mode) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(2);
@ -1970,17 +1960,16 @@ void GenerateAbstractRelationalComparison(
enum ResultMode { kDontNegateResult, kNegateResult };
void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
compiler::Node* value,
compiler::CodeStubAssembler::Label* if_equal,
compiler::CodeStubAssembler::Label* if_notequal) {
void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
CodeStubAssembler::Label* if_equal,
CodeStubAssembler::Label* if_notequal) {
// In case of abstract or strict equality checks, we need additional checks
// for NaN values because they are not considered equal, even if both the
// left and the right hand side reference exactly the same value.
// TODO(bmeurer): This seems to violate the SIMD.js specification, but it
// seems to be what is tested in the current SIMD.js testsuite.
typedef compiler::CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
// Check if {value} is a Smi or a HeapObject.
@ -2017,11 +2006,10 @@ void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
}
void GenerateEqual_Simd128Value_HeapObject(
compiler::CodeStubAssembler* assembler, compiler::Node* lhs,
compiler::Node* lhs_map, compiler::Node* rhs, compiler::Node* rhs_map,
compiler::CodeStubAssembler::Label* if_equal,
compiler::CodeStubAssembler::Label* if_notequal) {
typedef compiler::CodeStubAssembler::Label Label;
CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
compiler::Node* rhs, compiler::Node* rhs_map,
CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
// Check if {lhs} and {rhs} have the same map.
@ -2096,14 +2084,14 @@ void GenerateEqual_Simd128Value_HeapObject(
}
// ES6 section 7.2.12 Abstract Equality Comparison
void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
void GenerateEqual(CodeStubAssembler* assembler, ResultMode mode) {
// This is a slightly optimized version of Object::Equals represented as
// scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
// change something functionality wise in here, remember to update the
// Object::Equals method as well.
typedef compiler::CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(2);
@ -2624,8 +2612,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
}
void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
ResultMode mode) {
void GenerateStrictEqual(CodeStubAssembler* assembler, ResultMode mode) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@ -2674,7 +2661,7 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
// }
// }
typedef compiler::CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
Node* lhs = assembler->Parameter(0);
@ -2881,11 +2868,11 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
}
void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
RelationalComparisonMode mode) {
typedef compiler::CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* lhs = assembler->Parameter(0);
Node* rhs = assembler->Parameter(1);
@ -3064,8 +3051,7 @@ void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
}
}
void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
ResultMode mode) {
void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@ -3082,9 +3068,9 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
// }
// return %StringEqual(lhs, rhs);
typedef compiler::CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* lhs = assembler->Parameter(0);
Node* rhs = assembler->Parameter(1);
@ -3243,80 +3229,70 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
} // namespace
void LessThanStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void LessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateAbstractRelationalComparison(assembler, kLessThan);
}
void LessThanOrEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void LessThanOrEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual);
}
void GreaterThanStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void GreaterThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateAbstractRelationalComparison(assembler, kGreaterThan);
}
void GreaterThanOrEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual);
}
void EqualStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
void EqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateEqual(assembler, kDontNegateResult);
}
void NotEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void NotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateEqual(assembler, kNegateResult);
}
void StrictEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void StrictEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStrictEqual(assembler, kDontNegateResult);
}
void StrictNotEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void StrictNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStrictEqual(assembler, kNegateResult);
}
void StringEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void StringEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringEqual(assembler, kDontNegateResult);
}
void StringNotEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void StringNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringEqual(assembler, kNegateResult);
}
void StringLessThanStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void StringLessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kLessThan);
}
void StringLessThanOrEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
}
void StringGreaterThanStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kGreaterThan);
}
void StringGreaterThanOrEqualStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
}
void ToLengthStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
typedef compiler::CodeStubAssembler::Label Label;
void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(1);
@ -3389,10 +3365,9 @@ void ToLengthStub::GenerateAssembly(
}
}
void ToBooleanStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
void ToBooleanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Label Label;
Node* value = assembler->Parameter(0);
Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
@ -3528,11 +3503,10 @@ void ToBooleanStub::GenerateAssembly(
}
}
void ToIntegerStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
typedef compiler::CodeStubAssembler::Label Label;
void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(1);
@ -3591,7 +3565,7 @@ void ToIntegerStub::GenerateAssembly(
}
void StoreInterceptorStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
Node* receiver = assembler->Parameter(0);
Node* name = assembler->Parameter(1);
@ -3602,9 +3576,9 @@ void StoreInterceptorStub::GenerateAssembly(
}
void LoadIndexedInterceptorStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
typedef compiler::CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Label Label;
Node* receiver = assembler->Parameter(0);
Node* key = assembler->Parameter(1);
Node* slot = assembler->Parameter(2);
@ -3624,8 +3598,8 @@ void LoadIndexedInterceptorStub::GenerateAssembly(
}
void FastCloneShallowObjectStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
typedef compiler::CodeStubAssembler::Label Label;
CodeStubAssembler* assembler) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
Label call_runtime(assembler);
Node* closure = assembler->Parameter(0);

View File

@ -7,8 +7,8 @@
#include "src/allocation.h"
#include "src/assembler.h"
#include "src/code-stub-assembler.h"
#include "src/codegen.h"
#include "src/compiler/code-stub-assembler.h"
#include "src/globals.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
@ -396,10 +396,9 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GenerateCode() override; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
public: \
void GenerateAssembly(compiler::CodeStubAssembler* assembler) \
const override; \
#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
public: \
void GenerateAssembly(CodeStubAssembler* assembler) const override; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@ -588,8 +587,7 @@ class TurboFanCodeStub : public CodeStub {
protected:
explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
virtual void GenerateAssembly(
compiler::CodeStubAssembler* assembler) const = 0;
virtual void GenerateAssembly(CodeStubAssembler* assembler) const = 0;
private:
DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
@ -910,7 +908,7 @@ class StoreInterceptorStub : public TurboFanCodeStub {
public:
explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
void GenerateAssembly(compiler::CodeStubAssembler* assember) const override;
void GenerateAssembly(CodeStubAssembler* assember) const override;
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
@ -2677,7 +2675,7 @@ class AllocateHeapNumberStub : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
void GenerateAssembly(CodeStubAssembler* assembler) const override;
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
@ -2689,7 +2687,7 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
void GenerateAssembly(CodeStubAssembler* assembler) const override;
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
@ -2702,8 +2700,7 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {} \
\
void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
void GenerateAssembly( \
compiler::CodeStubAssembler* assembler) const override; \
void GenerateAssembly(CodeStubAssembler* assembler) const override; \
\
DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type); \
DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub); \

View File

@ -0,0 +1,814 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/code-assembler.h"
#include <ostream>
#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
// Creates a CodeAssembler for a code stub: derives the stub's CallDescriptor
// from its CallInterfaceDescriptor via Linkage::GetStubCallDescriptor and
// delegates to the CallDescriptor-based constructor.
CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
                             const CallInterfaceDescriptor& descriptor,
                             Code::Flags flags, const char* name,
                             size_t result_size)
    : CodeAssembler(
          isolate, zone,
          Linkage::GetStubCallDescriptor(
              isolate, zone, descriptor, descriptor.GetStackParameterCount(),
              CallDescriptor::kNoFlags, Operator::kNoProperties,
              MachineType::AnyTagged(), result_size),
          flags, name) {}
// Creates a CodeAssembler for code with JS linkage and the given number of
// parameters, deriving the CallDescriptor via Linkage::GetJSCallDescriptor.
CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
                             Code::Flags flags, const char* name)
    : CodeAssembler(isolate, zone,
                    Linkage::GetJSCallDescriptor(zone, false, parameter_count,
                                                 CallDescriptor::kNoFlags),
                    flags, name) {}
// Base constructor: builds the underlying RawMachineAssembler on a fresh
// zone-allocated Graph, using the machine's pointer representation and the
// instruction selector's supported operator flags. |code_generated_| guards
// against generating code twice; |variables_| records every Variable::Impl
// created against this assembler (see Variable's constructor) so labels can
// merge them.
CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
                             CallDescriptor* call_descriptor, Code::Flags flags,
                             const char* name)
    : raw_assembler_(new RawMachineAssembler(
          isolate, new (zone) Graph(zone), call_descriptor,
          MachineType::PointerRepresentation(),
          InstructionSelector::SupportedMachineOperatorFlags())),
      flags_(flags),
      name_(name),
      code_generated_(false),
      variables_(zone) {}
CodeAssembler::~CodeAssembler() {}

// Hooks run around every non-tail call emitted through CallN/CallRuntime.
// No-ops here; present so subclasses can interpose bookkeeping.
void CodeAssembler::CallPrologue() {}

void CodeAssembler::CallEpilogue() {}
// Schedules the accumulated graph and compiles it into a Code object via the
// stub pipeline. May only be called once per assembler instance (enforced by
// the DCHECK on |code_generated_|).
Handle<Code> CodeAssembler::GenerateCode() {
  DCHECK(!code_generated_);

  Schedule* schedule = raw_assembler_->Export();
  Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
      name_);

  code_generated_ = true;
  return code;
}
// Target-machine capability queries, answered by the RawMachineAssembler's
// machine operator builder.
bool CodeAssembler::Is64() const {
  return raw_assembler_->machine()->Is64();
}

bool CodeAssembler::IsFloat64RoundUpSupported() const {
  auto* machine = raw_assembler_->machine();
  return machine->Float64RoundUp().IsSupported();
}

bool CodeAssembler::IsFloat64RoundDownSupported() const {
  auto* machine = raw_assembler_->machine();
  return machine->Float64RoundDown().IsSupported();
}

bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
  auto* machine = raw_assembler_->machine();
  return machine->Float64RoundTruncate().IsSupported();
}
// Constant-creation helpers. The machine-level ones forward to the
// RawMachineAssembler; the higher-level ones build on LoadRoot/HeapConstant.
Node* CodeAssembler::Int32Constant(int value) {
  return raw_assembler_->Int32Constant(value);
}

Node* CodeAssembler::IntPtrConstant(intptr_t value) {
  return raw_assembler_->IntPtrConstant(value);
}

Node* CodeAssembler::NumberConstant(double value) {
  return raw_assembler_->NumberConstant(value);
}

// A Smi's payload lives in the tagged pointer bits themselves, so the Smi*
// can be embedded directly as an intptr-sized constant.
Node* CodeAssembler::SmiConstant(Smi* value) {
  return IntPtrConstant(bit_cast<intptr_t>(value));
}

Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
  return raw_assembler_->HeapConstant(object);
}

Node* CodeAssembler::BooleanConstant(bool value) {
  return raw_assembler_->BooleanConstant(value);
}

Node* CodeAssembler::ExternalConstant(ExternalReference address) {
  return raw_assembler_->ExternalConstant(address);
}

Node* CodeAssembler::Float64Constant(double value) {
  return raw_assembler_->Float64Constant(value);
}

Node* CodeAssembler::BooleanMapConstant() {
  return HeapConstant(isolate()->factory()->boolean_map());
}

Node* CodeAssembler::EmptyStringConstant() {
  return LoadRoot(Heap::kempty_stringRootIndex);
}

Node* CodeAssembler::HeapNumberMapConstant() {
  return HeapConstant(isolate()->factory()->heap_number_map());
}

Node* CodeAssembler::NaNConstant() {
  return LoadRoot(Heap::kNanValueRootIndex);
}

// Smi zero stands in for "no context".
Node* CodeAssembler::NoContextConstant() {
  return SmiConstant(Smi::FromInt(0));
}

Node* CodeAssembler::NullConstant() {
  return LoadRoot(Heap::kNullValueRootIndex);
}

Node* CodeAssembler::UndefinedConstant() {
  return LoadRoot(Heap::kUndefinedValueRootIndex);
}
// Returns the node for the |value|-th incoming parameter.
Node* CodeAssembler::Parameter(int value) {
  return raw_assembler_->Parameter(value);
}

void CodeAssembler::Return(Node* value) {
  return raw_assembler_->Return(value);
}

// Binds |label| at the current position (see Label::Bind for phi creation).
void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }

Node* CodeAssembler::LoadFramePointer() {
  return raw_assembler_->LoadFramePointer();
}

Node* CodeAssembler::LoadParentFramePointer() {
  return raw_assembler_->LoadParentFramePointer();
}

Node* CodeAssembler::LoadStackPointer() {
  return raw_assembler_->LoadStackPointer();
}

// Total shift applied when converting between Smi form and plain integers.
Node* CodeAssembler::SmiShiftBitsConstant() {
  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}

// Converts a word-sized integer into Smi form by shifting it into the
// tagged-value position.
Node* CodeAssembler::SmiTag(Node* value) {
  return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
}

// Converts a Smi back to a word-sized integer (arithmetic shift preserves
// sign).
Node* CodeAssembler::SmiUntag(Node* value) {
  return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
}
// Stamp out one forwarding method per binary machine operator listed in
// CODE_ASSEMBLER_BINARY_OP_LIST (declared in the header).
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name)   \
  Node* CodeAssembler::name(Node* a, Node* b) { \
    return raw_assembler_->name(a, b);          \
  }
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP

// Convenience overload: shift by a compile-time-known amount.
Node* CodeAssembler::WordShl(Node* value, int shift) {
  return raw_assembler_->WordShl(value, IntPtrConstant(shift));
}

// Float64->Int32 truncation with C-style round-to-zero semantics.
Node* CodeAssembler::TruncateFloat64ToInt32RoundToZero(Node* a) {
  return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kRoundToZero,
                                                a);
}

// Float64->Int32 truncation with JavaScript ToInt32 semantics.
Node* CodeAssembler::TruncateFloat64ToInt32JavaScript(Node* a) {
  return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript, a);
}

// Stamp out one forwarding method per unary machine operator listed in
// CODE_ASSEMBLER_UNARY_OP_LIST (declared in the header).
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
  Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
// Produces a node for the heap root with the given index. Roots that the heap
// reports as constant are embedded directly as Smi or HeapObject constants;
// loading a non-constant root from the roots array is not implemented yet.
Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
    Handle<Object> root = isolate()->heap()->root_handle(root_index);
    if (root->IsSmi()) {
      return SmiConstant(Smi::cast(*root));
    } else {
      return HeapConstant(Handle<HeapObject>::cast(root));
    }
  }

  compiler::Node* roots_array_start =
      ExternalConstant(ExternalReference::roots_array_start(isolate()));
  USE(roots_array_start);

  // TODO(danno): Implement the root-access case where the root is not constant
  // and must be loaded from the root array.
  UNIMPLEMENTED();
  return nullptr;
}
// Bump-pointer allocation with no alignment guarantee. Loads the current
// allocation top/limit from |top_address|/|limit_address|; when the request
// fits, bumps the top, tags the old top as a heap object, and returns it.
// Otherwise falls back to Runtime::kAllocateInTargetSpace. The two paths
// merge through a pointer-representation phi.
Node* CodeAssembler::AllocateRawUnaligned(Node* size_in_bytes,
                                          AllocationFlags flags,
                                          Node* top_address,
                                          Node* limit_address) {
  Node* top = Load(MachineType::Pointer(), top_address);
  Node* limit = Load(MachineType::Pointer(), limit_address);

  // If there's not enough space, call the runtime.
  RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
      merge_runtime;
  raw_assembler_->Branch(
      raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
      &runtime_call, &no_runtime_call);

  raw_assembler_->Bind(&runtime_call);
  // AllocateInTargetSpace does not use the context.
  Node* context = IntPtrConstant(0);
  // kPretenured selects old space; everything else allocates in new space.
  Node* runtime_flags = SmiTag(Int32Constant(
      AllocateDoubleAlignFlag::encode(false) |
      AllocateTargetSpace::encode(flags & kPretenured
                                      ? AllocationSpace::OLD_SPACE
                                      : AllocationSpace::NEW_SPACE)));
  Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
                                     SmiTag(size_in_bytes), runtime_flags);
  raw_assembler_->Goto(&merge_runtime);

  // When there is enough space, return `top' and bump it up.
  raw_assembler_->Bind(&no_runtime_call);
  Node* no_runtime_result = top;
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
                      IntPtrAdd(top, size_in_bytes));
  // Tag the raw address so the result is a valid HeapObject pointer.
  no_runtime_result =
      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
  raw_assembler_->Goto(&merge_runtime);

  raw_assembler_->Bind(&merge_runtime);
  return raw_assembler_->Phi(MachineType::PointerRepresentation(),
                             runtime_result, no_runtime_result);
}
// Bump-pointer allocation with double-alignment support. If kDoubleAlignment
// is requested and the current top is misaligned, the request is grown by
// kPointerSize and a one-pointer filler is written so the returned object
// starts on an aligned boundary.
Node* CodeAssembler::AllocateRawAligned(Node* size_in_bytes,
                                        AllocationFlags flags,
                                        Node* top_address,
                                        Node* limit_address) {
  Node* top = Load(MachineType::Pointer(), top_address);
  Node* limit = Load(MachineType::Pointer(), limit_address);
  Node* adjusted_size = size_in_bytes;
  if (flags & kDoubleAlignment) {
    // TODO(epertoso): Simd128 alignment.
    RawMachineLabel aligned, not_aligned, merge;
    raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
                           &not_aligned, &aligned);

    raw_assembler_->Bind(&not_aligned);
    // Reserve one extra pointer slot for the alignment filler.
    Node* not_aligned_size =
        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
    raw_assembler_->Goto(&merge);

    raw_assembler_->Bind(&aligned);
    raw_assembler_->Goto(&merge);

    raw_assembler_->Bind(&merge);
    adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
                                        not_aligned_size, adjusted_size);
  }

  // NOTE(review): the loaded |top|/|limit| values are passed where
  // AllocateRawUnaligned expects top/limit *addresses* (it performs a Load on
  // each) -- confirm this is intended.
  Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);

  RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
  raw_assembler_->Branch(
      raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
      &doesnt_need_filler, &needs_filler);

  raw_assembler_->Bind(&needs_filler);
  // Store a filler and increase the address by kPointerSize.
  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
  // it when Simd128 alignment is supported.
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
  Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
  raw_assembler_->Goto(&merge_address);

  raw_assembler_->Bind(&doesnt_need_filler);
  Node* address_without_filler = address;
  raw_assembler_->Goto(&merge_address);

  raw_assembler_->Bind(&merge_address);
  address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
                                address_with_filler, address_without_filler);
  // Update the top.
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
                      IntPtrAdd(top, adjusted_size));
  return address;
}
// Allocates |size_in_bytes| bytes in new space, or in old space when
// kPretenured is set. On 32-bit hosts a kDoubleAlignment request is routed
// through the aligned allocator; elsewhere double alignment is implicit.
Node* CodeAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
  bool const new_space = !(flags & kPretenured);
  Node* top_address = ExternalConstant(
      new_space
          ? ExternalReference::new_space_allocation_top_address(isolate())
          : ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = ExternalConstant(
      new_space
          ? ExternalReference::new_space_allocation_limit_address(isolate())
          : ExternalReference::old_space_allocation_limit_address(isolate()));

#ifdef V8_HOST_ARCH_32_BIT
  if (flags & kDoubleAlignment) {
    return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
                              limit_address);
  }
#endif

  return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
                              limit_address);
}
// Returns the address |offset| bytes past |previous|, for carving an inner
// object out of an allocation already made.
Node* CodeAssembler::InnerAllocate(Node* previous, int offset) {
  Node* delta = IntPtrConstant(offset);
  return IntPtrAdd(previous, delta);
}
// Memory-access helpers forwarded to the RawMachineAssembler. Store emits a
// full write barrier; StoreNoWriteBarrier omits it (callers must guarantee
// the barrier is unnecessary).
Node* CodeAssembler::Load(MachineType rep, Node* base) {
  return raw_assembler_->Load(rep, base);
}

Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
  return raw_assembler_->Load(rep, base, index);
}

Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
  return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
}

Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
                           Node* value) {
  return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
}

Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                         Node* value) {
  return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
}

Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                         Node* index, Node* value) {
  return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
}

Node* CodeAssembler::Projection(int index, Node* value) {
  return raw_assembler_->Projection(index, value);
}
// Branches on |condition| through a pair of fresh local labels, then jumps on
// to the caller-supplied |if_true|/|if_false| targets.
void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
  Label taken(this), not_taken(this);
  Branch(condition, &taken, &not_taken);

  Bind(&taken);
  Goto(if_true);

  Bind(&not_taken);
  Goto(if_false);
}
// Emits a call described by |descriptor|, bracketed by the
// CallPrologue/CallEpilogue hooks.
Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
                           Node** args) {
  CallPrologue();
  Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
  CallEpilogue();
  return return_value;
}

// Tail-call variant; no prologue/epilogue since control does not return here.
Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
                               Node** args) {
  return raw_assembler_->TailCallN(descriptor, code_target, args);
}
// CallRuntime overloads for zero to four arguments. Each forwards to the
// matching RawMachineAssembler::CallRuntimeN, bracketed by the
// CallPrologue/CallEpilogue hooks.
Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
                                 Node* context) {
  CallPrologue();
  Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
  CallEpilogue();
  return return_value;
}

Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
                                 Node* arg1) {
  CallPrologue();
  Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
  CallEpilogue();
  return return_value;
}

Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
                                 Node* arg1, Node* arg2) {
  CallPrologue();
  Node* return_value =
      raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
  CallEpilogue();
  return return_value;
}

Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
                                 Node* arg1, Node* arg2, Node* arg3) {
  CallPrologue();
  Node* return_value =
      raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
  CallEpilogue();
  return return_value;
}

Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
                                 Node* arg1, Node* arg2, Node* arg3,
                                 Node* arg4) {
  CallPrologue();
  Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
                                                    arg3, arg4, context);
  CallEpilogue();
  return return_value;
}
// TailCallRuntime overloads for zero to four arguments. No
// prologue/epilogue hooks: control does not return to this code object.
Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                     Node* context) {
  return raw_assembler_->TailCallRuntime0(function_id, context);
}

Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                     Node* context, Node* arg1) {
  return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
}

Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                     Node* context, Node* arg1, Node* arg2) {
  return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
}

Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                     Node* context, Node* arg1, Node* arg2,
                                     Node* arg3) {
  return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
                                          context);
}

Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                     Node* context, Node* arg1, Node* arg2,
                                     Node* arg3, Node* arg4) {
  return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
                                          context);
}
// CallStub overloads taking a Callable: embed the callable's code object as a
// heap constant and forward to the descriptor-based overloads.
Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
                              Node* arg1, size_t result_size) {
  return CallStub(callable.descriptor(), HeapConstant(callable.code()), context,
                  arg1, result_size);
}

Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
                              Node* arg1, Node* arg2, size_t result_size) {
  return CallStub(callable.descriptor(), HeapConstant(callable.code()), context,
                  arg1, arg2, result_size);
}

Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
                              Node* arg1, Node* arg2, Node* arg3,
                              size_t result_size) {
  return CallStub(callable.descriptor(), HeapConstant(callable.code()), context,
                  arg1, arg2, arg3, result_size);
}
// Descriptor-based CallStub overloads for one to five arguments. Each builds
// a call descriptor via Linkage::GetStubCallDescriptor and an argument array
// whose final entry is always the context, then dispatches through CallN.
Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                              Node* target, Node* context, Node* arg1,
                              size_t result_size) {
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kNoFlags, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  Node** args = zone()->NewArray<Node*>(2);
  args[0] = arg1;
  args[1] = context;

  return CallN(call_descriptor, target, args);
}

Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                              Node* target, Node* context, Node* arg1,
                              Node* arg2, size_t result_size) {
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kNoFlags, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  Node** args = zone()->NewArray<Node*>(3);
  args[0] = arg1;
  args[1] = arg2;
  args[2] = context;

  return CallN(call_descriptor, target, args);
}

Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                              Node* target, Node* context, Node* arg1,
                              Node* arg2, Node* arg3, size_t result_size) {
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kNoFlags, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  Node** args = zone()->NewArray<Node*>(4);
  args[0] = arg1;
  args[1] = arg2;
  args[2] = arg3;
  args[3] = context;

  return CallN(call_descriptor, target, args);
}

Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                              Node* target, Node* context, Node* arg1,
                              Node* arg2, Node* arg3, Node* arg4,
                              size_t result_size) {
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kNoFlags, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  Node** args = zone()->NewArray<Node*>(5);
  args[0] = arg1;
  args[1] = arg2;
  args[2] = arg3;
  args[3] = arg4;
  args[4] = context;

  return CallN(call_descriptor, target, args);
}

Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                              Node* target, Node* context, Node* arg1,
                              Node* arg2, Node* arg3, Node* arg4, Node* arg5,
                              size_t result_size) {
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kNoFlags, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  Node** args = zone()->NewArray<Node*>(6);
  args[0] = arg1;
  args[1] = arg2;
  args[2] = arg3;
  args[3] = arg4;
  args[4] = arg5;
  args[5] = context;

  return CallN(call_descriptor, target, args);
}
// Tail-calls a stub given as a Callable: embeds the code object and forwards
// to the descriptor-based overload.
Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
                                  Node* arg1, Node* arg2, size_t result_size) {
  Node* target = HeapConstant(callable.code());
  return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
                      result_size);
}

// Descriptor-based tail call; uses kSupportsTailCalls and bypasses CallN (no
// prologue/epilogue hooks, since control does not return here).
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
                                  Node* target, Node* context, Node* arg1,
                                  Node* arg2, size_t result_size) {
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  Node** args = zone()->NewArray<Node*>(3);
  args[0] = arg1;
  args[1] = arg2;
  args[2] = context;

  return raw_assembler_->TailCallN(call_descriptor, target, args);
}
// Tail-calls through the bytecode dispatch linkage (used by the interpreter's
// bytecode handlers); |args| layout is dictated by |interface_descriptor|.
Node* CodeAssembler::TailCallBytecodeDispatch(
    const CallInterfaceDescriptor& interface_descriptor,
    Node* code_target_address, Node** args) {
  CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
      isolate(), zone(), interface_descriptor,
      interface_descriptor.GetStackParameterCount());
  return raw_assembler_->TailCallN(descriptor, code_target_address, args);
}
// Unconditional jump: merges the current variable values into |label| before
// transferring control.
void CodeAssembler::Goto(CodeAssembler::Label* label) {
  label->MergeVariables();
  raw_assembler_->Goto(label->label_);
}

// Jumps to |true_label| when |condition| holds; otherwise falls through.
void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
  Label fallthrough(this);
  Branch(condition, true_label, &fallthrough);
  Bind(&fallthrough);
}

// Jumps to |false_label| when |condition| does not hold; otherwise falls
// through.
void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
  Label fallthrough(this);
  Branch(condition, &fallthrough, false_label);
  Bind(&fallthrough);
}

// Conditional jump: merges the current variable values into both targets.
void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
                           CodeAssembler::Label* false_label) {
  true_label->MergeVariables();
  false_label->MergeVariables();
  return raw_assembler_->Branch(condition, true_label->label_,
                                false_label->label_);
}
// Emits a multi-way dispatch on |index|. Merges the current variable values
// into every case label and into the default label so that bound variables
// flow correctly into whichever target is taken.
void CodeAssembler::Switch(Node* index, Label* default_label,
                           int32_t* case_values, Label** case_labels,
                           size_t case_count) {
  // Zone-allocate the raw label array (placement-new; the zone owns it).
  RawMachineLabel** labels =
      new (zone()->New(sizeof(RawMachineLabel*) * case_count))
          RawMachineLabel*[case_count];
  for (size_t i = 0; i < case_count; ++i) {
    labels[i] = case_labels[i]->label_;
    case_labels[i]->MergeVariables();
  }
  // Merge into the default label exactly once: control transfers to it at
  // most once per dispatch. (Previously this call sat inside the loop above,
  // bumping the default label's merge count once per case and breaking the
  // per-label merge-count invariant for switches with more than one case.)
  default_label->MergeVariables();
  return raw_assembler_->Switch(index, default_label->label_, case_values,
                                labels, case_count);
}
// RawMachineAssembler delegate helpers: trivial accessors forwarded to the
// underlying assembler (factory goes through the isolate).
Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }

Factory* CodeAssembler::factory() const { return isolate()->factory(); }

Graph* CodeAssembler::graph() const { return raw_assembler_->graph(); }

Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
// The core implementation of Variable is stored through an indirection so
// that it can outlive the often block-scoped Variable declarations. This is
// needed to ensure that variable binding and merging through phis can
// properly be verified.
class CodeAssembler::Variable::Impl : public ZoneObject {
 public:
  explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}

  // The currently bound node; nullptr while the variable is unbound.
  Node* value_;
  // The machine representation of the variable's value.
  MachineRepresentation rep_;
};
CodeAssembler::Variable::Variable(CodeAssembler* assembler,
                                  MachineRepresentation rep)
    : impl_(new (assembler->zone()) Impl(rep)) {
  // Register the impl with the assembler so labels can merge this variable
  // (see Label::MergeVariables).
  assembler->variables_.push_back(impl_);
}

// Binds the variable to |value|; a later Bind overwrites the previous value.
void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }

// Returns the currently bound node; the variable must be bound.
Node* CodeAssembler::Variable::value() const {
  DCHECK_NOT_NULL(impl_->value_);
  return impl_->value_;
}

MachineRepresentation CodeAssembler::Variable::rep() const {
  return impl_->rep_;
}

// True once Bind() has been called with a non-null node.
bool CodeAssembler::Variable::IsBound() const {
  return impl_->value_ != nullptr;
}
// Creates a label backed by a zone-allocated RawMachineLabel. Variables
// listed in |merged_variables| are pre-registered in variable_phis_ with a
// placeholder (nullptr) phi, forcing their values to be merged through a phi
// at this label even if all incoming values happen to agree.
CodeAssembler::Label::Label(CodeAssembler* assembler, int merged_value_count,
                            CodeAssembler::Variable** merged_variables,
                            CodeAssembler::Label::Type type)
    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
  // Placement-new into zone memory; the zone owns the label's storage.
  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
  label_ = new (buffer)
      RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
                                        : RawMachineLabel::kNonDeferred);
  for (int i = 0; i < merged_value_count; ++i) {
    variable_phis_[merged_variables[i]->impl_] = nullptr;
  }
}
// Called once per control-flow edge into this label (from Goto, Branch, or
// Switch). Records, for every variable bound on the incoming path, the value
// flowing in along this edge so that a phi can later be created at Bind() --
// or, if the label is already bound, appends the value to the existing phi.
void CodeAssembler::Label::MergeVariables() {
  ++merge_count_;
  for (auto var : assembler_->variables_) {
    size_t count = 0;
    Node* node = var->value_;
    if (node != nullptr) {
      // Append this edge's value to the variable's per-label merge list.
      auto i = variable_merges_.find(var);
      if (i != variable_merges_.end()) {
        i->second.push_back(node);
        count = i->second.size();
      } else {
        count = 1;
        variable_merges_[var] = std::vector<Node*>(1, node);
      }
    }
    // If the following asserts, then you've jumped to a label without a bound
    // variable along that path that expects to merge its value into a phi.
    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
           count == merge_count_);
    USE(count);
    // If the label is already bound, we already know the set of variables to
    // merge and phi nodes have already been created.
    if (bound_) {
      auto phi = variable_phis_.find(var);
      if (phi != variable_phis_.end()) {
        DCHECK_NOT_NULL(phi->second);
        assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
      } else {
        auto i = variable_merges_.find(var);
        if (i != variable_merges_.end()) {
          // If the following assert fires, then you've declared a variable that
          // has the same bound value along all paths up until the point you
          // bound this label, but then later merged a path with a new value for
          // the variable after the label bind (it's not possible to add phis to
          // the bound label after the fact, just make sure to list the variable
          // in the label's constructor's list of merged variables).
          DCHECK(find_if(i->second.begin(), i->second.end(),
                         [node](Node* e) -> bool { return node != e; }) ==
                 i->second.end());
        }
      }
    }
  }
}
// Binds the label at the current assembler position. Determines which
// variables need a phi (those with differing incoming values, plus any
// pre-registered in the constructor), creates the phis from the recorded
// merge lists, and rebinds each variable to its phi, to the single common
// incoming value, or to nullptr (unbound) when no consistent value exists.
void CodeAssembler::Label::Bind() {
  DCHECK(!bound_);
  assembler_->raw_assembler_->Bind(label_);
  // Make sure that all variables that have changed along any path up to this
  // point are marked as merge variables.
  for (auto var : assembler_->variables_) {
    Node* shared_value = nullptr;
    auto i = variable_merges_.find(var);
    if (i != variable_merges_.end()) {
      for (auto value : i->second) {
        DCHECK(value != nullptr);
        if (value != shared_value) {
          if (shared_value == nullptr) {
            shared_value = value;
          } else {
            // Two distinct incoming values: this variable needs a phi.
            variable_phis_[var] = nullptr;
          }
        }
      }
    }
  }
  // Create a phi for every variable marked as needing one.
  for (auto var : variable_phis_) {
    CodeAssembler::Variable::Impl* var_impl = var.first;
    auto i = variable_merges_.find(var_impl);
    // If the following assert fires, then a variable that has been marked as
    // being merged at the label--either by explicitly marking it so in the
    // label constructor or by having seen different bound values at branches
    // into the label--doesn't have a bound value along all of the paths that
    // have been merged into the label up to this point.
    DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
    Node* phi = assembler_->raw_assembler_->Phi(
        var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
    variable_phis_[var_impl] = phi;
  }
  // Bind all variables to a merge phi, the common value along all paths or
  // null.
  for (auto var : assembler_->variables_) {
    auto i = variable_phis_.find(var);
    if (i != variable_phis_.end()) {
      var->value_ = i->second;
    } else {
      auto j = variable_merges_.find(var);
      if (j != variable_merges_.end() && j->second.size() == merge_count_) {
        var->value_ = j->second.back();
      } else {
        var->value_ = nullptr;
      }
    }
  }
  bound_ = true;
}
} // namespace compiler
} // namespace internal
} // namespace v8

View File

@ -0,0 +1,406 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_CODE_ASSEMBLER_H_
#define V8_COMPILER_CODE_ASSEMBLER_H_
#include <map>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
class Callable;
class CallInterfaceDescriptor;
class Isolate;
class Factory;
class Zone;
namespace compiler {
class CallDescriptor;
class Graph;
class Node;
class Operator;
class RawMachineAssembler;
class RawMachineLabel;
class Schedule;
#define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float32Equal) \
V(Float32LessThan) \
V(Float32LessThanOrEqual) \
V(Float32GreaterThan) \
V(Float32GreaterThanOrEqual) \
V(Float64Equal) \
V(Float64LessThan) \
V(Float64LessThanOrEqual) \
V(Float64GreaterThan) \
V(Float64GreaterThanOrEqual) \
V(Int32GreaterThan) \
V(Int32GreaterThanOrEqual) \
V(Int32LessThan) \
V(Int32LessThanOrEqual) \
V(IntPtrLessThan) \
V(IntPtrLessThanOrEqual) \
V(Uint32LessThan) \
V(UintPtrGreaterThanOrEqual) \
V(WordEqual) \
V(WordNotEqual) \
V(Word32Equal) \
V(Word32NotEqual) \
V(Word64Equal) \
V(Word64NotEqual)
#define CODE_ASSEMBLER_BINARY_OP_LIST(V) \
CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(IntPtrAdd) \
V(IntPtrAddWithOverflow) \
V(IntPtrSub) \
V(IntPtrSubWithOverflow) \
V(IntPtrMul) \
V(Int32Add) \
V(Int32AddWithOverflow) \
V(Int32Sub) \
V(Int32Mul) \
V(Int32Div) \
V(WordOr) \
V(WordAnd) \
V(WordXor) \
V(WordShl) \
V(WordShr) \
V(WordSar) \
V(WordRor) \
V(Word32Or) \
V(Word32And) \
V(Word32Xor) \
V(Word32Shl) \
V(Word32Shr) \
V(Word32Sar) \
V(Word32Ror) \
V(Word64Or) \
V(Word64And) \
V(Word64Xor) \
V(Word64Shr) \
V(Word64Sar) \
V(Word64Ror)
#define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
V(Float64Neg) \
V(Float64Sqrt) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(TruncateInt64ToInt32) \
V(ChangeFloat64ToUint32) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(Float64RoundDown) \
V(Float64RoundUp) \
V(Float64RoundTruncate) \
V(Word32Clz)
// A "public" interface used by components outside of compiler directory to
// create code objects with TurboFan's backend. This class is mostly a thin shim
// around the RawMachineAssembler, and its primary job is to ensure that the
// innards of the RawMachineAssembler and other compiler implementation details
// don't leak outside of the compiler directory.
//
// V8 components that need to generate low-level code using this interface
// should include this header--and this header only--from the compiler directory
// (this is actually enforced). Since all interesting data structures are
// forward declared, it's not possible for clients to peek inside the compiler
// internals.
//
// In addition to providing isolation between TurboFan and code generation
// clients, CodeAssembler also provides an abstraction for creating variables
// and enhanced Label functionality to merge variable values along paths where
// they have differing values, including loops.
class CodeAssembler {
public:
// Create with CallStub linkage.
// |result_size| specifies the number of results returned by the stub.
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, Code::Flags flags,
const char* name, size_t result_size = 1);
// Create with JSCall linkage.
CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
Code::Flags flags, const char* name);
virtual ~CodeAssembler();
Handle<Code> GenerateCode();
bool Is64() const;
bool IsFloat64RoundUpSupported() const;
bool IsFloat64RoundDownSupported() const;
bool IsFloat64RoundTruncateSupported() const;
class Label;
class Variable {
public:
explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
void Bind(Node* value);
Node* value() const;
MachineRepresentation rep() const;
bool IsBound() const;
private:
friend class CodeAssembler;
class Impl;
Impl* impl_;
};
enum AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
kPretenured = 1 << 1
};
typedef base::Flags<AllocationFlag> AllocationFlags;
// ===========================================================================
// Base Assembler
// ===========================================================================
// Constants.
Node* Int32Constant(int value);
Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
Node* SmiConstant(Smi* value);
Node* HeapConstant(Handle<HeapObject> object);
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
Node* Float64Constant(double value);
Node* BooleanMapConstant();
Node* EmptyStringConstant();
Node* HeapNumberMapConstant();
Node* NaNConstant();
Node* NoContextConstant();
Node* NullConstant();
Node* UndefinedConstant();
Node* Parameter(int value);
void Return(Node* value);
void Bind(Label* label);
void Goto(Label* label);
void GotoIf(Node* condition, Label* true_label);
void GotoUnless(Node* condition, Label* false_label);
void Branch(Node* condition, Label* true_label, Label* false_label);
void Switch(Node* index, Label* default_label, int32_t* case_values,
Label** case_labels, size_t case_count);
// Access to the frame pointer
Node* LoadFramePointer();
Node* LoadParentFramePointer();
// Access to the stack pointer
Node* LoadStackPointer();
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
Node* Load(MachineType rep, Node* base, Node* index);
// Store value to raw memory location.
Node* Store(MachineRepresentation rep, Node* base, Node* value);
Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
Node* value);
// Basic arithmetic operations.
#define DECLARE_CODE_ASSEMBLER_BINARY_OP(name) Node* name(Node* a, Node* b);
CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
Node* WordShl(Node* value, int shift);
// Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name) Node* name(Node* a);
CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
Node* TruncateFloat64ToInt32RoundToZero(Node* a);
Node* TruncateFloat64ToInt32JavaScript(Node* a);
// Projections
Node* Projection(int index, Node* value);
// Calls
Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2, Node* arg3);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2, Node* arg3, Node* arg4);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
size_t result_size = 1);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, size_t result_size = 1);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, Node* arg3, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, size_t result_size = 1);
Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, size_t result_size = 1);
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2,
size_t result_size = 1);
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
Node* code_target_address, Node** args);
// ===========================================================================
// Macros
// ===========================================================================
// Tag a Word as a Smi value.
Node* SmiTag(Node* value);
// Untag a Smi value as a Word.
Node* SmiUntag(Node* value);
// Load a value from the root array.
Node* LoadRoot(Heap::RootListIndex root_index);
// Allocate an object of the given size.
Node* Allocate(int size, AllocationFlags flags = kNone);
Node* InnerAllocate(Node* previous, int offset);
// Branching helpers.
void BranchIf(Node* condition, Label* if_true, Label* if_false);
#define BRANCH_HELPER(name) \
void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
BranchIf(name(a, b), if_true, if_false); \
}
CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
#undef BRANCH_HELPER
// Helpers which delegate to RawMachineAssembler.
Factory* factory() const;
Isolate* isolate() const;
Zone* zone() const;
protected:
// Protected helpers which delegate to RawMachineAssembler.
Graph* graph() const;
Node* SmiShiftBitsConstant();
// Enables subclasses to perform operations before and after a call.
virtual void CallPrologue();
virtual void CallEpilogue();
private:
friend class CodeAssemblerTester;
CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
Code::Flags flags, const char* name);
Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address);
Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_adddress, Node* limit_address);
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Code::Flags flags_;
const char* name_;
bool code_generated_;
ZoneVector<Variable::Impl*> variables_;
DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
};
DEFINE_OPERATORS_FOR_FLAGS(CodeAssembler::AllocationFlags);
// A jump target with optional phi-based merging of CodeAssembler::Variable
// values from each incoming control-flow edge.
class CodeAssembler::Label {
 public:
  enum Type { kDeferred, kNonDeferred };
  explicit Label(
      CodeAssembler* assembler,
      CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
      : CodeAssembler::Label(assembler, 0, nullptr, type) {}
  Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
      : CodeAssembler::Label(assembler, 1, &merged_variable, type) {}
  Label(CodeAssembler* assembler, int merged_variable_count,
        CodeAssembler::Variable** merged_variables,
        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
  ~Label() {}
 private:
  friend class CodeAssembler;
  void Bind();
  void MergeVariables();
  // True once Bind() has placed this label; edges may still be added after.
  bool bound_;
  // Number of control-flow edges merged into this label so far.
  size_t merge_count_;
  CodeAssembler* assembler_;
  RawMachineLabel* label_;
  // Map of variables that need to be merged to their phi nodes (or placeholders
  // for those phis).
  std::map<Variable::Impl*, Node*> variable_phis_;
  // Map of variables to the list of value nodes that have been added from each
  // merge path in their order of merging.
  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_CODE_ASSEMBLER_H_

View File

@ -1,516 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
#include <map>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
class Callable;
class CallInterfaceDescriptor;
class Isolate;
class Factory;
class Zone;
namespace compiler {
class CallDescriptor;
class Graph;
class Node;
class Operator;
class RawMachineAssembler;
class RawMachineLabel;
class Schedule;
#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float32Equal) \
V(Float32LessThan) \
V(Float32LessThanOrEqual) \
V(Float32GreaterThan) \
V(Float32GreaterThanOrEqual) \
V(Float64Equal) \
V(Float64LessThan) \
V(Float64LessThanOrEqual) \
V(Float64GreaterThan) \
V(Float64GreaterThanOrEqual) \
V(Int32GreaterThan) \
V(Int32GreaterThanOrEqual) \
V(Int32LessThan) \
V(Int32LessThanOrEqual) \
V(IntPtrLessThan) \
V(IntPtrLessThanOrEqual) \
V(Uint32LessThan) \
V(UintPtrGreaterThanOrEqual) \
V(WordEqual) \
V(WordNotEqual) \
V(Word32Equal) \
V(Word32NotEqual) \
V(Word64Equal) \
V(Word64NotEqual)
#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(IntPtrAdd) \
V(IntPtrAddWithOverflow) \
V(IntPtrSub) \
V(IntPtrSubWithOverflow) \
V(IntPtrMul) \
V(Int32Add) \
V(Int32AddWithOverflow) \
V(Int32Sub) \
V(Int32Mul) \
V(Int32Div) \
V(WordOr) \
V(WordAnd) \
V(WordXor) \
V(WordShl) \
V(WordShr) \
V(WordSar) \
V(WordRor) \
V(Word32Or) \
V(Word32And) \
V(Word32Xor) \
V(Word32Shl) \
V(Word32Shr) \
V(Word32Sar) \
V(Word32Ror) \
V(Word64Or) \
V(Word64And) \
V(Word64Xor) \
V(Word64Shr) \
V(Word64Sar) \
V(Word64Ror)
#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
V(Float64Neg) \
V(Float64Sqrt) \
V(ChangeFloat64ToUint32) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(Word32Clz)
class CodeStubAssembler {
public:
// Create with CallStub linkage.
// |result_size| specifies the number of results returned by the stub.
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeStubAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor,
Code::Flags flags, const char* name,
size_t result_size = 1);
// Create with JSCall linkage.
CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
Code::Flags flags, const char* name);
virtual ~CodeStubAssembler();
Handle<Code> GenerateCode();
class Label;
class Variable {
public:
explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep);
void Bind(Node* value);
Node* value() const;
MachineRepresentation rep() const;
bool IsBound() const;
private:
friend class CodeStubAssembler;
class Impl;
Impl* impl_;
};
enum AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
kPretenured = 1 << 1
};
typedef base::Flags<AllocationFlag> AllocationFlags;
// ===========================================================================
// Base Assembler
// ===========================================================================
// Constants.
Node* Int32Constant(int value);
Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
Node* SmiConstant(Smi* value);
Node* HeapConstant(Handle<HeapObject> object);
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
Node* Float64Constant(double value);
Node* BooleanMapConstant();
Node* EmptyStringConstant();
Node* HeapNumberMapConstant();
Node* NaNConstant();
Node* NoContextConstant();
Node* NullConstant();
Node* UndefinedConstant();
Node* Parameter(int value);
void Return(Node* value);
void Bind(Label* label);
void Goto(Label* label);
void GotoIf(Node* condition, Label* true_label);
void GotoUnless(Node* condition, Label* false_label);
void Branch(Node* condition, Label* true_label, Label* false_label);
void Switch(Node* index, Label* default_label, int32_t* case_values,
Label** case_labels, size_t case_count);
// Access to the frame pointer
Node* LoadFramePointer();
Node* LoadParentFramePointer();
// Access to the stack pointer
Node* LoadStackPointer();
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
Node* Load(MachineType rep, Node* base, Node* index);
// Store value to raw memory location.
Node* Store(MachineRepresentation rep, Node* base, Node* value);
Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
Node* value);
// Basic arithmetic operations.
#define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b);
CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP)
#undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP
Node* WordShl(Node* value, int shift);
// Unary
#define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a);
CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP)
#undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP
// Projections
Node* Projection(int index, Node* value);
// Calls
Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2, Node* arg3);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2, Node* arg3, Node* arg4);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
size_t result_size = 1);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, size_t result_size = 1);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, Node* arg3, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, size_t result_size = 1);
Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, size_t result_size = 1);
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2,
size_t result_size = 1);
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
Node* code_target_address, Node** args);
// ===========================================================================
// Macros
// ===========================================================================
// Float64 operations.
Node* Float64Ceil(Node* x);
Node* Float64Floor(Node* x);
Node* Float64Round(Node* x);
Node* Float64Trunc(Node* x);
// Tag a Word as a Smi value.
Node* SmiTag(Node* value);
// Untag a Smi value as a Word.
Node* SmiUntag(Node* value);
Node* SmiToWord(Node* value) { return SmiUntag(value); }
// Smi conversions.
Node* SmiToFloat64(Node* value);
Node* SmiFromWord32(Node* value);
Node* SmiToWord32(Node* value);
// Smi operations.
Node* SmiAdd(Node* a, Node* b);
Node* SmiAddWithOverflow(Node* a, Node* b);
Node* SmiSub(Node* a, Node* b);
Node* SmiSubWithOverflow(Node* a, Node* b);
Node* SmiEqual(Node* a, Node* b);
Node* SmiAboveOrEqual(Node* a, Node* b);
Node* SmiLessThan(Node* a, Node* b);
Node* SmiLessThanOrEqual(Node* a, Node* b);
Node* SmiMin(Node* a, Node* b);
// Load a value from the root array.
Node* LoadRoot(Heap::RootListIndex root_index);
// Check a value for smi-ness
Node* WordIsSmi(Node* a);
// Check that the value is a positive smi.
Node* WordIsPositiveSmi(Node* a);
// Load an object pointer from a buffer that isn't in the heap.
Node* LoadBufferObject(Node* buffer, int offset,
MachineType rep = MachineType::AnyTagged());
// Load a field from an object on the heap.
Node* LoadObjectField(Node* object, int offset,
MachineType rep = MachineType::AnyTagged());
// Store a field to an object on the heap.
Node* StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
// Load the floating point value of a HeapNumber.
Node* LoadHeapNumberValue(Node* object);
// Store the floating point value of a HeapNumber.
Node* StoreHeapNumberValue(Node* object, Node* value);
// Truncate the floating point value of a HeapNumber to an Int32.
Node* TruncateHeapNumberValueToWord32(Node* object);
// Load the bit field of a Map.
Node* LoadMapBitField(Node* map);
// Load bit field 2 of a map.
Node* LoadMapBitField2(Node* map);
// Load bit field 3 of a map.
Node* LoadMapBitField3(Node* map);
// Load the instance type of a map.
Node* LoadMapInstanceType(Node* map);
// Load the instance descriptors of a map.
Node* LoadMapDescriptors(Node* map);
// Load the hash field of a name.
Node* LoadNameHash(Node* name);
// Load the instance size of a Map.
Node* LoadMapInstanceSize(Node* map);
// Load an array element from a FixedArray.
Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
int additional_offset = 0);
Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
int additional_offset = 0);
Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
// Allocate an object of the given size.
Node* Allocate(int size, AllocationFlags flags = kNone);
Node* InnerAllocate(Node* previous, int offset);
// Allocate a HeapNumber without initializing its value.
Node* AllocateHeapNumber();
// Allocate a HeapNumber with a specific value.
Node* AllocateHeapNumberWithValue(Node* value);
// Allocate a SeqOneByteString with the given length.
Node* AllocateSeqOneByteString(int length);
// Allocate a SeqTwoByteString with the given length.
Node* AllocateSeqTwoByteString(int length);
// Store an array element to a FixedArray.
Node* StoreFixedArrayElementInt32Index(Node* object, Node* index,
Node* value);
Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
Node* value);
// Load the Map of an HeapObject.
Node* LoadMap(Node* object);
// Store the Map of an HeapObject.
Node* StoreMapNoWriteBarrier(Node* object, Node* map);
// Load the instance type of an HeapObject.
Node* LoadInstanceType(Node* object);
// Load the elements backing store of a JSObject.
Node* LoadElements(Node* object);
// Load the length of a fixed array base instance.
Node* LoadFixedArrayBaseLength(Node* array);
// Returns a node that is true if the given bit is set in |word32|.
template <typename T>
Node* BitFieldDecode(Node* word32) {
return BitFieldDecode(word32, T::kShift, T::kMask);
}
Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
// Conversions.
Node* ChangeFloat64ToTagged(Node* value);
Node* ChangeInt32ToTagged(Node* value);
Node* ChangeUint32ToTagged(Node* value);
Node* TruncateTaggedToFloat64(Node* context, Node* value);
Node* TruncateTaggedToWord32(Node* context, Node* value);
// Truncate to int32 using JavaScript truncation mode.
Node* TruncateFloat64ToInt32(Node* value);
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
// or returns the {value} converted to a String otherwise.
Node* ToThisString(Node* context, Node* value, char const* method_name);
// String helpers.
// Load a character from a String (might flatten a ConsString).
Node* StringCharCodeAt(Node* string, Node* smi_index);
// Return the single character string with only {code}.
Node* StringFromCharCode(Node* code);
// Branching helpers.
// TODO(danno): Can we be more cleverish wrt. edge-split?
void BranchIf(Node* condition, Label* if_true, Label* if_false);
#define BRANCH_HELPER(name) \
void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
BranchIf(name(a, b), if_true, if_false); \
}
CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
#undef BRANCH_HELPER
void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
BranchIf(SmiLessThan(a, b), if_true, if_false);
}
void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
Label* if_false) {
BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
}
void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
BranchIfFloat64Equal(value, value, if_false, if_true);
}
// Helpers which delegate to RawMachineAssembler.
Factory* factory() const;
Isolate* isolate() const;
Zone* zone() const;
protected:
// Protected helpers which delegate to RawMachineAssembler.
Graph* graph() const;
// Enables subclasses to perform operations before and after a call.
virtual void CallPrologue();
virtual void CallEpilogue();
private:
friend class CodeStubAssemblerTester;
CodeStubAssembler(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, Code::Flags flags,
const char* name);
Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* SmiShiftBitsConstant();
Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address);
Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_adddress, Node* limit_address);
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Code::Flags flags_;
const char* name_;
bool code_generated_;
ZoneVector<Variable::Impl*> variables_;
DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
};
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
// A bind-once jump target for CodeStubAssembler control flow. A Label may
// carry a set of Variables; each incoming control-flow edge records the
// variables' current values, and Bind() merges them into phi nodes.
class CodeStubAssembler::Label {
public:
// kDeferred marks out-of-line (slow-path) code.
enum Type { kDeferred, kNonDeferred };
// Label with no merged variables; delegates to the general constructor.
explicit Label(CodeStubAssembler* assembler,
CodeStubAssembler::Label::Type type =
CodeStubAssembler::Label::kNonDeferred)
: CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
// Label merging exactly one variable.
Label(CodeStubAssembler* assembler,
CodeStubAssembler::Variable* merged_variable,
CodeStubAssembler::Label::Type type =
CodeStubAssembler::Label::kNonDeferred)
: CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
// General form: merges |merged_variable_count| variables at this label.
Label(CodeStubAssembler* assembler, int merged_variable_count,
CodeStubAssembler::Variable** merged_variables,
CodeStubAssembler::Label::Type type =
CodeStubAssembler::Label::kNonDeferred);
~Label() {}
private:
// CodeStubAssembler drives binding/merging through the private members below.
friend class CodeStubAssembler;
void Bind();
void MergeVariables();
bool bound_;
// Number of control-flow edges merged into this label so far.
size_t merge_count_;
CodeStubAssembler* assembler_;
RawMachineLabel* label_;
// Map of variables that need to be merged to their phi nodes (or placeholders
// for those phis).
std::map<Variable::Impl*, Node*> variable_phis_;
// Map of variables to the list of value nodes that have been added from each
// merge path in their order of merging.
std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_

View File

@ -5,12 +5,12 @@
#include "src/fast-accessor-assembler.h"
#include "src/base/logging.h"
#include "src/code-stub-assembler.h"
#include "src/code-stubs.h" // For CallApiCallbackStub.
#include "src/compiler/code-stub-assembler.h"
#include "src/handles-inl.h"
#include "src/objects.h" // For FAA::LoadInternalField impl.
using v8::internal::compiler::CodeStubAssembler;
using v8::internal::CodeStubAssembler;
using v8::internal::compiler::Node;
namespace v8 {

View File

@ -14,7 +14,7 @@
#include "src/handles.h"
// For CodeStubAssembler::Label. (We cannot forward-declare inner classes.)
#include "src/compiler/code-stub-assembler.h"
#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@ -73,9 +73,9 @@ class FastAccessorAssembler {
private:
ValueId FromRaw(compiler::Node* node);
LabelId FromRaw(compiler::CodeStubAssembler::Label* label);
LabelId FromRaw(CodeStubAssembler::Label* label);
compiler::Node* FromId(ValueId value) const;
compiler::CodeStubAssembler::Label* FromId(LabelId value) const;
CodeStubAssembler::Label* FromId(LabelId value) const;
void Clear();
Zone* zone() { return &zone_; }
@ -83,13 +83,13 @@ class FastAccessorAssembler {
Zone zone_;
Isolate* isolate_;
base::SmartPointer<compiler::CodeStubAssembler> assembler_;
base::SmartPointer<CodeStubAssembler> assembler_;
// To prevent exposing the RMA internals to the outside world, we'll map
// Node and Label pointers to integers wrapped in ValueId and LabelId instances.
// These vectors maintain this mapping.
std::vector<compiler::Node*> nodes_;
std::vector<compiler::CodeStubAssembler::Label*> labels_;
std::vector<CodeStubAssembler::Label*> labels_;
// Remember the current state for easy error checking. (We prefer to be
// strict as this class will be exposed at the API.)

View File

@ -25,10 +25,9 @@ using compiler::Node;
InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
Bytecode bytecode,
OperandScale operand_scale)
: compiler::CodeStubAssembler(isolate, zone,
InterpreterDispatchDescriptor(isolate),
Code::ComputeFlags(Code::BYTECODE_HANDLER),
Bytecodes::ToString(bytecode), 0),
: CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
Code::ComputeFlags(Code::BYTECODE_HANDLER),
Bytecodes::ToString(bytecode), 0),
bytecode_(bytecode),
operand_scale_(operand_scale),
accumulator_(this, MachineRepresentation::kTagged),

View File

@ -8,7 +8,7 @@
#include "src/allocation.h"
#include "src/base/smart-pointers.h"
#include "src/builtins.h"
#include "src/compiler/code-stub-assembler.h"
#include "src/code-stub-assembler.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
class InterpreterAssembler : public compiler::CodeStubAssembler {
class InterpreterAssembler : public CodeStubAssembler {
public:
InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
OperandScale operand_scale);

View File

@ -455,6 +455,8 @@
'../../src/checks.h',
'../../src/code-factory.cc',
'../../src/code-factory.h',
'../../src/code-stub-assembler.cc',
'../../src/code-stub-assembler.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
'../../src/code-stubs-hydrogen.cc',
@ -493,8 +495,8 @@
'../../src/compiler/code-generator-impl.h',
'../../src/compiler/code-generator.cc',
'../../src/compiler/code-generator.h',
'../../src/compiler/code-stub-assembler.cc',
'../../src/compiler/code-stub-assembler.h',
'../../src/compiler/code-assembler.cc',
'../../src/compiler/code-assembler.h',
'../../src/compiler/common-node-cache.cc',
'../../src/compiler/common-node-cache.h',
'../../src/compiler/common-operator-reducer.cc',