[builtins] Add support for JS builtins written in TurboFan.
This CL adds support for builtins with JavaScript linkage written using the TurboFan CodeStubAssembler, but with a JSCall descriptor (which was already supported thanks to a previous patch by Ben Smith). As a first example, we convert the Math.sqrt builtin and thereby get rid of the %_MathSqrt intrinsic, which causes trouble for the representation selection pass in the JavaScript pipeline.

R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/1824993002

Cr-Commit-Position: refs/heads/master@{#34989}
commit 43fe7d6854
parent fddd4f06f9
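The new Generate_MathSqrt builtin below loops once through a ToNumber conversion before taking the float64 square root. For orientation, this is the observable JavaScript behavior it has to preserve; the snippet illustrates plain ES6 Math.sqrt semantics and is not part of the patch:

    // Math.sqrt applies ToNumber to its argument before taking the root
    // (ES6 20.2.2.32), which is what the conversion loop in
    // Generate_MathSqrt below implements.
    Math.sqrt(4);                             // 2    (Smi fast path)
    Math.sqrt(6.25);                          // 2.5  (HeapNumber path)
    Math.sqrt("9");                           // 3    (NonNumberToNumber path)
    Math.sqrt({ valueOf() { return 16; } });  // 4    (NonNumberToNumber path)
    Math.sqrt(-1);                            // NaN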
@@ -1523,6 +1523,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
    SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
    SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
    SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
    Handle<JSFunction> math_sqrt =
        SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
    native_context()->set_math_sqrt(*math_sqrt);
  }

  {  // -- A r r a y B u f f e r
src/builtins.cc (204 changed lines)
@@ -9,6 +9,8 @@
#include "src/api-natives.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
#include "src/code-factory.h"
#include "src/compiler/code-stub-assembler.h"
#include "src/dateparser-inl.h"
#include "src/elements.h"
#include "src/frames-inl.h"
@@ -2029,6 +2031,81 @@ BUILTIN(MathImul) {
  return *isolate->factory()->NewNumberFromInt(product);
}

// ES6 section 20.2.2.32 Math.sqrt ( x )
void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
  typedef compiler::CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef compiler::CodeStubAssembler::Variable Variable;

  Node* context = assembler->Parameter(4);

  // Shared entry for the floating point sqrt.
  Label do_fsqrt(assembler);
  Variable var_fsqrt_x(assembler, MachineRepresentation::kFloat64);

  // We might need to loop once due to the ToNumber conversion.
  Variable var_x(assembler, MachineRepresentation::kTagged);
  Label loop(assembler, &var_x);
  var_x.Bind(assembler->Parameter(1));
  assembler->Goto(&loop);
  assembler->Bind(&loop);
  {
    // Load the current {x} value.
    Node* x = var_x.value();

    // Check if {x} is a Smi or a HeapObject.
    Label if_xissmi(assembler), if_xisnotsmi(assembler);
    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);

    assembler->Bind(&if_xissmi);
    {
      // Perform the floating point sqrt.
      var_fsqrt_x.Bind(assembler->SmiToFloat64(x));
      assembler->Goto(&do_fsqrt);
    }

    assembler->Bind(&if_xisnotsmi);
    {
      // Load the map of {x}.
      Node* x_map = assembler->LoadMap(x);

      // Check if {x} is a HeapNumber.
      Label if_xisnumber(assembler),
          if_xisnotnumber(assembler, Label::kDeferred);
      assembler->Branch(
          assembler->WordEqual(x_map, assembler->HeapNumberMapConstant()),
          &if_xisnumber, &if_xisnotnumber);

      assembler->Bind(&if_xisnumber);
      {
        // Perform the floating point sqrt.
        var_fsqrt_x.Bind(assembler->LoadHeapNumberValue(x));
        assembler->Goto(&do_fsqrt);
      }

      assembler->Bind(&if_xisnotnumber);
      {
        // Convert {x} to a Number first.
        Callable callable =
            CodeFactory::NonNumberToNumber(assembler->isolate());
        var_x.Bind(assembler->CallStub(callable, context, x));
        assembler->Goto(&loop);
      }
    }
  }

  assembler->Bind(&do_fsqrt);
  {
    Node* x = var_fsqrt_x.value();
    Node* value = assembler->Float64Sqrt(x);
    Node* result = assembler->Allocate(HeapNumber::kSize,
                                       compiler::CodeStubAssembler::kNone);
    assembler->StoreMapNoWriteBarrier(result,
                                      assembler->HeapNumberMapConstant());
    assembler->StoreHeapNumberValue(result, value);
    assembler->Return(result);
  }
}

// -----------------------------------------------------------------------------
// ES6 section 26.1 The Reflect Object
@@ -4292,12 +4369,14 @@ Address const Builtins::c_functions_[cfunction_count] = {

struct BuiltinDesc {
  Handle<Code> (*builder)(Isolate*, struct BuiltinDesc const*);
  byte* generator;
  byte* c_code;
  const char* s_name;  // name is only used for generating log information.
  int name;
  Code::Flags flags;
  BuiltinExtraArguments extra_args;
  int argc;
};

#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
@@ -4315,8 +4394,60 @@ class BuiltinFunctionTable {
  friend class Builtins;
};

static BuiltinFunctionTable builtin_function_table =
    BUILTIN_FUNCTION_TABLE_INIT;
namespace {

BuiltinFunctionTable builtin_function_table = BUILTIN_FUNCTION_TABLE_INIT;

Handle<Code> MacroAssemblerBuilder(Isolate* isolate,
                                   BuiltinDesc const* builtin_desc) {
  // For now we generate builtin adaptor code into a stack-allocated
  // buffer, before copying it into individual code objects. Be careful
  // with alignment, some platforms don't like unaligned code.
#ifdef DEBUG
  // We can generate a lot of debug code on Arm64.
  const size_t buffer_size = 32 * KB;
#elif V8_TARGET_ARCH_PPC64
  // 8 KB is insufficient on PPC64 when FLAG_debug_code is on.
  const size_t buffer_size = 10 * KB;
#else
  const size_t buffer_size = 8 * KB;
#endif
  union {
    int force_alignment;
    byte buffer[buffer_size];  // NOLINT(runtime/arrays)
  } u;

  MacroAssembler masm(isolate, u.buffer, sizeof(u.buffer),
                      CodeObjectRequired::kYes);
  // Generate the code/adaptor.
  typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
  Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
  // We pass all arguments to the generator, but it may not use all of
  // them. This works because the first arguments are on top of the
  // stack.
  DCHECK(!masm.has_frame());
  g(&masm, builtin_desc->name, builtin_desc->extra_args);
  // Move the code into the object heap.
  CodeDesc desc;
  masm.GetCode(&desc);
  Code::Flags flags = builtin_desc->flags;
  return isolate->factory()->NewCode(desc, flags, masm.CodeObject());
}

Handle<Code> CodeStubAssemblerBuilder(Isolate* isolate,
                                      BuiltinDesc const* builtin_desc) {
  Zone zone;
  compiler::CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
                                        builtin_desc->flags,
                                        builtin_desc->s_name);
  // Generate the code/adaptor.
  typedef void (*Generator)(compiler::CodeStubAssembler*);
  Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
  g(&assembler);
  return assembler.GenerateCode();
}

}  // namespace

// Define array of pointers to generators and C builtin functions.
// We do this in a sort of roundabout way so that we can do the initialization
@@ -4324,47 +4455,70 @@ static BuiltinFunctionTable builtin_function_table =
// Code::Flags names a non-abstract type.
void Builtins::InitBuiltinFunctionTable() {
  BuiltinDesc* functions = builtin_function_table.functions_;
  functions[builtin_count].generator = NULL;
  functions[builtin_count].c_code = NULL;
  functions[builtin_count].s_name = NULL;
  functions[builtin_count].builder = nullptr;
  functions[builtin_count].generator = nullptr;
  functions[builtin_count].c_code = nullptr;
  functions[builtin_count].s_name = nullptr;
  functions[builtin_count].name = builtin_count;
  functions[builtin_count].flags = static_cast<Code::Flags>(0);
  functions[builtin_count].extra_args = BuiltinExtraArguments::kNone;
  functions[builtin_count].argc = 0;

#define DEF_FUNCTION_PTR_C(aname, aextra_args)                  \
  functions->builder = &MacroAssemblerBuilder;                  \
  functions->generator = FUNCTION_ADDR(Generate_Adaptor);       \
  functions->c_code = FUNCTION_ADDR(Builtin_##aname);           \
  functions->s_name = #aname;                                   \
  functions->name = c_##aname;                                  \
  functions->flags = Code::ComputeFlags(Code::BUILTIN);         \
  functions->extra_args = BuiltinExtraArguments::aextra_args;   \
  functions->argc = 0;                                          \
  ++functions;

#define DEF_FUNCTION_PTR_A(aname, kind, state, extra)              \
  functions->builder = &MacroAssemblerBuilder;                     \
  functions->generator = FUNCTION_ADDR(Generate_##aname);          \
  functions->c_code = NULL;                                        \
  functions->s_name = #aname;                                      \
  functions->name = k##aname;                                      \
  functions->flags = Code::ComputeFlags(Code::kind, state, extra); \
  functions->extra_args = BuiltinExtraArguments::kNone;            \
  functions->argc = 0;                                             \
  ++functions;

#define DEF_FUNCTION_PTR_T(aname, aargc)                                 \
  functions->builder = &CodeStubAssemblerBuilder;                        \
  functions->generator = FUNCTION_ADDR(Generate_##aname);                \
  functions->c_code = NULL;                                              \
  functions->s_name = #aname;                                            \
  functions->name = k##aname;                                            \
  functions->flags =                                                     \
      Code::ComputeFlags(Code::BUILTIN, UNINITIALIZED, kNoExtraICState); \
  functions->extra_args = BuiltinExtraArguments::kNone;                  \
  functions->argc = aargc;                                               \
  ++functions;

#define DEF_FUNCTION_PTR_H(aname, kind)                     \
  functions->builder = &MacroAssemblerBuilder;              \
  functions->generator = FUNCTION_ADDR(Generate_##aname);   \
  functions->c_code = NULL;                                 \
  functions->s_name = #aname;                               \
  functions->name = k##aname;                               \
  functions->flags = Code::ComputeHandlerFlags(Code::kind); \
  functions->extra_args = BuiltinExtraArguments::kNone;     \
  functions->argc = 0;                                      \
  ++functions;

  BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
  BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
  BUILTIN_LIST_T(DEF_FUNCTION_PTR_T)
  BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
  BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)

#undef DEF_FUNCTION_PTR_C
#undef DEF_FUNCTION_PTR_A
#undef DEF_FUNCTION_PTR_H
#undef DEF_FUNCTION_PTR_T
}

@@ -4376,40 +4530,11 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {

  const BuiltinDesc* functions = builtin_function_table.functions();

  // For now we generate builtin adaptor code into a stack-allocated
  // buffer, before copying it into individual code objects. Be careful
  // with alignment, some platforms don't like unaligned code.
#ifdef DEBUG
  // We can generate a lot of debug code on Arm64.
  const size_t buffer_size = 32*KB;
#elif V8_TARGET_ARCH_PPC64
  // 8 KB is insufficient on PPC64 when FLAG_debug_code is on.
  const size_t buffer_size = 10 * KB;
#else
  const size_t buffer_size = 8*KB;
#endif
  union { int force_alignment; byte buffer[buffer_size]; } u;

  // Traverse the list of builtins and generate an adaptor in a
  // separate code object for each one.
  for (int i = 0; i < builtin_count; i++) {
    if (create_heap_objects) {
      MacroAssembler masm(isolate, u.buffer, sizeof u.buffer,
                          CodeObjectRequired::kYes);
      // Generate the code/adaptor.
      typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
      Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
      // We pass all arguments to the generator, but it may not use all of
      // them. This works because the first arguments are on top of the
      // stack.
      DCHECK(!masm.has_frame());
      g(&masm, functions[i].name, functions[i].extra_args);
      // Move the code into the object heap.
      CodeDesc desc;
      masm.GetCode(&desc);
      Code::Flags flags = functions[i].flags;
      Handle<Code> code =
          isolate->factory()->NewCode(desc, flags, masm.CodeObject());
      Handle<Code> code = (*functions[i].builder)(isolate, functions + i);
      // Log the event and add the code to the builtins array.
      PROFILE(isolate,
              CodeCreateEvent(Logger::BUILTIN_TAG, AbstractCode::cast(*code),
@@ -4483,6 +4608,11 @@ Handle<Code> Builtins::name() { \
      reinterpret_cast<Code**>(builtin_address(k##name)); \
  return Handle<Code>(code_address);                      \
}
#define DEFINE_BUILTIN_ACCESSOR_T(name, argc)                                \
Handle<Code> Builtins::name() {                                              \
  Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name));  \
  return Handle<Code>(code_address);                                         \
}
#define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
Handle<Code> Builtins::name() {               \
  Code** code_address =                       \
@@ -4491,11 +4621,13 @@ Handle<Code> Builtins::name() { \
}
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_T(DEFINE_BUILTIN_ACCESSOR_T)
BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A

#undef DEFINE_BUILTIN_ACCESSOR_T
#undef DEFINE_BUILTIN_ACCESSOR_H

}  // namespace internal
}  // namespace v8
@@ -11,6 +11,13 @@
namespace v8 {
namespace internal {

namespace compiler {

// Forward declarations.
class CodeStubAssembler;

}  // namespace compiler

// Specifies extra arguments required by a C++ builtin.
enum class BuiltinExtraArguments : uint8_t {
  kNone = 0u,
@@ -299,6 +306,9 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
  V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
  CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)

// Define list of builtins implemented in TurboFan (with JS linkage).
#define BUILTIN_LIST_T(V) V(MathSqrt, 2)

// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
  V(LoadIC_Slow, LOAD_IC) \
@@ -337,14 +347,16 @@ class Builtins {
  enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
#define DEF_ENUM_T(name, argc) k##name,
#define DEF_ENUM_H(name, kind) k##name,
    BUILTIN_LIST_C(DEF_ENUM_C)
    BUILTIN_LIST_A(DEF_ENUM_A)
    BUILTIN_LIST_H(DEF_ENUM_H)
    BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
    BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A)
    BUILTIN_LIST_T(DEF_ENUM_T) BUILTIN_LIST_H(DEF_ENUM_H)
    BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
    builtin_count
#undef DEF_ENUM_T
#undef DEF_ENUM_H
    builtin_count
  };

  enum CFunctionId {
@@ -357,13 +369,17 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
  Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_T(name, argc) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
  BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
  BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
  BUILTIN_LIST_T(DECLARE_BUILTIN_ACCESSOR_T)
  BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
  BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
#undef DECLARE_BUILTIN_ACCESSOR_T
#undef DECLARE_BUILTIN_ACCESSOR_H

  // Convenience wrappers.
  Handle<Code> CallFunction(
@@ -574,6 +590,8 @@ class Builtins {
  static void Generate_MathMin(MacroAssembler* masm) {
    Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
  }
  // ES6 section 20.2.2.32 Math.sqrt ( x )
  static void Generate_MathSqrt(compiler::CodeStubAssembler* assembler);

  // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
  static void Generate_NumberConstructor(MacroAssembler* masm);
@@ -243,6 +243,12 @@ Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
}

Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
  return StoreNoWriteBarrier(
      MachineRepresentation::kFloat64, object,
      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
}

Node* CodeStubAssembler::LoadMapBitField(Node* map) {
  return Load(MachineType::Uint8(), map,
              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
@@ -459,6 +465,12 @@ Node* CodeStubAssembler::LoadMap(Node* object) {
  return LoadObjectField(object, HeapObject::kMapOffset);
}

Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
  return StoreNoWriteBarrier(
      MachineRepresentation::kTagged, object,
      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
}

Node* CodeStubAssembler::LoadInstanceType(Node* object) {
  return LoadMapInstanceType(LoadMap(object));
}
@@ -90,6 +90,7 @@ class Schedule;
  V(Word64Ror)

#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
  V(Float64Sqrt)                             \
  V(ChangeFloat64ToUint32)                   \
  V(ChangeInt32ToFloat64)                    \
  V(ChangeInt32ToInt64)                      \
@@ -284,6 +285,8 @@ class CodeStubAssembler {
                        MachineType rep = MachineType::AnyTagged());
  // Load the floating point value of a HeapNumber.
  Node* LoadHeapNumberValue(Node* object);
  // Store the floating point value of a HeapNumber.
  Node* StoreHeapNumberValue(Node* object, Node* value);
  // Load the bit field of a Map.
  Node* LoadMapBitField(Node* map);
  // Load the instance type of a Map.
@@ -302,6 +305,8 @@ class CodeStubAssembler {
                   Node* value);
  // Load the Map of an HeapObject.
  Node* LoadMap(Node* object);
  // Store the Map of an HeapObject.
  Node* StoreMapNoWriteBarrier(Node* object, Node* map);
  // Load the instance type of an HeapObject.
  Node* LoadInstanceType(Node* object);
@@ -168,6 +168,17 @@ Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
  return NoChange();
}

// ES6 section 20.2.2.32 Math.sqrt ( x )
Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
  JSCallReduction r(node);
  if (r.InputsMatchOne(Type::Number())) {
    // Math.sqrt(a:number) -> Float64Sqrt(a)
    Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
    return Replace(value);
  }
  return NoChange();
}

Reduction JSBuiltinReducer::Reduce(Node* node) {
  Reduction reduction = NoChange();
  JSCallReduction r(node);
@@ -187,6 +198,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
    case kMathRound:
      reduction = ReduceMathRound(node);
      break;
    case kMathSqrt:
      reduction = ReduceMathSqrt(node);
      break;
    default:
      break;
  }
@@ -34,6 +34,7 @@ class JSBuiltinReducer final : public AdvancedReducer {
  Reduction ReduceMathMax(Node* node);
  Reduction ReduceMathImul(Node* node);
  Reduction ReduceMathFround(Node* node);
  Reduction ReduceMathSqrt(Node* node);
  Reduction ReduceMathRound(Node* node);

  Graph* graph() const;
@@ -59,8 +59,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
      return ReduceMathClz32(node);
    case Runtime::kInlineMathFloor:
      return ReduceMathFloor(node);
    case Runtime::kInlineMathSqrt:
      return ReduceMathSqrt(node);
    case Runtime::kInlineValueOf:
      return ReduceValueOf(node);
    case Runtime::kInlineFixedArrayGet:
@@ -224,15 +222,6 @@ Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
}


Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
  // Tell the compiler to assume number input.
  Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
                                   node->InputAt(0), graph()->start());
  node->ReplaceInput(0, renamed);
  return Change(node, machine()->Float64Sqrt());
}


Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
  // if (%_IsSmi(value)) {
  //   return value;
@@ -48,7 +48,6 @@ class JSIntrinsicLowering final : public AdvancedReducer {
  Reduction ReduceIsSmi(Node* node);
  Reduction ReduceMathClz32(Node* node);
  Reduction ReduceMathFloor(Node* node);
  Reduction ReduceMathSqrt(Node* node);
  Reduction ReduceValueOf(Node* node);
  Reduction ReduceFixedArrayGet(Node* node);
  Reduction ReduceFixedArraySet(Node* node);
@@ -1586,7 +1586,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
      return Type::Signed32();
    case Runtime::kInlineConstructDouble:
    case Runtime::kInlineMathFloor:
    case Runtime::kInlineMathSqrt:
    case Runtime::kInlineMathAtan2:
      return Type::Number();
    case Runtime::kInlineMathClz32:
@@ -94,7 +94,8 @@ enum BindingFlags {
  V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
  V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
  V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
  V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance)
  V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance) \
  V(MATH_SQRT, JSFunction, math_sqrt)

#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
  V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
@@ -12752,15 +12752,6 @@ void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
}


void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
  DCHECK(call->arguments()->length() == 1);
  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
  HValue* value = Pop();
  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
  return ast_context()->ReturnInstruction(result, call->id());
}


void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
  DCHECK(call->arguments()->length() == 2);
  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));

@@ -2244,7 +2244,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
  F(DoubleLo) \
  F(MathClz32) \
  F(MathFloor) \
  F(MathSqrt) \
  F(MathLogRT) \
  /* ES6 Collections */ \
  F(MapClear) \
@@ -456,6 +456,12 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
    return INTERPRETED;
  }
  switch (code_obj->kind()) {
    case Code::BUILTIN:
      if (marker->IsSmi()) break;
      // We treat frames for BUILTIN Code objects as OptimizedFrame for now
      // (all the builtins with JavaScript linkage are actually generated
      // with TurboFan currently, so this is sound).
      return OPTIMIZED;
    case Code::FUNCTION:
      return JAVA_SCRIPT;
    case Code::OPTIMIZED_FUNCTION:
@@ -981,8 +987,10 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {

  // Delegate to JS frame in absence of turbofan deoptimization.
  // TODO(turbofan): Revisit once we support deoptimization across the board.
  if (LookupCode()->is_turbofanned() && function()->shared()->asm_function() &&
      !FLAG_turbo_asm_deoptimization) {
  Code* code = LookupCode();
  if (code->kind() == Code::BUILTIN ||
      (code->is_turbofanned() && function()->shared()->asm_function() &&
       !FLAG_turbo_asm_deoptimization)) {
    return JavaScriptFrame::Summarize(frames);
  }

@@ -1085,7 +1093,6 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
int OptimizedFrame::LookupExceptionHandlerInTable(
    int* stack_slots, HandlerTable::CatchPrediction* prediction) {
  Code* code = LookupCode();
  DCHECK(code->is_optimized_code());
  HandlerTable* table = HandlerTable::cast(code->handler_table());
  int pc_offset = static_cast<int>(pc() - code->entry());
  if (stack_slots) *stack_slots = code->stack_slots();
@@ -91,11 +91,6 @@ function MathRound(x) {
  return %RoundNumber(TO_NUMBER(x));
}

// ECMA 262 - 15.8.2.17
function MathSqrtJS(x) {
  return %_MathSqrt(+x);
}

// ES6 draft 09-27-13, section 20.2.2.28.
function MathSign(x) {
  x = +x;
@@ -119,9 +114,9 @@ function MathAsinh(x) {
  x = TO_NUMBER(x);
  // Idempotent for NaN, +/-0 and +/-Infinity.
  if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
  if (x > 0) return MathLog(x + %_MathSqrt(x * x + 1));
  if (x > 0) return MathLog(x + %math_sqrt(x * x + 1));
  // This is to prevent numerical errors caused by large negative x.
  return -MathLog(-x + %_MathSqrt(x * x + 1));
  return -MathLog(-x + %math_sqrt(x * x + 1));
}

// ES6 draft 09-27-13, section 20.2.2.3.
@@ -130,7 +125,7 @@ function MathAcosh(x) {
  if (x < 1) return NaN;
  // Idempotent for NaN and +Infinity.
  if (!NUMBER_IS_FINITE(x)) return x;
  return MathLog(x + %_MathSqrt(x + 1) * %_MathSqrt(x - 1));
  return MathLog(x + %math_sqrt(x + 1) * %math_sqrt(x - 1));
}

// ES6 draft 09-27-13, section 20.2.2.7.
@@ -169,7 +164,7 @@ function MathHypot(x, y) { // Function length is 2.
    compensation = (preliminary - sum) - summand;
    sum = preliminary;
  }
  return %_MathSqrt(sum) * max;
  return %math_sqrt(sum) * max;
}

// ES6 draft 07-18-14, section 20.2.2.11
@@ -234,7 +229,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
  "floor", MathFloorJS,
  "log", MathLog,
  "round", MathRound,
  "sqrt", MathSqrtJS,
  "atan2", MathAtan2JS,
  "pow", MathPowJS,
  "sign", MathSign,
@@ -254,7 +248,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
%SetForceInlineFlag(MathFloorJS);
%SetForceInlineFlag(MathRandom);
%SetForceInlineFlag(MathSign);
%SetForceInlineFlag(MathSqrtJS);
%SetForceInlineFlag(MathTrunc);

// -------------------------------------------------------------------
@@ -4925,14 +4925,10 @@ void Code::set_profiler_ticks(int ticks) {
  }
}


int Code::builtin_index() {
  return READ_INT32_FIELD(this, kKindSpecificFlags1Offset);
}

int Code::builtin_index() { return READ_INT_FIELD(this, kBuiltinIndexOffset); }

void Code::set_builtin_index(int index) {
  WRITE_INT32_FIELD(this, kKindSpecificFlags1Offset, index);
  WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
}


@@ -5318,8 +5318,9 @@ class Code: public HeapObject {
  // Note: We might be able to squeeze this into the flags above.
  static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
  static const int kConstantPoolOffset = kPrologueOffset + kIntSize;
  static const int kHeaderPaddingStart =
  static const int kBuiltinIndexOffset =
      kConstantPoolOffset + kConstantPoolSize;
  static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;

  // Add padding to align the instruction start following right after
  // the Code object header.
@@ -5347,7 +5348,7 @@ class Code: public HeapObject {
      : public BitField<ExtraICState, 11, PlatformSmiTagging::kSmiValueSize -
                                              11 + 1> {};  // NOLINT

  // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
  // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
  static const int kStackSlotsFirstBit = 0;
  static const int kStackSlotsBitCount = 24;
  static const int kMarkedForDeoptimizationBit =
@@ -214,17 +214,6 @@ RUNTIME_FUNCTION(Runtime_RoundNumber) {
}


RUNTIME_FUNCTION(Runtime_MathSqrt) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  isolate->counters()->math_sqrt_runtime()->Increment();

  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
  lazily_initialize_fast_sqrt(isolate);
  return *isolate->factory()->NewNumber(fast_sqrt(x, isolate));
}


RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);

@@ -362,7 +362,6 @@ namespace internal {
  F(MathPow, 2, 1) \
  F(MathPowRT, 2, 1) \
  F(RoundNumber, 1, 1) \
  F(MathSqrt, 1, 1) \
  F(GenerateRandomNumbers, 1, 1)
@@ -76,11 +76,11 @@ TEST(SimpleCallRuntime1Arg) {
  CodeStubAssemblerTester m(isolate, descriptor);
  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
  Node* b = m.SmiTag(m.Int32Constant(256));
  m.Return(m.CallRuntime(Runtime::kMathSqrt, context, b));
  m.Return(m.CallRuntime(Runtime::kMathFloor, context, b));
  Handle<Code> code = m.GenerateCode();
  FunctionTester ft(descriptor, code);
  MaybeHandle<Object> result = ft.Call();
  CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
  CHECK_EQ(256, Handle<Smi>::cast(result.ToHandleChecked())->value());
}


@@ -90,11 +90,11 @@ TEST(SimpleTailCallRuntime1Arg) {
  CodeStubAssemblerTester m(isolate, descriptor);
  Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
  Node* b = m.SmiTag(m.Int32Constant(256));
  m.TailCallRuntime(Runtime::kMathSqrt, context, b);
  m.TailCallRuntime(Runtime::kMathFloor, context, b);
  Handle<Code> code = m.GenerateCode();
  FunctionTester ft(descriptor, code);
  MaybeHandle<Object> result = ft.Call();
  CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
  CHECK_EQ(256, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
@@ -34,7 +34,7 @@ var global = 3;
function f(a) {
  // This will trigger a deopt since global was previously a SMI, with the
  // accumulator holding an unboxed double which needs materialized.
  global = %_MathSqrt(a);
  global = %math_sqrt(a);
}
%OptimizeFunctionOnNextCall(f);
f(0.25);

@@ -259,24 +259,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineMathFloor) {
}


// -----------------------------------------------------------------------------
// %_MathSqrt


TEST_F(JSIntrinsicLoweringTest, InlineMathSqrt) {
  Node* const input = Parameter(0);
  Node* const context = Parameter(1);
  Node* const effect = graph()->start();
  Node* const control = graph()->start();
  Reduction const r = Reduce(
      graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathSqrt, 1),
                       input, context, effect, control));
  ASSERT_TRUE(r.Changed());
  EXPECT_THAT(r.replacement(),
              IsFloat64Sqrt(IsGuard(Type::Number(), input, _)));
}


// -----------------------------------------------------------------------------
// %_MathClz32