[builtins] Provide Math.floor as TurboFan builtin.

This way we avoid the second deoptimization for the Math.floor and
Math.ceil builtins when -0 is involved. We still deoptimize the inlined
Crankshaft version in various cases, that's a separate issue.

The algorithm used to implement CodeStubAssembler::Float64Floor is
vaguely based on the fast math version used in the libm of various BSDs,
but had to be reengineered to match the ECMAScript specification.

R=epertoso@chromium.org
BUG=v8:2890, v8:4059
LOG=n

Review URL: https://codereview.chromium.org/1828253002

Cr-Commit-Position: refs/heads/master@{#35083}
This commit is contained in:
bmeurer 2016-03-28 10:30:05 -07:00 committed by Commit bot
parent 0d24a0fcfe
commit 36ead519c8
21 changed files with 264 additions and 74 deletions

View File

@ -1519,6 +1519,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
SimpleInstallFunction(math, "asin", Builtins::kMathAsin, 1, true);
SimpleInstallFunction(math, "atan", Builtins::kMathAtan, 1, true);
Handle<JSFunction> math_floor =
SimpleInstallFunction(math, "floor", Builtins::kMathFloor, 1, true);
native_context()->set_math_floor(*math_floor);
SimpleInstallFunction(math, "fround", Builtins::kMathFround, 1, true);
SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);

View File

@ -2007,6 +2007,63 @@ BUILTIN(MathAtan) {
return *isolate->factory()->NewHeapNumber(std::atan(x->Number()));
}
// ES6 section 20.2.2.16 Math.floor ( x )
//
// TurboFan (CodeStubAssembler) implementation of the Math.floor builtin.
// Fast paths: a Smi is returned unchanged; a HeapNumber is floored via
// Float64Floor and re-tagged. Anything else is converted with ToNumber
// and the dispatch is retried.
void Builtins::Generate_MathFloor(compiler::CodeStubAssembler* assembler) {
  typedef compiler::CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef compiler::CodeStubAssembler::Variable Variable;

  // Parameter 4 holds the context under this builtin's JS linkage
  // (parameter 1 below is the argument {x}).
  Node* context = assembler->Parameter(4);

  // We might need to loop once for ToNumber conversion: after converting a
  // non-number input, control re-enters the loop with the converted value.
  Variable var_x(assembler, MachineRepresentation::kTagged);
  Label loop(assembler, &var_x);
  var_x.Bind(assembler->Parameter(1));
  assembler->Goto(&loop);
  assembler->Bind(&loop);
  {
    // Load the current {x} value.
    Node* x = var_x.value();

    // Check if {x} is a Smi or a HeapObject.
    Label if_xissmi(assembler), if_xisnotsmi(assembler);
    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);

    assembler->Bind(&if_xissmi);
    {
      // Nothing to do when {x} is a Smi: Smis are integers, so floor is
      // the identity.
      assembler->Return(x);
    }

    assembler->Bind(&if_xisnotsmi);
    {
      // Check if {x} is a HeapNumber (by comparing its map).
      Label if_xisheapnumber(assembler),
          if_xisnotheapnumber(assembler, Label::kDeferred);
      assembler->Branch(
          assembler->WordEqual(assembler->LoadMap(x),
                               assembler->HeapNumberMapConstant()),
          &if_xisheapnumber, &if_xisnotheapnumber);

      assembler->Bind(&if_xisheapnumber);
      {
        // Floor the float64 payload and re-tag the result (Smi when it
        // fits, otherwise a fresh HeapNumber).
        Node* x_value = assembler->LoadHeapNumberValue(x);
        Node* value = assembler->Float64Floor(x_value);
        Node* result = assembler->ChangeFloat64ToTagged(value);
        assembler->Return(result);
      }

      assembler->Bind(&if_xisnotheapnumber);
      {
        // Need to convert {x} to a Number first.
        Callable callable =
            CodeFactory::NonNumberToNumber(assembler->isolate());
        var_x.Bind(assembler->CallStub(callable, context, x));
        assembler->Goto(&loop);
      }
    }
  }
}
// ES6 section 20.2.2.17 Math.fround ( x )
BUILTIN(MathFround) {
@ -2018,7 +2075,6 @@ BUILTIN(MathFround) {
return *isolate->factory()->NewNumber(x32);
}
// ES6 section 20.2.2.19 Math.imul ( x, y )
BUILTIN(MathImul) {
HandleScope scope(isolate);
@ -2039,7 +2095,7 @@ void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
Node* context = assembler->Parameter(4);
Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
Node* value = assembler->Float64Sqrt(x_value);
Node* result = assembler->AllocateHeapNumberWithValue(value);
Node* result = assembler->ChangeFloat64ToTagged(value);
assembler->Return(result);
}

View File

@ -307,7 +307,9 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtins implemented in TurboFan (with JS linkage).
#define BUILTIN_LIST_T(V) V(MathSqrt, 2)
#define BUILTIN_LIST_T(V) \
V(MathFloor, 2) \
V(MathSqrt, 2)
// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
@ -580,6 +582,8 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
// ES6 section 20.2.2.16 Math.floor ( x )
static void Generate_MathFloor(compiler::CodeStubAssembler* assembler);
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
// ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )

View File

@ -859,8 +859,7 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
Node* lhs_value = var_fadd_lhs.value();
Node* rhs_value = var_fadd_rhs.value();
Node* value = assembler->Float64Add(lhs_value, rhs_value);
// TODO(bmeurer): Introduce a ChangeFloat64ToTagged.
Node* result = assembler->AllocateHeapNumberWithValue(value);
Node* result = assembler->ChangeFloat64ToTagged(value);
assembler->Return(result);
}
}
@ -1027,8 +1026,7 @@ void SubtractStub::GenerateAssembly(
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
Node* value = assembler->Float64Sub(lhs_value, rhs_value);
// TODO(bmeurer): Introduce a ChangeFloat64ToTagged.
Node* result = assembler->AllocateHeapNumberWithValue(value);
Node* result = assembler->ChangeFloat64ToTagged(value);
assembler->Return(result);
}
}

View File

@ -152,6 +152,113 @@ Node* CodeStubAssembler::SmiShiftBitsConstant() {
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
// Computes floor(x) for a float64 {x}, matching ES6 20.2.2.16 semantics:
// +-0, NaN and +-Infinity are returned unchanged (NaN/Infinity fall through
// the arithmetic below and propagate).
Node* CodeStubAssembler::Float64Floor(Node* x) {
  // Prefer the hardware rounding instruction when the target supports it.
  if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
    return raw_assembler_->Float64RoundDown(x);
  }
  // Software fallback based on the classic libm trick: adding and then
  // subtracting 2^52 forces the fractional bits to be rounded away.
  Node* two_52 = Float64Constant(4503599627370496.0E0);         // 2^52
  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);  // -2^52

  Variable var_x(this, MachineRepresentation::kFloat64);
  var_x.Bind(x);

  Label return_x(this);

  // Check if {x} is a large positive integer.
  Label if_xlargeposint(this), if_xnotlargeposint(this);
  Branch(Float64GreaterThanOrEqual(x, two_52), &if_xlargeposint,
         &if_xnotlargeposint);

  Bind(&if_xlargeposint);
  {
    // Every float64 >= 2^52 is already an integer (the format has no
    // fractional bits left at that magnitude), so {x} is its own floor.
    Goto(&return_x);
  }

  Bind(&if_xnotlargeposint);
  {
    // Check if {x} is negative.
    Label if_xnegative(this), if_xpositive(this);
    Branch(Float64LessThan(x, Float64Constant(0.0)), &if_xnegative,
           &if_xpositive);

    Bind(&if_xnegative);
    {
      // Check if {x} is a large negative integer.
      Label if_xlargenegint(this), if_xnotlargenegint(this);
      Branch(Float64LessThanOrEqual(x, minus_two_52), &if_xlargenegint,
             &if_xnotlargenegint);

      Bind(&if_xlargenegint);
      {
        // Every float64 <= -2^52 is already an integer.
        Goto(&return_x);
      }

      Bind(&if_xnotlargenegint);
      {
        // Round negative {x} towards -Infinity: work on z = -x > 0.
        Node* z = Float64Sub(Float64Constant(-0.0), x);
        // y = round-to-nearest(z) via the 2^52 trick.
        Node* y = Float64Sub(Float64Add(two_52, z), two_52);
        // Check if we need to adjust {y}: if z > y, the rounding went
        // down, so ceil(z) = y + 1 and floor(x) = -(y + 1).
        Label if_adjust(this), if_notadjust(this);
        Branch(Float64GreaterThan(z, y), &if_adjust, &if_notadjust);

        Bind(&if_adjust);
        {
          var_x.Bind(Float64Sub(Float64Constant(-1.0), y));
          Goto(&return_x);
        }

        Bind(&if_notadjust);
        {
          var_x.Bind(Float64Sub(Float64Constant(-0.0), y));
          Goto(&return_x);
        }
      }
    }

    Bind(&if_xpositive);
    {
      // Check if {x} is zero (either positive or negative).
      Label if_xzero(this), if_xnotzero(this);
      Branch(Float64Equal(x, Float64Constant(0.0)), &if_xzero, &if_xnotzero);

      Bind(&if_xzero);
      {
        // We have to return both 0.0 and -0.0 as is (the spec requires
        // Math.floor(-0) to be -0, which is why the hardware-less path
        // must never run -0 through the arithmetic below).
        Goto(&return_x);
      }

      Bind(&if_xnotzero);
      {
        // Round positive {x} towards -Infinity: y = round-to-nearest(x).
        Node* y = Float64Sub(Float64Add(two_52, x), two_52);
        // Check if we need to adjust {y}: if x < y, the rounding went up,
        // so floor(x) = y - 1. (NaN compares false and returns y = NaN.)
        Label if_adjust(this), if_notadjust(this);
        Branch(Float64LessThan(x, y), &if_adjust, &if_notadjust);

        Bind(&if_adjust);
        {
          var_x.Bind(Float64Sub(y, Float64Constant(1.0)));
          Goto(&return_x);
        }

        Bind(&if_notadjust);
        {
          var_x.Bind(y);
          Goto(&return_x);
        }
      }
    }
  }
  Bind(&return_x);
  return var_x.value();
}
Node* CodeStubAssembler::SmiTag(Node* value) {
return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
@ -510,6 +617,65 @@ Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
raw_assembler_->Int32Constant(shift));
}
// Tags a float64 {value} as a Smi when it is an integer in Smi range and
// not -0.0; otherwise boxes it into a freshly allocated HeapNumber.
//
// Fix: the 32-bit overflow path previously redeclared a local label named
// if_join, shadowing the function-level join label. The Goto(&if_join)
// inside that scope then targeted the inner, never-bound label, so the
// no-overflow Smi result could never reach the join (and the unbound label
// is itself invalid). The shadowing declaration is removed so all paths
// merge at the single function-level if_join.
Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
  // Truncate towards zero and widen back: {value} is int32-representable
  // exactly when the round trip reproduces it bit-for-value.
  Node* value32 = raw_assembler_->TruncateFloat64ToInt32(
      TruncationMode::kRoundToZero, value);
  Node* value64 = ChangeInt32ToFloat64(value32);

  Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);

  Label if_valueisequal(this), if_valueisnotequal(this);
  Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
  Bind(&if_valueisequal);
  {
    // The round trip succeeded, but -0.0 also truncates to 0 and must be
    // kept as a HeapNumber (a Smi cannot represent the sign of zero).
    Label if_valueiszero(this), if_valueisnotzero(this);
    Branch(Float64Equal(value, Float64Constant(0.0)), &if_valueiszero,
           &if_valueisnotzero);

    Bind(&if_valueiszero);
    // A set sign bit (negative high word) identifies -0.0.
    BranchIfInt32LessThan(raw_assembler_->Float64ExtractHighWord32(value),
                          Int32Constant(0), &if_valueisheapnumber,
                          &if_valueisint32);

    Bind(&if_valueisnotzero);
    Goto(&if_valueisint32);
  }
  Bind(&if_valueisnotequal);
  Goto(&if_valueisheapnumber);

  Variable var_result(this, MachineRepresentation::kTagged);
  Bind(&if_valueisint32);
  {
    if (raw_assembler_->machine()->Is64()) {
      // On 64-bit targets every int32 fits into a Smi.
      Node* result = SmiTag(ChangeInt32ToInt64(value32));
      var_result.Bind(result);
      Goto(&if_join);
    } else {
      // On 32-bit targets Smi tagging is a left shift by one, so
      // value32 + value32 overflows exactly when the value does not fit.
      Node* pair = Int32AddWithOverflow(value32, value32);
      Node* overflow = Projection(1, pair);
      Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
      Branch(overflow, &if_overflow, &if_notoverflow);
      Bind(&if_overflow);
      Goto(&if_valueisheapnumber);
      Bind(&if_notoverflow);
      {
        // The addition already produced the tagged Smi.
        Node* result = Projection(0, pair);
        var_result.Bind(result);
        Goto(&if_join);
      }
    }
  }
  Bind(&if_valueisheapnumber);
  {
    Node* result = AllocateHeapNumberWithValue(value);
    var_result.Bind(result);
    Goto(&if_join);
  }
  Bind(&if_join);
  return var_result.value();
}
Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
if (raw_assembler_->machine()->Is64()) {
return SmiTag(ChangeInt32ToInt64(value));

View File

@ -65,6 +65,8 @@ class Schedule;
CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float64Add) \
V(Float64Sub) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(IntPtrAdd) \
V(IntPtrAddWithOverflow) \
V(IntPtrSub) \
@ -257,6 +259,9 @@ class CodeStubAssembler {
// Macros
// ===========================================================================
// Float64 operations.
Node* Float64Floor(Node* x);
// Tag a Word as a Smi value.
Node* SmiTag(Node* value);
// Untag a Smi value as a Word.
@ -333,6 +338,7 @@ class CodeStubAssembler {
Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
// Conversions.
Node* ChangeFloat64ToTagged(Node* value);
Node* ChangeInt32ToTagged(Node* value);
Node* TruncateTaggedToFloat64(Node* context, Node* value);
Node* TruncateTaggedToWord32(Node* context, Node* value);

View File

@ -129,6 +129,18 @@ Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
return NoChange();
}
// ES6 draft 08-24-14, section 20.2.2.16.
// Math.floor(a:number) -> Float64RoundDown(a), applied only when the single
// argument is statically known to be a Number and the target machine offers
// a round-down instruction; otherwise the call is left untouched.
Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
  JSCallReduction r(node);
  if (!r.InputsMatchOne(Type::Number())) return NoChange();
  if (!machine()->Float64RoundDown().IsSupported()) return NoChange();
  Node* const rounded =
      graph()->NewNode(machine()->Float64RoundDown().op(), r.left());
  return Replace(rounded);
}
// ES6 draft 08-24-14, section 20.2.2.17.
Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
@ -192,6 +204,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kMathImul:
reduction = ReduceMathImul(node);
break;
case kMathFloor:
reduction = ReduceMathFloor(node);
break;
case kMathFround:
reduction = ReduceMathFround(node);
break;

View File

@ -33,6 +33,7 @@ class JSBuiltinReducer final : public AdvancedReducer {
Reduction ReduceFunctionCall(Node* node);
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFloor(Node* node);
Reduction ReduceMathFround(Node* node);
Reduction ReduceMathSqrt(Node* node);
Reduction ReduceMathRound(Node* node);

View File

@ -57,8 +57,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsSmi(node);
case Runtime::kInlineMathClz32:
return ReduceMathClz32(node);
case Runtime::kInlineMathFloor:
return ReduceMathFloor(node);
case Runtime::kInlineValueOf:
return ReduceValueOf(node);
case Runtime::kInlineFixedArrayGet:
@ -216,12 +214,6 @@ Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
}
Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
if (!machine()->Float64RoundDown().IsSupported()) return NoChange();
return Change(node, machine()->Float64RoundDown().op());
}
Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// if (%_IsSmi(value)) {
// return value;

View File

@ -47,7 +47,6 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathFloor(Node* node);
Reduction ReduceValueOf(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);

View File

@ -1585,7 +1585,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineDoubleHi:
return Type::Signed32();
case Runtime::kInlineConstructDouble:
case Runtime::kInlineMathFloor:
case Runtime::kInlineMathAtan2:
return Type::Number();
case Runtime::kInlineMathClz32:

View File

@ -95,6 +95,7 @@ enum BindingFlags {
V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance) \
V(MATH_FLOOR, JSFunction, math_floor) \
V(MATH_SQRT, JSFunction, math_sqrt)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \

View File

@ -759,7 +759,6 @@ class RuntimeCallTimerScope {
SC(math_atan2_runtime, V8.MathAtan2Runtime) \
SC(math_clz32_runtime, V8.MathClz32Runtime) \
SC(math_exp_runtime, V8.MathExpRuntime) \
SC(math_floor_runtime, V8.MathFloorRuntime) \
SC(math_log_runtime, V8.MathLogRuntime) \
SC(math_pow_runtime, V8.MathPowRuntime) \
SC(math_round_runtime, V8.MathRoundRuntime) \

View File

@ -12725,15 +12725,6 @@ void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
}
void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
return ast_context()->ReturnInstruction(result, call->id());
}
void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));

View File

@ -2269,7 +2269,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(DoubleHi) \
F(DoubleLo) \
F(MathClz32) \
F(MathFloor) \
F(MathLogRT) \
/* ES6 Collections */ \
F(MapClear) \

View File

@ -32,7 +32,6 @@ var GlobalString = global.String;
var MakeError;
var MakeRangeError;
var MakeTypeError;
var MathFloor;
var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
@ -58,7 +57,6 @@ utils.Import(function(from) {
MakeError = from.MakeError;
MakeRangeError = from.MakeRangeError;
MakeTypeError = from.MakeTypeError;
MathFloor = from.MathFloor;
RegExpTest = from.RegExpTest;
StringIndexOf = from.StringIndexOf;
StringLastIndexOf = from.StringLastIndexOf;
@ -1102,7 +1100,7 @@ function getNumberOption(options, property, min, max, fallback) {
if (IsNaN(value) || value < min || value > max) {
throw MakeRangeError(kPropertyValueOutOfRange, property);
}
return MathFloor(value);
return %math_floor(value);
}
return fallback;

View File

@ -41,7 +41,7 @@ function MathAtan2JS(y, x) {
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
return -%_MathFloor(-x);
return -%math_floor(-x);
}
// ECMA 262 - 15.8.2.8
@ -49,11 +49,6 @@ function MathExp(x) {
return %MathExpRT(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.9
function MathFloorJS(x) {
return %_MathFloor(+x);
}
// ECMA 262 - 15.8.2.10
function MathLog(x) {
return %_MathLogRT(TO_NUMBER(x));
@ -103,8 +98,8 @@ function MathSign(x) {
// ES6 draft 09-27-13, section 20.2.2.34.
function MathTrunc(x) {
x = +x;
if (x > 0) return %_MathFloor(x);
if (x < 0) return -%_MathFloor(-x);
if (x > 0) return %math_floor(x);
if (x < 0) return -%math_floor(-x);
// -0, 0 or NaN.
return x;
}
@ -187,7 +182,7 @@ macro NEWTON_ITERATION_CBRT(x, approx)
endmacro
function CubeRoot(x) {
var approx_hi = MathFloorJS(%_DoubleHi(x) / 3) + 0x2A9F7893;
var approx_hi = %math_floor(%_DoubleHi(x) / 3) + 0x2A9F7893;
var approx = %_ConstructDouble(approx_hi | 0, 0);
approx = NEWTON_ITERATION_CBRT(x, approx);
approx = NEWTON_ITERATION_CBRT(x, approx);
@ -226,7 +221,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"abs", MathAbs,
"ceil", MathCeil,
"exp", MathExp,
"floor", MathFloorJS,
"log", MathLog,
"round", MathRound,
"atan2", MathAtan2JS,
@ -245,7 +239,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
%SetForceInlineFlag(MathAtan2JS);
%SetForceInlineFlag(MathCeil);
%SetForceInlineFlag(MathClz32JS);
%SetForceInlineFlag(MathFloorJS);
%SetForceInlineFlag(MathRandom);
%SetForceInlineFlag(MathSign);
%SetForceInlineFlag(MathTrunc);
@ -256,7 +249,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
utils.Export(function(to) {
to.MathAbs = MathAbs;
to.MathExp = MathExp;
to.MathFloor = MathFloorJS;
to.IntRandom = MathRandomRaw;
});

View File

@ -119,16 +119,6 @@ RUNTIME_FUNCTION(Runtime_MathClz32) {
}
RUNTIME_FUNCTION(Runtime_MathFloor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
isolate->counters()->math_floor_runtime()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return *isolate->factory()->NewNumber(Floor(x));
}
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if VFP3 is not available.
RUNTIME_FUNCTION(Runtime_MathPow) {

View File

@ -358,7 +358,6 @@ namespace internal {
F(MathAtan2, 2, 1) \
F(MathExpRT, 1, 1) \
F(MathClz32, 1, 1) \
F(MathFloor, 1, 1) \
F(MathPow, 2, 1) \
F(MathPowRT, 2, 1) \
F(RoundNumber, 1, 1) \

View File

@ -76,7 +76,7 @@ TEST(SimpleCallRuntime1Arg) {
CodeStubAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
Node* b = m.SmiTag(m.Int32Constant(256));
m.Return(m.CallRuntime(Runtime::kMathFloor, context, b));
m.Return(m.CallRuntime(Runtime::kRoundNumber, context, b));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
@ -90,7 +90,7 @@ TEST(SimpleTailCallRuntime1Arg) {
CodeStubAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
Node* b = m.SmiTag(m.Int32Constant(256));
m.TailCallRuntime(Runtime::kMathFloor, context, b);
m.TailCallRuntime(Runtime::kRoundNumber, context, b);
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();

View File

@ -241,24 +241,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiver) {
}
// -----------------------------------------------------------------------------
// %_MathFloor
TEST_F(JSIntrinsicLoweringTest, InlineMathFloor) {
Node* const input = Parameter(0);
Node* const context = Parameter(1);
Node* const effect = graph()->start();
Node* const control = graph()->start();
Reduction const r = Reduce(
graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathFloor, 1),
input, context, effect, control),
MachineOperatorBuilder::kFloat64RoundDown);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat64RoundDown(input));
}
// -----------------------------------------------------------------------------
// %_MathClz32