Add floor, ceil, and round (truncate) instructions for ia32 and x64 (when SSE4.1 is available), and add floor, ceil, and round (truncate and round ties away from zero) for arm64.

R=bmeurer@chromium.org, dcarney@chromium.org, mstarzinger@chromium.org, rodolph.perfetta@arm.com
TEST=test/mjsunit/asm/math-floor.js, test/mjsunit/asm/math-ceil.js, test/unittest/compiler/js-builtin-reducer-unittest.cc
Review URL: https://codereview.chromium.org/677433002

Cr-Commit-Position: refs/heads/master@{#25018}
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@25018 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: 7c27d234f3
Commit: 75ac43928b
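For reference, the four rounding modes wired up in this change differ only in how they treat fractional and halfway values. The short C++ sketch below is an illustration only (it is not part of the diff); it maps each new Float64 machine operator to its <cmath> counterpart, with the corresponding ARM, ARM64 and SSE4.1 instructions from this change noted in the comments.

#include <cmath>
#include <cstdio>

int main() {
  const double x = -2.5;
  // Float64Floor: round toward -infinity (vrintm / Frintm / roundsd kRoundDown).
  std::printf("floor(-2.5) = %g\n", std::floor(x));  // -3
  // Float64Ceil: round toward +infinity (vrintp / Frintp / roundsd kRoundUp).
  std::printf("ceil(-2.5)  = %g\n", std::ceil(x));   // -2
  // Float64RoundTruncate: round toward zero (vrintz / Frintz / roundsd kRoundToZero).
  std::printf("trunc(-2.5) = %g\n", std::trunc(x));  // -2
  // Float64RoundTiesAway: halfway cases round away from zero (vrinta / Frinta);
  // no single SSE4.1 rounding mode matches, so ia32/x64 do not advertise it here.
  std::printf("round(-2.5) = %g\n", std::round(x));  // -3
  return 0;
}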
@@ -363,6 +363,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
    case kArmVsqrtF64:
      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVfloorF64:
      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVceilF64:
      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVroundTruncateF64:
      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVroundTiesAwayF64:
      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVnegF64:
      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
@@ -44,6 +44,10 @@ namespace compiler {
  V(ArmVmodF64) \
  V(ArmVnegF64) \
  V(ArmVsqrtF64) \
  V(ArmVfloorF64) \
  V(ArmVceilF64) \
  V(ArmVroundTruncateF64) \
  V(ArmVroundTiesAwayF64) \
  V(ArmVcvtF32F64) \
  V(ArmVcvtF64F32) \
  V(ArmVcvtF64S32) \
@@ -100,6 +100,10 @@ class ArmOperandGenerator : public OperandGenerator {
      case kArmVmodF64:
      case kArmVnegF64:
      case kArmVsqrtF64:
      case kArmVfloorF64:
      case kArmVceilF64:
      case kArmVroundTruncateF64:
      case kArmVroundTiesAwayF64:
      case kArmVcvtF32F64:
      case kArmVcvtF64F32:
      case kArmVcvtF64S32:

@@ -115,6 +119,14 @@ class ArmOperandGenerator : public OperandGenerator {
};


static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  ArmOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  ArmOperandGenerator g(selector);

@@ -826,6 +838,30 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}


void InstructionSelector::VisitFloat64Floor(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRRFloat64(this, kArmVfloorF64, node);
}


void InstructionSelector::VisitFloat64Ceil(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRRFloat64(this, kArmVceilF64, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRRFloat64(this, kArmVroundTruncateF64, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
}


void InstructionSelector::VisitCall(Node* node) {
  ArmOperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);

@@ -1139,10 +1175,19 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kInt32ModIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kUint32ModIsSafe;
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kInt32DivIsSafe |
      MachineOperatorBuilder::kInt32ModIsSafe |
      MachineOperatorBuilder::kUint32DivIsSafe |
      MachineOperatorBuilder::kUint32ModIsSafe;
  if (CpuFeatures::IsSupported(ARMv8)) {
    flags |= MachineOperatorBuilder::kFloat64Floor |
             MachineOperatorBuilder::kFloat64Ceil |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTiesAway;
  }
  return flags;
}

}  // namespace compiler
@@ -222,6 +222,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Ceil:
      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Floor:
      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTruncate:
      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTiesAway:
      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
@@ -78,6 +78,10 @@ namespace compiler {
  V(Arm64Float64Div) \
  V(Arm64Float64Mod) \
  V(Arm64Float64Sqrt) \
  V(Arm64Float64Floor) \
  V(Arm64Float64Ceil) \
  V(Arm64Float64RoundTruncate) \
  V(Arm64Float64RoundTiesAway) \
  V(Arm64Float32ToFloat64) \
  V(Arm64Float64ToFloat32) \
  V(Arm64Float64ToInt32) \
@@ -86,6 +86,14 @@ class Arm64OperandGenerator FINAL : public OperandGenerator {
};


static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Arm64OperandGenerator g(selector);

@@ -899,9 +907,27 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
  VisitRRFloat64(this, kArm64Float64Sqrt, node);
}


void InstructionSelector::VisitFloat64Floor(Node* node) {
  VisitRRFloat64(this, kArm64Float64Floor, node);
}


void InstructionSelector::VisitFloat64Ceil(Node* node) {
  VisitRRFloat64(this, kArm64Float64Ceil, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
}


@@ -1317,9 +1343,11 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kNoFlags;
  return MachineOperatorBuilder::kFloat64Floor |
         MachineOperatorBuilder::kFloat64Ceil |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
@@ -353,6 +353,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
    case kSSEFloat64Sqrt:
      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
@@ -36,6 +36,9 @@ namespace compiler {
  V(SSEFloat64Div) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Floor) \
  V(SSEFloat64Ceil) \
  V(SSEFloat64RoundTruncate) \
  V(SSECvtss2sd) \
  V(SSECvtsd2ss) \
  V(SSEFloat64ToInt32) \
@@ -142,6 +142,14 @@ class AddressingModeMatcher {
};


static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  IA32OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));

@@ -596,6 +604,29 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}


void InstructionSelector::VisitFloat64Floor(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Floor, node);
}


void InstructionSelector::VisitFloat64Ceil(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Ceil, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitCall(Node* node) {
  IA32OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);

@@ -881,9 +912,13 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kNoFlags;
  if (CpuFeatures::IsSupported(SSE4_1)) {
    return MachineOperatorBuilder::kFloat64Floor |
           MachineOperatorBuilder::kFloat64Ceil |
           MachineOperatorBuilder::kFloat64RoundTruncate;
  }
  return MachineOperatorBuilder::Flag::kNoFlags;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
@@ -603,6 +603,10 @@ MachineType InstructionSelector::GetMachineType(Node* node) {
    case IrOpcode::kFloat64Div:
    case IrOpcode::kFloat64Mod:
    case IrOpcode::kFloat64Sqrt:
    case IrOpcode::kFloat64Floor:
    case IrOpcode::kFloat64Ceil:
    case IrOpcode::kFloat64RoundTruncate:
    case IrOpcode::kFloat64RoundTiesAway:
      return kMachFloat64;
    case IrOpcode::kFloat64Equal:
    case IrOpcode::kFloat64LessThan:

@@ -792,11 +796,20 @@ void InstructionSelector::VisitNode(Node* node) {
      return VisitFloat64LessThan(node);
    case IrOpcode::kFloat64LessThanOrEqual:
      return VisitFloat64LessThanOrEqual(node);
    case IrOpcode::kFloat64Floor:
      return MarkAsDouble(node), VisitFloat64Floor(node);
    case IrOpcode::kFloat64Ceil:
      return MarkAsDouble(node), VisitFloat64Ceil(node);
    case IrOpcode::kFloat64RoundTruncate:
      return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
    case IrOpcode::kFloat64RoundTiesAway:
      return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
    case IrOpcode::kLoadStackPointer:
      return VisitLoadStackPointer(node);
    default:
      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
               node->opcode(), node->op()->mnemonic(), node->id());
      break;
  }
}
@@ -191,6 +191,32 @@ Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
}


// ES6 draft 10-14-14, section 20.2.2.16.
Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
  if (!machine()->HasFloat64Floor()) return NoChange();
  JSCallReduction r(node);
  if (r.InputsMatchOne(Type::Number())) {
    // Math.floor(a:number) -> Float64Floor(a)
    Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
    return Replace(value);
  }
  return NoChange();
}


// ES6 draft 10-14-14, section 20.2.2.10.
Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
  if (!machine()->HasFloat64Ceil()) return NoChange();
  JSCallReduction r(node);
  if (r.InputsMatchOne(Type::Number())) {
    // Math.ceil(a:number) -> Float64Ceil(a)
    Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
    return Replace(value);
  }
  return NoChange();
}


Reduction JSBuiltinReducer::Reduce(Node* node) {
  JSCallReduction r(node);

@@ -207,6 +233,10 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
      return ReplaceWithPureReduction(node, ReduceMathImul(node));
    case kMathFround:
      return ReplaceWithPureReduction(node, ReduceMathFround(node));
    case kMathFloor:
      return ReplaceWithPureReduction(node, ReduceMathFloor(node));
    case kMathCeil:
      return ReplaceWithPureReduction(node, ReduceMathCeil(node));
    default:
      break;
  }
@@ -35,6 +35,8 @@ class JSBuiltinReducer FINAL : public Reducer {
  Reduction ReduceMathMax(Node* node);
  Reduction ReduceMathImul(Node* node);
  Reduction ReduceMathFround(Node* node);
  Reduction ReduceMathFloor(Node* node);
  Reduction ReduceMathCeil(Node* node);

  JSGraph* jsgraph_;
  SimplifiedOperatorBuilder simplified_;
@@ -409,6 +409,22 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}


void InstructionSelector::VisitFloat64Floor(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitFloat64Ceil(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitCall(Node* node) {
  MipsOperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
@@ -13,10 +13,11 @@ namespace compiler {

RawMachineAssembler::RawMachineAssembler(Graph* graph,
                                         MachineSignature* machine_sig,
                                         MachineType word)
                                         MachineType word,
                                         MachineOperatorBuilder::Flags flags)
    : GraphBuilder(graph),
      schedule_(new (zone()) Schedule(zone())),
      machine_(word),
      machine_(word, flags),
      common_(zone()),
      machine_sig_(machine_sig),
      call_descriptor_(
@@ -45,7 +45,9 @@ class RawMachineAssembler : public GraphBuilder {
  };

  RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
                      MachineType word = kMachPtr);
                      MachineType word = kMachPtr,
                      MachineOperatorBuilder::Flags flags =
                          MachineOperatorBuilder::Flag::kNoFlags);
  virtual ~RawMachineAssembler() {}

  Isolate* isolate() const { return zone()->isolate(); }

@@ -380,6 +382,14 @@ class RawMachineAssembler : public GraphBuilder {
  Node* TruncateInt64ToInt32(Node* a) {
    return NewNode(machine()->TruncateInt64ToInt32(), a);
  }
  Node* Float64Floor(Node* a) { return NewNode(machine()->Float64Floor(), a); }
  Node* Float64Ceil(Node* a) { return NewNode(machine()->Float64Ceil(), a); }
  Node* Float64RoundTruncate(Node* a) {
    return NewNode(machine()->Float64RoundTruncate(), a);
  }
  Node* Float64RoundTiesAway(Node* a) {
    return NewNode(machine()->Float64RoundTiesAway(), a);
  }

  // Parameters.
  Node* Parameter(size_t index);
@@ -407,6 +407,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -52,6 +52,9 @@ namespace compiler {
  V(SSEFloat64Div) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Floor) \
  V(SSEFloat64Ceil) \
  V(SSEFloat64RoundTruncate) \
  V(SSECvtss2sd) \
  V(SSECvtsd2ss) \
  V(SSEFloat64ToInt32) \
@@ -120,6 +120,14 @@ class AddressingModeMatcher {
};


static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));

@@ -723,6 +731,29 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}


void InstructionSelector::VisitFloat64Floor(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Floor, node);
}


void InstructionSelector::VisitFloat64Ceil(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Ceil, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitCall(Node* node) {
  X64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);

@@ -1112,9 +1143,13 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    return MachineOperatorBuilder::kFloat64Floor |
           MachineOperatorBuilder::kFloat64Ceil |
           MachineOperatorBuilder::kFloat64RoundTruncate;
  }
  return MachineOperatorBuilder::kNoFlags;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
@@ -7,6 +7,7 @@

#include "src/v8.h"

#include "src/compiler/instruction-selector.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/simulator.h"

@@ -23,7 +24,9 @@ class MachineAssemblerTester : public HandleAndZoneScope,
 public:
  MachineAssemblerTester(MachineType return_type, MachineType p0,
                         MachineType p1, MachineType p2, MachineType p3,
                         MachineType p4)
                         MachineType p4,
                         MachineOperatorBuilder::Flags flags =
                             MachineOperatorBuilder::Flag::kNoFlags)
      : HandleAndZoneScope(),
        CallHelper(
            main_isolate(),

@@ -31,7 +34,7 @@ class MachineAssemblerTester : public HandleAndZoneScope,
        MachineAssembler(
            new (main_zone()) Graph(main_zone()),
            MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4),
            kMachPtr) {}
            kMachPtr, flags) {}

  Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
    return this->Load(rep, this->PointerConstant(address),

@@ -89,8 +92,8 @@ class RawMachineAssemblerTester
      MachineType p3 = kMachNone,
      MachineType p4 = kMachNone)
      : MachineAssemblerTester<RawMachineAssembler>(
            ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3,
            p4) {}
            ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3, p4,
            InstructionSelector::SupportedMachineOperatorFlags()) {}

  template <typename Ci, typename Fn>
  void Run(const Ci& ci, const Fn& fn) {
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <cmath>
#include <functional>
#include <limits>

@@ -4536,4 +4537,171 @@ TEST(RunFloat32Constant) {
  }
}


static double two_30 = 1 << 30;             // 2^30 is a smi boundary.
static double two_52 = two_30 * (1 << 22);  // 2^52 is a precision boundary.
static double kValues[] = {0.1,
                           0.2,
                           0.49999999999999994,
                           0.5,
                           0.7,
                           1.0 - std::numeric_limits<double>::epsilon(),
                           -0.1,
                           -0.49999999999999994,
                           -0.5,
                           -0.7,
                           1.1,
                           1.0 + std::numeric_limits<double>::epsilon(),
                           1.5,
                           1.7,
                           -1,
                           -1 + std::numeric_limits<double>::epsilon(),
                           -1 - std::numeric_limits<double>::epsilon(),
                           -1.1,
                           -1.5,
                           -1.7,
                           std::numeric_limits<double>::min(),
                           -std::numeric_limits<double>::min(),
                           std::numeric_limits<double>::max(),
                           -std::numeric_limits<double>::max(),
                           std::numeric_limits<double>::infinity(),
                           -std::numeric_limits<double>::infinity(),
                           two_30,
                           two_30 + 0.1,
                           two_30 + 0.5,
                           two_30 + 0.7,
                           two_30 - 1,
                           two_30 - 1 + 0.1,
                           two_30 - 1 + 0.5,
                           two_30 - 1 + 0.7,
                           -two_30,
                           -two_30 + 0.1,
                           -two_30 + 0.5,
                           -two_30 + 0.7,
                           -two_30 + 1,
                           -two_30 + 1 + 0.1,
                           -two_30 + 1 + 0.5,
                           -two_30 + 1 + 0.7,
                           two_52,
                           two_52 + 0.1,
                           two_52 + 0.5,
                           two_52 + 0.5,
                           two_52 + 0.7,
                           two_52 + 0.7,
                           two_52 - 1,
                           two_52 - 1 + 0.1,
                           two_52 - 1 + 0.5,
                           two_52 - 1 + 0.7,
                           -two_52,
                           -two_52 + 0.1,
                           -two_52 + 0.5,
                           -two_52 + 0.7,
                           -two_52 + 1,
                           -two_52 + 1 + 0.1,
                           -two_52 + 1 + 0.5,
                           -two_52 + 1 + 0.7,
                           two_30,
                           two_30 - 0.1,
                           two_30 - 0.5,
                           two_30 - 0.7,
                           two_30 - 1,
                           two_30 - 1 - 0.1,
                           two_30 - 1 - 0.5,
                           two_30 - 1 - 0.7,
                           -two_30,
                           -two_30 - 0.1,
                           -two_30 - 0.5,
                           -two_30 - 0.7,
                           -two_30 + 1,
                           -two_30 + 1 - 0.1,
                           -two_30 + 1 - 0.5,
                           -two_30 + 1 - 0.7,
                           two_52,
                           two_52 - 0.1,
                           two_52 - 0.5,
                           two_52 - 0.5,
                           two_52 - 0.7,
                           two_52 - 0.7,
                           two_52 - 1,
                           two_52 - 1 - 0.1,
                           two_52 - 1 - 0.5,
                           two_52 - 1 - 0.7,
                           -two_52,
                           -two_52 - 0.1,
                           -two_52 - 0.5,
                           -two_52 - 0.7,
                           -two_52 + 1,
                           -two_52 + 1 - 0.1,
                           -two_52 + 1 - 0.5,
                           -two_52 + 1 - 0.7};


TEST(RunFloat64Floor) {
  double input = -1.0;
  double result = 0.0;
  RawMachineAssemblerTester<int32_t> m;
  if (!m.machine()->HasFloat64Floor()) return;
  m.StoreToPointer(&result, kMachFloat64,
                   m.Float64Floor(m.LoadFromPointer(&input, kMachFloat64)));
  m.Return(m.Int32Constant(0));
  for (size_t i = 0; i < arraysize(kValues); ++i) {
    input = kValues[i];
    CHECK_EQ(0, m.Call());
    double expected = std::floor(kValues[i]);
    CHECK_EQ(expected, result);
  }
}


TEST(RunFloat64Ceil) {
  double input = -1.0;
  double result = 0.0;
  RawMachineAssemblerTester<int32_t> m;
  if (!m.machine()->HasFloat64Ceil()) return;
  m.StoreToPointer(&result, kMachFloat64,
                   m.Float64Ceil(m.LoadFromPointer(&input, kMachFloat64)));
  m.Return(m.Int32Constant(0));
  for (size_t i = 0; i < arraysize(kValues); ++i) {
    input = kValues[i];
    CHECK_EQ(0, m.Call());
    double expected = std::ceil(kValues[i]);
    CHECK_EQ(expected, result);
  }
}


TEST(RunFloat64RoundTruncate) {
  double input = -1.0;
  double result = 0.0;
  RawMachineAssemblerTester<int32_t> m;
  if (!m.machine()->HasFloat64Ceil()) return;
  m.StoreToPointer(
      &result, kMachFloat64,
      m.Float64RoundTruncate(m.LoadFromPointer(&input, kMachFloat64)));
  m.Return(m.Int32Constant(0));
  for (size_t i = 0; i < arraysize(kValues); ++i) {
    input = kValues[i];
    CHECK_EQ(0, m.Call());
    double expected = trunc(kValues[i]);
    CHECK_EQ(expected, result);
  }
}


TEST(RunFloat64RoundTiesAway) {
  double input = -1.0;
  double result = 0.0;
  RawMachineAssemblerTester<int32_t> m;
  if (!m.machine()->HasFloat64RoundTiesAway()) return;
  m.StoreToPointer(
      &result, kMachFloat64,
      m.Float64RoundTiesAway(m.LoadFromPointer(&input, kMachFloat64)));
  m.Return(m.Int32Constant(0));
  for (size_t i = 0; i < arraysize(kValues); ++i) {
    input = kValues[i];
    CHECK_EQ(0, m.Call());
    double expected = round(kValues[i]);
    CHECK_EQ(expected, result);
  }
}
#endif  // V8_TURBOFAN_TARGET
test/mjsunit/asm/math-ceil.js (new file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

function Module(stdlib) {
  "use asm";

  var ceil = stdlib.Math.ceil;

  // f: double -> float
  function f(a) {
    a = +a;
    return ceil(a);
  }

  return { f: f };
}

var f = Module({ Math: Math }).f;

assertTrue(isNaN(f(NaN)));
assertTrue(isNaN(f(undefined)));
assertTrue(isNaN(f(function() {})));

assertEquals(0, f(0));
assertEquals(+0, f(+0));
assertEquals(-0, f(-0));
assertEquals(1, f(0.49999));
assertEquals(1, f(0.6));
assertEquals(1, f(0.5));
assertEquals(-0, f(-0.1));
assertEquals(-0, f(-0.5));
assertEquals(-0, f(-0.6));
assertEquals(-1, f(-1.6));
assertEquals(-0, f(-0.50001));

assertEquals("Infinity", String(f(Infinity)));
assertEquals("-Infinity", String(f(-Infinity)));
test/mjsunit/asm/math-floor.js (new file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

function Module(stdlib) {
  "use asm";

  var floor = stdlib.Math.floor;

  // f: double -> float
  function f(a) {
    a = +a;
    return floor(a);
  }

  return { f: f };
}

var f = Module({ Math: Math }).f;

assertTrue(isNaN(f(NaN)));
assertTrue(isNaN(f(undefined)));
assertTrue(isNaN(f(function() {})));

assertEquals(0, f(0));
assertEquals(+0, f(+0));
assertEquals(-0, f(-0));
assertEquals(0, f(0.49999));
assertEquals(+0, f(0.6));
assertEquals(+0, f(0.5));
assertEquals(-1, f(-0.1));
assertEquals(-1, f(-0.5));
assertEquals(-1, f(-0.6));
assertEquals(-2, f(-1.6));
assertEquals(-1, f(-0.50001));

assertEquals("Infinity", String(f(Infinity)));
assertEquals("-Infinity", String(f(-Infinity)));
@@ -21,8 +21,9 @@ class JSBuiltinReducerTest : public TypedGraphTest {
  JSBuiltinReducerTest() : javascript_(zone()) {}

 protected:
  Reduction Reduce(Node* node) {
    MachineOperatorBuilder machine;
  Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
                                   MachineOperatorBuilder::Flag::kNoFlags) {
    MachineOperatorBuilder machine(kMachPtr, flags);
    JSGraph jsgraph(graph(), common(), javascript(), &machine);
    JSBuiltinReducer reducer(&jsgraph);
    return reducer.Reduce(node);

@@ -237,6 +238,79 @@ TEST_F(JSBuiltinReducerTest, MathFround) {
  }
}


// -----------------------------------------------------------------------------
// Math.floor


TEST_F(JSBuiltinReducerTest, MathFloorAvailable) {
  Handle<JSFunction> f = MathFunction("floor");

  TRACED_FOREACH(Type*, t0, kNumberTypes) {
    Node* p0 = Parameter(t0, 0);
    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
    Node* call =
        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
                         fun, UndefinedConstant(), p0);
    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Floor);

    ASSERT_TRUE(r.Changed());
    EXPECT_THAT(r.replacement(), IsFloat64Floor(p0));
  }
}


TEST_F(JSBuiltinReducerTest, MathFloorUnavailable) {
  Handle<JSFunction> f = MathFunction("floor");

  TRACED_FOREACH(Type*, t0, kNumberTypes) {
    Node* p0 = Parameter(t0, 0);
    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
    Node* call =
        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
                         fun, UndefinedConstant(), p0);
    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);

    ASSERT_FALSE(r.Changed());
  }
}


// -----------------------------------------------------------------------------
// Math.ceil


TEST_F(JSBuiltinReducerTest, MathCeilAvailable) {
  Handle<JSFunction> f = MathFunction("ceil");

  TRACED_FOREACH(Type*, t0, kNumberTypes) {
    Node* p0 = Parameter(t0, 0);
    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
    Node* call =
        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
                         fun, UndefinedConstant(), p0);
    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Ceil);

    ASSERT_TRUE(r.Changed());
    EXPECT_THAT(r.replacement(), IsFloat64Ceil(p0));
  }
}


TEST_F(JSBuiltinReducerTest, MathCeilUnavailable) {
  Handle<JSFunction> f = MathFunction("ceil");

  TRACED_FOREACH(Type*, t0, kNumberTypes) {
    Node* p0 = Parameter(t0, 0);
    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
    Node* call =
        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
                         fun, UndefinedConstant(), p0);
    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);

    ASSERT_FALSE(r.Changed());
  }
}
}  // namespace compiler
}  // namespace internal
}  // namespace v8
@@ -949,6 +949,10 @@ IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
IS_UNOP_MATCHER(Float64Sqrt)
IS_UNOP_MATCHER(Float64Floor)
IS_UNOP_MATCHER(Float64Ceil)
IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
#undef IS_UNOP_MATCHER

}  // namespace compiler
@@ -149,6 +149,10 @@ Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);

}  // namespace compiler
}  // namespace internal