[turbofan] Add support for reinterpreting integers as floating point and vice versa.

R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/1356913002

Cr-Commit-Position: refs/heads/master@{#30849}
This commit is contained in:
titzer 2015-09-21 07:00:51 -07:00 committed by Commit bot
parent efc3fa02b0
commit c610a22231
26 changed files with 349 additions and 8 deletions

View File

@@ -925,6 +925,19 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
// Lowers BitcastFloat32ToInt32 on ARM: reinterpret the bits of a float32
// as an int32 (no numeric conversion). Per the opcode name,
// kArmVmovLowU32F64 copies the low 32 bits of the FP register into a core
// register.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kArmVmovLowU32F64, node);
}
// Lowers BitcastInt32ToFloat32 on ARM: reinterpret the bits of an int32 as
// a float32 (no numeric conversion). The core-register input is moved into
// the low word of the destination FP register; the inline immediate 0 is
// the extra operand kArmVmovLowF64U32 expects.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  ArmOperandGenerator g(this);
  auto result = g.DefineAsRegister(node);
  auto lane = ImmediateOperand(ImmediateOperand::INLINE, 0);
  auto value = g.UseRegister(node->InputAt(0));
  Emit(kArmVmovLowF64U32, result, lane, value);
}
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);

View File

@@ -886,10 +886,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Fmov(i.OutputFloat64Register(), tmp);
break;
}
case kArm64Float64MoveU64: {
case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
}
case kArm64U64MoveFloat64:
__ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;

View File

@@ -115,6 +115,7 @@ namespace compiler {
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
V(Arm64U64MoveFloat64) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \

View File

@@ -1276,6 +1276,26 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
// ARM64 lowerings for the bit-preserving reinterpret casts.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
// Extracts the low 32 bits of the FP register holding the float32.
VisitRR(this, kArm64Float64ExtractLowWord32, node);
}
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
// kArm64U64MoveFloat64 -> Fmov from a D register to an X register
// (see the code generator hunk above).
VisitRR(this, kArm64U64MoveFloat64, node);
}
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
// NOTE(review): reuses the 64-bit integer->FP move (Fmov X -> D) for a
// 32-bit bitcast; assumes the upper bits of the input register are
// irrelevant to the float32 result -- confirm.
VisitRR(this, kArm64Float64MoveU64, node);
}
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
// kArm64Float64MoveU64 -> Fmov from an X register to a D register.
VisitRR(this, kArm64Float64MoveU64, node);
}
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kArm64Float32Add, node);
}

View File

@@ -840,6 +840,20 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movss(operand, i.InputDoubleRegister(index));
}
break;
case kIA32BitcastFI:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kIA32BitcastIF:
if (instr->InputAt(0)->IsRegister()) {
__ movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
__ movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kIA32Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation

View File

@@ -89,6 +89,8 @@ namespace compiler {
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
V(IA32BitcastFI) \
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32Poke) \

View File

@@ -687,6 +687,18 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
// IA32 lowerings for the bit-preserving reinterpret casts. The code
// generator (kIA32BitcastFI/IF above) emits movd between GPR and XMM, or a
// plain mov/movss when the operand lives in a stack slot; g.Use leaves the
// input's location to the register allocator accordingly.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
IA32OperandGenerator g(this);
Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
IA32OperandGenerator g(this);
Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
}

View File

@@ -699,6 +699,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
case IrOpcode::kBitcastFloat32ToInt32:
return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
case IrOpcode::kBitcastFloat64ToInt64:
return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
case IrOpcode::kBitcastInt32ToFloat32:
return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
case IrOpcode::kBitcastInt64ToFloat64:
return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
case IrOpcode::kFloat32Add:
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
@@ -904,6 +912,16 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
// The 64-bit bitcasts have no lowering on 32-bit targets (this section is
// guarded by V8_TARGET_ARCH_32_BIT below); reaching these stubs aborts.
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
#endif // V8_TARGET_ARCH_32_BIT

View File

@@ -136,6 +136,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \

View File

@@ -195,6 +195,13 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
// These operators reinterpret the bits of a floating point number as an
// integer and vice versa.
const Operator* BitcastFloat32ToInt32();
const Operator* BitcastFloat64ToInt64();
const Operator* BitcastInt32ToFloat32();
const Operator* BitcastInt64ToFloat64();
// Floating point operators always operate with IEEE 754 round-to-nearest
// (single-precision).
const Operator* Float32Add();

View File

@@ -402,6 +402,19 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
// MIPS lowerings for the bit-preserving reinterpret casts.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
// Reads the low word of the FP register holding the float32 value.
VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
MipsOperandGenerator g(this);
// Inserts the int32 input into the low word of the destination FP
// register; the inline immediate 0 is the extra operand the insert-low
// opcode takes. NOTE(review): the destination's high word is not written
// here -- confirm only the float32 (low word) part is observed downstream.
Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
ImmediateOperand(ImmediateOperand::INLINE, 0),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMipsAddS, node);
}

View File

@@ -799,6 +799,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
case kMips64BitcastDL:
__ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kMips64BitcastLD:
__ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
break;
case kMips64Float64ExtractLowWord32:
__ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
break;

View File

@@ -88,6 +88,8 @@ namespace compiler {
V(Mips64Swc1) \
V(Mips64Ldc1) \
V(Mips64Sdc1) \
V(Mips64BitcastDL) \
V(Mips64BitcastLD) \
V(Mips64Float64ExtractLowWord32) \
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \

View File

@@ -578,6 +578,29 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
// MIPS64 lowerings for the bit-preserving reinterpret casts.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
// kMips64BitcastDL -> dmfc1 (64-bit move, FP register to GPR); see the
// code generator hunk above.
VisitRR(this, kMips64BitcastDL, node);
}
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
Mips64OperandGenerator g(this);
// Inserts the int32 input into the low word of the destination FP
// register; the inline immediate 0 is the extra operand the insert-low
// opcode takes. NOTE(review): the destination's high word is not written
// here -- confirm only the float32 part is observed downstream.
Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
ImmediateOperand(ImmediateOperand::INLINE, 0),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
// kMips64BitcastLD -> dmtc1 (64-bit move, GPR to FP register).
VisitRR(this, kMips64BitcastLD, node);
}
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMips64AddS, node);
}
@@ -1331,16 +1354,12 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
// The rendered hunk collapsed a diff: the manual Emit(...) lines are the
// removed (old) bodies and the VisitRR(...) calls are their replacements.
// As rendered, both appeared in sequence, so each function would select the
// instruction twice. Keep only the replacement VisitRR form, which is
// behaviorally identical to the old Emit (see VisitRR usage elsewhere in
// this file).
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  // Move the low 32 bits of the FP register into a general register.
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  // Move the high 32 bits of the FP register into a general register.
  VisitRR(this, kMips64Float64ExtractHighWord32, node);
}

View File

@@ -261,6 +261,10 @@
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
V(BitcastFloat32ToInt32) \
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
V(BitcastInt64ToFloat64) \
V(Float32Add) \
V(Float32Sub) \
V(Float32Mul) \

View File

@@ -1076,6 +1076,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_BitcastDL:
__ mffprd(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kPPC_BitcastLD:
__ mtfprd(i.OutputDoubleRegister(), i.InputRegister(0));
break;
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
break;

View File

@@ -86,6 +86,8 @@ namespace compiler {
V(PPC_DoubleInsertLowWord32) \
V(PPC_DoubleInsertHighWord32) \
V(PPC_DoubleConstruct) \
V(PPC_BitcastDL) \
V(PPC_BitcastLD) \
V(PPC_LoadWordS8) \
V(PPC_LoadWordU8) \
V(PPC_LoadWordS16) \

View File

@@ -910,6 +910,33 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
#endif
// PPC lowerings for the bit-preserving reinterpret casts.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
// Reads the low word of the FP register holding the float32 value.
VisitRR(this, kPPC_DoubleExtractLowWord32, node);
}
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
// kPPC_BitcastDL -> mffprd (FP register doubleword to GPR); the direct
// FP<->GPR doubleword moves are only emitted on PPC64 (see the code
// generator hunk above).
VisitRR(this, kPPC_BitcastDL, node);
}
#endif
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
PPCOperandGenerator g(this);
// Inserts the int32 input into the low word of the destination FP
// register; the inline immediate 0 is the extra operand the insert-low
// opcode takes.
Emit(kPPC_DoubleInsertLowWord32, g.DefineAsRegister(node),
ImmediateOperand(ImmediateOperand::INLINE, 0),
g.UseRegister(node->InputAt(0)));
}
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
// kPPC_BitcastLD -> mtfprd (GPR doubleword to FP register).
VisitRR(this, kPPC_BitcastLD, node);
}
#endif
void InstructionSelector::VisitFloat32Add(Node* node) {
// NOTE(review): uses the double-precision add opcode for a float32 add --
// presumably singles are carried in double registers on this port; confirm
// that rounding matches single precision.
VisitRRR(this, kPPC_AddDouble, node);
}

View File

@@ -433,6 +433,18 @@ class RawMachineAssembler {
Node* TruncateInt64ToInt32(Node* a) {
return NewNode(machine()->TruncateInt64ToInt32(), a);
}
// Graph-builder helpers for the bit-preserving reinterpret casts between
// integer and floating-point machine values (no numeric conversion).
Node* BitcastFloat32ToInt32(Node* a) {
return NewNode(machine()->BitcastFloat32ToInt32(), a);
}
Node* BitcastFloat64ToInt64(Node* a) {
return NewNode(machine()->BitcastFloat64ToInt64(), a);
}
Node* BitcastInt32ToFloat32(Node* a) {
return NewNode(machine()->BitcastInt32ToFloat32(), a);
}
Node* BitcastInt64ToFloat64(Node* a) {
return NewNode(machine()->BitcastInt64ToFloat64(), a);
}
Node* Float64RoundDown(Node* a) {
return NewNode(machine()->Float64RoundDown().op(), a);
}

View File

@@ -1865,6 +1865,26 @@ Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
}
// Typing for the bitcast operators. NOTE(review): Type::Number() is the
// coarsest numeric type; these machine-level bitcasts sit below the JS
// type system, so presumably nothing more precise is tracked here --
// confirm before tightening (e.g. to an integral range type).
Type* Typer::Visitor::TypeBitcastFloat32ToInt32(Node* node) {
return Type::Number();
}
Type* Typer::Visitor::TypeBitcastFloat64ToInt64(Node* node) {
return Type::Number();
}
Type* Typer::Visitor::TypeBitcastInt32ToFloat32(Node* node) {
return Type::Number();
}
Type* Typer::Visitor::TypeBitcastInt64ToFloat64(Node* node) {
return Type::Number();
}
Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }

View File

@@ -870,6 +870,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kTruncateInt64ToInt32:
case IrOpcode::kTruncateFloat64ToFloat32:
case IrOpcode::kTruncateFloat64ToInt32:
case IrOpcode::kBitcastFloat32ToInt32:
case IrOpcode::kBitcastFloat64ToInt64:
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:

View File

@@ -1171,6 +1171,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
case kX64BitcastFI:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastDL:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movq(i.OutputRegister(), i.InputOperand(0));
} else {
__ movq(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastIF:
if (instr->InputAt(0)->IsRegister()) {
__ movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
__ movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64BitcastLD:
if (instr->InputAt(0)->IsRegister()) {
__ movq(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
__ movsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64Lea32: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation

View File

@@ -109,6 +109,10 @@ namespace compiler {
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
V(X64BitcastFI) \
V(X64BitcastDL) \
V(X64BitcastIF) \
V(X64BitcastLD) \
V(X64Lea32) \
V(X64Lea) \
V(X64Dec32) \

View File

@@ -906,6 +906,30 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
// X64 lowerings for the bit-preserving reinterpret casts. The code
// generator (kX64Bitcast* above) emits movd/movq between GPR and XMM, or a
// plain load when the input lives in a stack slot; g.Use leaves the
// input's location to the register allocator accordingly.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
X64OperandGenerator g(this);
Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
X64OperandGenerator g(this);
Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
X64OperandGenerator g(this);
Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
X64OperandGenerator g(this);
Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}

View File

@@ -664,6 +664,16 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
// The x87 port has no lowering for the reinterpret casts yet; reaching
// these stubs aborts.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitFloat32Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));

View File

@@ -5322,4 +5322,69 @@ TEST(RunCheckedStoreInt64) {
CHECK_EQ(write, buffer[0]);
CHECK_EQ(write, buffer[1]);
}
// Checks BitcastInt64ToFloat64: the double stored through &output must
// carry exactly the bit pattern of the int64 input.
TEST(RunBitcastInt64ToFloat64) {
// TODO(titzer): run int64 tests on all platforms when supported.
int64_t input = 1;
double output = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&output, kMachFloat64,
m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, kMachInt64)));
m.Return(m.Int32Constant(11));
FOR_INT32_INPUTS(i) {
// Scale the 32-bit test inputs so 64-bit patterns are exercised.
input = static_cast<int64_t>(*i) * 14444;
CHECK_EQ(11, m.Call());
double expected = bit_cast<double>(input);
// Compare bit patterns, not doubles, so NaN payloads and -0.0 are checked
// exactly.
CHECK_EQ(bit_cast<int64_t>(expected), bit_cast<int64_t>(output));
}
}
// Checks BitcastFloat64ToInt64: the int64 stored through &output must be
// exactly the bit pattern of the double input.
TEST(RunBitcastFloat64ToInt64) {
  // TODO(titzer): run int64 tests on all platforms when supported.
  double input = 0;
  int64_t output = 0;
  RawMachineAssemblerTester<int32_t> m;
  m.StoreToPointer(
      &output, kMachInt64,
      m.BitcastFloat64ToInt64(m.LoadFromPointer(&input, kMachFloat64)));
  m.Return(m.Int32Constant(11));
  FOR_FLOAT64_INPUTS(i) {
    input = *i;
    CHECK_EQ(11, m.Call());
    // Keep the expected value as int64_t. The previous
    //   double expected = bit_cast<int64_t>(input);
    // converted the bit pattern to a double, which rounds large magnitudes
    // and collapses distinct NaN payloads, so the comparison below could
    // pass even when the generated code produced the wrong bits.
    int64_t expected = bit_cast<int64_t>(input);
    CHECK_EQ(expected, output);
  }
}
#endif
// Checks BitcastFloat32ToInt32: the returned int32 must be exactly the bit
// pattern of the float input (comparing as int32 keeps NaN inputs exact).
TEST(RunBitcastFloat32ToInt32) {
float input = 32.25;
RawMachineAssemblerTester<int32_t> m;
m.Return(m.BitcastFloat32ToInt32(m.LoadFromPointer(&input, kMachFloat32)));
FOR_FLOAT32_INPUTS(i) {
input = *i;
int32_t expected = bit_cast<int32_t>(input);
CHECK_EQ(expected, m.Call());
}
}
// Checks BitcastInt32ToFloat32: the float stored through &output must
// carry exactly the bit pattern of the int32 input.
TEST(RunBitcastInt32ToFloat32) {
int32_t input = 1;
float output = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&output, kMachFloat32,
m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, kMachInt32)));
m.Return(m.Int32Constant(11));
FOR_INT32_INPUTS(i) {
input = *i;
CHECK_EQ(11, m.Call());
float expected = bit_cast<float>(input);
// Compare bit patterns, not floats, so NaN payloads and -0.0f are checked
// exactly.
CHECK_EQ(bit_cast<int32_t>(expected), bit_cast<int32_t>(output));
}
}