[turbofan] Add backend support for float32.

LOG=n
BUG=v8:3589
TEST=compiler-unittests,cctest
R=titzer@chromium.org

Review URL: https://codereview.chromium.org/596703004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24179 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: bmeurer@chromium.org
Date:   2014-09-24 11:08:35 +00:00
Commit: 50c466e883 (parent 82e5de6bdd)

31 changed files with 424 additions and 141 deletions
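
In short: this CL introduces two machine operators, ChangeFloat32ToFloat64 and
TruncateFloat64ToFloat32, wires them through the instruction selector and code
generator of each backend, and lets float32 loads and stores move the value
directly instead of round-tripping through float64. A minimal sketch of how the
new RawMachineAssembler helpers are driven, modeled on the cctest added near
the end of this CL (the tester scaffolding comes from the existing cctest
harness):

  // Widen a float32 in memory to a float64 via the new machine operator.
  float input = 0.25f;  // exactly representable, so no rounding concerns
  double output = 0.0;
  RawMachineAssemblerTester<int32_t> m;
  m.StoreToPointer(&output, kMachFloat64,
                   m.ChangeFloat32ToFloat64(m.LoadFromPointer(&input, kMachFloat32)));
  m.Return(m.Int32Constant(0));
  CHECK_EQ(0, m.Call());  // afterwards output == 0.25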

@@ -70,6 +70,12 @@ int DwVfpRegister::NumAllocatableRegisters() {
}
// static
int DwVfpRegister::NumAllocatableAliasedRegisters() {
return LowDwVfpRegister::kMaxNumLowRegisters - kNumReservedRegisters;
}
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
DCHECK(!reg.is(kDoubleRegZero));
DCHECK(!reg.is(kScratchDoubleReg));

@@ -218,6 +218,11 @@ struct DwVfpRegister {
inline static int NumReservedRegisters();
inline static int NumAllocatableRegisters();
// TODO(turbofan): This is a temporary work-around required because our
// register allocator does not yet support the aliasing of single/double
// registers on ARM.
inline static int NumAllocatableAliasedRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg);
static const char* AllocationIndexToString(int index);
inline static DwVfpRegister FromAllocationIndex(int index);

@@ -276,6 +276,11 @@ struct FPRegister : public CPURegister {
(kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
// TODO(turbofan): Proper float32 support.
static int NumAllocatableAliasedRegisters() {
return NumAllocatableRegisters();
}
// Return true if the register is one that crankshaft can allocate.
bool IsAllocatable() const {
return (Bit() & kAllocatableFPRegisters) != 0;

@@ -22,11 +22,35 @@ namespace compiler {
// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter : public InstructionOperandConverter {
class ArmOperandConverter FINAL : public InstructionOperandConverter {
public:
ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
SwVfpRegister OutputFloat32Register(int index = 0) {
return ToFloat32Register(instr_->OutputAt(index));
}
SwVfpRegister InputFloat32Register(int index) {
return ToFloat32Register(instr_->InputAt(index));
}
SwVfpRegister ToFloat32Register(InstructionOperand* op) {
return ToFloat64Register(op).low();
}
LowDwVfpRegister OutputFloat64Register(int index = 0) {
return ToFloat64Register(instr_->OutputAt(index));
}
LowDwVfpRegister InputFloat64Register(int index) {
return ToFloat64Register(instr_->InputAt(index));
}
LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
}
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
@@ -178,7 +202,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
__ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmAdd:
@@ -272,38 +296,38 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVcmpF64:
__ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ VFPCompareAndSetFlags(i.InputFloat64Register(0),
i.InputFloat64Register(1));
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF64:
__ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF64:
__ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF64:
__ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF64:
__ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
__ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
i.InputFloat64Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF64:
__ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
i.InputDoubleRegister(2));
__ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
i.InputFloat64Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF64:
__ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmodF64: {
@@ -311,45 +335,55 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
__ MovToFloatParameters(i.InputFloat64Register(0),
i.InputFloat64Register(1));
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result into the double result register.
__ MovFromFloatResult(i.OutputDoubleRegister());
__ MovFromFloatResult(i.OutputFloat64Register());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVnegF64:
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVsqrtF64:
__ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
__ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVnegF64:
__ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVcvtF32F64: {
__ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64F32: {
__ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
__ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
__ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtS32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -392,30 +426,26 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVldr32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vldr(scratch, i.InputOffset());
__ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
case kArmVldrF32: {
__ vldr(i.OutputFloat32Register(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVstr32: {
case kArmVstrF32: {
int index = 0;
SwVfpRegister scratch = kScratchDoubleReg.low();
MemOperand operand = i.InputOffset(&index);
__ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
__ vstr(scratch, operand);
__ vstr(i.InputFloat32Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVldr64:
__ vldr(i.OutputDoubleRegister(), i.InputOffset());
case kArmVldrF64:
__ vldr(i.OutputFloat64Register(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVstr64: {
case kArmVstrF64: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ vstr(i.InputDoubleRegister(index), operand);
__ vstr(i.InputFloat64Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
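
The converter above leans on ARM's VFP register aliasing: a float32 value
allocated to a double register lives in that register's low single-precision
half, which is why ToFloat32Register is just ToFloat64Register(op).low(). A
hypothetical helper spelling out the code mapping it relies on (illustration,
not part of the CL):

  #include <cassert>

  // On ARM VFP, d<i> for i in 0..15 overlaps s<2i> (low half) and s<2i+1>
  // (high half); d16-d31 have no single-precision aliases at all.
  int LowSRegisterCodeFor(int d_code) {
    assert(0 <= d_code && d_code < 16);  // only the low bank aliases S regs
    return 2 * d_code;                   // e.g. d3 -> s6
  }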

@@ -42,14 +42,16 @@ namespace compiler {
V(ArmVmodF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
V(ArmVldr32) \
V(ArmVstr32) \
V(ArmVldr64) \
V(ArmVstr64) \
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
V(ArmVstrF64) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \

@@ -1247,15 +1247,15 @@ static const MemoryAccess kMemoryAccesses[] = {
-80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
{kMachFloat32,
kArmVldr32,
kArmVstr32,
kArmVldrF32,
kArmVstrF32,
&InstructionSelectorTest::Stream::IsDouble,
{-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
-84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
{kMachFloat64,
kArmVldr64,
kArmVstr64,
kArmVldrF64,
kArmVstrF64,
&InstructionSelectorTest::Stream::IsDouble,
{-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
-96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
@@ -1338,6 +1338,32 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
::testing::ValuesIn(kMemoryAccesses));
// -----------------------------------------------------------------------------
// Conversions.
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
StreamBuilder m(this, kMachFloat64, kMachFloat32);
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVcvtF64F32, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
StreamBuilder m(this, kMachFloat32, kMachFloat64);
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVcvtF32F64, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
// -----------------------------------------------------------------------------
// Miscellaneous.
@@ -1895,6 +1921,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
}
}
}
} // namespace compiler
} // namespace internal
} // namespace v8

@@ -11,7 +11,7 @@ namespace internal {
namespace compiler {
// Adds Arm-specific methods for generating InstructionOperands.
class ArmOperandGenerator FINAL : public OperandGenerator {
class ArmOperandGenerator : public OperandGenerator {
public:
explicit ArmOperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
@@ -49,10 +49,10 @@ class ArmOperandGenerator FINAL : public OperandGenerator {
case kArmRsb:
return ImmediateFitsAddrMode1Instruction(value);
case kArmVldr32:
case kArmVstr32:
case kArmVldr64:
case kArmVstr64:
case kArmVldrF32:
case kArmVstrF32:
case kArmVldrF64:
case kArmVstrF64:
return value >= -1020 && value <= 1020 && (value % 4) == 0;
case kArmLdrb:
@@ -91,6 +91,8 @@ class ArmOperandGenerator FINAL : public OperandGenerator {
case kArmVmodF64:
case kArmVnegF64:
case kArmVsqrtF64:
case kArmVcvtF32F64:
case kArmVcvtF64F32:
case kArmVcvtF64S32:
case kArmVcvtF64U32:
case kArmVcvtS32F64:
@@ -291,10 +293,10 @@ void InstructionSelector::VisitLoad(Node* node) {
ArchOpcode opcode;
switch (rep) {
case kRepFloat32:
opcode = kArmVldr32;
opcode = kArmVldrF32;
break;
case kRepFloat64:
opcode = kArmVldr64;
opcode = kArmVldrF64;
break;
case kRepBit: // Fall through.
case kRepWord8:
@@ -346,10 +348,10 @@ void InstructionSelector::VisitStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case kRepFloat32:
opcode = kArmVstr32;
opcode = kArmVstrF32;
break;
case kRepFloat64:
opcode = kArmVstr64;
opcode = kArmVstrF64;
break;
case kRepBit: // Fall through.
case kRepWord8:
@@ -683,6 +685,13 @@ void InstructionSelector::VisitInt32UMod(Node* node) {
}
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
@@ -711,6 +720,13 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
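
For context on the immediate check kept for the renamed opcodes above
(value >= -1020 && value <= 1020 && (value % 4) == 0): VFP vldr/vstr encode
the offset as a sign bit plus an 8-bit word count, so reachable offsets are
word-aligned and bounded by 255 * 4 = 1020. Restated as a standalone predicate
(hypothetical helper name):

  // ARM addressing mode 5: offset = +/- imm8 * 4, hence |offset| <= 1020.
  bool FitsVfpLoadStoreOffset(int value) {
    return value >= -1020 && value <= 1020 && (value % 4) == 0;
  }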

@@ -376,6 +376,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Float64Sqrt:
__ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Float32ToFloat64:
__ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
break;
case kArm64Float64ToFloat32:
__ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
break;
case kArm64Float64ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
@@ -418,20 +424,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Str:
__ Str(i.InputRegister(2), i.MemoryOperand());
break;
case kArm64LdrS: {
UseScratchRegisterScope scope(masm());
FPRegister scratch = scope.AcquireS();
__ Ldr(scratch, i.MemoryOperand());
__ Fcvt(i.OutputDoubleRegister(), scratch);
case kArm64LdrS:
__ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
}
case kArm64StrS: {
UseScratchRegisterScope scope(masm());
FPRegister scratch = scope.AcquireS();
__ Fcvt(scratch, i.InputDoubleRegister(2));
__ Str(scratch, i.MemoryOperand());
case kArm64StrS:
__ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
break;
}
case kArm64LdrD:
__ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
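
The simplification above relies on the AArch64 register model: s<i> is simply
the low 32 bits of d<i>, so i.OutputDoubleRegister().S() renames the register
rather than converting anything, and a float32 load or store moves exactly
four bytes. (Architecture fact, sketched below; not code from this CL.)

  // AArch64 views of the same physical register v0:
  //   s0 = v0[31:0], d0 = v0[63:0]
  // Ldr(s0, mem) fills the low 32 bits of d0; Str(s0, mem) writes them back.
  // The old code loaded into a scratch S register and Fcvt'ed to double.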

@@ -63,6 +63,8 @@ namespace compiler {
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Sqrt) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \

@@ -197,6 +197,12 @@ std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
// ARM64 type conversion instructions.
static const Conversion kConversionInstructions[] = {
{{&RawMachineAssembler::ChangeFloat32ToFloat64, "ChangeFloat32ToFloat64",
kArm64Float32ToFloat64, kMachFloat64},
kMachFloat32},
{{&RawMachineAssembler::TruncateFloat64ToFloat32,
"TruncateFloat64ToFloat32", kArm64Float64ToFloat32, kMachFloat32},
kMachFloat64},
{{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
kArm64Sxtw, kMachInt64},
kMachInt32},

@@ -432,6 +432,13 @@ void InstructionSelector::VisitInt64UMod(Node* node) {
}
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
@@ -472,6 +479,13 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));

@@ -288,6 +288,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSECvtss2sd:
__ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kSSECvtsd2ss:
__ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
@@ -363,12 +369,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kIA32Movss:
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
__ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
__ movss(operand, xmm0);
__ movss(operand, i.InputDoubleRegister(index));
}
break;
case kIA32Push:

@@ -35,6 +35,8 @@ namespace compiler {
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \

@@ -74,6 +74,32 @@ TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
}
// -----------------------------------------------------------------------------
// Conversions.
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
StreamBuilder m(this, kMachFloat64, kMachFloat32);
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
StreamBuilder m(this, kMachFloat32, kMachFloat64);
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
// -----------------------------------------------------------------------------
// Loads and stores

@@ -354,6 +354,13 @@ void InstructionSelector::VisitInt32UMod(Node* node) {
}
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
IA32OperandGenerator g(this);
// TODO(turbofan): IA32 SSE conversions should take an operand.
Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -380,6 +387,13 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
IA32OperandGenerator g(this);
// TODO(turbofan): IA32 SSE conversions should take an operand.
Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64Add(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
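
As a reminder of what the selected SSE instructions compute: cvtss2sd widens
exactly, while cvtsd2ss rounds to the nearest single-precision value. A small
standalone check (plain C++ casts model the same conversions):

  #include <cassert>

  int main() {
    double d = 0.1;                        // not representable in float32
    float f = static_cast<float>(d);       // kSSECvtsd2ss: rounds
    double back = static_cast<double>(f);  // kSSECvtss2sd: exact widening
    assert(back != d);                     // the narrowing lost precision
    return 0;
  }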

@@ -570,6 +570,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
case IrOpcode::kChangeFloat32ToFloat64:
return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
@@ -582,6 +584,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
return VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
case IrOpcode::kTruncateFloat64ToInt32:
return VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:

@@ -559,6 +559,14 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
graph()->NewNode(simplified()->LoadElement(element_access), elements,
key, jsgraph()->Uint32Constant(length),
NodeProperties::GetEffectInput(node));
// TODO(titzer): Remove this hack once float32 is properly supported in
// simplified lowering.
if (element_access.machine_type == kRepFloat32) {
Node* change =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value);
NodeProperties::ReplaceWithValue(node, change, value);
return Changed(value);
}
return ReplaceEagerly(node, value);
}
return NoChange();
@@ -602,6 +610,11 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
NodeProperties::GetControlInput(node));
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
// TODO(titzer): Remove this hack once float32 is properly supported in
// simplified lowering.
if (element_access.machine_type == kRepFloat32) {
value = graph()->NewNode(machine()->TruncateFloat64ToFloat32(), value);
}
Node* store =
graph()->NewNode(simplified()->StoreElement(element_access), elements,
key, jsgraph()->Uint32Constant(length), value,
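
Taken together, the two TODO hacks keep float32 invisible to the rest of the
pipeline until simplified lowering supports it; what they build for a
kRepFloat32 element access, sketched:

  // load:  LoadElement[kRepFloat32] -> ChangeFloat32ToFloat64 -> value uses
  // store: value -> TruncateFloat64ToFloat32 -> StoreElement[kRepFloat32]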

@@ -344,6 +344,9 @@ class RawMachineAssembler : public GraphBuilder {
}
// Conversions.
Node* ChangeFloat32ToFloat64(Node* a) {
return NewNode(machine()->ChangeFloat32ToFloat64(), a);
}
Node* ChangeInt32ToFloat64(Node* a) {
return NewNode(machine()->ChangeInt32ToFloat64(), a);
}
@@ -362,6 +365,9 @@ class RawMachineAssembler : public GraphBuilder {
Node* ChangeUint32ToUint64(Node* a) {
return NewNode(machine()->ChangeUint32ToUint64(), a);
}
Node* TruncateFloat64ToFloat32(Node* a) {
return NewNode(machine()->TruncateFloat64ToFloat32(), a);
}
Node* TruncateFloat64ToInt32(Node* a) {
return NewNode(machine()->TruncateFloat64ToInt32(), a);
}

@@ -626,7 +626,7 @@ LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
DCHECK(index < DoubleRegister::NumAllocatableRegisters());
DCHECK(index < DoubleRegister::NumAllocatableAliasedRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
@@ -1016,7 +1016,8 @@ void RegisterAllocator::ProcessInstructions(BasicBlock* block,
}
if (instr->ClobbersDoubleRegisters()) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters();
++i) {
if (!IsOutputDoubleRegisterOf(instr, i)) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
@@ -1110,7 +1111,7 @@ bool RegisterAllocator::Allocate() {
assigned_registers_ = new (code_zone())
BitVector(Register::NumAllocatableRegisters(), code_zone());
assigned_double_registers_ = new (code_zone())
BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone());
BitVector(DoubleRegister::NumAllocatableAliasedRegisters(), code_zone());
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1514,7 +1515,7 @@ void RegisterAllocator::AllocateGeneralRegisters() {
void RegisterAllocator::AllocateDoubleRegisters() {
RegisterAllocatorPhase phase("L_Allocate double registers", this);
num_registers_ = DoubleRegister::NumAllocatableRegisters();
num_registers_ = DoubleRegister::NumAllocatableAliasedRegisters();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1538,7 +1539,7 @@ void RegisterAllocator::AllocateRegisters() {
DCHECK(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
AddToInactive(current);

@@ -51,10 +51,10 @@ class RepresentationChanger {
}
if (use_type & kRepTagged) {
return GetTaggedRepresentationFor(node, output_type);
} else if (use_type & kRepFloat32) {
return GetFloat32RepresentationFor(node, output_type);
} else if (use_type & kRepFloat64) {
return GetFloat64RepresentationFor(node, output_type);
} else if (use_type & kRepFloat32) {
return TypeError(node, output_type, use_type); // TODO(titzer): handle
} else if (use_type & kRepBit) {
return GetBitRepresentationFor(node, output_type);
} else if (use_type & rWord) {
@@ -103,6 +103,10 @@ class RepresentationChanger {
} else {
return TypeError(node, output_type, kRepTagged);
}
} else if (output_type & kRepFloat32) {
node = jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(),
node);
op = simplified()->ChangeFloat64ToTagged();
} else if (output_type & kRepFloat64) {
op = simplified()->ChangeFloat64ToTagged();
} else {
@@ -111,6 +115,19 @@ class RepresentationChanger {
return jsgraph()->graph()->NewNode(op, node);
}
Node* GetFloat32RepresentationFor(Node* node, MachineTypeUnion output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
// TODO(turbofan): NumberConstant, Int32Constant, and Float64Constant?
case IrOpcode::kFloat32Constant:
return node; // No change necessary.
default:
break;
}
// TODO(turbofan): Select the correct X -> Float32 operator.
return TypeError(node, output_type, kRepFloat32);
}
Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
@@ -141,6 +158,8 @@ class RepresentationChanger {
}
} else if (output_type & kRepTagged) {
op = simplified()->ChangeTaggedToFloat64();
} else if (output_type & kRepFloat32) {
op = machine()->ChangeFloat32ToFloat64();
} else {
return TypeError(node, output_type, kRepFloat64);
}
@@ -353,8 +372,9 @@ class RepresentationChanger {
SimplifiedOperatorBuilder* simplified() { return simplified_; }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
};
}
}
} // namespace v8::internal::compiler
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_REPRESENTATION_CHANGE_H_
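
The new kRepFloat32 branch in GetTaggedRepresentationFor boxes a float32 by
widening it first; in node-building terms the changer now produces (a sketch
of the two operators involved):

  // value:kRepFloat32
  //   -> ChangeFloat32ToFloat64   (machine operator, inserted eagerly)
  //   -> ChangeFloat64ToTagged    (existing simplified operator)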

@@ -288,6 +288,8 @@ class RepresentationSelector {
MachineTypeUnion rep = 0;
if (use_rep & kRepTagged) {
rep = kRepTagged; // Tagged overrides everything.
} else if (use_rep & kRepFloat32) {
rep = kRepFloat32;
} else if (use_rep & kRepFloat64) {
rep = kRepFloat64;
} else if (use_rep & kRepWord64) {
@@ -349,15 +351,6 @@ class RepresentationSelector {
return changer_->Float64OperatorFor(node->opcode());
}
static MachineType AssumeImplicitFloat32Change(MachineType type) {
// TODO(titzer): Assume loads of float32 change representation to float64.
// Fix this with full support for float32 representations.
if (type & kRepFloat32) {
return static_cast<MachineType>((type & ~kRepFloat32) | kRepFloat64);
}
return type;
}
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, MachineTypeUnion use,
@@ -579,14 +572,14 @@ class RepresentationSelector {
FieldAccess access = FieldAccessOf(node->op());
ProcessInput(node, 0, changer_->TypeForBasePointer(access));
ProcessRemainingInputs(node, 1);
SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
SetOutput(node, access.machine_type);
if (lower()) lowering->DoLoadField(node);
break;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
ProcessInput(node, 0, changer_->TypeForBasePointer(access));
ProcessInput(node, 1, AssumeImplicitFloat32Change(access.machine_type));
ProcessInput(node, 1, access.machine_type);
ProcessRemainingInputs(node, 2);
SetOutput(node, 0);
if (lower()) lowering->DoStoreField(node);
@@ -598,7 +591,7 @@ class RepresentationSelector {
ProcessInput(node, 1, kMachInt32); // element index
ProcessInput(node, 2, kMachInt32); // length
ProcessRemainingInputs(node, 3);
SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
SetOutput(node, access.machine_type);
if (lower()) lowering->DoLoadElement(node);
break;
}
@@ -607,7 +600,7 @@ class RepresentationSelector {
ProcessInput(node, 0, changer_->TypeForBasePointer(access));
ProcessInput(node, 1, kMachInt32); // element index
ProcessInput(node, 2, kMachInt32); // length
ProcessInput(node, 3, AssumeImplicitFloat32Change(access.machine_type));
ProcessInput(node, 3, access.machine_type);
ProcessRemainingInputs(node, 4);
SetOutput(node, 0);
if (lower()) lowering->DoStoreElement(node);
@@ -700,11 +693,17 @@ class RepresentationSelector {
case IrOpcode::kChangeUint32ToUint64:
return VisitUnop(node, kTypeUint32 | kRepWord32,
kTypeUint32 | kRepWord64);
case IrOpcode::kTruncateFloat64ToFloat32:
return VisitUnop(node, kTypeNumber | kRepFloat64,
kTypeNumber | kRepFloat32);
case IrOpcode::kTruncateInt64ToInt32:
// TODO(titzer): Is kTypeInt32 correct here?
return VisitUnop(node, kTypeInt32 | kRepWord64,
kTypeInt32 | kRepWord32);
case IrOpcode::kChangeFloat32ToFloat64:
return VisitUnop(node, kTypeNumber | kRepFloat32,
kTypeNumber | kRepFloat64);
case IrOpcode::kChangeInt32ToFloat64:
return VisitUnop(node, kTypeInt32 | kRepWord32,
kTypeInt32 | kRepFloat64);
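
Note the ordering in the first hunk of this file: kRepTagged still overrides
everything, and kRepFloat32 is now tested before kRepFloat64, so float32 wins
when both representations are requested. With AssumeImplicitFloat32Change
gone, the field and element cases simply propagate the access's declared
machine type:

  // MachineTypeUnion is a bit set, so for
  //   use_rep = kRepFloat32 | kRepFloat64
  // the chain of tests above now selects rep = kRepFloat32.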

@@ -456,6 +456,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
case kSSECvtss2sd:
__ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kSSECvtsd2ss:
__ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kSSEFloat64ToInt32: {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
if (input.type == kDoubleRegister) {
@@ -570,12 +576,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64Movss:
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
__ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
__ movss(operand, xmm0);
__ movss(operand, i.InputDoubleRegister(index));
}
break;
case kX64Movsd:

@@ -51,6 +51,8 @@ namespace compiler {
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \

@@ -12,6 +12,17 @@ namespace compiler {
// Conversions.
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
StreamBuilder m(this, kMachFloat64, kMachFloat32);
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
StreamBuilder m(this, kMachInt64, kMachInt32);
m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
@@ -30,6 +41,17 @@ TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
}
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
StreamBuilder m(this, kMachFloat32, kMachFloat64);
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
StreamBuilder m(this, kMachInt32, kMachInt64);
m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));

@@ -478,6 +478,13 @@ void InstructionSelector::VisitInt64UMod(Node* node) {
}
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
X64OperandGenerator g(this);
// TODO(turbofan): X64 SSE conversions should take an operand.
Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -516,6 +523,13 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
X64OperandGenerator g(this);
// TODO(turbofan): X64 SSE conversions should take an operand.
Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));

@@ -148,6 +148,11 @@ struct XMMRegister {
return kMaxNumAllocatableRegisters;
}
// TODO(turbofan): Proper support for float32.
static int NumAllocatableAliasedRegisters() {
return NumAllocatableRegisters();
}
static int ToAllocationIndex(XMMRegister reg) {
DCHECK(reg.code() != 0);
return reg.code() - 1;

@@ -200,6 +200,11 @@ struct XMMRegister {
return kMaxNumAllocatableRegisters;
}
// TODO(turbofan): Proper support for float32.
static int NumAllocatableAliasedRegisters() {
return NumAllocatableRegisters();
}
static int ToAllocationIndex(XMMRegister reg) {
DCHECK(reg.code() != 0);
return reg.code() - 1;

@@ -89,8 +89,8 @@ class RepresentationChangerTester : public HandleAndZoneScope,
// TODO(titzer): add kRepFloat32 when fully supported.
static const MachineType all_reps[] = {kRepBit, kRepWord32, kRepWord64,
kRepFloat64, kRepTagged};
static const MachineType all_reps[] = {kRepBit, kRepWord32, kRepWord64,
kRepFloat32, kRepFloat64, kRepTagged};
// TODO(titzer): lift this to ValueHelper
@@ -295,11 +295,4 @@ TEST(TypeErrors) {
r.CheckTypeError(all_reps[i] | all_reps[j], kRepTagged);
}
}
// TODO(titzer): Float32 representation changes trigger type errors now.
// Enforce current behavior to test all paths through representation changer.
for (size_t i = 0; i < arraysize(all_reps); i++) {
r.CheckTypeError(all_reps[i], kRepFloat32);
r.CheckTypeError(kRepFloat32, all_reps[i]);
}
}

@@ -4242,4 +4242,37 @@ TEST(RunTruncateFloat64ToInt32P) {
}
}
TEST(RunChangeFloat32ToFloat64) {
double actual = 0.0;
float expected = 0.0f;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&actual, kMachFloat64,
m.ChangeFloat32ToFloat64(m.LoadFromPointer(&expected, kMachFloat32)));
m.Return(m.Int32Constant(0));
FOR_FLOAT32_INPUTS(i) {
expected = *i;
CHECK_EQ(0, m.Call());
CHECK_EQ(expected, actual);
}
}
TEST(RunTruncateFloat64ToFloat32) {
float actual = 0.0f;
double input = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&actual, kMachFloat32,
m.TruncateFloat64ToFloat32(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
input = *i;
volatile double expected = DoubleToFloat32(input);
CHECK_EQ(0, m.Call());
CHECK_EQ(expected, actual);
}
}
#endif // V8_TURBOFAN_TARGET

@@ -1523,38 +1523,3 @@ TEST(UpdatePhi) {
RepresentationOf(OpParameter<MachineType>(phi)));
}
}
// TODO(titzer): this tests current behavior of assuming an implicit
// representation change in loading float32s. Fix when float32 is fully
// supported.
TEST(ImplicitFloat32ToFloat64InLoads) {
TestingGraph t(Type::Any());
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), kMachFloat32};
Node* load =
t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
t.Return(load);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
CHECK_EQ(t.p0, load->InputAt(0));
CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
}
TEST(ImplicitFloat64ToFloat32InStores) {
TestingGraph t(Type::Any(), Type::Signed32());
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), kMachFloat32};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
t.Effect(store);
t.Lower();
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p0, store->InputAt(0));
CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
}

@@ -60,6 +60,45 @@ class ValueHelper {
CheckHeapConstant(isolate_->heap()->false_value(), node);
}
static std::vector<float> float32_vector() {
static const float kValues[] = {
-std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
-1.22813e+35f, -1.20555e+35f, -1.34584e+34f,
-1.0079e+32f, -6.49364e+26f, -3.06077e+25f,
-1.46821e+25f, -1.17658e+23f, -1.9617e+22f,
-2.7357e+20f, -1.48708e+13f, -1.89633e+12f,
-4.66622e+11f, -2.22581e+11f, -1.45381e+10f,
-1.3956e+09f, -1.32951e+09f, -1.30721e+09f,
-1.19756e+09f, -9.26822e+08f, -6.35647e+08f,
-4.00037e+08f, -1.81227e+08f, -5.09256e+07f,
-964300.0f, -192446.0f, -28455.0f,
-27194.0f, -26401.0f, -20575.0f,
-17069.0f, -9167.0f, -960.178f,
-113.0f, -62.0f, -15.0f,
-7.0f, -0.0256635f, -4.60374e-07f,
-3.63759e-10f, -4.30175e-14f, -5.27385e-15f,
-1.48084e-15f, -1.05755e-19f, -3.2995e-21f,
-1.67354e-23f, -1.11885e-23f, -1.78506e-30f,
-5.07594e-31f, -3.65799e-31f, -1.43718e-34f,
-1.27126e-38f, -0.0f, 0.0f,
1.17549e-38f, 1.56657e-37f, 4.08512e-29f,
3.31357e-28f, 6.25073e-22f, 4.1723e-13f,
1.44343e-09f, 5.27004e-08f, 9.48298e-08f,
5.57888e-07f, 4.89988e-05f, 0.244326f,
12.4895f, 19.0f, 47.0f,
106.0f, 538.324f, 564.536f,
819.124f, 7048.0f, 12611.0f,
19878.0f, 20309.0f, 797056.0f,
1.77219e+09f, 1.51116e+11f, 4.18193e+13f,
3.59167e+16f, 3.38211e+19f, 2.67488e+20f,
1.78831e+21f, 9.20914e+21f, 8.35654e+23f,
1.4495e+24f, 5.94015e+25f, 4.43608e+30f,
2.44502e+33f, 2.61152e+33f, 1.38178e+37f,
1.71306e+37f, 3.31899e+38f, 3.40282e+38f,
std::numeric_limits<float>::infinity()};
return std::vector<float>(&kValues[0], &kValues[arraysize(kValues)]);
}
static std::vector<double> float64_vector() {
static const double nan = v8::base::OS::nan_value();
static const double values[] = {
@@ -117,6 +156,7 @@ class ValueHelper {
#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
#define FOR_FLOAT32_INPUTS(var) FOR_INPUTS(float, float32, var)
#define FOR_FLOAT64_INPUTS(var) FOR_INPUTS(double, float64, var)
#define FOR_INT32_SHIFTS(var) for (int32_t var = 0; var < 32; var++)
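
FOR_FLOAT32_INPUTS plugs the new float32_vector() into the pre-existing
FOR_INPUTS machinery; roughly what a use of the macro expands to (a sketch;
the FOR_INPUTS definition itself is unchanged by this CL):

  std::vector<float> inputs = ValueHelper::float32_vector();
  for (std::vector<float>::iterator i = inputs.begin(); i != inputs.end();
       ++i) {
    // loop body reads *i
  }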