Implement truncated d-to-i as a stub on x86

- Added a general DoubleToIStub so that it can be extended to other platforms and to the non-truncating case.
- This version handles all cases of truncation (the previous code deoptimized in some cases) and all source/destination register combinations without clobbering any temporaries.
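
For illustration, the stub is used in this patch as follows (sketch; mirrors the call sites in the diff below):

    // Convert the heap number pointed to by edx into ecx, truncating.
    DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
                       true);
    __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);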

R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/18612005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15645 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
danno@chromium.org 2013-07-12 12:28:32 +00:00
parent 4780c99790
commit 48b65f8cd5
6 changed files with 553 additions and 200 deletions

src/code-stubs.h

@@ -72,6 +72,7 @@ namespace internal {
V(ArgumentsAccess) \
V(RegExpConstructResult) \
V(NumberToString) \
V(DoubleToI) \
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
@@ -1746,6 +1747,60 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
};
class DoubleToIStub : public PlatformCodeStub {
public:
DoubleToIStub(Register source,
Register destination,
int offset,
bool is_truncating) : bit_field_(0) {
bit_field_ = SourceRegisterBits::encode(source.code_) |
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating);
}
Register source() {
Register result = { SourceRegisterBits::decode(bit_field_) };
return result;
}
Register destination() {
Register result = { DestinationRegisterBits::decode(bit_field_) };
return result;
}
bool is_truncating() {
return IsTruncatingBits::decode(bit_field_);
}
int offset() {
return OffsetBits::decode(bit_field_);
}
void Generate(MacroAssembler* masm);
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
class SourceRegisterBits:
public BitField<int, 0, kBitsPerRegisterNumber> {}; // NOLINT
class DestinationRegisterBits:
public BitField<int, kBitsPerRegisterNumber,
kBitsPerRegisterNumber> {}; // NOLINT
class IsTruncatingBits:
public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
class OffsetBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
int bit_field_;
DISALLOW_COPY_AND_ASSIGN(DoubleToIStub);
};
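For illustration only (not part of the patch): the stub's minor key is just bit_field_, so an instance is fully described by a 16-bit value packing the source register (6 bits), destination register (6 bits), truncating flag (1 bit) and offset (3 bits). A self-contained sketch of that layout, assuming ia32 register codes eax = 0 and ecx = 1:

#include <cassert>
#include <cstdint>

// Mirrors the BitField layout of DoubleToIStub::bit_field_ above.
static uint32_t EncodeKey(int src, int dst, bool truncating, int offset) {
  assert(src < 64 && dst < 64 && offset < 8);
  return static_cast<uint32_t>(src) |
         (static_cast<uint32_t>(dst) << 6) |
         ((truncating ? 1u : 0u) << 12) |
         (static_cast<uint32_t>(offset) << 13);
}

int main() {
  uint32_t key = EncodeKey(0, 1, true, 7);  // eax -> ecx, offset 7, truncating
  assert((key & 0x3F) == 0);                // source register code
  assert(((key >> 6) & 0x3F) == 1);         // destination register code
  assert(((key >> 12) & 1) == 1);           // is_truncating
  assert(((key >> 13) & 0x7) == 7);         // offset
  return 0;
}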
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {

src/ia32/code-stubs-ia32.cc

@@ -662,131 +662,143 @@ class FloatingPointHelper : public AllStatic {
};
// Get the integer part of a heap number. Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
// trashed registers.
static void IntegerConvert(MacroAssembler* masm,
Register source,
bool use_sse3,
Label* conversion_failure) {
ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
// Get exponent word.
__ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
__ shr(scratch2, HeapNumber::kExponentShift);
__ sub(scratch2, Immediate(HeapNumber::kExponentBias));
// Load ecx with zero. We use this either for the final shift or
// for the answer.
__ xor_(ecx, ecx);
// If the exponent is above 83, the number contains no significant
// bits in the range 0..2^31, so the result is zero.
static const uint32_t kResultIsZeroExponent = 83;
__ cmp(scratch2, Immediate(kResultIsZeroExponent));
__ j(above, &done);
if (use_sse3) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Register input_reg = this->source();
Register final_result_reg = this->destination();
ASSERT(is_truncating());
Label check_negative, process_64_bits, done, done_no_stash;
int double_offset = offset();
// Account for return address and saved regs if input is esp.
if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
double_offset + kPointerSize));
Register scratch1;
{
Register scratch_candidates[3] = { ebx, edx, edi };
for (int i = 0; i < 3; i++) {
scratch1 = scratch_candidates[i];
if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
}
}
// Since we must use ecx for shifts below, use some other register (eax)
// to calculate the result if ecx is the requested return register.
Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
// Save ecx if it isn't the return register (we clobber it for shifts
// below); if ecx is the requested return register, save eax, the temp
// register that stands in for it when computing the result.
Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
__ push(scratch1);
__ push(save_reg);
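// Note: when the input is a general-purpose register it may alias the
// result register and be clobbered below, so the exponent word is stashed
// on the stack; when the input is esp, the double already lives on the
// stack and every word can be re-read in place.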
bool stash_exponent_copy = !input_reg.is(esp);
__ mov(scratch1, mantissa_operand);
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm, SSE3);
// Check whether the exponent is too big for a 64 bit signed integer.
static const uint32_t kTooBigExponent = 63;
__ cmp(scratch2, Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
__ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
// Reserve space for 64 bit answer.
__ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
__ fld_d(mantissa_operand);
}
__ mov(ecx, exponent_operand);
if (stash_exponent_copy) __ push(ecx);
__ and_(ecx, HeapNumber::kExponentMask);
__ shr(ecx, HeapNumber::kExponentShift);
__ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
__ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
__ j(below, &process_64_bits);
// Result is entirely in lower 32-bits of mantissa
int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
if (CpuFeatures::IsSupported(SSE3)) {
__ fstp(0);
}
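// ecx still holds the raw biased exponent; ecx - delta is the left shift
// that turns the low mantissa word into the low 32 bits of the integer
// result. Shifts above 31 (including wrapped-around exponents below the
// bias, caught by the unsigned compare above) leave a zero result.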
__ sub(ecx, Immediate(delta));
__ xor_(result_reg, result_reg);
__ cmp(ecx, Immediate(31));
__ j(above, &done);
__ shl_cl(scratch1);
__ jmp(&check_negative);
__ bind(&process_64_bits);
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm, SSE3);
if (stash_exponent_copy) {
// Already a copy of the exponent on the stack, overwrite it.
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
__ sub(esp, Immediate(kDoubleSize / 2));
} else {
// Reserve space for 64 bit answer.
__ sub(esp, Immediate(kDoubleSize)); // Nolint.
}
// Do conversion, which cannot fail because we checked the exponent.
__ fisttp_d(Operand(esp, 0));
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
__ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
__ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done_no_stash);
} else {
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
const uint32_t non_smi_exponent = 30;
__ cmp(scratch2, Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
__ j(equal, &right_exponent, Label::kNear);
// If the exponent is higher than that then go to the slow case. This
// catches numbers that don't fit in a signed int32, infinities and NaNs.
__ j(less, &normal_exponent, Label::kNear);
{
// Handle a big exponent. The only reason we have this code is that the
// >>> operator has a tendency to generate numbers with an exponent of 31.
const uint32_t big_non_smi_exponent = 31;
__ cmp(scratch2, Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
// in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kMantissaMask);
// Put back the implicit 1.
__ or_(scratch2, 1 << HeapNumber::kExponentShift);
// Shift up the mantissa bits to take up the space the exponent used to
// take. Or-ing in the implicit bit already accounted for one bit, and we
// want the full unsigned range, so we subtract 1 from the shift distance.
const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
__ shl(scratch2, big_shift_distance);
// Get the second half of the double.
__ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
// Shift down 21 bits to get the most significant 11 bits of the low
// mantissa word.
__ shr(ecx, 32 - big_shift_distance);
__ or_(ecx, scratch2);
// We have the answer in ecx, but we may need to negate it.
__ test(scratch, scratch);
__ j(positive, &done, Label::kNear);
__ neg(ecx);
__ jmp(&done, Label::kNear);
// Result must be extracted from shifted 32-bit mantissa
__ sub(ecx, Immediate(delta));
__ neg(ecx);
if (stash_exponent_copy) {
__ mov(result_reg, MemOperand(esp, 0));
} else {
__ mov(result_reg, exponent_operand);
}
__ and_(result_reg,
Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
__ add(result_reg,
Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
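// result_reg:scratch1 now holds the 64-bit significand (hidden bit
// restored) and ecx the right-shift amount 52 - exponent. shrd shifts
// scratch1 right, filling its top bits from result_reg; since ia32 shift
// counts are taken mod 32, shifts of 32 or more leave the wanted word in
// result_reg after shr_cl, and the test/cmov below selects it.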
__ shrd(result_reg, scratch1);
__ shr_cl(result_reg);
__ test(ecx, Immediate(32));
if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatureScope use_cmov(masm, CMOV);
__ cmov(not_equal, scratch1, result_reg);
} else {
Label skip_mov;
__ j(equal, &skip_mov, Label::kNear);
__ mov(scratch1, result_reg);
__ bind(&skip_mov);
}
__ bind(&normal_exponent);
// Exponent word in scratch, exponent in scratch2. Zero in ecx.
// We know that 0 <= exponent < 30.
__ mov(ecx, Immediate(30));
__ sub(ecx, scratch2);
__ bind(&right_exponent);
// Here ecx is the shift, scratch is the exponent word.
// Get the top bits of the mantissa.
__ and_(scratch, HeapNumber::kMantissaMask);
// Put back the implicit 1.
__ or_(scratch, 1 << HeapNumber::kExponentShift);
// Shift up the mantissa bits to take up the space the exponent used to
// take. We have kExponentShift + 1 significant bits in the low end of the
// word. Shift them to the top bits.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ shl(scratch, shift_distance);
// Get the second half of the double. For some exponents we don't
// actually need this because the bits get shifted out again, but
// it's probably slower to test than just to do it.
__ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
// Shift down 22 bits to get the most significant 10 bits of the low
// mantissa word.
__ shr(scratch2, 32 - shift_distance);
__ or_(scratch2, scratch);
// Move down according to the exponent.
__ shr_cl(scratch2);
// Now the unsigned answer is in scratch2. We need to move it to ecx and
// we may need to fix the sign.
Label negative;
__ xor_(ecx, ecx);
__ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
__ j(greater, &negative, Label::kNear);
__ mov(ecx, scratch2);
__ jmp(&done, Label::kNear);
__ bind(&negative);
__ sub(ecx, scratch2);
}
// If the double was negative, negate the integer result.
__ bind(&check_negative);
__ mov(result_reg, scratch1);
__ neg(result_reg);
if (stash_exponent_copy) {
__ cmp(MemOperand(esp, 0), Immediate(0));
} else {
__ cmp(exponent_operand, Immediate(0));
}
if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatureScope use_cmov(masm, CMOV);
__ cmov(greater, result_reg, scratch1);
} else {
Label skip_mov;
__ j(less_equal, &skip_mov, Label::kNear);
__ mov(result_reg, scratch1);
__ bind(&skip_mov);
}
// Restore registers
__ bind(&done);
if (stash_exponent_copy) {
__ add(esp, Immediate(kDoubleSize / 2));
}
__ bind(&done_no_stash);
if (!final_result_reg.is(result_reg)) {
ASSERT(final_result_reg.is(ecx));
__ mov(final_result_reg, result_reg);
}
__ pop(save_reg);
__ pop(scratch1);
__ ret(0);
}
@@ -2407,7 +2419,9 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
CpuFeatureScope use_sse2(masm, SSE2);
ConvertHeapNumberToInt32(masm, edx, conversion_failure);
} else {
IntegerConvert(masm, edx, use_sse3, conversion_failure);
DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
true);
__ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ mov(edx, ecx);
@@ -2442,7 +2456,9 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
CpuFeatureScope use_sse2(masm, SSE2);
ConvertHeapNumberToInt32(masm, eax, conversion_failure);
} else {
IntegerConvert(masm, eax, use_sse3, conversion_failure);
DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
true);
__ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ bind(&done);

src/ia32/lithium-codegen-ia32.cc

@@ -5645,93 +5645,22 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
__ cvttsd2si(result_reg, Operand(input_reg));
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
__ cvttsd2si(result_reg, Operand(input_reg));
Label fast_case_succeeded;
__ cmp(result_reg, 0x80000000u);
if (CpuFeatures::IsSupported(SSE3)) {
// This will deoptimize if the exponent of the input is out of range.
CpuFeatureScope scope(masm(), SSE3);
Label convert, done;
__ j(not_equal, &done, Label::kNear);
__ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), input_reg);
// Get exponent alone and check for too-big exponent.
__ mov(result_reg, Operand(esp, sizeof(int32_t)));
__ and_(result_reg, HeapNumber::kExponentMask);
const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
__ cmp(Operand(result_reg), Immediate(kTooBigExponent));
__ j(less, &convert, Label::kNear);
__ add(Operand(esp), Immediate(kDoubleSize));
DeoptimizeIf(no_condition, instr->environment());
__ bind(&convert);
// Do conversion, which cannot fail because we checked the exponent.
__ fld_d(Operand(esp, 0));
__ fisttp_d(Operand(esp, 0));
__ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
} else {
Label done;
Register temp_reg = ToRegister(instr->temp());
XMMRegister xmm_scratch = xmm0;
// If cvttsd2si succeeded, we're done. Otherwise, we attempt
// manual conversion.
__ j(not_equal, &done, Label::kNear);
// Get high 32 bits of the input in result_reg and temp_reg.
__ pshufd(xmm_scratch, input_reg, 1);
__ movd(Operand(temp_reg), xmm_scratch);
__ mov(result_reg, temp_reg);
// Prepare negation mask in temp_reg.
__ sar(temp_reg, kBitsPerInt - 1);
// Extract the exponent from result_reg and subtract adjusted
// bias from it. The adjustment is selected in a way such that
// when the difference is zero, the answer is in the low 32 bits
// of the input, otherwise a shift has to be performed.
__ shr(result_reg, HeapNumber::kExponentShift);
__ and_(result_reg,
HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
__ sub(Operand(result_reg),
Immediate(HeapNumber::kExponentBias +
HeapNumber::kExponentBits +
HeapNumber::kMantissaBits));
// Don't handle big (> kMantissaBits + kExponentBits == 63) or
// special exponents.
DeoptimizeIf(greater, instr->environment());
// Zero out the sign and the exponent in the input (by shifting
// it to the left) and restore the implicit mantissa bit,
// i.e. convert the input to unsigned int64 shifted left by
// kExponentBits.
ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
// Minus zero has the most significant bit set and the other
// bits cleared.
__ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
__ psllq(input_reg, HeapNumber::kExponentBits);
__ por(input_reg, xmm_scratch);
// Get the amount to shift the input right in xmm_scratch.
__ neg(result_reg);
__ movd(xmm_scratch, Operand(result_reg));
// Shift the input right and extract low 32 bits.
__ psrlq(input_reg, xmm_scratch);
__ movd(Operand(result_reg), input_reg);
// Use the prepared mask in temp_reg to negate the result if necessary.
__ xor_(result_reg, Operand(temp_reg));
__ sub(result_reg, Operand(temp_reg));
__ bind(&done);
}
__ j(not_equal, &fast_case_succeeded);
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(MemOperand(esp, 0), input_reg);
DoubleToIStub stub(esp, result_reg, 0, true);
__ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
__ add(esp, Immediate(kDoubleSize));
__ bind(&fast_case_succeeded);
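// cvttsd2si yields 0x80000000 when the input is NaN or outside int32
// range; only then is the double spilled to the stack and the stub called
// with source esp and offset 0, replacing the deoptimization the old code
// performed here.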
} else {
Label done;
__ cvttsd2si(result_reg, Operand(input_reg));
__ cvtsi2sd(xmm0, Operand(result_reg));
__ ucomisd(xmm0, input_reg);
DeoptimizeIf(not_equal, instr->environment());

test/cctest/cctest.gyp

@@ -110,6 +110,7 @@
['v8_target_arch=="ia32"', {
'sources': [
'test-assembler-ia32.cc',
'test-code-stubs-ia32.cc',
'test-disasm-ia32.cc',
'test-log-stack-tracer.cc'
],

test/cctest/test-code-stubs-ia32.cc

@@ -0,0 +1,274 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "v8.h"
#include "cctest.h"
#include "code-stubs.h"
#include "factory.h"
#include "macro-assembler.h"
#include "platform.h"
#if __GNUC__
#define STDCALL __attribute__((stdcall))
#else
#define STDCALL __stdcall
#endif
using namespace v8::internal;
typedef int32_t STDCALL (*ConvertDToIFunc)(double input);
int STDCALL ConvertDToICVersion(double d) {
Address double_ptr = reinterpret_cast<Address>(&d);
uint32_t exponent_bits = Memory::uint32_at(double_ptr + kDoubleSize / 2);
int32_t shifted_mask = static_cast<int32_t>(Double::kExponentMask >> 32);
int32_t exponent = (((exponent_bits & shifted_mask) >>
(Double::kPhysicalSignificandSize - 32)) -
HeapNumber::kExponentBias);
uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
int result = 0;
uint32_t max_exponent =
static_cast<uint32_t>(Double::kPhysicalSignificandSize);
if (unsigned_exponent >= max_exponent) {
if ((exponent - Double::kPhysicalSignificandSize) < 32) {
result = Memory::uint32_at(double_ptr) <<
(exponent - Double::kPhysicalSignificandSize);
}
} else {
uint64_t big_result =
(BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
result = static_cast<uint32_t>(big_result);
}
if (static_cast<int32_t>(exponent_bits) < 0) {
return (0 - result);
} else {
return result;
}
}
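// Worked example: 4503603922337791.0 is 2^52 + (2^32 - 1), so its unbiased
// exponent is exactly 52 == kPhysicalSignificandSize. The first branch
// applies with a shift of 0 and returns the low mantissa word 0xFFFFFFFF,
// i.e. -1, matching the truncation table below.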
void RunOneTruncationTestWithTest(ConvertDToIFunc func,
double from,
int64_t to) {
int result = (*func)(from);
CHECK_EQ(static_cast<int>(to), result);
}
// #define NaN and Infinity so that it's possible to cut-and-paste these tests
// directly to a .js file and run them.
#define NaN NAN
#define Infinity INFINITY
#define RunOneTruncationTest(p1, p2) RunOneTruncationTestWithTest(func, p1, p2)
void RunAllTruncationTests(ConvertDToIFunc func) {
RunOneTruncationTest(0, 0);
RunOneTruncationTest(0.5, 0);
RunOneTruncationTest(-0.5, 0);
RunOneTruncationTest(1.5, 1);
RunOneTruncationTest(-1.5, -1);
RunOneTruncationTest(5.5, 5);
RunOneTruncationTest(-5.0, -5);
RunOneTruncationTest(NaN, 0);
RunOneTruncationTest(Infinity, 0);
RunOneTruncationTest(-NaN, 0);
RunOneTruncationTest(-Infinity, 0);
RunOneTruncationTest(4.5036e+15, 0x1635E000);
RunOneTruncationTest(-4.5036e+15, -372629504);
RunOneTruncationTest(4503603922337791.0, -1);
RunOneTruncationTest(-4503603922337791.0, 1);
RunOneTruncationTest(4503601774854143.0, 2147483647);
RunOneTruncationTest(-4503601774854143.0, -2147483647);
RunOneTruncationTest(9007207844675582.0, -2);
RunOneTruncationTest(-9007207844675582.0, 2);
RunOneTruncationTest(2.4178527921507624e+24, -536870912);
RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
RunOneTruncationTest(2.417853945072267e+24, -536870912);
RunOneTruncationTest(-2.417853945072267e+24, 536870912);
RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
}
#undef NaN
#undef Infinity
#undef RunOneTruncationTest
#define __ assm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register source_reg,
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
assm.set_allow_stub_calls(false);
int offset =
source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(source_reg, destination_reg, offset, true);
byte* start = stub.GetCode(isolate)->instruction_start();
__ push(ebx);
__ push(ecx);
__ push(edx);
__ push(esi);
__ push(edi);
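// The double argument sits above the return address and the five registers
// just saved (6 words in total); 'offset' is subtracted here because the
// stub adds it back when reading through source_reg.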
if (!source_reg.is(esp)) {
__ lea(source_reg, MemOperand(esp, 6 * kPointerSize - offset));
}
int param_offset = 7 * kPointerSize;
// Save all allocatable registers so we can check afterwards that the stub
// did not clobber them.
int reg_num = 0;
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
param_offset += kPointerSize;
}
}
// Re-push the double argument
__ push(MemOperand(esp, param_offset));
__ push(MemOperand(esp, param_offset));
// Call through to the actual stub
__ call(start, RelocInfo::EXTERNAL_REFERENCE);
__ add(esp, Immediate(kDoubleSize));
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ cmp(reg, MemOperand(esp, 0));
__ Assert(equal, "register was clobbered");
__ add(esp, Immediate(kPointerSize));
}
}
__ mov(eax, destination_reg);
__ pop(edi);
__ pop(esi);
__ pop(edx);
__ pop(ecx);
__ pop(ebx);
__ ret(kDoubleSize);
CodeDesc desc;
assm.GetCode(&desc);
return reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer));
}
#undef __
static Isolate* GetIsolateFrom(LocalContext* context) {
return reinterpret_cast<Isolate*>((*context)->GetIsolate());
}
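// Illustration (not part of the test): each trampoline behaves like an
// ordinary STDCALL function pointer, so a single check is just:
//
//   ConvertDToIFunc func = MakeConvertDToIFuncTrampoline(isolate, esp, eax);
//   CHECK_EQ(-1, func(4503603922337791.0));  // truncates like JS ToInt32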
TEST(ConvertDToI) {
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
HandleScope scope(isolate);
#if DEBUG
// Verify that the tests actually work with the C version. Only run this in
// debug builds: in release builds the compiler constant-folds the calls, and
// gcc gets the folding wrong, triggering an assert.
RunAllTruncationTests(&ConvertDToICVersion);
#endif
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, eax));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, ebx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, ecx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, edx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, edi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esp, esi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, eax));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, ebx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, ecx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, edx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, edi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, eax, esi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, eax));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, ebx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, ecx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, edx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, edi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ebx, esi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, eax));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, ebx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, ecx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, edx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, edi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, ecx, esi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, eax));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, ebx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, ecx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, edx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, edi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edx, esi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, eax));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, ebx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, ecx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, edx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, edi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, esi, esi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, eax));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, ebx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, ecx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, edx));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, edi));
RunAllTruncationTests(MakeConvertDToIFuncTrampoline(isolate, edi, esi));
}

test/mjsunit/double-truncation.js

@@ -0,0 +1,78 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
function RunOneTruncationTest(a, b) {
var temp = a | 0;
assertEquals(b, temp);
}
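// 'a | 0' forces the ToInt32 conversion: the double is reduced modulo 2^32
// and the low 32 bits are reinterpreted as a signed integer, which is
// exactly the truncation the DoubleToIStub implements.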
function RunAllTruncationTests() {
RunOneTruncationTest(0, 0);
RunOneTruncationTest(0.5, 0);
RunOneTruncationTest(-0.5, 0);
RunOneTruncationTest(1.5, 1);
RunOneTruncationTest(-1.5, -1);
RunOneTruncationTest(5.5, 5);
RunOneTruncationTest(-5.0, -5);
RunOneTruncationTest(NaN, 0);
RunOneTruncationTest(Infinity, 0);
RunOneTruncationTest(-NaN, 0);
RunOneTruncationTest(-Infinity, 0);
RunOneTruncationTest(4.5036e+15, 0x1635E000);
RunOneTruncationTest(-4.5036e+15, -372629504);
RunOneTruncationTest(4503603922337791.0, -1);
RunOneTruncationTest(-4503603922337791.0, 1);
RunOneTruncationTest(4503601774854143.0, 2147483647);
RunOneTruncationTest(-4503601774854143.0, -2147483647);
RunOneTruncationTest(9007207844675582.0, -2);
RunOneTruncationTest(-9007207844675582.0, 2);
RunOneTruncationTest(2.4178527921507624e+24, -536870912);
RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
RunOneTruncationTest(2.417853945072267e+24, -536870912);
RunOneTruncationTest(-2.417853945072267e+24, 536870912);
RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
RunOneTruncationTest(9.6714111686030497e+24, -2147483648);
RunOneTruncationTest(-9.6714111686030497e+24, -2147483648);
RunOneTruncationTest(9.6714157802890681e+24, -2147483648);
RunOneTruncationTest(-9.6714157802890681e+24, -2147483648);
}
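// Run twice unoptimized to gather type feedback, then force optimization of
// RunOneTruncationTest and run twice more so the truncating conversion is
// exercised in optimized code as well.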
RunAllTruncationTests();
RunAllTruncationTests();
%OptimizeFunctionOnNextCall(RunOneTruncationTest);
RunAllTruncationTests();
RunAllTruncationTests();