Replace our home-grown BitCast with bit_cast from Chrome/Google3.
R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/553843002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23767 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

Parent: b0b67762fd
Commit: dab61bc310
@@ -500,8 +500,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
-    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -570,8 +570,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
-    mov(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -642,8 +642,8 @@ void MacroAssembler::RecordWrite(
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
-    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -311,7 +311,7 @@ void ArmDebugger::Debug() {
         }
         for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
           dvalue = GetVFPDoubleRegisterValue(i);
-          uint64_t as_words = BitCast<uint64_t>(dvalue);
+          uint64_t as_words = bit_cast<uint64_t>(dvalue);
           PrintF("%3s: %f 0x%08x %08x\n",
                  VFPRegisters::Name(i, true),
                  dvalue,
@@ -322,10 +322,10 @@ void ArmDebugger::Debug() {
         if (GetValue(arg1, &value)) {
           PrintF("%s: 0x%08x %d \n", arg1, value, value);
         } else if (GetVFPSingleValue(arg1, &svalue)) {
-          uint32_t as_word = BitCast<uint32_t>(svalue);
+          uint32_t as_word = bit_cast<uint32_t>(svalue);
           PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
         } else if (GetVFPDoubleValue(arg1, &dvalue)) {
-          uint64_t as_words = BitCast<uint64_t>(dvalue);
+          uint64_t as_words = bit_cast<uint64_t>(dvalue);
           PrintF("%s: %f 0x%08x %08x\n",
                  arg1,
                  dvalue,
@@ -4442,8 +4442,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
-    Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+    Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
   }
 }
 
@@ -4514,8 +4514,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
-    Mov(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+    Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
   }
 }
 
@@ -4587,8 +4587,8 @@ void MacroAssembler::RecordWrite(
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
-    Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+    Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
   }
 }
 
@@ -883,7 +883,7 @@ void ExternalReference::SetUp() {
   double_constants.one_half = 0.5;
   double_constants.minus_one_half = -0.5;
   double_constants.canonical_non_hole_nan = base::OS::nan_value();
-  double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
+  double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
   double_constants.negative_infinity = -V8_INFINITY;
   double_constants.uint32_bias =
       static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
@@ -924,9 +924,9 @@ void ExternalReference::InitializeMathExpData() {
     math_exp_log_table_array = new double[kTableSize];
     for (int i = 0; i < kTableSize; i++) {
      double value = std::pow(2, i / kTableSizeDouble);
-      uint64_t bits = BitCast<uint64_t, double>(value);
+      uint64_t bits = bit_cast<uint64_t, double>(value);
      bits &= (static_cast<uint64_t>(1) << 52) - 1;
-      double mantissa = BitCast<double, uint64_t>(bits);
+      double mantissa = bit_cast<double, uint64_t>(bits);
      math_exp_log_table_array[i] = mantissa;
     }
 
@@ -459,9 +459,7 @@ class RelocInfo {
   Mode rmode() const { return rmode_; }
   intptr_t data() const { return data_; }
   double data64() const { return data64_; }
-  uint64_t raw_data64() {
-    return BitCast<uint64_t>(data64_);
-  }
+  uint64_t raw_data64() { return bit_cast<uint64_t>(data64_); }
   Code* host() const { return host_; }
   void set_host(Code* host) { host_ = host; }
 
@@ -5,6 +5,8 @@
 #ifndef V8_BASE_MACROS_H_
 #define V8_BASE_MACROS_H_
 
+#include <cstring>
+
 #include "include/v8stdint.h"
 #include "src/base/build_config.h"
 #include "src/base/compiler-specific.h"
@@ -102,6 +104,141 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
 #endif  // V8_OS_NACL
 
 
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+//                  content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+#if V8_HAS_CXX11_STATIC_ASSERT
+
+// Under C++11, just use static_assert.
+#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+
+#else
+
+template <bool>
+struct CompileAssert {};
+
+#define COMPILE_ASSERT(expr, msg)                \
+  typedef CompileAssert<static_cast<bool>(expr)> \
+      msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED
+
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+//                               // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     COMPILE_ASSERT(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+#endif
+
+
+// bit_cast<Dest,Source> is a template function that implements the
+// equivalent of "*reinterpret_cast<Dest*>(&source)".  We need this in
+// very low-level functions like the protobuf library and fast math
+// support.
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32>(f);
+//   // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method actually produces undefined behavior
+// according to ISO C++ specification section 3.10 -15 -.  Roughly, this
+// section says: if an object in memory has one type, and a program
+// accesses it with a different type, then the result is undefined
+// behavior for most values of "different type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f).  And it is particularly true for
+// conversions between integral lvalues and floating-point lvalues.
+//
+// The purpose of 3.10 -15- is to allow optimizing compilers to assume
+// that expressions with different types refer to different memory.  gcc
+// 4.0.1 has an optimizer that takes advantage of this.  So a
+// non-conforming program quietly produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast.  The problem is type
+// punning: holding an object in memory of one type and reading its bits
+// back using a different type.
+//
+// The C++ standard is more subtle and complex than this, but that
+// is the basic idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard,
+// especially by the example in section 3.9 .  Also, of course,
+// bit_cast<> wraps up the nasty logic in one place.
+//
+// Fortunately memcpy() is very fast.  In optimized mode, with a
+// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
+// code with the minimal amount of data movement.  On a 32-bit system,
+// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
+// compiles to two loads and two stores.
+//
+// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
+//
+// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
+// is likely to surprise you.
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+
 // A macro to disallow the evil copy constructor and operator= functions
 // This should be used in the private: declarations for a class
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
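Taken together, the additions above are self-contained. A minimal standalone sketch of how the pre-C++11 COMPILE_ASSERT fallback and the memcpy-based bit_cast<> compose; the ALLOW_UNUSED annotation is dropped to keep the sketch compiler-agnostic, and the main() driver is illustrative only, not part of this commit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

template <bool>
struct CompileAssert {};

// Same negative-array-size trick as the hunk above (sans ALLOW_UNUSED).
#define COMPILE_ASSERT(expr, msg)                \
  typedef CompileAssert<static_cast<bool>(expr)> \
      msg[static_cast<bool>(expr) ? 1 : -1]

template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
  Dest dest;
  memcpy(&dest, &source, sizeof(dest));  // the standard-blessed way to type-pun
  return dest;
}

int main() {
  // Read back a double's bit pattern, as the the-hole-NaN and zap-value
  // call sites elsewhere in this diff do.
  double d = 3.14159265358979;
  uint64_t bits = bit_cast<uint64_t>(d);
  printf("0x%016llx\n", static_cast<unsigned long long>(bits));
  return 0;
}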
@@ -24,7 +24,7 @@ class SourceCodeCache FINAL BASE_EMBEDDED {
   }
 
   void Iterate(ObjectVisitor* v) {
-    v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+    v->VisitPointer(bit_cast<Object**, FixedArray**>(&cache_));
   }
 
   bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
@@ -23,7 +23,7 @@ class CommonNodeCache FINAL : public ZoneObject {
 
   Node** FindFloat64Constant(double value) {
     // We canonicalize double constants at the bit representation level.
-    return float64_constants_.Find(zone_, BitCast<int64_t>(value));
+    return float64_constants_.Find(zone_, bit_cast<int64_t>(value));
   }
 
   Node** FindExternalConstant(ExternalReference reference) {
@@ -32,7 +32,7 @@ class CommonNodeCache FINAL : public ZoneObject {
 
   Node** FindNumberConstant(double value) {
     // We canonicalize double constants at the bit representation level.
-    return number_constants_.Find(zone_, BitCast<int64_t>(value));
+    return number_constants_.Find(zone_, bit_cast<int64_t>(value));
   }
 
   Zone* zone() const { return zone_; }
@@ -847,7 +847,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       __ mov(dst, g.ToImmediate(source));
     } else {
       double v = g.ToDouble(source);
-      uint64_t int_val = BitCast<uint64_t, double>(v);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
       int32_t lower = static_cast<int32_t>(int_val);
       int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
       if (destination->IsDoubleRegister()) {
@@ -658,11 +658,11 @@ class Constant FINAL {
 
   explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
   explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
-  explicit Constant(double v) : type_(kFloat64), value_(BitCast<int64_t>(v)) {}
+  explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
   explicit Constant(ExternalReference ref)
-      : type_(kExternalReference), value_(BitCast<intptr_t>(ref)) {}
+      : type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
   explicit Constant(Handle<HeapObject> obj)
-      : type_(kHeapObject), value_(BitCast<intptr_t>(obj)) {}
+      : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
 
   Type type() const { return type_; }
 
@@ -680,17 +680,17 @@ class Constant FINAL {
   double ToFloat64() const {
     if (type() == kInt32) return ToInt32();
     DCHECK_EQ(kFloat64, type());
-    return BitCast<double>(value_);
+    return bit_cast<double>(value_);
   }
 
   ExternalReference ToExternalReference() const {
     DCHECK_EQ(kExternalReference, type());
-    return BitCast<ExternalReference>(static_cast<intptr_t>(value_));
+    return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
   }
 
   Handle<HeapObject> ToHeapObject() const {
     DCHECK_EQ(kHeapObject, type());
-    return BitCast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
+    return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
   }
 
  private:
@@ -134,8 +134,8 @@ Node* JSGraph::Constant(Handle<Object> value) {
 
 
 Node* JSGraph::Constant(double value) {
-  if (BitCast<int64_t>(value) == BitCast<int64_t>(0.0)) return ZeroConstant();
-  if (BitCast<int64_t>(value) == BitCast<int64_t>(1.0)) return OneConstant();
+  if (bit_cast<int64_t>(value) == bit_cast<int64_t>(0.0)) return ZeroConstant();
+  if (bit_cast<int64_t>(value) == bit_cast<int64_t>(1.0)) return OneConstant();
   return NumberConstant(value);
 }
 
@@ -249,7 +249,7 @@ TEST_F(MachineOperatorReducerTest, ChangeFloat64ToUint32WithConstant) {
     Reduction reduction = Reduce(graph()->NewNode(
         machine()->ChangeFloat64ToUint32(), Float64Constant(FastUI2D(x))));
     ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsInt32Constant(BitCast<int32_t>(x)));
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(bit_cast<int32_t>(x)));
   }
 }
 
@@ -290,7 +290,7 @@ TEST_F(MachineOperatorReducerTest, ChangeUint32ToFloat64WithConstant) {
   TRACED_FOREACH(uint32_t, x, kUint32Values) {
     Reduction reduction =
         Reduce(graph()->NewNode(machine()->ChangeUint32ToFloat64(),
-                                Int32Constant(BitCast<int32_t>(x))));
+                                Int32Constant(bit_cast<int32_t>(x))));
     ASSERT_TRUE(reduction.Changed());
     EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastUI2D(x)));
   }
@@ -303,11 +303,12 @@ TEST_F(MachineOperatorReducerTest, ChangeUint32ToFloat64WithConstant) {
 
 TEST_F(MachineOperatorReducerTest, ChangeUint32ToUint64WithConstant) {
   TRACED_FOREACH(uint32_t, x, kUint32Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->ChangeUint32ToUint64(), Int32Constant(BitCast<int32_t>(x))));
+    Reduction reduction =
+        Reduce(graph()->NewNode(machine()->ChangeUint32ToUint64(),
+                                Int32Constant(bit_cast<int32_t>(x))));
     ASSERT_TRUE(reduction.Changed());
     EXPECT_THAT(reduction.replacement(),
-                IsInt64Constant(BitCast<int64_t>(static_cast<uint64_t>(x))));
+                IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
   }
 }
 
@@ -357,8 +358,8 @@ TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
         graph()->NewNode(machine()->TruncateInt64ToInt32(), Int64Constant(x)));
     ASSERT_TRUE(reduction.Changed());
     EXPECT_THAT(reduction.replacement(),
-                IsInt32Constant(BitCast<int32_t>(
-                    static_cast<uint32_t>(BitCast<uint64_t>(x)))));
+                IsInt32Constant(bit_cast<int32_t>(
+                    static_cast<uint32_t>(bit_cast<uint64_t>(x)))));
   }
 }
 
@@ -31,7 +31,7 @@ inline int32_t NodeCacheHash(int64_t key) {
 
 template <>
 inline int32_t NodeCacheHash(double key) {
-  return ComputeLongHash(BitCast<int64_t>(key));
+  return ComputeLongHash(bit_cast<int64_t>(key));
 }
 
 
@@ -171,10 +171,10 @@ struct StaticParameterTraits<double> {
     return os << val;
   }
   static int HashCode(double a) {
-    return static_cast<int>(BitCast<int64_t>(a));
+    return static_cast<int>(bit_cast<int64_t>(a));
   }
   static bool Equals(double a, double b) {
-    return BitCast<int64_t>(a) == BitCast<int64_t>(b);
+    return bit_cast<int64_t>(a) == bit_cast<int64_t>(b);
   }
 };
 
@@ -439,7 +439,7 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
         simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
     ASSERT_TRUE(reduction.Changed());
     EXPECT_THAT(reduction.replacement(),
-                IsInt32Constant(BitCast<int32_t>(DoubleToUint32(n))));
+                IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
   }
 }
 
@@ -470,7 +470,7 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
   TRACED_FOREACH(uint32_t, n, kUint32Values) {
     Reduction reduction =
        Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
-                                Int32Constant(BitCast<int32_t>(n))));
+                                Int32Constant(bit_cast<int32_t>(n))));
     ASSERT_TRUE(reduction.Changed());
     EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastUI2D(n)));
   }
@@ -918,7 +918,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       }
     } else {
       __ movq(kScratchRegister,
-              BitCast<uint64_t, double>(g.ToDouble(constant_source)));
+              bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
       if (destination->IsDoubleRegister()) {
         __ movq(g.ToDoubleRegister(destination), kScratchRegister);
       } else {
@@ -25,7 +25,7 @@ namespace v8 {
 namespace internal {
 
 inline double JunkStringValue() {
-  return BitCast<double, uint64_t>(kQuietNaNMask);
+  return bit_cast<double, uint64_t>(kQuietNaNMask);
 }
 
 
@@ -3675,7 +3675,7 @@ DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
 
 
 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
-  v->VisitPointer(BitCast<Object**>(&function_));
+  v->VisitPointer(bit_cast<Object**>(&function_));
   v->VisitPointer(&context_);
   v->VisitPointers(parameters_, parameters_ + parameters_count_);
   v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
@@ -11,8 +11,8 @@ namespace v8 {
 namespace internal {
 
 // We assume that doubles and uint64_t have the same endianness.
-inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+inline uint64_t double_to_uint64(double d) { return bit_cast<uint64_t>(d); }
+inline double uint64_to_double(uint64_t d64) { return bit_cast<double>(d64); }
 
 // Helper functions for doubles.
 class Double {
@@ -568,26 +568,26 @@ class Factory FINAL {
   MUST_USE_RESULT MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
       Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance);
 
-#define ROOT_ACCESSOR(type, name, camel_name)                            \
-  inline Handle<type> name() {                                           \
-    return Handle<type>(BitCast<type**>(                                 \
-        &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex]));    \
+#define ROOT_ACCESSOR(type, name, camel_name)                         \
+  inline Handle<type> name() {                                        \
+    return Handle<type>(bit_cast<type**>(                             \
+        &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
   }
   ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                            \
-  inline Handle<Map> name##_map() {                                      \
-    return Handle<Map>(BitCast<Map**>(                                   \
-        &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex]));       \
-  }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                      \
+  inline Handle<Map> name##_map() {                                \
+    return Handle<Map>(bit_cast<Map**>(                            \
+        &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+  }
   STRUCT_LIST(STRUCT_MAP_ACCESSOR)
 #undef STRUCT_MAP_ACCESSOR
 
-#define STRING_ACCESSOR(name, str)                                       \
-  inline Handle<String> name() {                                         \
-    return Handle<String>(BitCast<String**>(                             \
-        &isolate()->heap()->roots_[Heap::k##name##RootIndex]));          \
+#define STRING_ACCESSOR(name, str)                                \
+  inline Handle<String> name() {                                  \
+    return Handle<String>(bit_cast<String**>(                     \
+        &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
   }
   INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
 #undef STRING_ACCESSOR
 
@@ -41,7 +41,7 @@ inline bool Handle<T>::is_identical_to(const Handle<T> o) const {
 template <typename T>
 inline T* Handle<T>::operator*() const {
   SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
-  return *BitCast<T**>(location_);
+  return *bit_cast<T**>(location_);
 }
 
 template <typename T>
@@ -55,7 +55,7 @@ inline T** Handle<T>::location() const {
 template <typename T>
 bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
   DCHECK(location_ != NULL);
-  Object* object = *BitCast<T**>(location_);
+  Object* object = *bit_cast<T**>(location_);
   if (object->IsSmi()) return true;
   HeapObject* heap_object = HeapObject::cast(object);
   Heap* heap = heap_object->GetHeap();
@@ -4724,7 +4724,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize(VisitorSynchronization::kStrongRootList);
 
-  v->VisitPointer(BitCast<Object**>(&hidden_string_));
+  v->VisitPointer(bit_cast<Object**>(&hidden_string_));
   v->Synchronize(VisitorSynchronization::kInternalizedString);
 
   isolate_->bootstrapper()->Iterate(v);
@@ -2644,7 +2644,7 @@ OStream& HEnterInlined::PrintDataTo(OStream& os) const {  // NOLINT
 
 static bool IsInteger32(double value) {
   double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
-  return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
+  return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value);
 }
 
 
@@ -3542,9 +3542,9 @@ class HConstant FINAL : public HTemplateInstruction<0> {
 
   bool IsSpecialDouble() const {
     return has_double_value_ &&
-        (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
-         FixedDoubleArray::is_the_hole_nan(double_value_) ||
-         std::isnan(double_value_));
+           (bit_cast<int64_t>(double_value_) == bit_cast<int64_t>(-0.0) ||
+            FixedDoubleArray::is_the_hole_nan(double_value_) ||
+            std::isnan(double_value_));
   }
 
   bool NotInNewSpace() const {
@@ -3648,7 +3648,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
     if (has_int32_value_) {
       return static_cast<intptr_t>(int32_value_);
     } else if (has_double_value_) {
-      return static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+      return static_cast<intptr_t>(bit_cast<int64_t>(double_value_));
     } else if (has_external_reference_value_) {
      return reinterpret_cast<intptr_t>(external_reference_value_.address());
     } else {
@@ -3679,8 +3679,8 @@ class HConstant FINAL : public HTemplateInstruction<0> {
              int32_value_ == other_constant->int32_value_;
     } else if (has_double_value_) {
       return other_constant->has_double_value_ &&
-          BitCast<int64_t>(double_value_) ==
-          BitCast<int64_t>(other_constant->double_value_);
+             bit_cast<int64_t>(double_value_) ==
+                 bit_cast<int64_t>(other_constant->double_value_);
     } else if (has_external_reference_value_) {
       return other_constant->has_external_reference_value_ &&
              external_reference_value_ ==
@@ -2530,9 +2530,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     static const int kDeltaToCmpImmediate = 2;
     static const int kDeltaToMov = 8;
     static const int kDeltaToMovImmediate = 9;
-    static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
-    static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
-    static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+    static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
+    static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
+    static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
 
     DCHECK_EQ(object.code(), InstanceofStub::left().code());
     DCHECK_EQ(function.code(), InstanceofStub::right().code());
@@ -1715,7 +1715,7 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
   double v = instr->value();
-  uint64_t int_val = BitCast<uint64_t, double>(v);
+  uint64_t int_val = bit_cast<uint64_t, double>(v);
   int32_t lower = static_cast<int32_t>(int_val);
   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
   DCHECK(instr->result()->IsDoubleRegister());
@@ -292,7 +292,7 @@ void LGapResolver::EmitMove(int index) {
       }
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
-      uint64_t int_val = BitCast<uint64_t, double>(v);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
       int32_t lower = static_cast<int32_t>(int_val);
       int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
       XMMRegister dst = cgen_->ToDoubleRegister(destination);
@@ -2082,7 +2082,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
     return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
     double value = instr->DoubleValue();
-    bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
+    bool value_is_zero = bit_cast<uint64_t, double>(value) == 0;
     LOperand* temp = value_is_zero ? NULL : TempRegister();
     return DefineAsRegister(new(zone()) LConstantD(temp));
   } else if (r.IsExternal()) {
@@ -427,8 +427,8 @@ void MacroAssembler::RecordWriteArray(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -472,8 +472,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -533,9 +533,9 @@ void MacroAssembler::RecordWriteForMap(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -603,8 +603,8 @@ void MacroAssembler::RecordWrite(
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(address, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -2124,7 +2124,7 @@ Operand ApiParameterOperand(int index) {
 void MacroAssembler::PrepareCallApiFunction(int argc) {
   EnterApiExitFrame(argc);
   if (emit_debug_code()) {
-    mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -2659,7 +2659,7 @@ void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
 
 void MacroAssembler::Move(XMMRegister dst, double val) {
   // TODO(titzer): recognize double constants with ExternalReferences.
-  uint64_t int_val = BitCast<uint64_t, double>(val);
+  uint64_t int_val = bit_cast<uint64_t, double>(val);
   if (int_val == 0) {
     xorps(dst, dst);
   } else {
@@ -730,9 +730,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     // Clobber registers when generating debug-code to provoke errors.
     __ bind(&interceptor_failed);
     if (FLAG_debug_code) {
-      __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
+      __ mov(receiver(), Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(holder_reg, Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
     }
 
     __ pop(this->name());
@@ -732,9 +732,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     // Clobber registers when generating debug-code to provoke errors.
     __ bind(&interceptor_failed);
     if (FLAG_debug_code) {
-      __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
+      __ mov(receiver(), Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(holder_reg, Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
     }
 
     __ pop(this->name());
@@ -191,16 +191,16 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
   // Visit the roots from the top for a given thread.
   v->VisitPointer(&thread->pending_exception_);
   v->VisitPointer(&(thread->pending_message_obj_));
-  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
-  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
+  v->VisitPointer(bit_cast<Object**>(&(thread->pending_message_script_)));
+  v->VisitPointer(bit_cast<Object**>(&(thread->context_)));
   v->VisitPointer(&thread->scheduled_exception_);
 
   for (v8::TryCatch* block = thread->try_catch_handler();
        block != NULL;
       block = block->next_) {
-    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
-    v->VisitPointer(BitCast<Object**>(&(block->message_obj_)));
-    v->VisitPointer(BitCast<Object**>(&(block->message_script_)));
+    v->VisitPointer(bit_cast<Object**>(&(block->exception_)));
+    v->VisitPointer(bit_cast<Object**>(&(block->message_obj_)));
+    v->VisitPointer(bit_cast<Object**>(&(block->message_script_)));
   }
 
   // Iterate over pointers on native execution stack.
@@ -209,8 +209,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
-    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+    li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+    li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -284,8 +284,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
-    li(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+    li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -359,8 +359,8 @@ void MacroAssembler::RecordWrite(
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
-    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+    li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -14,7 +14,6 @@
 #include "src/assembler.h"
 #include "src/base/bits.h"
 #include "src/disasm.h"
-#include "src/globals.h"  // Need the BitCast.
 #include "src/mips/constants-mips.h"
 #include "src/mips/simulator-mips.h"
 #include "src/ostreams.h"
@@ -1126,17 +1125,17 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
 
 void Simulator::set_fpu_register_float(int fpureg, float value) {
   DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+  *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
 }
 
 
 void Simulator::set_fpu_register_double(int fpureg, double value) {
   if (IsFp64Mode()) {
     DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-    *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+    *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
   } else {
     DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-    int64_t i64 = BitCast<int64_t>(value);
+    int64_t i64 = bit_cast<int64_t>(value);
     set_fpu_register_word(fpureg, i64 & 0xffffffff);
     set_fpu_register_word(fpureg + 1, i64 >> 32);
   }
@@ -1195,21 +1194,20 @@ int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
 
 float Simulator::get_fpu_register_float(int fpureg) const {
   DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return *BitCast<float*>(
-      const_cast<int64_t*>(&FPUregisters_[fpureg]));
+  return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
 }
 
 
 double Simulator::get_fpu_register_double(int fpureg) const {
   if (IsFp64Mode()) {
     DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-    return *BitCast<double*>(&FPUregisters_[fpureg]);
+    return *bit_cast<double*>(&FPUregisters_[fpureg]);
   } else {
     DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
     int64_t i64;
     i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
     i64 |= static_cast<uint64_t>(get_fpu_register_word(fpureg + 1)) << 32;
-    return BitCast<double>(i64);
+    return bit_cast<double>(i64);
   }
 }
 
@@ -212,8 +212,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(value, Operand(BitCast<int64_t>(kZapValue + 4)));
-    li(dst, Operand(BitCast<int64_t>(kZapValue + 8)));
+    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
   }
 }
 
@@ -287,8 +287,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
-    li(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+    li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
   }
 }
 
@@ -362,8 +362,8 @@ void MacroAssembler::RecordWrite(
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(address, Operand(BitCast<int64_t>(kZapValue + 12)));
-    li(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
   }
 }
 
@@ -14,7 +14,6 @@
 #include "src/assembler.h"
 #include "src/base/bits.h"
 #include "src/disasm.h"
-#include "src/globals.h"  // Need the BitCast.
 #include "src/mips64/constants-mips64.h"
 #include "src/mips64/simulator-mips64.h"
 #include "src/ostreams.h"
@@ -1057,13 +1056,13 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
 
 void Simulator::set_fpu_register_float(int fpureg, float value) {
   DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+  *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
 }
 
 
 void Simulator::set_fpu_register_double(int fpureg, double value) {
   DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+  *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
 }
 
 
@@ -1118,14 +1117,13 @@ uint32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
 
 float Simulator::get_fpu_register_float(int fpureg) const {
   DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return *BitCast<float*>(
-      const_cast<int64_t*>(&FPUregisters_[fpureg]));
+  return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
 }
 
 
 double Simulator::get_fpu_register_double(int fpureg) const {
   DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return *BitCast<double*>(&FPUregisters_[fpureg]);
+  return *bit_cast<double*>(&FPUregisters_[fpureg]);
 }
 
 
@@ -371,9 +371,9 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
     if (!is_the_hole(i)) {
       double value = get_scalar(i);
       CHECK(!std::isnan(value) ||
-            (BitCast<uint64_t>(value) ==
-             BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
-            ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
+            (bit_cast<uint64_t>(value) ==
+             bit_cast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
+            ((bit_cast<uint64_t>(value) & Double::kSignMask) != 0));
     }
   }
 }
@@ -2193,18 +2193,18 @@ void FixedArray::set(int index, Object* value) {
 
 
 inline bool FixedDoubleArray::is_the_hole_nan(double value) {
-  return BitCast<uint64_t, double>(value) == kHoleNanInt64;
+  return bit_cast<uint64_t, double>(value) == kHoleNanInt64;
 }
 
 
 inline double FixedDoubleArray::hole_nan_as_double() {
-  return BitCast<double, uint64_t>(kHoleNanInt64);
+  return bit_cast<double, uint64_t>(kHoleNanInt64);
 }
 
 
 inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
-  DCHECK(BitCast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
-  DCHECK((BitCast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
+  DCHECK(bit_cast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
+  DCHECK((bit_cast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
   return base::OS::nan_value();
 }
 
@@ -13,8 +13,8 @@ namespace internal {
 void LookupResult::Iterate(ObjectVisitor* visitor) {
   LookupResult* current = this;  // Could be NULL.
   while (current != NULL) {
-    visitor->VisitPointer(BitCast<Object**>(&current->holder_));
-    visitor->VisitPointer(BitCast<Object**>(&current->transition_));
+    visitor->VisitPointer(bit_cast<Object**>(&current->holder_));
+    visitor->VisitPointer(bit_cast<Object**>(&current->transition_));
     current = current->next_;
   }
 }
src/utils.h (56 changed lines)

@@ -741,62 +741,6 @@ inline int TenToThe(int exponent) {
 }
 
 
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-//   float f = foo();
-//   int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits.  Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer.  This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type.  Of course the end result is likely to
-// be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize BitCast away.
-//
-// There is an additional use for BitCast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule.  If you have checked that there is no breakage
-// you can use BitCast to cast one pointer type to another.  This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-
-// We need different implementations of BitCast for pointer and non-pointer
-// values. We use partial specialization of auxiliary struct to work around
-// issues with template functions overloading.
-template <class Dest, class Source>
-struct BitCastHelper {
-  STATIC_ASSERT(sizeof(Dest) == sizeof(Source));
-
-  INLINE(static Dest cast(const Source& source)) {
-    Dest dest;
-    memcpy(&dest, &source, sizeof(dest));
-    return dest;
-  }
-};
-
-template <class Dest, class Source>
-struct BitCastHelper<Dest, Source*> {
-  INLINE(static Dest cast(Source* source)) {
-    return BitCastHelper<Dest, uintptr_t>::
-        cast(reinterpret_cast<uintptr_t>(source));
-  }
-};
-
-template <class Dest, class Source>
-INLINE(Dest BitCast(const Source& source));
-
-template <class Dest, class Source>
-inline Dest BitCast(const Source& source) {
-  return BitCastHelper<Dest, Source>::cast(source);
-}
-
-
 template<typename ElementType, int NumElements>
 class EmbeddedContainer {
  public:
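One difference worth noting in the deletion above: the old BitCast carried a BitCastHelper<Dest, Source*> specialization that routed pointer arguments through uintptr_t, while the replacement bit_cast<> has no pointer specialization at all. The pointer-typed call sites in this commit (for example bit_cast<Object**>(&hidden_string_)) simply copy the pointer object itself bitwise through memcpy. A minimal sketch of that pattern, using hypothetical opaque stand-ins rather than V8's real Object and String types, with static_assert standing in for COMPILE_ASSERT:

#include <string.h>

template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
  // Stand-in for COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), ...).
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
}

struct Object;  // opaque placeholder, not V8's Object
struct String;  // opaque placeholder, not V8's String

int main() {
  String* hidden_string = 0;
  // Same shape as v->VisitPointer(bit_cast<Object**>(&hidden_string_)):
  // both pointer types have the same size, so the plain memcpy path works.
  Object** slot = bit_cast<Object**>(&hidden_string);
  return slot == 0 ? 1 : 0;
}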
@@ -162,7 +162,7 @@ Operand::Operand(const Operand& operand, int32_t offset) {
   int32_t disp_value = 0;
   if (mode == 0x80 || is_baseless) {
     // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
-    disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
+    disp_value = *bit_cast<const int32_t*>(&operand.buf_[disp_offset]);
   } else if (mode == 0x40) {
     // Mode 1: Byte displacement.
     disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
@@ -286,7 +286,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 
   Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
+  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
   // r15: the-hole NaN
   __ jmp(&entry);
 
@@ -393,7 +393,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 
   // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
+  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
   __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
   // rsi: the-hole NaN
   // rdi: pointer to the-hole
@@ -1721,7 +1721,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
   DCHECK(instr->result()->IsDoubleRegister());
   XMMRegister res = ToDoubleRegister(instr->result());
   double v = instr->value();
-  uint64_t int_val = BitCast<uint64_t, double>(v);
+  uint64_t int_val = bit_cast<uint64_t, double>(v);
   // Use xor to produce +0.0 in a fast and compact way, but avoid to
   // do so if the constant is -0.0.
   if (int_val == 0) {
@@ -4347,8 +4347,9 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
     __ ucomisd(value, value);
     __ j(parity_odd, &have_value, Label::kNear);  // NaN.
 
-    __ Set(kScratchRegister, BitCast<uint64_t>(
-        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+    __ Set(kScratchRegister,
+           bit_cast<uint64_t>(
+               FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
     __ movq(value, kScratchRegister);
 
     __ bind(&have_value);
@@ -188,7 +188,7 @@ void LGapResolver::EmitMove(int index) {
       }
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
-      uint64_t int_val = BitCast<uint64_t, double>(v);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
       XMMRegister dst = cgen_->ToDoubleRegister(destination);
       if (int_val == 0) {
         __ xorps(dst, dst);
@@ -3395,8 +3395,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
   bind(&is_nan);
   // Convert all NaNs to the same canonical NaN value when they are stored in
   // the double array.
-  Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  Set(kScratchRegister,
+      bit_cast<uint64_t>(
+          FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
   movq(xmm_scratch, kScratchRegister);
   jmp(&have_double_value, Label::kNear);
 
@@ -2205,9 +2205,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     static const int kDeltaToCmpImmediate = 2;
     static const int kDeltaToMov = 8;
     static const int kDeltaToMovImmediate = 9;
-    static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
-    static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
-    static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+    static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
+    static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
+    static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
 
     DCHECK_EQ(object.code(), InstanceofStub::left().code());
     DCHECK_EQ(function.code(), InstanceofStub::right().code());
@@ -1908,7 +1908,7 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
   double v = instr->value();
-  uint64_t int_val = BitCast<uint64_t, double>(v);
+  uint64_t int_val = bit_cast<uint64_t, double>(v);
   int32_t lower = static_cast<int32_t>(int_val);
   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
   DCHECK(instr->result()->IsDoubleRegister());
@@ -4088,7 +4088,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
     // This means we should store the (double) hole. No floating point
     // registers required.
     double nan_double = FixedDoubleArray::hole_nan_as_double();
-    uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+    uint64_t int_val = bit_cast<uint64_t, double>(nan_double);
     int32_t lower = static_cast<int32_t>(int_val);
     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
 
@@ -292,7 +292,7 @@ void LGapResolver::EmitMove(int index) {
       }
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
-      uint64_t int_val = BitCast<uint64_t, double>(v);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
       int32_t lower = static_cast<int32_t>(int_val);
       int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
       __ push(Immediate(upper));
@@ -349,8 +349,8 @@ void MacroAssembler::RecordWriteArray(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -393,8 +393,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -452,9 +452,9 @@ void MacroAssembler::RecordWriteForMap(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -521,8 +521,8 @@ void MacroAssembler::RecordWrite(
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(address, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -2016,7 +2016,7 @@ Operand ApiParameterOperand(int index) {
 void MacroAssembler::PrepareCallApiFunction(int argc) {
   EnterApiExitFrame(argc);
   if (emit_debug_code()) {
-    mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -1182,7 +1182,7 @@ TEST(14) {
   code->Print(os);
 #endif
   F3 f = FUNCTION_CAST<F3>(code->entry());
-  t.left = BitCast<double>(kHoleNanInt64);
+  t.left = bit_cast<double>(kHoleNanInt64);
   t.right = 1;
   t.add_result = 0;
   t.sub_result = 0;
@@ -1199,14 +1199,18 @@ TEST(14) {
 #endif
   // With VFP2 the sign of the canonicalized Nan is undefined. So
   // we remove the sign bit for the upper tests.
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.add_result) & 0xffffffffu);
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.sub_result) & 0xffffffffu);
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.mul_result) & 0xffffffffu);
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.mul_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.div_result) & 0xffffffffu);
 }
 
 
@@ -62,7 +62,7 @@ int STDCALL ConvertDToICVersion(double d) {
     }
   } else {
     uint64_t big_result =
-        (BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
+        (bit_cast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
     big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
     result = static_cast<uint32_t>(big_result);
   }
@@ -149,9 +149,9 @@ static void TestNaN(const char *code) {
   i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
   double value = a->get_scalar(0);
   CHECK(std::isnan(value) &&
-        i::BitCast<uint64_t>(value) ==
-            i::BitCast<uint64_t>(
-                i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+        i::bit_cast<uint64_t>(value) ==
+            i::bit_cast<uint64_t>(
+                i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
 }
 
 