[ptr-compr] Fix incorrectly used machine types

in TurboFan, CSA, Wasm and compiler tests. Tagged value decompression
logic will depend on the machine type of the value being loaded, so the
machine type must be correct.
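
To see why, consider what a tagged load means once pointers are compressed: the slot holds only the low 32 bits of the address, and the load must re-add the heap base. The following standalone sketch (toy base and layout, little-endian host; not V8 code) contrasts a tagged-typed load with a pointer-width load of the same slot:

// Toy model of a compressed tagged load; names and layout are assumptions,
// not V8's real scheme. A tagged slot stores only the low 32 bits of the
// full address; decompression adds the heap base back.
#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  const uint64_t heap_base = 0x7f1200000000ULL;    // hypothetical cage base
  const uint64_t full_value = heap_base + 0xBEEF;  // a "heap object" address

  // The slot is 64 bits wide, but only its low 32 bits are meaningful.
  uint64_t slot = 0;
  std::memcpy(&slot, &full_value, 4);  // store compressed (little-endian)

  // Correct: a tagged machine type means a 32-bit load plus decompression.
  uint32_t compressed = 0;
  std::memcpy(&compressed, &slot, 4);
  uint64_t decompressed = heap_base + compressed;

  // Wrong: a pointer-width machine type does a 64-bit load, no decompression.
  uint64_t wrong = slot;

  std::cout << std::hex << decompressed << " != " << wrong << "\n";
}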

Bug: v8:7703
Change-Id: Ia9e7cc1e273e5a458d9de8aaa4adb0c970413b8b
Reviewed-on: https://chromium-review.googlesource.com/c/1319573
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57280}
Author:  Igor Sheludko (committed by Commit Bot)
Date:    2018-11-06 14:40:24 +01:00
Commit:  b8e8b0de4f (parent 1444bebe76)
7 changed files with 200 additions and 101 deletions

src/builtins/builtins-constructor-gen.cc

@@ -524,8 +524,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
   {
     // Copy over in-object properties.
     Label continue_with_write_barrier(this), done_init(this);
-    VARIABLE(offset, MachineType::PointerRepresentation(),
-             IntPtrConstant(JSObject::kHeaderSize));
+    TVARIABLE(IntPtrT, offset, IntPtrConstant(JSObject::kHeaderSize));
     // Mutable heap numbers only occur on 32-bit platforms.
     bool may_use_mutable_heap_numbers =
         FLAG_track_double_fields && !FLAG_unbox_double_fields;
@@ -535,16 +534,21 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
     Branch(WordEqual(offset.value(), instance_size), &done_init,
            &continue_fast);
     BIND(&continue_fast);
-    Node* field = LoadObjectField(boilerplate, offset.value());
     if (may_use_mutable_heap_numbers) {
+      TNode<Object> field = LoadObjectField(boilerplate, offset.value());
       Label store_field(this);
       GotoIf(TaggedIsSmi(field), &store_field);
-      GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
+      GotoIf(IsMutableHeapNumber(CAST(field)), &continue_with_write_barrier);
       Goto(&store_field);
       BIND(&store_field);
+      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
+    } else {
+      // Copy fields as raw data.
+      TNode<IntPtrT> field =
+          LoadObjectField<IntPtrT>(boilerplate, offset.value());
+      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
     }
-    StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
-    offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+    offset = IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize));
     Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
            &done_init);
   }

src/code-stub-assembler.h

@@ -864,6 +864,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
     return UncheckedCast<Object>(
         LoadObjectField(object, offset, MachineType::AnyTagged()));
   }
+  template <class T, typename std::enable_if<
+                         std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
+                         int>::type = 0>
+  TNode<T> LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset) {
+    return UncheckedCast<T>(
+        LoadObjectField(object, offset, MachineTypeOf<T>::value));
+  }
   // Load a SMI field and untag it.
   TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
                                          int offset);
@@ -1231,6 +1238,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   Node* StoreObjectFieldNoWriteBarrier(
       Node* object, Node* offset, Node* value,
       MachineRepresentation rep = MachineRepresentation::kTagged);
+
+  template <class T = Object>
+  TNode<T> StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
+                                          TNode<IntPtrT> offset,
+                                          TNode<T> value) {
+    return UncheckedCast<T>(StoreObjectFieldNoWriteBarrier(
+        object, offset, value, MachineRepresentationOf<T>::value));
+  }
+
   // Store the Map of an HeapObject.
   Node* StoreMap(Node* object, Node* map);
   Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
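
The interesting part of the new LoadObjectField overload is the std::enable_if guard: the typed raw-load overload only participates in overload resolution for types convertible to TNode<UntaggedT>, so tagged loads cannot accidentally take the no-decompression path. A minimal standalone sketch of the same SFINAE pattern, with toy types standing in for V8's (is_base_of on the tag types approximates the is_convertible test in the diff):

#include <iostream>
#include <type_traits>

// Toy stand-ins for V8's tag hierarchy; not the real classes.
struct UntaggedT {};
struct IntPtrT : UntaggedT {};  // an untagged machine word
struct Object {};               // a tagged value

template <class T>
struct TNode {};  // toy typed-node wrapper

// Generic (tagged) path: would go through decompression in real code.
TNode<Object> LoadObjectField(int offset) {
  std::cout << "tagged load at " << offset << "\n";
  return {};
}

// Typed overload, enabled only for untagged types.
template <class T, typename std::enable_if<
                       std::is_base_of<UntaggedT, T>::value, int>::type = 0>
TNode<T> LoadObjectField(int offset) {
  std::cout << "raw untagged load at " << offset << "\n";
  return {};
}

int main() {
  LoadObjectField(8);            // tagged path
  LoadObjectField<IntPtrT>(16);  // untagged path via the SFINAE overload
}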

src/compiler/effect-control-linearizer.cc

@@ -2947,7 +2947,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
   Node* frame = __ LoadFramePointer();
   Node* parent_frame =
-      __ Load(MachineType::AnyTagged(), frame,
+      __ Load(MachineType::Pointer(), frame,
               __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
   Node* parent_frame_type = __ Load(
       MachineType::AnyTagged(), parent_frame,
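
The fix reflects that the caller-FP slot holds a raw machine address, not a tagged heap value; under pointer compression a tagged load would truncate and decompress it, corrupting the frame walk. A toy sketch of the invariant (hypothetical frame layout, not V8's):

#include <cstdint>
#include <iostream>

// The caller-FP slot links frames by raw pointer, so it must be read at
// full pointer width with no decompression applied.
struct Frame {
  Frame* caller_fp;     // raw pointer slot, like kCallerFPOffset in real frames
  int64_t type_marker;  // stand-in for the frame-type slot
};

int main() {
  Frame outer{nullptr, 1};
  Frame inner{&outer, 2};
  // Correct: read the slot as a full-width raw pointer.
  Frame* parent = inner.caller_fp;
  std::cout << parent->type_marker << "\n";  // prints 1
}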

src/compiler/wasm-compiler.cc

@@ -81,22 +81,33 @@ MachineType assert_size(int expected_size, MachineType type) {
 #define WASM_INSTANCE_OBJECT_OFFSET(name) \
   wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
 
-#define LOAD_INSTANCE_FIELD(name, type)                                      \
-  SetEffect(graph()->NewNode(                                                \
-      mcgraph()->machine()->Load(                                            \
-          assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type)),               \
-      instance_node_.get(),                                                  \
-      mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(), \
-      Control()))
+#define LOAD_RAW(base_pointer, byte_offset, type)                             \
+  SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer,  \
+                             mcgraph()->Int32Constant(byte_offset), Effect(), \
+                             Control()))
 
-#define LOAD_TAGGED_POINTER(base_pointer, byte_offset)                        \
-  SetEffect(graph()->NewNode(                                                 \
-      mcgraph()->machine()->Load(MachineType::TaggedPointer()), base_pointer, \
-      mcgraph()->Int32Constant(byte_offset), Effect(), Control()))
+#define LOAD_INSTANCE_FIELD(name, type)                             \
+  LOAD_RAW(instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name), \
+           assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type))
 
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
-  LOAD_TAGGED_POINTER(                           \
-      array_node, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
+#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
+  LOAD_RAW(base_pointer, byte_offset, MachineType::TaggedPointer())
+
+#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
+  LOAD_RAW(base_pointer, byte_offset, MachineType::AnyTagged())
+
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type)                       \
+  LOAD_RAW(array_node,                                                       \
+           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), type)
+
+#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
+  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
+
+#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
+  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
+
+#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
+  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
 
 // This can be used to store tagged Smi values only.
 #define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
@@ -2179,11 +2190,11 @@ Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
                                                        uint32_t* index) {
   MachineOperatorBuilder* machine = mcgraph()->machine();
   Node* upper =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
   (*index)++;
   upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
   Node* lower =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
   (*index)++;
   Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
   return value;
@@ -2223,7 +2234,7 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
 Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
   Node* exceptions_table =
       LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
-  Node* tag = LOAD_FIXED_ARRAY_SLOT(exceptions_table, exception_index);
+  Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
   return tag;
 }
@@ -2259,7 +2270,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
       break;
     }
     case wasm::kWasmAnyRef:
-      value = LOAD_FIXED_ARRAY_SLOT(values_array, index);
+      value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
       ++index;
       break;
     default:
@@ -2659,7 +2670,8 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
   // Load the imported function refs array from the instance.
   Node* imported_function_refs =
       LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
-  Node* ref_node = LOAD_FIXED_ARRAY_SLOT(imported_function_refs, func_index);
+  Node* ref_node =
+      LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
 
   // Load the target from the imported_targets array at a known offset.
   Node* imported_targets =
@@ -4615,7 +4627,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       args[pos++] = callable_node;  // target callable.
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
             native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -4678,7 +4690,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
             native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -5539,9 +5551,14 @@ AssemblerOptions WasmAssemblerOptions() {
 #undef FATAL_UNSUPPORTED_OPCODE
 #undef WASM_INSTANCE_OBJECT_SIZE
 #undef WASM_INSTANCE_OBJECT_OFFSET
+#undef LOAD_RAW
 #undef LOAD_INSTANCE_FIELD
 #undef LOAD_TAGGED_POINTER
+#undef LOAD_TAGGED_ANY
 #undef LOAD_FIXED_ARRAY_SLOT
+#undef LOAD_FIXED_ARRAY_SLOT_SMI
+#undef LOAD_FIXED_ARRAY_SLOT_PTR
+#undef LOAD_FIXED_ARRAY_SLOT_ANY
 #undef STORE_FIXED_ARRAY_SLOT_SMI
 #undef STORE_FIXED_ARRAY_SLOT_ANY
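
The refactored macros form a small layer cake: every accessor now bottoms out in one LOAD_RAW with an explicit machine type, so a fixed-array read has to say whether it expects a Smi, a heap pointer, or either. A standalone sketch of that layering (offsets and type names here are toy stand-ins, printed rather than compiled into a graph):

#include <cstdio>

// Toy demo of the macro layering above. Types are strings, not V8's
// MachineType, and the element offset formula is hypothetical.
#define LOAD_RAW(base, offset, type) \
  std::printf("load %s at %s+%d\n", type, base, offset)
#define LOAD_TAGGED_POINTER(base, offset) LOAD_RAW(base, offset, "TaggedPointer")
#define LOAD_TAGGED_ANY(base, offset) LOAD_RAW(base, offset, "AnyTagged")
#define ELEMENT_OFFSET(index) (16 + 8 * (index))
#define LOAD_FIXED_ARRAY_SLOT_SMI(array, index) \
  LOAD_RAW(array, ELEMENT_OFFSET(index), "TaggedSigned")
#define LOAD_FIXED_ARRAY_SLOT_PTR(array, index) \
  LOAD_RAW(array, ELEMENT_OFFSET(index), "TaggedPointer")
#define LOAD_FIXED_ARRAY_SLOT_ANY(array, index) \
  LOAD_RAW(array, ELEMENT_OFFSET(index), "AnyTagged")

int main() {
  LOAD_FIXED_ARRAY_SLOT_SMI("values_array", 0);      // Smi-only slot
  LOAD_FIXED_ARRAY_SLOT_PTR("exceptions_table", 1);  // heap-pointer slot
  LOAD_FIXED_ARRAY_SLOT_ANY("values_array", 2);      // either
}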

src/ic/accessor-assembler.cc

@@ -2336,7 +2336,7 @@ void AccessorAssembler::TryProbeStubCacheTable(
   DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
                               stub_cache->key_reference(table).address());
   TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
-      Load(MachineType::TaggedPointer(), key_base,
+      Load(MachineType::AnyTagged(), key_base,
           IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize))));
 
   // We found the handler.

src/machine-type.h

@@ -94,6 +94,12 @@ class MachineType {
            representation() == MachineRepresentation::kTaggedSigned ||
            representation() == MachineRepresentation::kTagged;
   }
+  constexpr bool IsTaggedSigned() const {
+    return representation() == MachineRepresentation::kTaggedSigned;
+  }
+  constexpr bool IsTaggedPointer() const {
+    return representation() == MachineRepresentation::kTaggedPointer;
+  }
   constexpr static MachineRepresentation PointerRepresentation() {
     return (kPointerSize == 4) ? MachineRepresentation::kWord32
                                : MachineRepresentation::kWord64;
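
The new predicates mirror IsTagged() above and are constexpr, so representation checks can run in constant expressions. A toy mirror of the pattern (a cut-down enum and class, not V8's full MachineType):

#include <cstdint>

enum class MachineRepresentation : uint8_t {
  kWord32, kTaggedSigned, kTaggedPointer, kTagged
};

class MachineType {
 public:
  constexpr explicit MachineType(MachineRepresentation rep) : rep_(rep) {}
  constexpr MachineRepresentation representation() const { return rep_; }
  constexpr bool IsTaggedSigned() const {
    return representation() == MachineRepresentation::kTaggedSigned;
  }
  constexpr bool IsTaggedPointer() const {
    return representation() == MachineRepresentation::kTaggedPointer;
  }

 private:
  MachineRepresentation rep_;
};

static_assert(MachineType(MachineRepresentation::kTaggedSigned).IsTaggedSigned(),
              "predicates are usable in constant expressions");
int main() { return 0; }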

test/cctest/compiler/test-run-load-store.cc

@@ -25,6 +25,14 @@ enum TestAlignment {
   kUnaligned,
 };
 
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
+#else
+#error "Unknown Architecture"
+#endif
+
 // This is a America!
 #define A_BILLION 1000000000ULL
 #define A_GIG (1024ULL * 1024ULL * 1024ULL)
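
The LSB helper answers one question: given the address of a full-width slot, where do its low `bytes` bytes live? On little-endian targets that is the slot's own address; on big-endian it is the tail end of the slot. A standalone sketch of both arms (only the little-endian one is exercised, assuming a little-endian host):

#include <cstdint>
#include <cstring>
#include <iostream>

using byte = uint8_t;

// Little-endian arm of LSB: the low bytes start at the slot itself.
byte* lsb_le(uint64_t* addr, size_t bytes) {
  (void)bytes;
  return reinterpret_cast<byte*>(addr);
}

// Big-endian arm of LSB: step one past the slot, then back up `bytes`.
byte* lsb_be(uint64_t* addr, size_t bytes) {
  return reinterpret_cast<byte*>(addr + 1) - bytes;
}

int main() {
  uint64_t slot = 0x1122334455667788ULL;
  uint32_t lo = 0;
  // On a little-endian host the low 32 bits live at the start of the slot.
  std::memcpy(&lo, lsb_le(&slot, sizeof(lo)), sizeof(lo));
  std::cout << std::hex << lo << "\n";  // prints 55667788
}
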
@@ -178,22 +186,61 @@ TEST(RunUnalignedLoadStoreFloat64Offset) {
 }
 
 namespace {
-template <typename Type>
-void RunLoadImmIndex(MachineType rep, TestAlignment t) {
-  const int kNumElems = 3;
-  Type buffer[kNumElems];
-
-  // initialize the buffer with some raw data.
-  byte* raw = reinterpret_cast<byte*>(buffer);
-  for (size_t i = 0; i < sizeof(buffer); i++) {
-    raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
-  }
+// Initializes the buffer with some raw data respecting requested
+// representation of the values.
+template <typename CType>
+void InitBuffer(CType* buffer, size_t length, MachineType rep) {
+  const size_t kBufferSize = sizeof(CType) * length;
+  if (!rep.IsTagged()) {
+    byte* raw = reinterpret_cast<byte*>(buffer);
+    for (size_t i = 0; i < kBufferSize; i++) {
+      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
+    }
+    return;
+  }
+
+  // Tagged field loads require values to be properly tagged because of
+  // pointer decompression that may be happening during load.
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
+  if (rep.IsTaggedSigned()) {
+    for (size_t i = 0; i < length; i++) {
+      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+    }
+  } else {
+    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
+    if (!rep.IsTaggedPointer()) {
+      // Also add some Smis if we are checking AnyTagged case.
+      for (size_t i = 0; i < length / 2; i++) {
+        smi_view[i] =
+            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+      }
+    }
+  }
+}
+
+template <typename CType>
+void RunLoadImmIndex(MachineType rep, TestAlignment t) {
+  const int kNumElems = 16;
+  CType buffer[kNumElems];
+  InitBuffer(buffer, kNumElems, rep);
 
   // Test with various large and small offsets.
   for (int offset = -1; offset <= 200000; offset *= -5) {
     for (int i = 0; i < kNumElems; i++) {
-      BufferedRawMachineAssemblerTester<Type> m;
-      Node* base = m.PointerConstant(buffer - offset);
+      BufferedRawMachineAssemblerTester<CType> m;
+      void* base_pointer = &buffer[0] - offset;
+#if V8_POINTER_COMPRESSION
+      if (rep.IsTagged()) {
+        // When pointer compression is enabled then we need to access only
+        // the lower 32-bit of the tagged value while the buffer contains
+        // full 64-bit values.
+        base_pointer = LSB(base_pointer, kPointerSize / 2);
+      }
+#endif
+      Node* base = m.PointerConstant(base_pointer);
       Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
       if (t == TestAlignment::kAligned) {
         m.Return(m.Load(rep, base, index));
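
InitBuffer matters because a TaggedSigned load may be decompressed, so the buffer must hold well-formed Smis rather than arbitrary bytes. As a reminder of what "well-formed" means on 64-bit builds, here is a standalone sketch of the usual encoding (payload in the upper 32 bits, zero tag in the lower half; the exact scheme varies by build and word size, so treat this as an assumption):

#include <cstdint>
#include <iostream>

// Toy 64-bit Smi encoding: value << 32, low 32 bits zero. Assumes a
// 64-bit host; not guaranteed to match every V8 configuration.
intptr_t SmiFromInt(int value) {
  return static_cast<intptr_t>(static_cast<uint64_t>(value) << 32);
}
int SmiToInt(intptr_t smi) {
  return static_cast<int>(static_cast<uint64_t>(smi) >> 32);
}

int main() {
  intptr_t smi = SmiFromInt(42);
  std::cout << SmiToInt(smi) << "\n";  // prints 42
}
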
@@ -203,82 +250,76 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
         UNREACHABLE();
       }
 
-      volatile Type expected = buffer[i];
-      volatile Type actual = m.Call();
-      CHECK_EQ(expected, actual);
+      CHECK_EQ(buffer[i], m.Call());
     }
   }
 }
 
 template <typename CType>
 void RunLoadStore(MachineType rep, TestAlignment t) {
-  const int kNumElems = 4;
-  CType buffer[kNumElems];
+  const int kNumElems = 16;
+  CType in_buffer[kNumElems];
+  CType out_buffer[kNumElems];
+
+  InitBuffer(in_buffer, kNumElems, rep);
 
   for (int32_t x = 0; x < kNumElems; x++) {
     int32_t y = kNumElems - x - 1;
-    // initialize the buffer with raw data.
-    byte* raw = reinterpret_cast<byte*>(buffer);
-    for (size_t i = 0; i < sizeof(buffer); i++) {
-      raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
-    }
 
     RawMachineAssemblerTester<int32_t> m;
     int32_t OK = 0x29000 + x;
-    Node* base = m.PointerConstant(buffer);
-    Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
-    Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
+    Node* in_base = m.PointerConstant(in_buffer);
+    Node* in_index = m.IntPtrConstant(x * sizeof(CType));
+    Node* out_base = m.PointerConstant(out_buffer);
+    Node* out_index = m.IntPtrConstant(y * sizeof(CType));
 
     if (t == TestAlignment::kAligned) {
-      Node* load = m.Load(rep, base, index0);
-      m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
+      Node* load = m.Load(rep, in_base, in_index);
+      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
-      Node* load = m.UnalignedLoad(rep, base, index0);
-      m.UnalignedStore(rep.representation(), base, index1, load);
+      Node* load = m.UnalignedLoad(rep, in_base, in_index);
+      m.UnalignedStore(rep.representation(), out_base, out_index, load);
     }
 
     m.Return(m.Int32Constant(OK));
 
-    CHECK(buffer[x] != buffer[y]);
+    memset(out_buffer, 0, sizeof(out_buffer));
+    CHECK_NE(in_buffer[x], out_buffer[y]);
     CHECK_EQ(OK, m.Call());
-    CHECK(buffer[x] == buffer[y]);
+    CHECK_EQ(in_buffer[x], out_buffer[y]);
+    for (int32_t z = 0; z < kNumElems; z++) {
+      if (z != y) CHECK_EQ(CType{0}, out_buffer[z]);
+    }
   }
 }
 
 template <typename CType>
 void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
-  CType in, out;
-  CType in_buffer[2];
-  CType out_buffer[2];
-  byte* raw;
+  byte in_buffer[2 * sizeof(CType)];
+  byte out_buffer[2 * sizeof(CType)];
 
   for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
-    int y = sizeof(CType) - x;
+    CType* in = reinterpret_cast<CType*>(&in_buffer[x]);
+    InitBuffer(in, 1, rep);
 
-    raw = reinterpret_cast<byte*>(&in);
-    for (size_t i = 0; i < sizeof(CType); i++) {
-      raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
-    }
+    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
+      CType* out = reinterpret_cast<CType*>(&out_buffer[y]);
 
-    raw = reinterpret_cast<byte*>(in_buffer);
-    MemCopy(raw + x, &in, sizeof(CType));
+      RawMachineAssemblerTester<int32_t> m;
+      int32_t OK = 0x29000 + x;
 
-    RawMachineAssemblerTester<int32_t> m;
-    int32_t OK = 0x29000 + x;
+      Node* in_base = m.PointerConstant(in_buffer);
+      Node* in_index = m.IntPtrConstant(x);
+      Node* load = m.UnalignedLoad(rep, in_base, in_index);
 
-    Node* base0 = m.PointerConstant(in_buffer);
-    Node* base1 = m.PointerConstant(out_buffer);
-    Node* index0 = m.IntPtrConstant(x);
-    Node* index1 = m.IntPtrConstant(y);
-    Node* load = m.UnalignedLoad(rep, base0, index0);
-    m.UnalignedStore(rep.representation(), base1, index1, load);
+      Node* out_base = m.PointerConstant(out_buffer);
+      Node* out_index = m.IntPtrConstant(y);
+      m.UnalignedStore(rep.representation(), out_base, out_index, load);
 
-    m.Return(m.Int32Constant(OK));
+      m.Return(m.Int32Constant(OK));
 
-    CHECK_EQ(OK, m.Call());
-
-    raw = reinterpret_cast<byte*>(&out_buffer);
-    MemCopy(&out, raw + y, sizeof(CType));
-    CHECK(in == out);
+      CHECK_EQ(OK, m.Call());
+      CHECK_EQ(in[0], out[0]);
+    }
   }
 }
 }  // namespace
@@ -290,7 +331,11 @@ TEST(RunLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
+  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
+  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
+                               TestAlignment::kAligned);
+  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -304,8 +349,11 @@ TEST(RunUnalignedLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
-                            TestAlignment::kUnaligned);
+  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
+  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
+  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
+                               TestAlignment::kUnaligned);
+  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -321,7 +369,11 @@ TEST(RunLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
+  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
+  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
+                            TestAlignment::kAligned);
+  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -334,7 +386,11 @@ TEST(RunUnalignedLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
+  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
+  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
+                            TestAlignment::kUnaligned);
+  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -347,7 +403,11 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
   RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
   RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
   RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
-  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
+  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
+  RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
+  RunUnalignedLoadStoreUnalignedAccess<HeapObject*>(
+      MachineType::TaggedPointer());
+  RunUnalignedLoadStoreUnalignedAccess<Object*>(MachineType::AnyTagged());
   RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
   RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
 #if V8_TARGET_ARCH_64_BIT
@@ -355,14 +415,6 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
 #endif
 }
 
-#if V8_TARGET_LITTLE_ENDIAN
-#define LSB(addr, bytes) addr
-#elif V8_TARGET_BIG_ENDIAN
-#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
-#else
-#error "Unknown Architecture"
-#endif
-
 namespace {
 void RunLoadStoreSignExtend32(TestAlignment t) {
   int32_t buffer[4];
@@ -608,6 +660,10 @@ TEST(RunUnalignedLoadStoreTruncation) {
   LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
 }
 
+#undef LSB
+#undef A_BILLION
+#undef A_GIG
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8