Revert "[ptr-compr] Fix incorrectly used machine types"

This reverts commit b8e8b0de4f.

Reason for revert:
https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Arm%20-%20debug/8276

Original change's description:
> [ptr-compr] Fix incorrectly used machine types
> 
> in TurboFan, CSA, Wasm and compiler tests. Tagged value decompression
> logic will depend on the machine type of the value being loaded, so it
> must be correct.
> 
> Bug: v8:7703
> Change-Id: Ia9e7cc1e273e5a458d9de8aaa4adb0c970413b8b
> Reviewed-on: https://chromium-review.googlesource.com/c/1319573
> Commit-Queue: Igor Sheludko <ishell@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#57280}

TBR=mstarzinger@chromium.org,ishell@chromium.org

Change-Id: Ia97d5bfebf8d8fe1b2b7607f63024b60cf2c584f
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7703
Reviewed-on: https://chromium-review.googlesource.com/c/1320349
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57294}
Author: Michael Achenbach (committed by Commit Bot)
Date:   2018-11-06 18:10:35 +00:00
Parent: ab445f8b4b
Commit: 045756f32b

7 changed files with 101 additions and 200 deletions
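
Background for the diff below: under pointer compression, a tagged field is
stored as a 32-bit value that must be decompressed (re-based against the heap
cage) after the load, while raw words and pointers are loaded at full width.
A load emitted with the wrong MachineType therefore reads the wrong width or
skips the decompression step. A minimal sketch of the idea; the helper name,
`cage_base`, and the 32-bit layout are illustrative assumptions, not V8's
actual decompression code:

  #include <cstdint>

  // Sketch only: why the MachineType of a load must match the slot contents.
  uintptr_t LoadField(const void* field_addr, bool is_tagged,
                      uintptr_t cage_base) {
    if (is_tagged) {
      // Tagged slot: read 32 bits, then decompress by adding the cage base.
      uint32_t compressed = *static_cast<const uint32_t*>(field_addr);
      return cage_base + compressed;
    }
    // Untagged slot: full pointer-width read, no decompression.
    return *static_cast<const uintptr_t*>(field_addr);
  }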

@@ -524,7 +524,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
   {
     // Copy over in-object properties.
     Label continue_with_write_barrier(this), done_init(this);
-    TVARIABLE(IntPtrT, offset, IntPtrConstant(JSObject::kHeaderSize));
+    VARIABLE(offset, MachineType::PointerRepresentation(),
+             IntPtrConstant(JSObject::kHeaderSize));
     // Mutable heap numbers only occur on 32-bit platforms.
     bool may_use_mutable_heap_numbers =
         FLAG_track_double_fields && !FLAG_unbox_double_fields;
@@ -534,21 +535,16 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
     Branch(WordEqual(offset.value(), instance_size), &done_init,
            &continue_fast);
     BIND(&continue_fast);
+    Node* field = LoadObjectField(boilerplate, offset.value());
     if (may_use_mutable_heap_numbers) {
-      TNode<Object> field = LoadObjectField(boilerplate, offset.value());
       Label store_field(this);
       GotoIf(TaggedIsSmi(field), &store_field);
-      GotoIf(IsMutableHeapNumber(CAST(field)), &continue_with_write_barrier);
+      GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
       Goto(&store_field);
       BIND(&store_field);
-      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
-    } else {
-      // Copy fields as raw data.
-      TNode<IntPtrT> field =
-          LoadObjectField<IntPtrT>(boilerplate, offset.value());
-      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
     }
-    offset = IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize));
+    StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
+    offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
     Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
            &done_init);
   }

@@ -864,13 +864,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
     return UncheckedCast<Object>(
         LoadObjectField(object, offset, MachineType::AnyTagged()));
   }
-  template <class T, typename std::enable_if<
-                         std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
-                         int>::type = 0>
-  TNode<T> LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset) {
-    return UncheckedCast<T>(
-        LoadObjectField(object, offset, MachineTypeOf<T>::value));
-  }
   // Load a SMI field and untag it.
   TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
                                          int offset);
@@ -1238,15 +1231,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   Node* StoreObjectFieldNoWriteBarrier(
       Node* object, Node* offset, Node* value,
       MachineRepresentation rep = MachineRepresentation::kTagged);
-  template <class T = Object>
-  TNode<T> StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
-                                          TNode<IntPtrT> offset,
-                                          TNode<T> value) {
-    return UncheckedCast<T>(StoreObjectFieldNoWriteBarrier(
-        object, offset, value, MachineRepresentationOf<T>::value));
-  }
   // Store the Map of an HeapObject.
   Node* StoreMap(Node* object, Node* map);
   Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
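
The two deleted blocks above are typed overloads that derive the machine type
from the C++ template argument via the MachineTypeOf<T> and
MachineRepresentationOf<T> traits, so a LoadObjectField<IntPtrT> cannot
silently be emitted as a tagged load. A standalone sketch of that trait
pattern; the enum and payload types here are placeholders, not V8's:

  // Map a payload type to its machine type at compile time.
  enum class MTy { kTaggedPointer, kWordPtr };

  struct HeapObjectT {};  // stand-ins for V8's TNode payload types
  struct IntPtrT {};

  template <class T> struct MachineTypeOf;  // primary left undefined
  template <> struct MachineTypeOf<HeapObjectT> {
    static constexpr MTy value = MTy::kTaggedPointer;
  };
  template <> struct MachineTypeOf<IntPtrT> {
    static constexpr MTy value = MTy::kWordPtr;
  };

  // A load helper can then derive the machine type from its template argument.
  template <class T>
  constexpr MTy LoadKindFor() { return MachineTypeOf<T>::value; }

  static_assert(LoadKindFor<IntPtrT>() == MTy::kWordPtr, "raw word load");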

@@ -2947,7 +2947,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
   Node* frame = __ LoadFramePointer();
   Node* parent_frame =
-      __ Load(MachineType::Pointer(), frame,
+      __ Load(MachineType::AnyTagged(), frame,
              __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
   Node* parent_frame_type = __ Load(
       MachineType::AnyTagged(), parent_frame,
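
Context for this one-line change: the slot at kCallerFPOffset holds the saved
frame pointer, a raw machine word rather than a heap object, which is what the
reverted commit had expressed with MachineType::Pointer(). A standalone
picture of the frame walk; the struct layout is assumed for illustration:

  #include <cstdint>

  struct Frame {
    Frame* caller_fp;      // saved caller frame pointer: a raw pointer
    intptr_t type_marker;  // frame type word, read separately
  };

  intptr_t ParentFrameType(const Frame* fp) {
    const Frame* parent = fp->caller_fp;  // raw load, nothing to decompress
    return parent->type_marker;
  }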

@@ -81,33 +81,22 @@ MachineType assert_size(int expected_size, MachineType type) {
 #define WASM_INSTANCE_OBJECT_OFFSET(name) \
   wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
 
-#define LOAD_RAW(base_pointer, byte_offset, type)                             \
-  SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer,  \
-                             mcgraph()->Int32Constant(byte_offset), Effect(), \
-                             Control()))
-
-#define LOAD_INSTANCE_FIELD(name, type)                             \
-  LOAD_RAW(instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name), \
-           assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type))
-
-#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
-  LOAD_RAW(base_pointer, byte_offset, MachineType::TaggedPointer())
-
-#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
-  LOAD_RAW(base_pointer, byte_offset, MachineType::AnyTagged())
-
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type)                      \
-  LOAD_RAW(array_node,                                                      \
-           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), type)
-
-#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
-
-#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
-
-#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
+#define LOAD_INSTANCE_FIELD(name, type)                                      \
+  SetEffect(graph()->NewNode(                                                \
+      mcgraph()->machine()->Load(                                            \
+          assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type)),               \
+      instance_node_.get(),                                                  \
+      mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(), \
+      Control()))
+
+#define LOAD_TAGGED_POINTER(base_pointer, byte_offset)                        \
+  SetEffect(graph()->NewNode(                                                 \
+      mcgraph()->machine()->Load(MachineType::TaggedPointer()), base_pointer, \
+      mcgraph()->Int32Constant(byte_offset), Effect(), Control()))
+
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
+  LOAD_TAGGED_POINTER(                           \
+      array_node, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
 
 // This can be used to store tagged Smi values only.
 #define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
@@ -2190,11 +2179,11 @@ Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
                                                        uint32_t* index) {
   MachineOperatorBuilder* machine = mcgraph()->machine();
   Node* upper =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
   (*index)++;
   upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
   Node* lower =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
   (*index)++;
   Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
   return value;
@@ -2234,7 +2223,7 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
 Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
   Node* exceptions_table =
       LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
-  Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
+  Node* tag = LOAD_FIXED_ARRAY_SLOT(exceptions_table, exception_index);
   return tag;
 }
@@ -2270,7 +2259,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
         break;
       }
       case wasm::kWasmAnyRef:
-        value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
+        value = LOAD_FIXED_ARRAY_SLOT(values_array, index);
         ++index;
         break;
       default:
@@ -2670,8 +2659,7 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
   // Load the imported function refs array from the instance.
   Node* imported_function_refs =
       LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
-  Node* ref_node =
-      LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
+  Node* ref_node = LOAD_FIXED_ARRAY_SLOT(imported_function_refs, func_index);
 
   // Load the target from the imported_targets array at a known offset.
   Node* imported_targets =
@@ -4627,7 +4615,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       args[pos++] = callable_node;  // target callable.
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
             native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -4690,7 +4678,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
            native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -5551,14 +5539,9 @@ AssemblerOptions WasmAssemblerOptions() {
 #undef FATAL_UNSUPPORTED_OPCODE
 #undef WASM_INSTANCE_OBJECT_SIZE
 #undef WASM_INSTANCE_OBJECT_OFFSET
-#undef LOAD_RAW
 #undef LOAD_INSTANCE_FIELD
 #undef LOAD_TAGGED_POINTER
-#undef LOAD_TAGGED_ANY
 #undef LOAD_FIXED_ARRAY_SLOT
-#undef LOAD_FIXED_ARRAY_SLOT_SMI
-#undef LOAD_FIXED_ARRAY_SLOT_PTR
-#undef LOAD_FIXED_ARRAY_SLOT_ANY
 #undef STORE_FIXED_ARRAY_SLOT_SMI
 #undef STORE_FIXED_ARRAY_SLOT_ANY
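
The deleted macros layered every load on LOAD_RAW so each call site spelled
out its machine type (TaggedSigned for Smi slots, TaggedPointer for heap
objects, AnyTagged when either may appear); the restored macros funnel all
fixed-array slots through one TaggedPointer load. The layering, reduced to a
standalone preprocessor sketch with Load() as a stub for the TurboFan graph
building, and with assumed header/slot sizes:

  #include <cstdio>

  enum MType { kTaggedSigned, kTaggedPointer, kAnyTagged };

  int Load(const char* base, int byte_offset, MType type) {
    std::printf("load %s+%d as %d\n", base, byte_offset, type);
    return 0;
  }

  #define LOAD_RAW(base, byte_offset, type) Load(base, byte_offset, type)
  #define LOAD_FIXED_ARRAY_SLOT(array, index, type) \
    LOAD_RAW(array, 16 + (index) * 8, type)  // 16/8: assumed header/slot size
  #define LOAD_FIXED_ARRAY_SLOT_SMI(array, index) \
    LOAD_FIXED_ARRAY_SLOT(array, index, kTaggedSigned)

  int main() {
    LOAD_FIXED_ARRAY_SLOT_SMI("values_array", 0);  // a typed Smi slot load
  }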

@@ -2336,7 +2336,7 @@ void AccessorAssembler::TryProbeStubCacheTable(
   DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
                               stub_cache->key_reference(table).address());
   TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
-      Load(MachineType::AnyTagged(), key_base,
+      Load(MachineType::TaggedPointer(), key_base,
            IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize))));
 
   // We found the handler.
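
A note on why the reverted commit likely used AnyTagged() here: the loaded
handler is a MaybeObject, which may be a weak reference rather than a plain
heap pointer, so TaggedPointer is the stronger claim. A sketch of the low-bit
tagging that distinguishes the cases; the tag values are assumptions for
illustration:

  #include <cstdint>

  constexpr uintptr_t kTagMask = 0x3;
  constexpr uintptr_t kStrongTag = 0x1;  // assumed strong HeapObject tag
  constexpr uintptr_t kWeakTag = 0x3;    // assumed weak-reference tag

  bool IsWeak(uintptr_t maybe_object) {
    return (maybe_object & kTagMask) == kWeakTag;
  }
  bool IsStrongHeapObject(uintptr_t maybe_object) {
    return (maybe_object & kTagMask) == kStrongTag;
  }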

@@ -94,12 +94,6 @@ class MachineType {
            representation() == MachineRepresentation::kTaggedSigned ||
            representation() == MachineRepresentation::kTagged;
   }
-  constexpr bool IsTaggedSigned() const {
-    return representation() == MachineRepresentation::kTaggedSigned;
-  }
-  constexpr bool IsTaggedPointer() const {
-    return representation() == MachineRepresentation::kTaggedPointer;
-  }
   constexpr static MachineRepresentation PointerRepresentation() {
     return (kPointerSize == 4) ? MachineRepresentation::kWord32
                                : MachineRepresentation::kWord64;
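
The deleted IsTaggedSigned() and IsTaggedPointer() predicates round out the
existing IsTagged(); the reverted test code below used them to pick buffer
contents matching the representation under test. The shape of the pattern,
standalone, with placeholder enumerators instead of MachineRepresentation:

  enum class Rep { kTaggedSigned, kTaggedPointer, kTagged, kWord32 };

  struct MType {
    Rep rep;
    constexpr bool IsTaggedSigned() const { return rep == Rep::kTaggedSigned; }
    constexpr bool IsTaggedPointer() const {
      return rep == Rep::kTaggedPointer;
    }
    constexpr bool IsTagged() const {
      return rep == Rep::kTaggedSigned || rep == Rep::kTaggedPointer ||
             rep == Rep::kTagged;
    }
  };

  static_assert(MType{Rep::kTaggedSigned}.IsTagged(), "Smis are tagged too");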

@@ -25,14 +25,6 @@ enum TestAlignment {
   kUnaligned,
 };
 
-#if V8_TARGET_LITTLE_ENDIAN
-#define LSB(addr, bytes) addr
-#elif V8_TARGET_BIG_ENDIAN
-#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
-#else
-#error "Unknown Architecture"
-#endif
-
 // This is a America!
 #define A_BILLION 1000000000ULL
 #define A_GIG (1024ULL * 1024ULL * 1024ULL)
@@ -186,61 +178,22 @@ TEST(RunUnalignedLoadStoreFloat64Offset) {
 }
 
 namespace {
-
-// Initializes the buffer with some raw data respecting requested
-// representation of the values.
-template <typename CType>
-void InitBuffer(CType* buffer, size_t length, MachineType rep) {
-  const size_t kBufferSize = sizeof(CType) * length;
-  if (!rep.IsTagged()) {
-    byte* raw = reinterpret_cast<byte*>(buffer);
-    for (size_t i = 0; i < kBufferSize; i++) {
-      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
-    }
-    return;
-  }
-
-  // Tagged field loads require values to be properly tagged because of
-  // pointer decompression that may be happenning during load.
-  Isolate* isolate = CcTest::InitIsolateOnce();
-  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
-  if (rep.IsTaggedSigned()) {
-    for (size_t i = 0; i < length; i++) {
-      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
-    }
-  } else {
-    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
-    if (!rep.IsTaggedPointer()) {
-      // Also add some Smis if we are checking AnyTagged case.
-      for (size_t i = 0; i < length / 2; i++) {
-        smi_view[i] =
-            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
-      }
-    }
-  }
-}
-
-template <typename CType>
+template <typename Type>
 void RunLoadImmIndex(MachineType rep, TestAlignment t) {
-  const int kNumElems = 16;
-  CType buffer[kNumElems];
-
-  InitBuffer(buffer, kNumElems, rep);
+  const int kNumElems = 3;
+  Type buffer[kNumElems];
+
+  // initialize the buffer with some raw data.
+  byte* raw = reinterpret_cast<byte*>(buffer);
+  for (size_t i = 0; i < sizeof(buffer); i++) {
+    raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+  }
 
   // Test with various large and small offsets.
   for (int offset = -1; offset <= 200000; offset *= -5) {
     for (int i = 0; i < kNumElems; i++) {
-      BufferedRawMachineAssemblerTester<CType> m;
-      void* base_pointer = &buffer[0] - offset;
-#if V8_POINTER_COMPRESSION
-      if (rep.IsTagged()) {
-        // When pointer compression is enabled then we need to access only
-        // the lower 32-bit of the tagged value while the buffer contains
-        // full 64-bit values.
-        base_pointer = LSB(base_pointer, kPointerSize / 2);
-      }
-#endif
-      Node* base = m.PointerConstant(base_pointer);
+      BufferedRawMachineAssemblerTester<Type> m;
+      Node* base = m.PointerConstant(buffer - offset);
       Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
       if (t == TestAlignment::kAligned) {
         m.Return(m.Load(rep, base, index));
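
Two pieces of the deleted test scaffolding are worth spelling out. InitBuffer
had to fill tagged buffers with real Smis or root pointers because a tagged
load may decompress whatever it reads, and garbage bytes would not survive
that round trip. And the LSB() adjustment pointed the generated load at the
low 32 bits of each 64-bit slot, which is endianness-dependent. A standalone
illustration of that address adjustment, using a runtime endianness probe
instead of the #if above:

  #include <cstdint>
  #include <cstring>

  // Address of the least-significant 32 bits of a 64-bit slot.
  const uint8_t* LowHalf(const uint64_t* slot) {
    const uint8_t* bytes = reinterpret_cast<const uint8_t*>(slot);
    uint64_t probe = 1;  // low bit set
    uint8_t first_byte;
    std::memcpy(&first_byte, &probe, 1);
    // Little-endian: low half comes first. Big-endian: low half is the tail.
    return first_byte == 1 ? bytes : bytes + sizeof(uint32_t);
  }
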
@@ -250,76 +203,82 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
         UNREACHABLE();
       }
 
-      CHECK_EQ(buffer[i], m.Call());
+      volatile Type expected = buffer[i];
+      volatile Type actual = m.Call();
+      CHECK_EQ(expected, actual);
     }
   }
 }
 
 template <typename CType>
 void RunLoadStore(MachineType rep, TestAlignment t) {
-  const int kNumElems = 16;
-  CType in_buffer[kNumElems];
-  CType out_buffer[kNumElems];
-
-  InitBuffer(in_buffer, kNumElems, rep);
+  const int kNumElems = 4;
+  CType buffer[kNumElems];
 
   for (int32_t x = 0; x < kNumElems; x++) {
     int32_t y = kNumElems - x - 1;
+    // initialize the buffer with raw data.
+    byte* raw = reinterpret_cast<byte*>(buffer);
+    for (size_t i = 0; i < sizeof(buffer); i++) {
+      raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+    }
 
     RawMachineAssemblerTester<int32_t> m;
     int32_t OK = 0x29000 + x;
-    Node* in_base = m.PointerConstant(in_buffer);
-    Node* in_index = m.IntPtrConstant(x * sizeof(CType));
-    Node* out_base = m.PointerConstant(out_buffer);
-    Node* out_index = m.IntPtrConstant(y * sizeof(CType));
+    Node* base = m.PointerConstant(buffer);
+    Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
+    Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
     if (t == TestAlignment::kAligned) {
-      Node* load = m.Load(rep, in_base, in_index);
-      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
+      Node* load = m.Load(rep, base, index0);
+      m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
     } else if (t == TestAlignment::kUnaligned) {
-      Node* load = m.UnalignedLoad(rep, in_base, in_index);
-      m.UnalignedStore(rep.representation(), out_base, out_index, load);
+      Node* load = m.UnalignedLoad(rep, base, index0);
+      m.UnalignedStore(rep.representation(), base, index1, load);
     }
 
     m.Return(m.Int32Constant(OK));
 
-    memset(out_buffer, 0, sizeof(out_buffer));
-    CHECK_NE(in_buffer[x], out_buffer[y]);
+    CHECK(buffer[x] != buffer[y]);
     CHECK_EQ(OK, m.Call());
-    CHECK_EQ(in_buffer[x], out_buffer[y]);
-    for (int32_t z = 0; z < kNumElems; z++) {
-      if (z != y) CHECK_EQ(CType{0}, out_buffer[z]);
-    }
+    CHECK(buffer[x] == buffer[y]);
   }
 }
 
 template <typename CType>
 void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
-  byte in_buffer[2 * sizeof(CType)];
-  byte out_buffer[2 * sizeof(CType)];
+  CType in, out;
+  CType in_buffer[2];
+  CType out_buffer[2];
+  byte* raw;
 
   for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
-    CType* in = reinterpret_cast<CType*>(&in_buffer[x]);
-    InitBuffer(in, 1, rep);
+    int y = sizeof(CType) - x;
 
-    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
-      CType* out = reinterpret_cast<CType*>(&out_buffer[y]);
-
-      RawMachineAssemblerTester<int32_t> m;
-      int32_t OK = 0x29000 + x;
-      Node* in_base = m.PointerConstant(in_buffer);
-      Node* in_index = m.IntPtrConstant(x);
-      Node* load = m.UnalignedLoad(rep, in_base, in_index);
-      Node* out_base = m.PointerConstant(out_buffer);
-      Node* out_index = m.IntPtrConstant(y);
-      m.UnalignedStore(rep.representation(), out_base, out_index, load);
-      m.Return(m.Int32Constant(OK));
-      CHECK_EQ(OK, m.Call());
-      CHECK_EQ(in[0], out[0]);
+    raw = reinterpret_cast<byte*>(&in);
+    for (size_t i = 0; i < sizeof(CType); i++) {
+      raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
     }
+
+    raw = reinterpret_cast<byte*>(in_buffer);
+    MemCopy(raw + x, &in, sizeof(CType));
+
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t OK = 0x29000 + x;
+    Node* base0 = m.PointerConstant(in_buffer);
+    Node* base1 = m.PointerConstant(out_buffer);
+    Node* index0 = m.IntPtrConstant(x);
+    Node* index1 = m.IntPtrConstant(y);
+    Node* load = m.UnalignedLoad(rep, base0, index0);
+    m.UnalignedStore(rep.representation(), base1, index1, load);
+    m.Return(m.Int32Constant(OK));
+    CHECK_EQ(OK, m.Call());
+
+    raw = reinterpret_cast<byte*>(&out_buffer);
+    MemCopy(&out, raw + y, sizeof(CType));
+    CHECK(in == out);
   }
 }
 
 }  // namespace
@@ -331,11 +290,7 @@ TEST(RunLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
-  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
-  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
-                               TestAlignment::kAligned);
-  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -349,11 +304,8 @@ TEST(RunUnalignedLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
-                               TestAlignment::kUnaligned);
-  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
+                            TestAlignment::kUnaligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -369,11 +321,7 @@ TEST(RunLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
-  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
-  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
-                            TestAlignment::kAligned);
-  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -386,11 +334,7 @@ TEST(RunUnalignedLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
-  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
-  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
-                            TestAlignment::kUnaligned);
-  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -403,11 +347,7 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
   RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
   RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
   RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
-  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
-  RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
-  RunUnalignedLoadStoreUnalignedAccess<HeapObject*>(
-      MachineType::TaggedPointer());
-  RunUnalignedLoadStoreUnalignedAccess<Object*>(MachineType::AnyTagged());
+  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
   RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
   RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
 #if V8_TARGET_ARCH_64_BIT
@@ -415,6 +355,14 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
 #endif
 }
 
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
+#else
+#error "Unknown Architecture"
+#endif
+
 namespace {
 void RunLoadStoreSignExtend32(TestAlignment t) {
   int32_t buffer[4];
@@ -660,10 +608,6 @@ TEST(RunUnalignedLoadStoreTruncation) {
   LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
 }
 
-#undef LSB
-#undef A_BILLION
-#undef A_GIG
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8