Revert "[ptr-compr] Fix incorrectly used machine types"
This reverts commit b8e8b0de4f.
Reason for revert: https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Arm%20-%20debug/8276
Original change's description:
> [ptr-compr] Fix incorrectly used machine types
>
> in TurboFan, CSA, Wasm and compiler tests. Tagged values decompression
> logic will depend on the machine type of the value being loaded so it must
> be correct.
>
> Bug: v8:7703
> Change-Id: Ia9e7cc1e273e5a458d9de8aaa4adb0c970413b8b
> Reviewed-on: https://chromium-review.googlesource.com/c/1319573
> Commit-Queue: Igor Sheludko <ishell@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#57280}
TBR=mstarzinger@chromium.org,ishell@chromium.org
Change-Id: Ia97d5bfebf8d8fe1b2b7607f63024b60cf2c584f
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7703
Reviewed-on: https://chromium-review.googlesource.com/c/1320349
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57294}
commit 045756f32b
parent ab445f8b4b
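
For background on why the original change mattered: with pointer compression, a tagged field holds only the low 32 bits of a pointer, and the compiler emits a decompression step based on the machine type of the load. Below is a minimal standalone sketch of that idea, assuming a 64-bit little-endian host; the constant and helper names are invented for illustration and are not V8's actual API.

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    using Address = uintptr_t;
    constexpr Address kIsolateRoot = 0x700000000000;  // invented base address

    // What a MachineType::AnyTagged() load compiles to under pointer
    // compression: read 32 bits, then add the isolate root back in.
    Address LoadTagged(const uint64_t* slot) {
      uint32_t compressed;
      std::memcpy(&compressed, slot, sizeof(compressed));  // low half only
      return kIsolateRoot + compressed;
    }

    // What a MachineType::Pointer() load compiles to: a full-width read,
    // with no decompression.
    Address LoadRaw(const uint64_t* slot) {
      Address raw;
      std::memcpy(&raw, slot, sizeof(raw));
      return raw;
    }

    int main() {
      Address object = kIsolateRoot + 0x1234;
      // The field stores only the compressed half; the upper half of the
      // slot is unrelated data.
      uint64_t slot = 0xDEADBEEF00000000ull | static_cast<uint32_t>(object);
      std::printf("tagged load: 0x%" PRIxPTR "\n", LoadTagged(&slot));  // == object
      std::printf("raw load:    0x%" PRIxPTR "\n", LoadRaw(&slot));     // corrupted
    }

Loading the same slot with a mismatched machine type skips (or wrongly applies) the narrowing and decompression, which is why the incorrect machine types fixed by the original change would silently yield corrupted values.
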
@@ -524,7 +524,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
   {
     // Copy over in-object properties.
     Label continue_with_write_barrier(this), done_init(this);
-    TVARIABLE(IntPtrT, offset, IntPtrConstant(JSObject::kHeaderSize));
+    VARIABLE(offset, MachineType::PointerRepresentation(),
+             IntPtrConstant(JSObject::kHeaderSize));
     // Mutable heap numbers only occur on 32-bit platforms.
     bool may_use_mutable_heap_numbers =
         FLAG_track_double_fields && !FLAG_unbox_double_fields;
@@ -534,21 +535,16 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
     Branch(WordEqual(offset.value(), instance_size), &done_init,
            &continue_fast);
     BIND(&continue_fast);
+    Node* field = LoadObjectField(boilerplate, offset.value());
     if (may_use_mutable_heap_numbers) {
-      TNode<Object> field = LoadObjectField(boilerplate, offset.value());
       Label store_field(this);
       GotoIf(TaggedIsSmi(field), &store_field);
-      GotoIf(IsMutableHeapNumber(CAST(field)), &continue_with_write_barrier);
+      GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
       Goto(&store_field);
       BIND(&store_field);
-      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
-    } else {
-      // Copy fields as raw data.
-      TNode<IntPtrT> field =
-          LoadObjectField<IntPtrT>(boilerplate, offset.value());
-      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
     }
-    offset = IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize));
+    StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
+    offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
     Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
            &done_init);
   }
@@ -864,13 +864,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
     return UncheckedCast<Object>(
         LoadObjectField(object, offset, MachineType::AnyTagged()));
   }
-  template <class T, typename std::enable_if<
-                         std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
-                         int>::type = 0>
-  TNode<T> LoadObjectField(TNode<HeapObject> object, TNode<IntPtrT> offset) {
-    return UncheckedCast<T>(
-        LoadObjectField(object, offset, MachineTypeOf<T>::value));
-  }
   // Load a SMI field and untag it.
   TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
                                          int offset);
@@ -1238,15 +1231,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   Node* StoreObjectFieldNoWriteBarrier(
       Node* object, Node* offset, Node* value,
       MachineRepresentation rep = MachineRepresentation::kTagged);
-
-  template <class T = Object>
-  TNode<T> StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
-                                          TNode<IntPtrT> offset,
-                                          TNode<T> value) {
-    return UncheckedCast<T>(StoreObjectFieldNoWriteBarrier(
-        object, offset, value, MachineRepresentationOf<T>::value));
-  }
-
   // Store the Map of an HeapObject.
   Node* StoreMap(Node* object, Node* map);
   Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index);
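
The two header hunks above remove the typed LoadObjectField and StoreObjectFieldNoWriteBarrier overloads, which derive the machine type or representation from the node's static C++ type via MachineTypeOf<T>. A toy model of that compile-time dispatch follows; only the MachineTypeOf<T> name is real V8, everything else is a simplified stand-in.

    #include <cstdio>

    enum class MT { AnyTagged, TaggedSigned, IntPtr, Float64 };

    struct Smi {};       // tagged small integer
    struct IntPtrT {};   // raw machine word
    struct Float64T {};  // raw double

    template <class T> struct MachineTypeOf { static constexpr MT value = MT::AnyTagged; };
    template <> struct MachineTypeOf<Smi>      { static constexpr MT value = MT::TaggedSigned; };
    template <> struct MachineTypeOf<IntPtrT>  { static constexpr MT value = MT::IntPtr; };
    template <> struct MachineTypeOf<Float64T> { static constexpr MT value = MT::Float64; };

    const char* Name(MT t) {
      switch (t) {
        case MT::AnyTagged:    return "AnyTagged";
        case MT::TaggedSigned: return "TaggedSigned";
        case MT::IntPtr:       return "IntPtr";
        case MT::Float64:      return "Float64";
      }
      return "?";
    }

    // The load site never spells out a machine type; it falls out of T, so
    // a tagged field cannot accidentally be loaded with a raw representation.
    template <class T>
    void LoadObjectField(int offset) {
      std::printf("load at offset %d with %s\n", offset, Name(MachineTypeOf<T>::value));
    }

    int main() {
      LoadObjectField<Smi>(8);       // TaggedSigned: may untag/decompress
      LoadObjectField<IntPtrT>(16);  // IntPtr: plain word load
    }
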
@@ -2947,7 +2947,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
 
   Node* frame = __ LoadFramePointer();
   Node* parent_frame =
-      __ Load(MachineType::Pointer(), frame,
+      __ Load(MachineType::AnyTagged(), frame,
               __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
   Node* parent_frame_type = __ Load(
       MachineType::AnyTagged(), parent_frame,
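
The hunk above swaps MachineType::Pointer() back to MachineType::AnyTagged() for the caller-FP slot. That slot holds a raw frame pointer, never a tagged heap value, which is what the reverted change was encoding: under pointer compression an AnyTagged load would be narrowed and decompressed. A portable toy frame walk illustrating the distinction; the layout is invented for this sketch and is not V8's frame constants.

    #include <cstdio>

    // The caller-FP link is a plain machine pointer, so walking it must use
    // raw pointer loads, not tagged ones.
    struct Frame {
      Frame* caller_fp;  // raw pointer, never a tagged heap value
      int frame_type;
    };

    int main() {
      Frame outer{nullptr, 1};
      Frame inner{&outer, 2};
      for (Frame* fp = &inner; fp != nullptr; fp = fp->caller_fp) {
        std::printf("frame type %d\n", fp->frame_type);
      }
    }
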
@@ -81,33 +81,22 @@ MachineType assert_size(int expected_size, MachineType type) {
 #define WASM_INSTANCE_OBJECT_OFFSET(name) \
   wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
 
-#define LOAD_RAW(base_pointer, byte_offset, type)                             \
-  SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer, \
-                             mcgraph()->Int32Constant(byte_offset), Effect(), \
-                             Control()))
-
-#define LOAD_INSTANCE_FIELD(name, type)                             \
-  LOAD_RAW(instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name), \
-           assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type))
-
-#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
-  LOAD_RAW(base_pointer, byte_offset, MachineType::TaggedPointer())
-
-#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
-  LOAD_RAW(base_pointer, byte_offset, MachineType::AnyTagged())
-
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type) \
-  LOAD_RAW(array_node,                                 \
-           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), type)
-
-#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
-
-#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
-
-#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
+#define LOAD_INSTANCE_FIELD(name, type)                                       \
+  SetEffect(graph()->NewNode(                                                 \
+      mcgraph()->machine()->Load(                                             \
+          assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type)),                \
+      instance_node_.get(),                                                   \
+      mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(),  \
+      Control()))
+
+#define LOAD_TAGGED_POINTER(base_pointer, byte_offset)                        \
+  SetEffect(graph()->NewNode(                                                 \
+      mcgraph()->machine()->Load(MachineType::TaggedPointer()), base_pointer, \
+      mcgraph()->Int32Constant(byte_offset), Effect(), Control()))
+
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
+  LOAD_TAGGED_POINTER(                           \
+      array_node, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
 
 // This can be used to store tagged Smi values only.
 #define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
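
The macro hunk above removes the LOAD_RAW layering, under which every typed helper bottomed out in a single macro that received an explicit MachineType, giving per-type handling (such as decompression) exactly one place to live. A compilable sketch of the same layering pattern, with plain memory reads standing in for V8's graph nodes and an invented array layout:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    enum class MT { TaggedSigned, TaggedPointer, AnyTagged };

    // One bottleneck function, analogous to LOAD_RAW: the machine type
    // arrives here no matter which helper was used, so tagged handling
    // has a single home.
    uint32_t LoadFrom(const uint8_t* base, int offset, MT type) {
      uint32_t v;
      std::memcpy(&v, base + offset, sizeof(v));
      std::printf("load type=%d offset=%d -> %u\n", static_cast<int>(type),
                  offset, v);
      return v;
    }

    #define LOAD_RAW(base, byte_offset, type) LoadFrom((base), (byte_offset), (type))
    #define LOAD_TAGGED_POINTER(base, byte_offset) \
      LOAD_RAW(base, byte_offset, MT::TaggedPointer)
    // Invented 8-byte header and 4-byte slots for the fake fixed array.
    #define LOAD_FIXED_ARRAY_SLOT_SMI(base, index) \
      LOAD_RAW(base, 8 + 4 * (index), MT::TaggedSigned)

    int main() {
      uint8_t fake_heap[64] = {};
      fake_heap[12] = 42;  // slot 1 of the fake fixed array (little-endian)
      LOAD_FIXED_ARRAY_SLOT_SMI(fake_heap, 1);
      LOAD_TAGGED_POINTER(fake_heap, 16);
    }
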
@@ -2190,11 +2179,11 @@ Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
                                                        uint32_t* index) {
   MachineOperatorBuilder* machine = mcgraph()->machine();
   Node* upper =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
   (*index)++;
   upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
   Node* lower =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT(values_array, *index));
   (*index)++;
   Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
   return value;
@@ -2234,7 +2223,7 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
 Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
   Node* exceptions_table =
       LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
-  Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
+  Node* tag = LOAD_FIXED_ARRAY_SLOT(exceptions_table, exception_index);
   return tag;
 }
 
@@ -2270,7 +2259,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
         break;
       }
       case wasm::kWasmAnyRef:
-        value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
+        value = LOAD_FIXED_ARRAY_SLOT(values_array, index);
         ++index;
         break;
       default:
@@ -2670,8 +2659,7 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
   // Load the imported function refs array from the instance.
   Node* imported_function_refs =
       LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
-  Node* ref_node =
-      LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
+  Node* ref_node = LOAD_FIXED_ARRAY_SLOT(imported_function_refs, func_index);
 
   // Load the target from the imported_targets array at a known offset.
   Node* imported_targets =
@@ -4627,7 +4615,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
       args[pos++] = callable_node;  // target callable.
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
             native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -4690,7 +4678,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
 
       // Receiver.
       if (sloppy_receiver) {
-        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT_PTR(
+        Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
            native_context, Context::GLOBAL_PROXY_INDEX);
         args[pos++] = global_proxy;
       } else {
@@ -5551,14 +5539,9 @@ AssemblerOptions WasmAssemblerOptions() {
 #undef FATAL_UNSUPPORTED_OPCODE
 #undef WASM_INSTANCE_OBJECT_SIZE
 #undef WASM_INSTANCE_OBJECT_OFFSET
-#undef LOAD_RAW
 #undef LOAD_INSTANCE_FIELD
 #undef LOAD_TAGGED_POINTER
-#undef LOAD_TAGGED_ANY
 #undef LOAD_FIXED_ARRAY_SLOT
-#undef LOAD_FIXED_ARRAY_SLOT_SMI
-#undef LOAD_FIXED_ARRAY_SLOT_PTR
-#undef LOAD_FIXED_ARRAY_SLOT_ANY
 #undef STORE_FIXED_ARRAY_SLOT_SMI
 #undef STORE_FIXED_ARRAY_SLOT_ANY
 
@@ -2336,7 +2336,7 @@ void AccessorAssembler::TryProbeStubCacheTable(
   DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
                               stub_cache->key_reference(table).address());
   TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
-      Load(MachineType::AnyTagged(), key_base,
+      Load(MachineType::TaggedPointer(), key_base,
            IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize))));
 
   // We found the handler.
@@ -94,12 +94,6 @@ class MachineType {
            representation() == MachineRepresentation::kTaggedSigned ||
            representation() == MachineRepresentation::kTagged;
   }
-  constexpr bool IsTaggedSigned() const {
-    return representation() == MachineRepresentation::kTaggedSigned;
-  }
-  constexpr bool IsTaggedPointer() const {
-    return representation() == MachineRepresentation::kTaggedPointer;
-  }
   constexpr static MachineRepresentation PointerRepresentation() {
     return (kPointerSize == 4) ? MachineRepresentation::kWord32
                                : MachineRepresentation::kWord64;
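
The IsTaggedSigned and IsTaggedPointer predicates removed here classify a MachineType by its representation. A self-contained model of those predicates follows; it is simplified, since the real class also carries a semantic field alongside the representation.

    #include <cstdio>

    enum class MachineRepresentation {
      kWord32, kWord64, kTaggedSigned, kTaggedPointer, kTagged
    };

    class MachineType {
     public:
      constexpr explicit MachineType(MachineRepresentation rep) : rep_(rep) {}
      constexpr MachineRepresentation representation() const { return rep_; }
      constexpr bool IsTagged() const {
        return rep_ == MachineRepresentation::kTaggedPointer ||
               rep_ == MachineRepresentation::kTaggedSigned ||
               rep_ == MachineRepresentation::kTagged;
      }
      constexpr bool IsTaggedSigned() const {
        return rep_ == MachineRepresentation::kTaggedSigned;
      }
      constexpr bool IsTaggedPointer() const {
        return rep_ == MachineRepresentation::kTaggedPointer;
      }
     private:
      MachineRepresentation rep_;
    };

    static_assert(MachineType(MachineRepresentation::kTaggedSigned).IsTagged(),
                  "Smis are tagged");
    static_assert(!MachineType(MachineRepresentation::kWord64).IsTagged(),
                  "raw words are not");

    int main() {
      constexpr MachineType t(MachineRepresentation::kTaggedPointer);
      std::printf("IsTaggedPointer: %d, IsTaggedSigned: %d\n",
                  t.IsTaggedPointer(), t.IsTaggedSigned());
    }
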
@@ -25,14 +25,6 @@ enum TestAlignment {
   kUnaligned,
 };
 
-#if V8_TARGET_LITTLE_ENDIAN
-#define LSB(addr, bytes) addr
-#elif V8_TARGET_BIG_ENDIAN
-#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
-#else
-#error "Unknown Architecture"
-#endif
-
 // This is a America!
 #define A_BILLION 1000000000ULL
 #define A_GIG (1024ULL * 1024ULL * 1024ULL)
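
The LSB macro deleted here (and re-added further down, without the parentheses around bytes) answers a byte-order question: where do the least-significant bytes of a wide slot start? A host-runnable sketch that hard-codes both rules instead of using #if, so either path can be inspected anywhere:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Little-endian rule: the low bytes start at the slot's own address.
    const uint8_t* lsb_le(const uint64_t* addr, size_t /*bytes*/) {
      return reinterpret_cast<const uint8_t*>(addr);
    }
    // Big-endian rule: step past the slot, then back up by the bytes wanted.
    const uint8_t* lsb_be(const uint64_t* addr, size_t bytes) {
      return reinterpret_cast<const uint8_t*>(addr + 1) - bytes;
    }

    int main() {
      uint64_t slot = 0x1122334455667788ull;
      uint32_t le, be;
      std::memcpy(&le, lsb_le(&slot, 4), 4);
      std::memcpy(&be, lsb_be(&slot, 4), 4);
      // On a little-endian host only the first rule yields the low half.
      std::printf("LE rule reads 0x%x, BE rule reads 0x%x\n", le, be);
    }
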
@@ -186,61 +178,22 @@ TEST(RunUnalignedLoadStoreFloat64Offset) {
 }
 
 namespace {
-
-// Initializes the buffer with some raw data respecting requested
-// representation of the values.
-template <typename CType>
-void InitBuffer(CType* buffer, size_t length, MachineType rep) {
-  const size_t kBufferSize = sizeof(CType) * length;
-  if (!rep.IsTagged()) {
-    byte* raw = reinterpret_cast<byte*>(buffer);
-    for (size_t i = 0; i < kBufferSize; i++) {
-      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
-    }
-    return;
-  }
-
-  // Tagged field loads require values to be properly tagged because of
-  // pointer decompression that may be happening during load.
-  Isolate* isolate = CcTest::InitIsolateOnce();
-  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
-  if (rep.IsTaggedSigned()) {
-    for (size_t i = 0; i < length; i++) {
-      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
-    }
-  } else {
-    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
-    if (!rep.IsTaggedPointer()) {
-      // Also add some Smis if we are checking AnyTagged case.
-      for (size_t i = 0; i < length / 2; i++) {
-        smi_view[i] =
-            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
-      }
-    }
-  }
-}
-
-template <typename CType>
+template <typename Type>
 void RunLoadImmIndex(MachineType rep, TestAlignment t) {
-  const int kNumElems = 16;
-  CType buffer[kNumElems];
+  const int kNumElems = 3;
+  Type buffer[kNumElems];
 
-  InitBuffer(buffer, kNumElems, rep);
+  // initialize the buffer with some raw data.
+  byte* raw = reinterpret_cast<byte*>(buffer);
+  for (size_t i = 0; i < sizeof(buffer); i++) {
+    raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+  }
 
   // Test with various large and small offsets.
   for (int offset = -1; offset <= 200000; offset *= -5) {
     for (int i = 0; i < kNumElems; i++) {
-      BufferedRawMachineAssemblerTester<CType> m;
-      void* base_pointer = &buffer[0] - offset;
-#if V8_POINTER_COMPRESSION
-      if (rep.IsTagged()) {
-        // When pointer compression is enabled then we need to access only
-        // the lower 32-bit of the tagged value while the buffer contains
-        // full 64-bit values.
-        base_pointer = LSB(base_pointer, kPointerSize / 2);
-      }
-#endif
-      Node* base = m.PointerConstant(base_pointer);
+      BufferedRawMachineAssemblerTester<Type> m;
+      Node* base = m.PointerConstant(buffer - offset);
       Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
       if (t == TestAlignment::kAligned) {
         m.Return(m.Load(rep, base, index));
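
The deleted InitBuffer above writes genuine Smi values rather than arbitrary bytes because a TaggedSigned load is entitled to untag or decompress what it reads. A sketch of the two standard Smi encodings it relies on, simplified and assumed from V8's usual scheme:

    #include <cstdint>
    #include <cstdio>

    // 64-bit encoding: 32-bit payload in the upper word, lower word zero.
    uint64_t SmiFromInt64(int32_t value) {
      return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
    }
    // 32-bit encoding: payload shifted left once, low tag bit 0 marks a Smi.
    uint32_t SmiFromInt32(int32_t value) {
      return static_cast<uint32_t>(value) << 1;
    }

    int main() {
      std::printf("Smi(7), 64-bit heap word: 0x%016llx\n",
                  static_cast<unsigned long long>(SmiFromInt64(7)));
      std::printf("Smi(7), 32-bit heap word: 0x%08x\n", SmiFromInt32(7));
    }
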
@@ -250,76 +203,82 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
         UNREACHABLE();
       }
 
-      CHECK_EQ(buffer[i], m.Call());
+      volatile Type expected = buffer[i];
+      volatile Type actual = m.Call();
+      CHECK_EQ(expected, actual);
     }
   }
 }
 
 template <typename CType>
 void RunLoadStore(MachineType rep, TestAlignment t) {
-  const int kNumElems = 16;
-  CType in_buffer[kNumElems];
-  CType out_buffer[kNumElems];
-
-  InitBuffer(in_buffer, kNumElems, rep);
+  const int kNumElems = 4;
+  CType buffer[kNumElems];
 
   for (int32_t x = 0; x < kNumElems; x++) {
     int32_t y = kNumElems - x - 1;
+    // initialize the buffer with raw data.
+    byte* raw = reinterpret_cast<byte*>(buffer);
+    for (size_t i = 0; i < sizeof(buffer); i++) {
+      raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+    }
 
     RawMachineAssemblerTester<int32_t> m;
     int32_t OK = 0x29000 + x;
-    Node* in_base = m.PointerConstant(in_buffer);
-    Node* in_index = m.IntPtrConstant(x * sizeof(CType));
-    Node* out_base = m.PointerConstant(out_buffer);
-    Node* out_index = m.IntPtrConstant(y * sizeof(CType));
+    Node* base = m.PointerConstant(buffer);
+    Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
+    Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
     if (t == TestAlignment::kAligned) {
-      Node* load = m.Load(rep, in_base, in_index);
-      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
+      Node* load = m.Load(rep, base, index0);
+      m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
     } else if (t == TestAlignment::kUnaligned) {
-      Node* load = m.UnalignedLoad(rep, in_base, in_index);
-      m.UnalignedStore(rep.representation(), out_base, out_index, load);
+      Node* load = m.UnalignedLoad(rep, base, index0);
+      m.UnalignedStore(rep.representation(), base, index1, load);
     }
 
     m.Return(m.Int32Constant(OK));
 
-    memset(out_buffer, 0, sizeof(out_buffer));
-    CHECK_NE(in_buffer[x], out_buffer[y]);
+    CHECK(buffer[x] != buffer[y]);
     CHECK_EQ(OK, m.Call());
-    CHECK_EQ(in_buffer[x], out_buffer[y]);
-    for (int32_t z = 0; z < kNumElems; z++) {
-      if (z != y) CHECK_EQ(CType{0}, out_buffer[z]);
-    }
+    CHECK(buffer[x] == buffer[y]);
  }
 }
 
 template <typename CType>
 void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
-  byte in_buffer[2 * sizeof(CType)];
-  byte out_buffer[2 * sizeof(CType)];
+  CType in, out;
+  CType in_buffer[2];
+  CType out_buffer[2];
+  byte* raw;
 
   for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
-    CType* in = reinterpret_cast<CType*>(&in_buffer[x]);
-    InitBuffer(in, 1, rep);
+    int y = sizeof(CType) - x;
 
-    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
-      CType* out = reinterpret_cast<CType*>(&out_buffer[y]);
-
-      RawMachineAssemblerTester<int32_t> m;
-      int32_t OK = 0x29000 + x;
-
-      Node* in_base = m.PointerConstant(in_buffer);
-      Node* in_index = m.IntPtrConstant(x);
-      Node* load = m.UnalignedLoad(rep, in_base, in_index);
-
-      Node* out_base = m.PointerConstant(out_buffer);
-      Node* out_index = m.IntPtrConstant(y);
-      m.UnalignedStore(rep.representation(), out_base, out_index, load);
-
-      m.Return(m.Int32Constant(OK));
-
-      CHECK_EQ(OK, m.Call());
-      CHECK_EQ(in[0], out[0]);
-    }
+    raw = reinterpret_cast<byte*>(&in);
+    for (size_t i = 0; i < sizeof(CType); i++) {
+      raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
+    }
+
+    raw = reinterpret_cast<byte*>(in_buffer);
+    MemCopy(raw + x, &in, sizeof(CType));
+
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t OK = 0x29000 + x;
+
+    Node* base0 = m.PointerConstant(in_buffer);
+    Node* base1 = m.PointerConstant(out_buffer);
+    Node* index0 = m.IntPtrConstant(x);
+    Node* index1 = m.IntPtrConstant(y);
+    Node* load = m.UnalignedLoad(rep, base0, index0);
+    m.UnalignedStore(rep.representation(), base1, index1, load);
+
+    m.Return(m.Int32Constant(OK));
+
+    CHECK_EQ(OK, m.Call());
+
+    raw = reinterpret_cast<byte*>(&out_buffer);
+    MemCopy(&out, raw + y, sizeof(CType));
+    CHECK(in == out);
   }
 }
 }  // namespace
@@ -331,11 +290,7 @@ TEST(RunLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
-  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
-  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
-                               TestAlignment::kAligned);
-  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -349,11 +304,8 @@ TEST(RunUnalignedLoadImmIndex) {
   RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
-  RunLoadImmIndex<HeapObject*>(MachineType::TaggedPointer(),
-                               TestAlignment::kUnaligned);
-  RunLoadImmIndex<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+  RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
+                            TestAlignment::kUnaligned);
   RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -369,11 +321,7 @@ TEST(RunLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
-  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
-  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
-  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
-                            TestAlignment::kAligned);
-  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -386,11 +334,7 @@ TEST(RunUnalignedLoadStore) {
   RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
   RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
   RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
-  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
-  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
-  RunLoadStore<HeapObject*>(MachineType::TaggedPointer(),
-                            TestAlignment::kUnaligned);
-  RunLoadStore<Object*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+  RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
   RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
   RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
 #if V8_TARGET_ARCH_64_BIT
@@ -403,11 +347,7 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
   RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
   RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
   RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
-  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
-  RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
-  RunUnalignedLoadStoreUnalignedAccess<HeapObject*>(
-      MachineType::TaggedPointer());
-  RunUnalignedLoadStoreUnalignedAccess<Object*>(MachineType::AnyTagged());
+  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
   RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
   RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
 #if V8_TARGET_ARCH_64_BIT
@@ -415,6 +355,14 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
 #endif
 }
 
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
+#else
+#error "Unknown Architecture"
+#endif
+
 namespace {
 void RunLoadStoreSignExtend32(TestAlignment t) {
   int32_t buffer[4];
@@ -660,10 +608,6 @@ TEST(RunUnalignedLoadStoreTruncation) {
   LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
 }
 
-#undef LSB
-#undef A_BILLION
-#undef A_GIG
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8