// Copyright 2016 the V8 project authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.

#include <cmath>
#include <functional>
#include <limits>

#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"

namespace v8 {
namespace internal {
namespace compiler {
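
// Most helpers below are parameterized on TestAlignment so that the same
// scenario is exercised with both the aligned and the unaligned load/store
// machine operators.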
enum TestAlignment {
  kAligned,
  kUnaligned,
};

#if V8_TARGET_LITTLE_ENDIAN
#define LSB(addr, bytes) addr
#elif V8_TARGET_BIG_ENDIAN
#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
#else
#error "Unknown Architecture"
#endif

// This is America! (A billion is 10^9; a gig is 2^30.)
#define A_BILLION 1000000000ULL
#define A_GIG (1024ULL * 1024ULL * 1024ULL)

namespace {
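
// Computes an address |offset| bytes away from |real_address|, so that
// accessing the returned pointer at index |offset| reads the original
// location.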
byte* ComputeOffset(void* real_address, int32_t offset) {
  return reinterpret_cast<byte*>(reinterpret_cast<Address>(real_address) -
                                 offset);
}
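
// Loads an int32 directly from a C++ variable, either with an aligned or an
// unaligned load, and checks the result for all int32 inputs.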
void RunLoadInt32(const TestAlignment t) {
  RawMachineAssemblerTester<int32_t> m;

  int32_t p1 = 0;  // loads directly from this location.

  if (t == TestAlignment::kAligned) {
    m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
  } else if (t == TestAlignment::kUnaligned) {
    m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
  } else {
    UNREACHABLE();
  }

  FOR_INT32_INPUTS(i) {
    p1 = i;
    CHECK_EQ(p1, m.Call());
  }
}
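
// Like RunLoadInt32(), but loads via [base + offset] addressing with a range
// of positive and negative offsets.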
void RunLoadInt32Offset(TestAlignment t) {
  int32_t p1 = 0;  // loads directly from this location.

  int32_t offsets[] = {-2000000, -100, -101, 1,          3,
                       7,        120,  2000, 2000000000, 0xFF};

  for (size_t i = 0; i < arraysize(offsets); i++) {
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = offsets[i];
    byte* pointer = ComputeOffset(&p1, offset);

    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
    } else if (t == TestAlignment::kUnaligned) {
      m.Return(
          m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
    } else {
      UNREACHABLE();
    }

    FOR_INT32_INPUTS(j) {
      p1 = j;
      CHECK_EQ(p1, m.Call());
    }
  }
}
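
// Loads a float32 through [base + offset] addressing and stores it to a
// second location the same way, then checks that the value was copied.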
void RunLoadStoreFloat32Offset(TestAlignment t) {
  float p1 = 0.0f;  // loads directly from this location.
  float p2 = 0.0f;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = i;
    byte* from = ComputeOffset(&p1, offset);
    byte* to = ComputeOffset(&p2, offset);
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);
    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT32_INPUTS(j) {
      p1 = j;
      p2 = j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}
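
// Same as RunLoadStoreFloat32Offset(), but for float64 values.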
void RunLoadStoreFloat64Offset(TestAlignment t) {
  double p1 = 0;  // loads directly from this location.
  double p2 = 0;  // and stores directly into this location.

  FOR_INT32_INPUTS(i) {
    int32_t magic =
        base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
    RawMachineAssemblerTester<int32_t> m;
    int32_t offset = i;
    byte* from = ComputeOffset(&p1, offset);
    byte* to = ComputeOffset(&p2, offset);
    // generate load [#base + #index]
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
              m.IntPtrConstant(offset), load, kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load =
          m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
                          m.IntPtrConstant(offset));
      m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
                       m.IntPtrConstant(offset), load);
    } else {
      UNREACHABLE();
    }
    m.Return(m.Int32Constant(magic));

    FOR_FLOAT64_INPUTS(j) {
      p1 = j;
      p2 = j - 5;
      CHECK_EQ(magic, m.Call());
      CHECK_DOUBLE_EQ(p1, p2);
    }
  }
}
}  // namespace

TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); }

TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); }

TEST(RunUnalignedLoadInt32Offset) {
  RunLoadInt32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat32Offset) {
  RunLoadStoreFloat32Offset(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreFloat64Offset) {
  RunLoadStoreFloat64Offset(TestAlignment::kUnaligned);
}

namespace {

// Mostly the same as CHECK_EQ(), but customized for compressed tagged values.
template <typename CType>
void CheckEq(CType in_value, CType out_value) {
  CHECK_EQ(in_value, out_value);
}

#ifdef V8_COMPRESS_POINTERS
// Specializations for checking the result of a compressing store.
template <>
void CheckEq<Object>(Object in_value, Object out_value) {
  // Compare only the lower 32 bits of the value because tagged loads/stores
  // are 32-bit operations anyway.
  CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
           static_cast<Tagged_t>(out_value.ptr()));
}

template <>
void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
  return CheckEq<Object>(in_value, out_value);
}

template <>
void CheckEq<Smi>(Smi in_value, Smi out_value) {
  return CheckEq<Object>(in_value, out_value);
}
#endif

// Initializes the buffer with some raw data respecting the requested
// representation of the values.
template <typename CType>
void InitBuffer(CType* buffer, size_t length, MachineType type) {
  const size_t kBufferSize = sizeof(CType) * length;
  if (!type.IsTagged()) {
    byte* raw = reinterpret_cast<byte*>(buffer);
    for (size_t i = 0; i < kBufferSize; i++) {
      raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
    }
    return;
  }

  // Tagged field loads require values to be properly tagged because of
  // pointer decompression that may be happening during load.
  Isolate* isolate = CcTest::InitIsolateOnce();
  Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
  if (type.IsTaggedSigned()) {
    for (size_t i = 0; i < length; i++) {
      smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
    }
  } else {
    memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
    if (!type.IsTaggedPointer()) {
      // Also add some Smis if we are checking the AnyTagged case.
      for (size_t i = 0; i < length / 2; i++) {
        smi_view[i] =
            Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
      }
    }
  }
}
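
// Loads buffer elements of type |type| through a constant base pointer and an
// immediate index, using various large and small offsets, and checks each
// loaded value against the buffer contents.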
template <typename CType>
void RunLoadImmIndex(MachineType type, TestAlignment t) {
  const int kNumElems = 16;
  CType buffer[kNumElems];

  InitBuffer(buffer, kNumElems, type);

  // Test with various large and small offsets.
  for (int offset = -1; offset <= 200000; offset *= -5) {
    for (int i = 0; i < kNumElems; i++) {
      BufferedRawMachineAssemblerTester<CType> m;
      CType* base_pointer = reinterpret_cast<CType*>(
          ComputeOffset(&buffer[0], offset * sizeof(CType)));
#ifdef V8_COMPRESS_POINTERS
      if (type.IsTagged()) {
        // When pointer compression is enabled, we need to access only the
        // lower 32 bits of the tagged value while the buffer contains full
        // 64-bit values.
        base_pointer = reinterpret_cast<CType*>(LSB(base_pointer, kTaggedSize));
      }
#endif

      Node* base = m.PointerConstant(base_pointer);
      Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
      if (t == TestAlignment::kAligned) {
        m.Return(m.Load(type, base, index));
      } else if (t == TestAlignment::kUnaligned) {
        m.Return(m.UnalignedLoad(type, base, index));
      } else {
        UNREACHABLE();
      }

      CheckEq<CType>(buffer[i], m.Call());
    }
  }
}
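
// Copies one element from an input buffer to a mirrored position of an output
// buffer with a single load/store pair, then checks that only the intended
// slot of the output buffer was written.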
template <typename CType>
void RunLoadStore(MachineType type, TestAlignment t) {
  const int kNumElems = 16;
  CType in_buffer[kNumElems];
  CType out_buffer[kNumElems];
  uintptr_t zap_data[] = {kZapValue, kZapValue};
  CType zap_value;

  STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
  MemCopy(&zap_value, &zap_data, sizeof(CType));
  InitBuffer(in_buffer, kNumElems, type);

#ifdef V8_TARGET_BIG_ENDIAN
  int offset = sizeof(CType) - ElementSizeInBytes(type.representation());
#else
  int offset = 0;
#endif

  for (int32_t x = 0; x < kNumElems; x++) {
    int32_t y = kNumElems - x - 1;

    RawMachineAssemblerTester<int32_t> m;
    int32_t OK = 0x29000 + x;
    Node* in_base = m.PointerConstant(in_buffer);
    Node* in_index = m.IntPtrConstant(x * sizeof(CType) + offset);
    Node* out_base = m.PointerConstant(out_buffer);
    Node* out_index = m.IntPtrConstant(y * sizeof(CType) + offset);
    if (t == TestAlignment::kAligned) {
      Node* load = m.Load(type, in_base, in_index);
      m.Store(type.representation(), out_base, out_index, load,
              kNoWriteBarrier);
    } else if (t == TestAlignment::kUnaligned) {
      Node* load = m.UnalignedLoad(type, in_base, in_index);
      m.UnalignedStore(type.representation(), out_base, out_index, load);
    }

    m.Return(m.Int32Constant(OK));

    for (int32_t z = 0; z < kNumElems; z++) {
      out_buffer[z] = zap_value;
    }
    CHECK_NE(in_buffer[x], out_buffer[y]);
    CHECK_EQ(OK, m.Call());
    // Mostly the same as CHECK_EQ(), but customized for compressed tagged
    // values.
    CheckEq<CType>(in_buffer[x], out_buffer[y]);
    for (int32_t z = 0; z < kNumElems; z++) {
      if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
    }
  }
}
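
// Stores a test value at every byte offset within a raw buffer, reads it back
// with an unaligned load, stores it at another byte offset with an unaligned
// store, and checks the round trip.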
template <typename CType>
void RunUnalignedLoadStoreUnalignedAccess(MachineType type) {
  CType in, out;
  byte in_buffer[2 * sizeof(CType)];
  byte out_buffer[2 * sizeof(CType)];

  InitBuffer(&in, 1, type);

  for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
    // A direct write to &in_buffer[x] may cause an unaligned access in C++
    // code, so we use MemCopy() to handle that.
    MemCopy(&in_buffer[x], &in, sizeof(CType));

    for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) {
      RawMachineAssemblerTester<int32_t> m;
      int32_t OK = 0x29000 + x;

      Node* in_base = m.PointerConstant(in_buffer);
      Node* in_index = m.IntPtrConstant(x);
      Node* load = m.UnalignedLoad(type, in_base, in_index);

      Node* out_base = m.PointerConstant(out_buffer);
      Node* out_index = m.IntPtrConstant(y);
      m.UnalignedStore(type.representation(), out_base, out_index, load);

      m.Return(m.Int32Constant(OK));

      CHECK_EQ(OK, m.Call());
      // A direct read of &out_buffer[y] may cause an unaligned access in C++
      // code, so we use MemCopy() to handle that.
      MemCopy(&out, &out_buffer[y], sizeof(CType));
      // Mostly the same as CHECK_EQ(), but customized for compressed tagged
      // values.
      CheckEq<CType>(in, out);
    }
  }
}
}  // namespace

TEST(RunLoadImmIndex) {
  RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
                              TestAlignment::kAligned);
  RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunUnalignedLoadImmIndex) {
  RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
  // TODO(titzer): test various indexing modes.
}

TEST(RunLoadStore) {
  RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned);
  RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned);
  RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
                           TestAlignment::kAligned);
  RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kAligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
}

TEST(RunUnalignedLoadStore) {
  RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
  RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
  RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
  RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
  RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
  RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
  RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
  RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
}

TEST(RunUnalignedLoadStoreUnalignedAccess) {
  RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
  RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
  RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
  RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
  RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
  RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
  RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
#if V8_TARGET_ARCH_64_BIT
  RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
#endif
}

namespace {
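
// Loads int8/int16/int32 values from the low bytes of buffer[0] and stores
// the sign-extended results into separate word32 slots, then checks the
// extensions for all int32 inputs.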
void RunLoadStoreSignExtend32(TestAlignment t) {
  int32_t buffer[4];
  RawMachineAssemblerTester<int32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT32_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
    CHECK_EQ(i, buffer[3]);
  }
}
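
// Same as RunLoadStoreSignExtend32(), but with unsigned loads and zero
// extension.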
void RunLoadStoreZeroExtend32(TestAlignment t) {
  uint32_t buffer[4];
  RawMachineAssemblerTester<uint32_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
                              load32);
  }
  m.Return(load8);

  FOR_UINT32_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ((i & 0xFF), m.Call());
    CHECK_EQ((i & 0xFF), buffer[1]);
    CHECK_EQ((i & 0xFFFF), buffer[2]);
    CHECK_EQ(i, buffer[3]);
  }
}
}  // namespace

TEST(RunLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend32) {
  RunLoadStoreSignExtend32(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend32) {
  RunLoadStoreZeroExtend32(TestAlignment::kUnaligned);
}

#if V8_TARGET_ARCH_64_BIT

namespace {
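
// 64-bit variant of RunLoadStoreSignExtend32(); currently a no-op pending
// sign extension of loads to 64-bit (see the TODO inside).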
void RunLoadStoreSignExtend64(TestAlignment t) {
  if ((true)) return;  // TODO(titzer): sign extension of loads to 64-bit.
  int64_t buffer[5];
  RawMachineAssemblerTester<int64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
    Node* load64 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_INT64_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ(static_cast<int8_t>(i & 0xFF), m.Call());
    CHECK_EQ(static_cast<int8_t>(i & 0xFF), buffer[1]);
    CHECK_EQ(static_cast<int16_t>(i & 0xFFFF), buffer[2]);
    CHECK_EQ(static_cast<int32_t>(i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(i, buffer[4]);
  }
}
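
// 64-bit variant of RunLoadStoreZeroExtend32(); skipped when the system
// pointer size is less than 8 bytes.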
void RunLoadStoreZeroExtend64(TestAlignment t) {
  if (kSystemPointerSize < 8) return;
  uint64_t buffer[5];
  RawMachineAssemblerTester<uint64_t> m;
  Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
  if (t == TestAlignment::kAligned) {
    Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
    m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
    m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
  } else if (t == TestAlignment::kUnaligned) {
    Node* load16 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
    Node* load32 =
        m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
    Node* load64 =
        m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
    m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
    m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
                              load16);
    m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
                              load32);
    m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
                              load64);
  } else {
    UNREACHABLE();
  }
  m.Return(load8);

  FOR_UINT64_INPUTS(i) {
    buffer[0] = i;

    CHECK_EQ((i & 0xFF), m.Call());
    CHECK_EQ((i & 0xFF), buffer[1]);
    CHECK_EQ((i & 0xFFFF), buffer[2]);
    CHECK_EQ((i & 0xFFFFFFFF), buffer[3]);
    CHECK_EQ(i, buffer[4]);
  }
}

}  // namespace

TEST(RunLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(TestAlignment::kUnaligned);
}

TEST(RunLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
}

#endif

namespace {
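
// Loads a value, adds one, and stores the result back through a narrow store,
// checking that the store truncates the result to the element width.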
template <typename IntType>
void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) {
  IntType input;

  RawMachineAssemblerTester<int32_t> m;
  Node* ap1;
  if (t == TestAlignment::kAligned) {
    Node* a = m.LoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.StoreToPointer(&input, kRepresentation.representation(), ap1);
  } else if (t == TestAlignment::kUnaligned) {
    Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
    ap1 = m.Int32Add(a, m.Int32Constant(1));
    m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
  } else {
    UNREACHABLE();
  }
  m.Return(ap1);

  const IntType max = std::numeric_limits<IntType>::max();
  const IntType min = std::numeric_limits<IntType>::min();

  // Test upper bound.
  input = max;
  CHECK_EQ(max + 1, m.Call());
  CHECK_EQ(min, input);

  // Test lower bound.
  input = min;
  CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
  CHECK_EQ(min + 1, input);

  // Test all one byte values that are not one byte bounds.
  for (int i = -127; i < 127; i++) {
    input = i;
    int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
    CHECK_EQ(static_cast<IntType>(expected), m.Call());
    CHECK_EQ(static_cast<IntType>(i + 1), input);
  }
}
}  // namespace

TEST(RunLoadStoreTruncation) {
  LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
  LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
}

TEST(RunUnalignedLoadStoreTruncation) {
  LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
}

#undef LSB
#undef A_BILLION
#undef A_GIG

}  // namespace compiler
}  // namespace internal
}  // namespace v8