v8/test/cctest/compiler/test-run-load-store.cc
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <cmath>
#include <functional>
#include <limits>
#include "src/base/bits.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
#include "test/cctest/compiler/value-helper.h"
namespace v8 {
namespace internal {
namespace compiler {
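// Selects whether the generated code uses the aligned or the unaligned
// load/store machine operators.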
enum TestAlignment {
kAligned,
kUnaligned,
};
// This is America: A_BILLION is 10^9, A_GIG is 2^30.
#define A_BILLION 1000000000ULL
#define A_GIG (1024ULL * 1024ULL * 1024ULL)
namespace {
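// Builds code that loads an int32 directly from the address of a local
// variable and returns it; |t| picks the aligned or unaligned load operator.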
void RunLoadInt32(const TestAlignment t) {
RawMachineAssemblerTester<int32_t> m;
int32_t p1 = 0; // loads directly from this location.
if (t == TestAlignment::kAligned) {
m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
} else if (t == TestAlignment::kUnaligned) {
m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
} else {
UNREACHABLE();
}
FOR_INT32_INPUTS(i) {
p1 = *i;
CHECK_EQ(p1, m.Call());
}
}
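// Like RunLoadInt32, but the base pointer is biased by -offset and the load
// uses |offset| as an immediate index, so the effective address is still &p1.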
void RunLoadInt32Offset(TestAlignment t) {
int32_t p1 = 0; // loads directly from this location.
int32_t offsets[] = {-2000000, -100, -101, 1, 3,
7, 120, 2000, 2000000000, 0xFF};
for (size_t i = 0; i < arraysize(offsets); i++) {
RawMachineAssemblerTester<int32_t> m;
int32_t offset = offsets[i];
byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
} else if (t == TestAlignment::kUnaligned) {
m.Return(
m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
} else {
UNREACHABLE();
}
FOR_INT32_INPUTS(j) {
p1 = *j;
CHECK_EQ(p1, m.Call());
}
}
}
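// Loads a float32 from p1 and stores it to p2 through base pointers biased by
// -offset; the checks verify that the stored value actually reached p2.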
void RunLoadStoreFloat32Offset(TestAlignment t) {
float p1 = 0.0f; // loads directly from this location.
float p2 = 0.0f; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
int32_t magic = 0x2342AABB + *i * 3;
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
byte* to = reinterpret_cast<byte*>(&p2) - offset;
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
m.IntPtrConstant(offset));
m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
m.IntPtrConstant(offset), load, kNoWriteBarrier);
} else if (t == TestAlignment::kUnaligned) {
Node* load =
m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
m.IntPtrConstant(offset));
m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
m.IntPtrConstant(offset), load);
} else {
UNREACHABLE();
}
m.Return(m.Int32Constant(magic));
FOR_FLOAT32_INPUTS(j) {
p1 = *j;
p2 = *j - 5;
CHECK_EQ(magic, m.Call());
CHECK_DOUBLE_EQ(p1, p2);
}
}
}
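// Float64 version of RunLoadStoreFloat32Offset above.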
void RunLoadStoreFloat64Offset(TestAlignment t) {
double p1 = 0; // loads directly from this location.
double p2 = 0; // and stores directly into this location.
FOR_INT32_INPUTS(i) {
int32_t magic = 0x2342AABB + *i * 3;
RawMachineAssemblerTester<int32_t> m;
int32_t offset = *i;
byte* from = reinterpret_cast<byte*>(&p1) - offset;
byte* to = reinterpret_cast<byte*>(&p2) - offset;
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
m.IntPtrConstant(offset));
m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
m.IntPtrConstant(offset), load, kNoWriteBarrier);
} else if (t == TestAlignment::kUnaligned) {
Node* load =
m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
m.IntPtrConstant(offset));
m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
m.IntPtrConstant(offset), load);
} else {
UNREACHABLE();
}
m.Return(m.Int32Constant(magic));
FOR_FLOAT64_INPUTS(j) {
p1 = *j;
p2 = *j - 5;
CHECK_EQ(magic, m.Call());
CHECK_DOUBLE_EQ(p1, p2);
}
}
}
} // namespace
TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); }
TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); }
TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); }
TEST(RunUnalignedLoadInt32Offset) {
RunLoadInt32Offset(TestAlignment::kUnaligned);
}
TEST(RunLoadStoreFloat32Offset) {
RunLoadStoreFloat32Offset(TestAlignment::kAligned);
}
TEST(RunUnalignedLoadStoreFloat32Offset) {
RunLoadStoreFloat32Offset(TestAlignment::kUnaligned);
}
TEST(RunLoadStoreFloat64Offset) {
RunLoadStoreFloat64Offset(TestAlignment::kAligned);
}
TEST(RunUnalignedLoadStoreFloat64Offset) {
RunLoadStoreFloat64Offset(TestAlignment::kUnaligned);
}
namespace {
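// Reads each element of a small buffer through a constant base that is
// deliberately displaced by -offset, covering a range of small and large,
// positive and negative immediate indices.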
template <typename Type>
void RunLoadImmIndex(MachineType rep, TestAlignment t) {
const int kNumElems = 3;
Type buffer[kNumElems];
// initialize the buffer with some raw data.
byte* raw = reinterpret_cast<byte*>(buffer);
for (size_t i = 0; i < sizeof(buffer); i++) {
raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
}
// Test with various large and small offsets.
for (int offset = -1; offset <= 200000; offset *= -5) {
for (int i = 0; i < kNumElems; i++) {
BufferedRawMachineAssemblerTester<Type> m;
Node* base = m.PointerConstant(buffer - offset);
Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
if (t == TestAlignment::kAligned) {
m.Return(m.Load(rep, base, index));
} else if (t == TestAlignment::kUnaligned) {
m.Return(m.UnalignedLoad(rep, base, index));
} else {
UNREACHABLE();
}
volatile Type expected = buffer[i];
volatile Type actual = m.Call();
CHECK_EQ(expected, actual);
}
}
}
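// Loads element x of a freshly initialized buffer and stores it into element
// y = kNumElems - x - 1, then checks that the two elements now compare equal.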
template <typename CType>
void RunLoadStore(MachineType rep, TestAlignment t) {
const int kNumElems = 4;
CType buffer[kNumElems];
for (int32_t x = 0; x < kNumElems; x++) {
int32_t y = kNumElems - x - 1;
// initialize the buffer with raw data.
byte* raw = reinterpret_cast<byte*>(buffer);
for (size_t i = 0; i < sizeof(buffer); i++) {
raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
}
RawMachineAssemblerTester<int32_t> m;
int32_t OK = 0x29000 + x;
Node* base = m.PointerConstant(buffer);
Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
if (t == TestAlignment::kAligned) {
Node* load = m.Load(rep, base, index0);
m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
} else if (t == TestAlignment::kUnaligned) {
Node* load = m.UnalignedLoad(rep, base, index0);
m.UnalignedStore(rep.representation(), base, index1, load);
}
m.Return(m.Int32Constant(OK));
CHECK(buffer[x] != buffer[y]);
CHECK_EQ(OK, m.Call());
CHECK(buffer[x] == buffer[y]);
}
}
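// Round-trips a value through an unaligned load at byte offset x and an
// unaligned store at byte offset y, then checks that the value survived.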
template <typename CType>
void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
CType in, out;
CType in_buffer[2];
CType out_buffer[2];
byte* raw;
for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
int y = sizeof(CType) - x;
raw = reinterpret_cast<byte*>(&in);
for (size_t i = 0; i < sizeof(CType); i++) {
raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
}
raw = reinterpret_cast<byte*>(in_buffer);
MemCopy(raw + x, &in, sizeof(CType));
RawMachineAssemblerTester<int32_t> m;
int32_t OK = 0x29000 + x;
Node* base0 = m.PointerConstant(in_buffer);
Node* base1 = m.PointerConstant(out_buffer);
Node* index0 = m.IntPtrConstant(x);
Node* index1 = m.IntPtrConstant(y);
Node* load = m.UnalignedLoad(rep, base0, index0);
m.UnalignedStore(rep.representation(), base1, index1, load);
m.Return(m.Int32Constant(OK));
CHECK_EQ(OK, m.Call());
raw = reinterpret_cast<byte*>(&out_buffer);
MemCopy(&out, raw + y, sizeof(CType));
CHECK(in == out);
}
}
} // namespace
TEST(RunLoadImmIndex) {
RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned);
RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
// TODO(titzer): test various indexing modes.
}
TEST(RunUnalignedLoadImmIndex) {
RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
TestAlignment::kUnaligned);
RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
// TODO(titzer): test various indexing modes.
}
TEST(RunLoadStore) {
RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned);
RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
#if V8_TARGET_ARCH_64_BIT
RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
#endif
}
TEST(RunUnalignedLoadStore) {
RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
#endif
}
TEST(RunUnalignedLoadStoreUnalignedAccess) {
RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
#if V8_TARGET_ARCH_64_BIT
RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
#endif
}
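// LSB(addr, bytes) points at the least significant |bytes| bytes of the
// integer at |addr|, so the sub-word loads below read the low-order bits
// regardless of the target's endianness.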
#if V8_TARGET_LITTLE_ENDIAN
#define LSB(addr, bytes) addr
#elif V8_TARGET_BIG_ENDIAN
#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
#else
#error "Unknown Architecture"
#endif
namespace {
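// Loads 8-, 16- and 32-bit values from the low bytes of buffer[0], stores
// them as word32 into buffer[1..3], and checks that the narrow loads were
// sign-extended.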
void RunLoadStoreSignExtend32(TestAlignment t) {
int32_t buffer[4];
RawMachineAssemblerTester<int32_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
if (t == TestAlignment::kAligned) {
Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
} else if (t == TestAlignment::kUnaligned) {
Node* load16 =
m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
Node* load32 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
load16);
m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
load32);
} else {
UNREACHABLE();
}
m.Return(load8);
FOR_INT32_INPUTS(i) {
buffer[0] = *i;
CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
CHECK_EQ(*i, buffer[3]);
}
}
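// Unsigned counterpart of RunLoadStoreSignExtend32: the narrow loads must be
// zero-extended.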
void RunLoadStoreZeroExtend32(TestAlignment t) {
uint32_t buffer[4];
RawMachineAssemblerTester<uint32_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
if (t == TestAlignment::kAligned) {
Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
} else if (t == TestAlignment::kUnaligned) {
Node* load16 =
m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
Node* load32 =
m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
load16);
m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
load32);
}
m.Return(load8);
FOR_UINT32_INPUTS(i) {
buffer[0] = *i;
CHECK_EQ((*i & 0xFF), m.Call());
CHECK_EQ((*i & 0xFF), buffer[1]);
CHECK_EQ((*i & 0xFFFF), buffer[2]);
CHECK_EQ(*i, buffer[3]);
}
}
} // namespace
TEST(RunLoadStoreSignExtend32) {
RunLoadStoreSignExtend32(TestAlignment::kAligned);
}
TEST(RunUnalignedLoadStoreSignExtend32) {
RunLoadStoreSignExtend32(TestAlignment::kUnaligned);
}
TEST(RunLoadStoreZeroExtend32) {
RunLoadStoreZeroExtend32(TestAlignment::kAligned);
}
TEST(RunUnalignedLoadStoreZeroExtend32) {
RunLoadStoreZeroExtend32(TestAlignment::kUnaligned);
}
#if V8_TARGET_ARCH_64_BIT
namespace {
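// 64-bit variant of the sign-extension test; currently disabled via the early
// return below until sign extension of loads to 64 bits is supported.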
void RunLoadStoreSignExtend64(TestAlignment t) {
if ((true)) return; // TODO(titzer): sign extension of loads to 64-bit.
int64_t buffer[5];
RawMachineAssemblerTester<int64_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
if (t == TestAlignment::kAligned) {
Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
} else if (t == TestAlignment::kUnaligned) {
Node* load16 =
m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
Node* load32 =
m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
Node* load64 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
load16);
m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
load32);
m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
load64);
} else {
UNREACHABLE();
}
m.Return(load8);
FOR_INT64_INPUTS(i) {
buffer[0] = *i;
CHECK_EQ(static_cast<int8_t>(*i & 0xFF), m.Call());
CHECK_EQ(static_cast<int8_t>(*i & 0xFF), buffer[1]);
CHECK_EQ(static_cast<int16_t>(*i & 0xFFFF), buffer[2]);
CHECK_EQ(static_cast<int32_t>(*i & 0xFFFFFFFF), buffer[3]);
CHECK_EQ(*i, buffer[4]);
}
}
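// 64-bit variant of the zero-extension test; only runs when pointers are
// 64 bits wide.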
void RunLoadStoreZeroExtend64(TestAlignment t) {
if (kPointerSize < 8) return;
uint64_t buffer[5];
RawMachineAssemblerTester<uint64_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
if (t == TestAlignment::kAligned) {
Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
} else if (t == TestAlignment::kUnaligned) {
Node* load16 =
m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
Node* load32 =
m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
Node* load64 =
m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
load16);
m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
load32);
m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
load64);
} else {
UNREACHABLE();
}
m.Return(load8);
FOR_UINT64_INPUTS(i) {
buffer[0] = *i;
CHECK_EQ((*i & 0xFF), m.Call());
CHECK_EQ((*i & 0xFF), buffer[1]);
CHECK_EQ((*i & 0xFFFF), buffer[2]);
CHECK_EQ((*i & 0xFFFFFFFF), buffer[3]);
CHECK_EQ(*i, buffer[4]);
}
}
} // namespace
TEST(RunLoadStoreSignExtend64) {
RunLoadStoreSignExtend64(TestAlignment::kAligned);
}
TEST(RunUnalignedLoadStoreSignExtend64) {
RunLoadStoreSignExtend64(TestAlignment::kUnaligned);
}
TEST(RunLoadStoreZeroExtend64) {
RunLoadStoreZeroExtend64(TestAlignment::kAligned);
}
TEST(RunUnalignedLoadStoreZeroExtend64) {
RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
}
#endif
namespace {
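// Loads a narrow integer, adds one in 32-bit arithmetic and stores the result
// back, checking that the store truncates to the narrow representation and
// wraps at the type bounds.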
template <typename IntType>
void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) {
IntType input;
RawMachineAssemblerTester<int32_t> m;
Node* ap1;
if (t == TestAlignment::kAligned) {
Node* a = m.LoadFromPointer(&input, kRepresentation);
ap1 = m.Int32Add(a, m.Int32Constant(1));
m.StoreToPointer(&input, kRepresentation.representation(), ap1);
} else if (t == TestAlignment::kUnaligned) {
Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
ap1 = m.Int32Add(a, m.Int32Constant(1));
m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
} else {
UNREACHABLE();
}
m.Return(ap1);
const IntType max = std::numeric_limits<IntType>::max();
const IntType min = std::numeric_limits<IntType>::min();
// Test upper bound.
input = max;
CHECK_EQ(max + 1, m.Call());
CHECK_EQ(min, input);
// Test lower bound.
input = min;
CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
CHECK_EQ(min + 1, input);
// Test all one byte values that are not one byte bounds.
for (int i = -127; i < 127; i++) {
input = i;
int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
CHECK_EQ(static_cast<IntType>(expected), m.Call());
CHECK_EQ(static_cast<IntType>(i + 1), input);
}
}
} // namespace
TEST(RunLoadStoreTruncation) {
LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
}
TEST(RunUnalignedLoadStoreTruncation) {
LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
}
} // namespace compiler
} // namespace internal
} // namespace v8