[cleanup] MachineType 'rep' variables renamed to 'type'
Bug: v8:9183
Change-Id: Idb1910ae30984f548996651e8b2f153531b8cdb0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1605729
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61519}
commit 0ff813c57b (parent 078cf26ac1)
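The rename matters because a MachineType bundles a MachineRepresentation with a MachineSemantic, so a variable holding a MachineType is honestly a 'type', while 'rep' conventionally names just the representation half (note that the diff below deliberately leaves MachineRepresentation parameters named 'rep', e.g. in StoreToPointer). A standalone sketch of that split, using simplified stand-ins rather than V8's real machine-type.h:

    // Simplified stand-ins for V8's MachineType/MachineRepresentation split;
    // this mirrors the real design but is not the actual machine-type.h.
    #include <iostream>

    enum class MachineRepresentation { kWord32, kWord64, kTagged };
    enum class MachineSemantic { kInt32, kUint32, kAny };

    class MachineType {
     public:
      constexpr MachineType(MachineRepresentation rep, MachineSemantic sem)
          : rep_(rep), sem_(sem) {}
      constexpr MachineRepresentation representation() const { return rep_; }
      constexpr MachineSemantic semantic() const { return sem_; }
      static constexpr MachineType Int32() {
        return {MachineRepresentation::kWord32, MachineSemantic::kInt32};
      }

     private:
      MachineRepresentation rep_;
      MachineSemantic sem_;
    };

    int main() {
      // A MachineType is representation + semantic, so 'type' is the honest
      // name; 'rep' fits only the MachineRepresentation half.
      MachineType type = MachineType::Int32();
      MachineRepresentation rep = type.representation();
      std::cout << (rep == MachineRepresentation::kWord32) << "\n";
    }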
@@ -1362,27 +1362,27 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
   }
 }
 
-Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
+Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType type) {
   Node* frame_pointer = LoadParentFramePointer();
-  return Load(rep, frame_pointer, IntPtrConstant(offset));
+  return Load(type, frame_pointer, IntPtrConstant(offset));
 }
 
 Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
-                                          MachineType rep) {
-  return Load(rep, buffer, IntPtrConstant(offset));
+                                          MachineType type) {
+  return Load(type, buffer, IntPtrConstant(offset));
 }
 
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
-                                         int offset, MachineType rep) {
+                                         int offset, MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
+  return Load(type, object, IntPtrConstant(offset - kHeapObjectTag));
 }
 
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
                                          SloppyTNode<IntPtrT> offset,
-                                         MachineType rep) {
+                                         MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+  return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
 }
 
 TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
@@ -804,11 +804,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // Load value from current parent frame by given offset in bytes.
   Node* LoadFromParentFrame(int offset,
-                            MachineType rep = MachineType::AnyTagged());
+                            MachineType type = MachineType::AnyTagged());
 
   // Load an object pointer from a buffer that isn't in the heap.
   Node* LoadBufferObject(Node* buffer, int offset,
-                         MachineType rep = MachineType::AnyTagged());
+                         MachineType type = MachineType::AnyTagged());
   TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
     return UncheckedCast<RawPtrT>(
         LoadBufferObject(buffer, offset, MachineType::Pointer()));
@@ -818,7 +818,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   }
   // Load a field from an object on the heap.
   Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
-                        MachineType rep);
+                        MachineType type);
   template <class T, typename std::enable_if<
                          std::is_convertible<TNode<T>, TNode<Object>>::value,
                          int>::type = 0>
@@ -837,7 +837,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
         LoadObjectField(object, offset, MachineType::AnyTagged()));
   }
   Node* LoadObjectField(SloppyTNode<HeapObject> object,
-                        SloppyTNode<IntPtrT> offset, MachineType rep);
+                        SloppyTNode<IntPtrT> offset, MachineType type);
   TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object,
                                 SloppyTNode<IntPtrT> offset) {
     return UncheckedCast<Object>(
@@ -941,14 +941,14 @@ Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
 CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_UNARY_OP
 
-Node* CodeAssembler::Load(MachineType rep, Node* base,
+Node* CodeAssembler::Load(MachineType type, Node* base,
                           LoadSensitivity needs_poisoning) {
-  return raw_assembler()->Load(rep, base, needs_poisoning);
+  return raw_assembler()->Load(type, base, needs_poisoning);
 }
 
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset,
+Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
                           LoadSensitivity needs_poisoning) {
-  return raw_assembler()->Load(rep, base, offset, needs_poisoning);
+  return raw_assembler()->Load(type, base, offset, needs_poisoning);
 }
 
 Node* CodeAssembler::LoadFullTagged(Node* base,
@@ -963,8 +963,8 @@ Node* CodeAssembler::LoadFullTagged(Node* base, Node* offset,
       Load(MachineType::Pointer(), base, offset, needs_poisoning));
 }
 
-Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
-  return raw_assembler()->AtomicLoad(rep, base, offset);
+Node* CodeAssembler::AtomicLoad(MachineType type, Node* base, Node* offset) {
+  return raw_assembler()->AtomicLoad(type, base, offset);
 }
 
 TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
@@ -936,17 +936,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   TNode<WordT> WordPoisonOnSpeculation(SloppyTNode<WordT> value);
 
   // Load raw memory location.
-  Node* Load(MachineType rep, Node* base,
+  Node* Load(MachineType type, Node* base,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
   template <class Type>
-  TNode<Type> Load(MachineType rep, TNode<RawPtr<Type>> base) {
+  TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
     DCHECK(
-        IsSubtype(rep.representation(), MachineRepresentationOf<Type>::value));
-    return UncheckedCast<Type>(Load(rep, static_cast<Node*>(base)));
+        IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
+    return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
   }
-  Node* Load(MachineType rep, Node* base, Node* offset,
+  Node* Load(MachineType type, Node* base, Node* offset,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
-  Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
+  Node* AtomicLoad(MachineType type, Node* base, Node* offset);
   // Load uncompressed tagged value from (most likely off JS heap) memory
   // location.
   Node* LoadFullTagged(
@@ -645,9 +645,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   // atomic-or [base + index], value
   const Operator* Word32AtomicOr(MachineType type);
   // atomic-xor [base + index], value
-  const Operator* Word32AtomicXor(MachineType rep);
+  const Operator* Word32AtomicXor(MachineType type);
   // atomic-add [base + index], value
-  const Operator* Word64AtomicAdd(MachineType rep);
+  const Operator* Word64AtomicAdd(MachineType type);
   // atomic-sub [base + index], value
   const Operator* Word64AtomicSub(MachineType type);
   // atomic-and [base + index], value
@@ -655,7 +655,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   // atomic-or [base + index], value
   const Operator* Word64AtomicOr(MachineType type);
   // atomic-xor [base + index], value
-  const Operator* Word64AtomicXor(MachineType rep);
+  const Operator* Word64AtomicXor(MachineType type);
   // atomic-pair-load [base + index]
   const Operator* Word32AtomicPairLoad();
   // atomic-pair-sub [base + index], value_high, value-low
@@ -127,37 +127,37 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   }
 
   // Memory Operations.
-  Node* Load(MachineType rep, Node* base,
+  Node* Load(MachineType type, Node* base,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
-    return Load(rep, base, IntPtrConstant(0), needs_poisoning);
+    return Load(type, base, IntPtrConstant(0), needs_poisoning);
   }
-  Node* Load(MachineType rep, Node* base, Node* index,
+  Node* Load(MachineType type, Node* base, Node* index,
              LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
     // change_op is used below to change to the correct Tagged representation
     const Operator* change_op = nullptr;
     if (COMPRESS_POINTERS_BOOL) {
-      switch (rep.representation()) {
+      switch (type.representation()) {
         case MachineRepresentation::kTaggedPointer:
-          rep = MachineType::CompressedPointer();
+          type = MachineType::CompressedPointer();
           change_op = machine()->ChangeCompressedPointerToTaggedPointer();
           break;
         case MachineRepresentation::kTaggedSigned:
-          rep = MachineType::CompressedSigned();
+          type = MachineType::CompressedSigned();
          change_op = machine()->ChangeCompressedSignedToTaggedSigned();
          break;
        case MachineRepresentation::kTagged:
-          rep = MachineType::AnyCompressed();
+          type = MachineType::AnyCompressed();
          change_op = machine()->ChangeCompressedToTagged();
          break;
        default:
          break;
      }
    }
-    const Operator* op = machine()->Load(rep);
+    const Operator* op = machine()->Load(type);
    CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_);
    if (needs_poisoning == LoadSensitivity::kCritical &&
        poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) {
-      op = machine()->PoisonedLoad(rep);
+      op = machine()->PoisonedLoad(type);
    }

    Node* load = AddNode(op, base, index);
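The renames in the hunk above run through RawMachineAssembler::Load's pointer-compression path, where a tagged load is narrowed to a compressed load and a decompression operator is recorded to apply afterwards. A self-contained sketch of that dispatch, with simplified enums standing in for V8's operators (not the real API):

    #include <optional>

    // Simplified stand-ins for the MachineRepresentation cases handled above.
    enum class Rep { kTaggedPointer, kTaggedSigned, kTagged, kCompressed, kOther };

    struct LoweredLoad {
      Rep loaded;                    // what the memory operation actually reads
      std::optional<Rep> change_to;  // decompression to apply afterwards, if any
    };

    // Mirrors the switch in RawMachineAssembler::Load: with pointer compression
    // on, tagged loads read the compressed form and then change back to tagged.
    LoweredLoad LowerLoad(Rep rep, bool compress_pointers) {
      if (compress_pointers) {
        switch (rep) {
          case Rep::kTaggedPointer:
          case Rep::kTaggedSigned:
          case Rep::kTagged:
            return {Rep::kCompressed, rep};
          default:
            break;
        }
      }
      return {rep, std::nullopt};
    }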
@@ -286,21 +286,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
     DCHECK_NULL(value_high);
     return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
   }
-#define ATOMIC_FUNCTION(name)                                                \
-  Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value,  \
-                     Node* value_high) {                                     \
-    if (rep.representation() == MachineRepresentation::kWord64) {            \
-      if (machine()->Is64()) {                                               \
-        DCHECK_NULL(value_high);                                             \
-        return AddNode(machine()->Word64Atomic##name(rep), base, index,      \
-                       value);                                               \
-      } else {                                                               \
-        return AddNode(machine()->Word32AtomicPair##name(), base, index,     \
-                       VALUE_HALVES);                                        \
-      }                                                                      \
-    }                                                                        \
-    DCHECK_NULL(value_high);                                                 \
-    return AddNode(machine()->Word32Atomic##name(rep), base, index, value);  \
+#define ATOMIC_FUNCTION(name)                                                 \
+  Node* Atomic##name(MachineType type, Node* base, Node* index, Node* value,  \
+                     Node* value_high) {                                      \
+    if (type.representation() == MachineRepresentation::kWord64) {            \
+      if (machine()->Is64()) {                                                \
+        DCHECK_NULL(value_high);                                              \
+        return AddNode(machine()->Word64Atomic##name(type), base, index,      \
+                       value);                                                \
+      } else {                                                                \
+        return AddNode(machine()->Word32AtomicPair##name(), base, index,      \
+                       VALUE_HALVES);                                         \
+      }                                                                       \
+    }                                                                         \
+    DCHECK_NULL(value_high);                                                  \
+    return AddNode(machine()->Word32Atomic##name(type), base, index, value);  \
   }
   ATOMIC_FUNCTION(Exchange)
   ATOMIC_FUNCTION(Add)
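The ATOMIC_FUNCTION macro above stamps out one Atomic method per operation name and dispatches on word size via token pasting. A minimal standalone sketch of the same pattern, with toy arithmetic functions in place of V8's atomic operators:

    #include <cstdint>

    int32_t Word32Add(int32_t a, int32_t b) { return a + b; }
    int64_t Word64Add(int64_t a, int64_t b) { return a + b; }

    // One expansion per operation name; ##name pastes the suffix on, so
    // ATOMIC_FUNCTION(Add) defines AtomicAdd calling Word32Add/Word64Add.
    #define ATOMIC_FUNCTION(name)                              \
      int64_t Atomic##name(bool is64, int64_t a, int64_t b) {  \
        if (is64) return Word64##name(a, b);                   \
        return Word32##name(static_cast<int32_t>(a),           \
                            static_cast<int32_t>(b));          \
      }

    ATOMIC_FUNCTION(Add)
    #undef ATOMIC_FUNCTION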
@@ -311,15 +311,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
 #undef ATOMIC_FUNCTION
 #undef VALUE_HALVES
 
-  Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
+  Node* AtomicCompareExchange(MachineType type, Node* base, Node* index,
                               Node* old_value, Node* old_value_high,
                               Node* new_value, Node* new_value_high) {
-    if (rep.representation() == MachineRepresentation::kWord64) {
+    if (type.representation() == MachineRepresentation::kWord64) {
       if (machine()->Is64()) {
         DCHECK_NULL(old_value_high);
         DCHECK_NULL(new_value_high);
-        return AddNode(machine()->Word64AtomicCompareExchange(rep), base, index,
-                       old_value, new_value);
+        return AddNode(machine()->Word64AtomicCompareExchange(type), base,
+                       index, old_value, new_value);
       } else {
         return AddNode(machine()->Word32AtomicPairCompareExchange(), base,
                        index, old_value, old_value_high, new_value,
@@ -328,7 +328,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
     }
     DCHECK_NULL(old_value_high);
     DCHECK_NULL(new_value_high);
-    return AddNode(machine()->Word32AtomicCompareExchange(rep), base, index,
+    return AddNode(machine()->Word32AtomicCompareExchange(type), base, index,
                    old_value, new_value);
   }
 
@@ -889,15 +889,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Node* Parameter(size_t index);
 
   // Pointer utilities.
-  Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
-    return Load(rep, PointerConstant(address), Int32Constant(offset));
+  Node* LoadFromPointer(void* address, MachineType type, int32_t offset = 0) {
+    return Load(type, PointerConstant(address), Int32Constant(offset));
   }
   Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
     return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
   }
-  Node* UnalignedLoadFromPointer(void* address, MachineType rep,
+  Node* UnalignedLoadFromPointer(void* address, MachineType type,
                                  int32_t offset = 0) {
-    return UnalignedLoad(rep, PointerConstant(address), Int32Constant(offset));
+    return UnalignedLoad(type, PointerConstant(address), Int32Constant(offset));
   }
   Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
                                 Node* node) {
@@ -216,11 +216,11 @@ template <typename CType, bool use_result_buffer>
 class BinopTester {
  public:
   explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester,
-                       MachineType rep)
+                       MachineType type)
       : T(tester),
-        param0(T->LoadFromPointer(&p0, rep)),
-        param1(T->LoadFromPointer(&p1, rep)),
-        rep(rep),
+        param0(T->LoadFromPointer(&p0, type)),
+        param1(T->LoadFromPointer(&p1, type)),
+        type(type),
         p0(static_cast<CType>(0)),
         p1(static_cast<CType>(0)),
         result(static_cast<CType>(0)) {}
@@ -242,7 +242,7 @@ class BinopTester {
 
   void AddReturn(Node* val) {
     if (use_result_buffer) {
-      T->Store(rep.representation(), T->PointerConstant(&result),
+      T->Store(type.representation(), T->PointerConstant(&result),
                T->Int32Constant(0), val, kNoWriteBarrier);
       T->Return(T->Int32Constant(CHECK_VALUE));
     } else {
@@ -262,7 +262,7 @@
   }
 
  protected:
-  MachineType rep;
+  MachineType type;
   CType p0;
   CType p1;
   CType result;
@@ -41,8 +41,8 @@ Node* SmiFromInt32(CodeAssembler& m, Node* value) {
 }
 
 Node* LoadObjectField(CodeAssembler& m, Node* object, int offset,
-                      MachineType rep = MachineType::AnyTagged()) {
-  return m.Load(rep, object, m.IntPtrConstant(offset - kHeapObjectTag));
+                      MachineType type = MachineType::AnyTagged()) {
+  return m.Load(type, object, m.IntPtrConstant(offset - kHeapObjectTag));
 }
 
 Node* LoadMap(CodeAssembler& m, Node* object) {
@@ -218,9 +218,9 @@ void CheckEq<Smi>(Smi in_value, Smi out_value) {
 // Initializes the buffer with some raw data respecting requested representation
 // of the values.
 template <typename CType>
-void InitBuffer(CType* buffer, size_t length, MachineType rep) {
+void InitBuffer(CType* buffer, size_t length, MachineType type) {
   const size_t kBufferSize = sizeof(CType) * length;
-  if (!rep.IsTagged()) {
+  if (!type.IsTagged()) {
     byte* raw = reinterpret_cast<byte*>(buffer);
     for (size_t i = 0; i < kBufferSize; i++) {
       raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
@@ -232,13 +232,13 @@ void InitBuffer(CType* buffer, size_t length, MachineType rep) {
   // pointer decompression that may be happenning during load.
   Isolate* isolate = CcTest::InitIsolateOnce();
   Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
-  if (rep.IsTaggedSigned()) {
+  if (type.IsTaggedSigned()) {
     for (size_t i = 0; i < length; i++) {
       smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
     }
   } else {
     memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
-    if (!rep.IsTaggedPointer()) {
+    if (!type.IsTaggedPointer()) {
       // Also add some Smis if we are checking AnyTagged case.
       for (size_t i = 0; i < length / 2; i++) {
         smi_view[i] =
@@ -249,11 +249,11 @@
 }
 
 template <typename CType>
-void RunLoadImmIndex(MachineType rep, TestAlignment t) {
+void RunLoadImmIndex(MachineType type, TestAlignment t) {
   const int kNumElems = 16;
   CType buffer[kNumElems];
 
-  InitBuffer(buffer, kNumElems, rep);
+  InitBuffer(buffer, kNumElems, type);
 
   // Test with various large and small offsets.
   for (int offset = -1; offset <= 200000; offset *= -5) {
@@ -261,7 +261,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
       BufferedRawMachineAssemblerTester<CType> m;
       void* base_pointer = &buffer[0] - offset;
 #ifdef V8_COMPRESS_POINTERS
-      if (rep.IsTagged()) {
+      if (type.IsTagged()) {
         // When pointer compression is enabled then we need to access only
         // the lower 32-bit of the tagged value while the buffer contains
         // full 64-bit values.
@@ -271,9 +271,9 @@
       Node* base = m.PointerConstant(base_pointer);
       Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
       if (t == TestAlignment::kAligned) {
-        m.Return(m.Load(rep, base, index));
+        m.Return(m.Load(type, base, index));
       } else if (t == TestAlignment::kUnaligned) {
-        m.Return(m.UnalignedLoad(rep, base, index));
+        m.Return(m.UnalignedLoad(type, base, index));
       } else {
         UNREACHABLE();
       }
@@ -284,7 +284,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
 }
 
 template <typename CType>
-void RunLoadStore(MachineType rep, TestAlignment t) {
+void RunLoadStore(MachineType type, TestAlignment t) {
   const int kNumElems = 16;
   CType in_buffer[kNumElems];
   CType out_buffer[kNumElems];
@@ -293,7 +293,7 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
 
   STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
   MemCopy(&zap_value, &zap_data, sizeof(CType));
-  InitBuffer(in_buffer, kNumElems, rep);
+  InitBuffer(in_buffer, kNumElems, type);
 
   for (int32_t x = 0; x < kNumElems; x++) {
     int32_t y = kNumElems - x - 1;
@@ -305,11 +305,12 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
     Node* out_base = m.PointerConstant(out_buffer);
     Node* out_index = m.IntPtrConstant(y * sizeof(CType));
     if (t == TestAlignment::kAligned) {
-      Node* load = m.Load(rep, in_base, in_index);
-      m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
+      Node* load = m.Load(type, in_base, in_index);
+      m.Store(type.representation(), out_base, out_index, load,
+              kNoWriteBarrier);
     } else if (t == TestAlignment::kUnaligned) {
-      Node* load = m.UnalignedLoad(rep, in_base, in_index);
-      m.UnalignedStore(rep.representation(), out_base, out_index, load);
+      Node* load = m.UnalignedLoad(type, in_base, in_index);
+      m.UnalignedStore(type.representation(), out_base, out_index, load);
     }
 
     m.Return(m.Int32Constant(OK));
@@ -328,12 +329,12 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
 }
 
 template <typename CType>
-void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
+void RunUnalignedLoadStoreUnalignedAccess(MachineType type) {
   CType in, out;
   byte in_buffer[2 * sizeof(CType)];
   byte out_buffer[2 * sizeof(CType)];
 
-  InitBuffer(&in, 1, rep);
+  InitBuffer(&in, 1, type);
 
   for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
     // Direct write to &in_buffer[x] may cause unaligned access in C++ code so
@@ -346,11 +347,11 @@ void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
 
       Node* in_base = m.PointerConstant(in_buffer);
       Node* in_index = m.IntPtrConstant(x);
-      Node* load = m.UnalignedLoad(rep, in_base, in_index);
+      Node* load = m.UnalignedLoad(type, in_base, in_index);
 
       Node* out_base = m.PointerConstant(out_buffer);
       Node* out_index = m.IntPtrConstant(y);
-      m.UnalignedStore(rep.representation(), out_base, out_index, load);
+      m.UnalignedStore(type.representation(), out_base, out_index, load);
 
       m.Return(m.Int32Constant(OK));
 