[wasm] Implement bulk memory operations on memory64

This makes the bulk memory operations (memory.init, memory.copy and
memory.fill) respect the memory type, i.e. use i64 values for memory
offsets if memory64 is enabled.

The called C functions now expect memory offsets to be passed as
{uintptr_t}, so that we can address the whole memory on all systems. For
64-bit memories on 32-bit systems, the upper half of the 64-bit value is
checked in compiled code before passing the lower half to the C
function.
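
To illustrate the idea (a minimal standalone sketch, not code from this
CL; the helper name is made up), the conversion described above boils
down to the following, with the real check emitted as compiled code in
MemTypeToUintPtrOrOOBTrap:

  #include <cstdint>
  #include <cstdio>

  // Hypothetical helper: convert a 64-bit memory offset to uintptr_t,
  // reporting out-of-bounds if it does not fit. On 32-bit hosts the high
  // word must be zero; on 64-bit hosts the conversion is lossless.
  bool MemOffsetToUintPtrOrOOB(uint64_t offset64, uintptr_t* out) {
    if (sizeof(uintptr_t) == sizeof(uint32_t)) {
      uint32_t high_word = static_cast<uint32_t>(offset64 >> 32);
      if (high_word != 0) return false;  // compiled code traps instead
    }
    *out = static_cast<uintptr_t>(offset64);  // pass only the low word on
    return true;
  }

  int main() {
    uintptr_t offset;
    std::printf("%d\n", MemOffsetToUintPtrOrOOB(0x1000, &offset));  // 1
    std::printf("%d\n", MemOffsetToUintPtrOrOOB(uint64_t{1} << 32, &offset));
    // The second call prints 0 on a 32-bit host and 1 on a 64-bit host.
  }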

Liftoff support turned out to be a bit harder than expected: on ia32 we
cannot hold three 64-bit values in registers at the same time (as needed
for memory.copy), because there are not enough registers. Liftoff
support for memory64 on 32-bit platforms is therefore left to a
follow-up CL; for now Liftoff bails out in that case.

R=thibaudm@chromium.org

Bug: v8:10949, chromium:1281995
Change-Id: Ie77636145f94cc579d479c4e7c346ba3c682679d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3427206
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78918}
Clemens Backes 2022-01-31 16:00:24 +01:00 committed by V8 LUCI CQ
parent 1c3ac2d9f4
commit 18469ec4bf
6 changed files with 205 additions and 51 deletions


@@ -5379,16 +5379,18 @@ void WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_init());
MemTypeToUintPtrOrOOBTrap({&dst}, position);
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), GetInstance()},
{MachineRepresentation::kWord32, dst},
{MachineType::PointerRepresentation(), dst},
{MachineRepresentation::kWord32, src},
{MachineRepresentation::kWord32,
gasm_->Uint32Constant(data_segment_index)},
{MachineRepresentation::kWord32, size}});
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
auto sig = FixedSizeSignature<MachineType>::Returns(MachineType::Int32())
.Params(MachineType::Pointer());
Node* call = BuildCCall(&sig, function, stack_slot);
// TODO(manoskouk): Also throw kDataSegmentOutOfBounds.
TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
@@ -5427,19 +5429,42 @@ Node* WasmGraphBuilder::StoreArgsInStackSlot(
return stack_slot;
}
void WasmGraphBuilder::MemTypeToUintPtrOrOOBTrap(
std::initializer_list<Node**> nodes, wasm::WasmCodePosition position) {
if (!env_->module->is_memory64) {
for (Node** node : nodes) {
*node = BuildChangeUint32ToUintPtr(*node);
}
return;
}
if (kSystemPointerSize == kInt64Size) return; // memory64 on 64-bit
Node* any_high_word = nullptr;
for (Node** node : nodes) {
Node* high_word =
gasm_->TruncateInt64ToInt32(gasm_->Word64Shr(*node, Int32Constant(32)));
any_high_word =
any_high_word ? gasm_->Word32Or(any_high_word, high_word) : high_word;
// Only keep the low word as uintptr_t.
*node = gasm_->TruncateInt64ToInt32(*node);
}
TrapIfTrue(wasm::kTrapMemOutOfBounds, any_high_word, position);
}
void WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_copy());
MemTypeToUintPtrOrOOBTrap({&dst, &src, &size}, position);
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), GetInstance()},
{MachineRepresentation::kWord32, dst},
{MachineRepresentation::kWord32, src},
{MachineRepresentation::kWord32, size}});
{MachineType::PointerRepresentation(), dst},
{MachineType::PointerRepresentation(), src},
{MachineType::PointerRepresentation(), size}});
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
auto sig = FixedSizeSignature<MachineType>::Returns(MachineType::Int32())
.Params(MachineType::Pointer());
Node* call = BuildCCall(&sig, function, stack_slot);
TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}
@@ -5449,14 +5474,16 @@ void WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_memory_fill());
MemTypeToUintPtrOrOOBTrap({&dst, &size}, position);
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), GetInstance()},
{MachineRepresentation::kWord32, dst},
{MachineType::PointerRepresentation(), dst},
{MachineRepresentation::kWord32, value},
{MachineRepresentation::kWord32, size}});
{MachineType::PointerRepresentation(), size}});
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
auto sig = FixedSizeSignature<MachineType>::Returns(MachineType::Int32())
.Params(MachineType::Pointer());
Node* call = BuildCCall(&sig, function, stack_slot);
TrapIfFalse(wasm::kTrapMemOutOfBounds, call, position);
}


@@ -700,6 +700,9 @@ class WasmGraphBuilder {
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
void MemTypeToUintPtrOrOOBTrap(std::initializer_list<Node**> nodes,
wasm::WasmCodePosition position);
Node* IsNull(Node* object);
void GetGlobalBaseAndOffset(const wasm::WasmGlobal&, Node** base_node,


@@ -4823,13 +4823,34 @@ class LiftoffCompiler {
void AtomicFence(FullDecoder* decoder) { __ AtomicFence(); }
// Pop a memtype (i32 or i64 depending on {WasmModule::is_memory64}) to a
// register. Returns the ptrsized register holding the popped value.
LiftoffRegister PopMemTypeToRegister(LiftoffRegList pinned) {
LiftoffRegister reg = __ PopToRegister(pinned);
// On 64-bit hosts, potentially zero-extend, then return.
if (kSystemPointerSize == kInt64Size) {
if (!env_->module->is_memory64) {
__ emit_u32_to_uintptr(reg.gp(), reg.gp());
}
return reg;
}
// For memory32 on 32-bit systems, also nothing to do.
if (!env_->module->is_memory64) return reg;
// TODO(v8:10949): Implement bounds-checking of the high word, while keeping
// register pressure low enough on ia32 to pop three 64-bit values (for
// memory.copy).
__ bailout(kOtherReason, "memory64 on 32-bit platform");
return reg.low();
}
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value&,
const Value&, const Value&) {
LiftoffRegList pinned;
LiftoffRegister size = pinned.set(__ PopToRegister());
LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst = pinned.set(PopMemTypeToRegister(pinned));
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
@@ -4839,8 +4860,8 @@ class LiftoffCompiler {
__ LoadConstant(segment_index, WasmValue(imm.data_segment.index));
ExternalReference ext_ref = ExternalReference::wasm_memory_init();
auto sig =
MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32, kI32);
auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32,
kI32, kI32);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size};
// We don't need the instance anymore after the call. We can use the
@@ -4877,13 +4898,14 @@ class LiftoffCompiler {
const MemoryCopyImmediate<validate>& imm, const Value&,
const Value&, const Value&) {
LiftoffRegList pinned;
LiftoffRegister size = pinned.set(__ PopToRegister());
LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
LiftoffRegister size = pinned.set(PopMemTypeToRegister(pinned));
LiftoffRegister src = pinned.set(PopMemTypeToRegister(pinned));
LiftoffRegister dst = pinned.set(PopMemTypeToRegister(pinned));
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind,
kPointerKind, kPointerKind);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
@@ -4898,13 +4920,15 @@ class LiftoffCompiler {
const MemoryIndexImmediate<validate>& imm, const Value&,
const Value&, const Value&) {
LiftoffRegList pinned;
LiftoffRegister size = pinned.set(__ PopToRegister());
LiftoffRegister size = pinned.set(PopMemTypeToRegister(pinned));
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
LiftoffRegister dst = pinned.set(PopMemTypeToRegister(pinned));
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kPointerKind, kI32,
kPointerKind);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.


@@ -4945,10 +4945,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprMemoryInit: {
MemoryInitImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
// TODO(clemensb): Add memory64 support.
ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value size = Peek(0, 2, kWasmI32);
Value offset = Peek(1, 1, kWasmI32);
Value dst = Peek(2, 0, kWasmI32);
Value dst = Peek(2, 0, mem_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryInit, imm, dst, offset, size);
Drop(3);
return opcode_length + imm.length;
@@ -4965,10 +4965,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprMemoryCopy: {
MemoryCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
// TODO(clemensb): Add memory64 support.
Value size = Peek(0, 2, kWasmI32);
Value src = Peek(1, 1, kWasmI32);
Value dst = Peek(2, 0, kWasmI32);
ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value size = Peek(0, 2, mem_type);
Value src = Peek(1, 1, mem_type);
Value dst = Peek(2, 0, mem_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryCopy, imm, dst, src, size);
Drop(3);
return opcode_length + imm.length;
@@ -4976,10 +4976,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
case kExprMemoryFill: {
MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
// TODO(clemensb): Add memory64 support.
Value size = Peek(0, 2, kWasmI32);
ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value size = Peek(0, 2, mem_type);
Value value = Peek(1, 1, kWasmI32);
Value dst = Peek(2, 0, kWasmI32);
Value dst = Peek(2, 0, mem_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(MemoryFill, imm, dst, value, size);
Drop(3);
return opcode_length + imm.length;


@@ -460,31 +460,28 @@ class V8_NODISCARD ThreadNotInWasmScope {
#endif
};
inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
inline byte* EffectiveAddress(WasmInstanceObject instance, uintptr_t index) {
return instance.memory_start() + index;
}
inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
return base + index;
}
template <typename V>
V ReadAndIncrementOffset(Address data, size_t* offset) {
V result = ReadUnalignedValue<V>(data + *offset);
*offset += sizeof(V);
return result;
}
constexpr int32_t kSuccess = 1;
constexpr int32_t kOutOfBounds = 0;
} // namespace
int32_t memory_init_wrapper(Address data) {
constexpr int32_t kSuccess = 1;
constexpr int32_t kOutOfBounds = 0;
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
size_t offset = 0;
Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uintptr_t dst = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t seg_index = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
@@ -497,22 +494,19 @@ int32_t memory_init_wrapper(Address data) {
byte* seg_start =
reinterpret_cast<byte*>(instance.data_segment_starts()[seg_index]);
std::memcpy(EffectiveAddress(instance, dst),
EffectiveAddress(seg_start, seg_size, src), size);
std::memcpy(EffectiveAddress(instance, dst), seg_start + src, size);
return kSuccess;
}
int32_t memory_copy_wrapper(Address data) {
constexpr int32_t kSuccess = 1;
constexpr int32_t kOutOfBounds = 0;
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
size_t offset = 0;
Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
uintptr_t dst = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uintptr_t src = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uintptr_t size = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint64_t mem_size = instance.memory_size();
if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
@@ -525,19 +519,16 @@ int32_t memory_copy_wrapper(Address data) {
}
int32_t memory_fill_wrapper(Address data) {
constexpr int32_t kSuccess = 1;
constexpr int32_t kOutOfBounds = 0;
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
size_t offset = 0;
Object raw_instance = ReadAndIncrementOffset<Object>(data, &offset);
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uintptr_t dst = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint8_t value =
static_cast<uint8_t>(ReadAndIncrementOffset<uint32_t>(data, &offset));
uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
uintptr_t size = ReadAndIncrementOffset<uintptr_t>(data, &offset);
uint64_t mem_size = instance.memory_size();
if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;


@@ -111,3 +111,112 @@ function BasicMemory64Tests(num_pages) {
assertEquals(-1n, instance.exports.grow(7n)); // Above the maximum of 10.
assertEquals(4n, instance.exports.grow(6n)); // Just at the maximum of 10.
})();
(function TestBulkMemoryOperations() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
const kMemSizeInPages = 10;
const kMemSize = kMemSizeInPages * kPageSize;
builder.addMemory64(kMemSizeInPages, kMemSizeInPages);
const kSegmentSize = 1024;
// Build a data segment with values [0, kSegmentSize-1].
const segment = Array.from({length: kSegmentSize}, (_, idx) => idx);
builder.addPassiveDataSegment(segment);
builder.exportMemoryAs('memory');
builder.addFunction('fill', makeSig([kWasmI64, kWasmI32, kWasmI64], []))
.addBody([
kExprLocalGet, 0, // local.get 0 (dst)
kExprLocalGet, 1, // local.get 1 (value)
kExprLocalGet, 2, // local.get 2 (size)
kNumericPrefix, kExprMemoryFill, 0 // memory.fill mem=0
])
.exportFunc();
builder.addFunction('copy', makeSig([kWasmI64, kWasmI64, kWasmI64], []))
.addBody([
kExprLocalGet, 0, // local.get 0 (dst)
kExprLocalGet, 1, // local.get 1 (src)
kExprLocalGet, 2, // local.get 2 (size)
kNumericPrefix, kExprMemoryCopy, 0, 0 // memory.copy srcmem=0 dstmem=0
])
.exportFunc();
builder.addFunction('init', makeSig([kWasmI64, kWasmI32, kWasmI32], []))
.addBody([
kExprLocalGet, 0, // local.get 0 (dst)
kExprLocalGet, 1, // local.get 1 (offset)
kExprLocalGet, 2, // local.get 2 (size)
kNumericPrefix, kExprMemoryInit, 0, 0 // memory.init seg=0 mem=0
])
.exportFunc();
let instance = builder.instantiate();
let fill = instance.exports.fill;
let copy = instance.exports.copy;
let init = instance.exports.init;
// {memory(offset,size)} extracts the memory at [offset, offset+size) into an
// Array.
let memory = (offset, size) => Array.from(new Uint8Array(
instance.exports.memory.buffer.slice(offset, offset + size)));
// Empty init (size=0).
init(0n, 0, 0);
assertEquals([0, 0], memory(0, 2));
// Init memory[5..7] with [10..12].
init(5n, 10, 3);
assertEquals([0, 0, 10, 11, 12, 0, 0], memory(3, 7));
// Init the end of memory ([kMemSize-2, kMemSize-1]) with [20, 21].
init(BigInt(kMemSize-2), 20, 2);
assertEquals([0, 0, 20, 21], memory(kMemSize - 4, 4));
// Writing slightly OOB.
assertTraps(kTrapMemOutOfBounds, () => init(BigInt(kMemSize-2), 20, 3));
// Writing OOB, but the low 32-bit are in-bound.
assertTraps(kTrapMemOutOfBounds, () => init(1n << 32n, 0, 0));
// OOB even though size == 0.
assertTraps(kTrapMemOutOfBounds, () => init(-1n, 0, 0));
// More OOB.
assertTraps(kTrapMemOutOfBounds, () => init(-1n, 0, 1));
assertTraps(kTrapMemOutOfBounds, () => init(1n << 62n, 0, 1));
assertTraps(kTrapMemOutOfBounds, () => init(1n << 63n, 0, 1));
// Empty copy (size=0).
copy(0n, 0n, 0n);
// Copy memory[5..7] (containing [10..12]) to [3..5].
copy(3n, 5n, 3n);
assertEquals([0, 0, 0, 10, 11, 12, 11, 12, 0], memory(0, 9));
// Copy to the end of memory ([kMemSize-2, kMemSize-1]).
copy(BigInt(kMemSize-2), 3n, 2n);
assertEquals([0, 0, 10, 11], memory(kMemSize - 4, 4));
// Writing slightly OOB.
assertTraps(kTrapMemOutOfBounds, () => copy(BigInt(kMemSize-2), 0n, 3n));
// Writing OOB, but the low 32-bit are in-bound.
assertTraps(kTrapMemOutOfBounds, () => copy(1n << 32n, 0n, 1n));
assertTraps(kTrapMemOutOfBounds, () => copy(0n, 0n, 1n << 32n));
// OOB even though size == 0.
assertTraps(kTrapMemOutOfBounds, () => copy(-1n, 0n, 0n));
// More OOB.
assertTraps(kTrapMemOutOfBounds, () => copy(-1n, 0n, 1n));
assertTraps(kTrapMemOutOfBounds, () => copy(1n << 62n, 0n, 1n));
assertTraps(kTrapMemOutOfBounds, () => copy(1n << 63n, 0n, 1n));
// Empty fill (size=0).
fill(0n, 0, 0n);
// Fill memory[15..17] with 3s.
fill(15n, 3, 3n);
assertEquals([0, 3, 3, 3, 0], memory(14, 5));
// Fill the end of memory ([kMemSize-2, kMemSize-1]) with 7s.
fill(BigInt(kMemSize-2), 7, 2n);
assertEquals([0, 0, 7, 7], memory(kMemSize - 4, 4));
// Writing slightly OOB.
assertTraps(kTrapMemOutOfBounds, () => fill(BigInt(kMemSize-2), 0, 3n));
// Writing OOB, but the low 32-bit are in-bound.
assertTraps(kTrapMemOutOfBounds, () => fill(1n << 32n, 0, 1n));
assertTraps(kTrapMemOutOfBounds, () => fill(0n, 0, 1n << 32n));
// OOB even though size == 0.
assertTraps(kTrapMemOutOfBounds, () => fill(-1n, 0, 0n));
// More OOB.
assertTraps(kTrapMemOutOfBounds, () => fill(-1n, 0, 1n));
assertTraps(kTrapMemOutOfBounds, () => fill(1n << 62n, 0, 1n));
assertTraps(kTrapMemOutOfBounds, () => fill(1n << 63n, 0, 1n));
})();