[wasm][bulk-memory] Adjust bulk memory behavior to proposal phase 4

The following changes were introduced with the recent proposal update
(a sketch of the new behavior follows the list):
- Out-of-bounds accesses trap even when the length is 0
- Dropping a segment twice is allowed
- Dropped segments are treated as having size 0, so any access with
  size > 0 traps with an out-of-bounds error
- Active segments are dropped right after initialization
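
A minimal sketch of the new behavior, in the style of the mjsunit
bulk-memory tests. It assumes the WasmModuleBuilder harness from
test/mjsunit/wasm/wasm-module-builder.js; the builder methods, opcode
constants (kExprGetLocal, kNumericPrefix, ...), kPageSize, and assertTraps
all come from that harness, and their exact names and signatures may
differ between harness versions:

// Assumes: load('test/mjsunit/wasm/wasm-module-builder.js') in d8.
const builder = new WasmModuleBuilder();
builder.addMemory(1, 1);
builder.addPassiveDataSegment([1, 2, 3]);  // data segment 0
builder.addFunction('init', kSig_v_iii)
    .addBody([
      kExprGetLocal, 0,                       // dst
      kExprGetLocal, 1,                       // src
      kExprGetLocal, 2,                       // size
      kNumericPrefix, kExprMemoryInit, 0, 0,  // segment 0, memory 0
    ])
    .exportAs('init');
builder.addFunction('drop', kSig_v_v)
    .addBody([kNumericPrefix, kExprDataDrop, 0])  // drop segment 0
    .exportAs('drop');
const instance = builder.instantiate();

// OOB access with 0 length traps: dst is one past the end of memory.
assertTraps(kTrapMemOutOfBounds,
            () => instance.exports.init(kPageSize + 1, 0, 0));
// Double drop of segments is allowed.
instance.exports.drop();
instance.exports.drop();
// Dropped segments are treated as having size 0: size > 0 traps OOB,
// but dst = src = size = 0 still succeeds.
assertTraps(kTrapMemOutOfBounds, () => instance.exports.init(0, 0, 1));
instance.exports.init(0, 0, 0);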

R=ahaas@chromium.org

Change-Id: I4e9fc4d9212841c7d858585c672143f99287520d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1946355
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Emanuel Ziegler <ecmziegler@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65305}
Emanuel Ziegler, 2019-12-02 18:47:12 +01:00 (committed by Commit Bot)
parent ea79fb8cc0, commit 7a51fe240b
12 changed files with 92 additions and 247 deletions


@@ -4816,41 +4816,18 @@ Node* WasmGraphBuilder::AtomicFence() {
#undef ATOMIC_LOAD_LIST
#undef ATOMIC_STORE_LIST
Node* WasmGraphBuilder::CheckDataSegmentIsPassiveAndNotDropped(
uint32_t data_segment_index, wasm::WasmCodePosition position) {
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
Node* dropped_data_segments =
LOAD_INSTANCE_FIELD(DroppedDataSegments, MachineType::Pointer());
Node* is_segment_dropped = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Uint8()), dropped_data_segments,
mcgraph()->IntPtrConstant(data_segment_index), Effect(), Control()));
TrapIfTrue(wasm::kTrapDataSegmentDropped, is_segment_dropped, position);
return dropped_data_segments;
}
Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
auto m = mcgraph()->machine();
auto common = mcgraph()->common();
Node* size_null_check =
graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
size_null_check, Control());
Node* size_null_etrue = Effect();
Node* size_null_if_false =
graph()->NewNode(common->IfFalse(), size_null_branch);
SetControl(size_null_if_false);
CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
Node* seg_index = Uint32Constant(data_segment_index);
auto m = mcgraph()->machine();
{
// Load segment size from WasmInstanceObject::data_segment_sizes.
@@ -4889,47 +4866,28 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
Node* size_null_if_true =
graph()->NewNode(common->IfTrue(), size_null_branch);
Node* merge = SetControl(
graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
SetEffect(
graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
return merge;
return SetEffect(BuildCCall(&sig, function, dst, src, size));
}
Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
wasm::WasmCodePosition position) {
Node* dropped_data_segments =
CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
Node* seg_size_array =
LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer());
STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier));
return SetEffect(
graph()->NewNode(store_op, dropped_data_segments,
mcgraph()->IntPtrConstant(data_segment_index),
mcgraph()->Int32Constant(1), Effect(), Control()));
graph()->NewNode(store_op, seg_size_array,
mcgraph()->IntPtrConstant(data_segment_index << 2),
mcgraph()->Int32Constant(0), Effect(), Control()));
}
Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
auto m = mcgraph()->machine();
auto common = mcgraph()->common();
// If size == 0, then memory.copy is a no-op.
Node* size_null_check =
graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
size_null_check, Control());
Node* size_null_etrue = Effect();
Node* size_null_if_false =
graph()->NewNode(common->IfFalse(), size_null_branch);
SetControl(size_null_if_false);
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
Node* src_fail = BoundsCheckMemRange(&src, &size, position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, src_fail, position);
@@ -4938,102 +4896,49 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
Node* size_null_if_true =
graph()->NewNode(common->IfTrue(), size_null_branch);
Node* merge = SetControl(
graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
SetEffect(
graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
return merge;
return SetEffect(BuildCCall(&sig, function, dst, src, size));
}
Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
auto machine = mcgraph()->machine();
auto common = mcgraph()->common();
// If size == 0, then memory.fill is a no-op.
Node* size_null_check = graph()->NewNode(machine->Word32Equal(), size,
mcgraph()->Int32Constant(0));
Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
size_null_check, Control());
Node* size_null_etrue = Effect();
Node* size_null_if_false =
graph()->NewNode(common->IfFalse(), size_null_branch);
SetControl(size_null_if_false);
Node* fail = BoundsCheckMemRange(&dst, &size, position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_fill()));
MachineType sig_types[] = {MachineType::Pointer(), MachineType::Uint32(),
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, value, size);
Node* size_null_if_true =
graph()->NewNode(common->IfTrue(), size_null_branch);
Node* merge = SetControl(
graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
SetEffect(
graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
return merge;
}
Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped(
uint32_t elem_segment_index, wasm::WasmCodePosition position) {
// The elem segment index must be in bounds since it is required by
// validation.
DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
Node* dropped_elem_segments =
LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::Pointer());
Node* is_segment_dropped = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Uint8()), dropped_elem_segments,
mcgraph()->IntPtrConstant(elem_segment_index), Effect(), Control()));
TrapIfTrue(wasm::kTrapElemSegmentDropped, is_segment_dropped, position);
return dropped_elem_segments;
return SetEffect(BuildCCall(&sig, function, dst, value, size));
}
Node* WasmGraphBuilder::TableInit(uint32_t table_index,
uint32_t elem_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
auto machine = mcgraph()->machine();
auto common = mcgraph()->common();
// If size == 0, then table.init is a no-op.
Node* size_zero_check = graph()->NewNode(machine->Word32Equal(), size,
mcgraph()->Int32Constant(0));
Node* size_zero_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
size_zero_check, Control());
DCHECK_LT(table_index, env_->module->tables.size());
// The elem segment index must be in bounds since it is required by
// validation.
DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
Node* size_zero_etrue = Effect();
Node* size_zero_if_false =
graph()->NewNode(common->IfFalse(), size_zero_branch);
SetControl(size_zero_if_false);
CheckElemSegmentIsPassiveAndNotDropped(elem_segment_index, position);
Node* args[] = {
graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
graph()->NewNode(mcgraph()->common()->NumberConstant(elem_segment_index)),
BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
BuildCallToRuntime(Runtime::kWasmTableInit, args, arraysize(args));
Node* size_zero_if_true =
graph()->NewNode(common->IfTrue(), size_zero_branch);
Node* merge = SetControl(
graph()->NewNode(common->Merge(2), size_zero_if_true, Control()));
SetEffect(
graph()->NewNode(common->EffectPhi(2), size_zero_etrue, Effect(), merge));
return merge;
return SetEffect(
BuildCallToRuntime(Runtime::kWasmTableInit, args, arraysize(args)));
}
Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
wasm::WasmCodePosition position) {
// The elem segment index must be in bounds since it is required by
// validation.
DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
Node* dropped_elem_segments =
CheckElemSegmentIsPassiveAndNotDropped(elem_segment_index, position);
LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::Pointer());
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
return SetEffect(

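The same simplification applies to MemoryCopy and MemoryFill above: the
size == 0 special case is gone, the bounds check runs unconditionally, and
a zero-length copy or fill at an out-of-bounds address now traps. A sketch
under the same harness assumptions as in the commit message:

const builder = new WasmModuleBuilder();
builder.addMemory(1, 1);
builder.addFunction('copy', kSig_v_iii)
    .addBody([
      kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2,
      kNumericPrefix, kExprMemoryCopy, 0, 0,  // dst memory 0, src memory 0
    ])
    .exportAs('copy');
const instance = builder.instantiate();
instance.exports.copy(0, 0, 0);  // in bounds: a no-op
// A zero length no longer exempts an OOB address from trapping.
assertTraps(kTrapMemOutOfBounds,
            () => instance.exports.copy(kPageSize + 1, 0, 0));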

@@ -370,12 +370,6 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* AtomicFence();
// Returns a pointer to the dropped_data_segments array. Traps if the data
// segment is active or has been dropped.
Node* CheckDataSegmentIsPassiveAndNotDropped(uint32_t data_segment_index,
wasm::WasmCodePosition position);
Node* CheckElemSegmentIsPassiveAndNotDropped(uint32_t elem_segment_index,
wasm::WasmCodePosition position);
Node* MemoryInit(uint32_t data_segment_index, Node* dst, Node* src,
Node* size, wasm::WasmCodePosition position);
Node* MemoryCopy(Node* dst, Node* src, Node* size,


@@ -658,22 +658,23 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
uint32_t size = segment.source.length();
if (enabled_.has_bulk_memory()) {
if (size == 0) continue;
// Passive segments are not copied during instantiation.
if (!segment.active) continue;
uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
bool ok = base::ClampToBounds(
dest_offset, &size, static_cast<uint32_t>(instance->memory_size()));
if (!ok) {
thrower_->RuntimeError("data segment is out of bounds");
return;
}
// No need to copy empty segments.
if (size == 0) continue;
Address dest_addr =
reinterpret_cast<Address>(instance->memory_start()) + dest_offset;
Address src_addr = reinterpret_cast<Address>(wire_bytes.begin()) +
segment.source.offset();
memory_copy_wrapper(dest_addr, src_addr, size);
if (!ok) {
thrower_->RuntimeError("data segment is out of bounds");
return;
}
} else {
DCHECK(segment.active);
// Segments of size == 0 are just nops.
@@ -1627,15 +1628,18 @@ void InstanceBuilder::InitializeIndirectFunctionTables(
bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<WasmTableObject> table_object,
uint32_t table_index,
const WasmElemSegment& elem_segment, uint32_t dst,
uint32_t src, size_t count) {
if (count == 0) return true;
uint32_t table_index, uint32_t segment_index,
uint32_t dst, uint32_t src, size_t count) {
DCHECK_LT(segment_index, instance->module()->elem_segments.size());
auto& elem_segment = instance->module()->elem_segments[segment_index];
// TODO(wasm): Move this functionality into wasm-objects, since it is used
// for both instantiation and in the implementation of the table.init
// instruction.
if (!base::IsInBounds(dst, count, table_object->current_length()) ||
!base::IsInBounds(src, count, elem_segment.entries.size())) {
!base::IsInBounds(src, count,
instance->dropped_elem_segments()[segment_index] == 0
? elem_segment.entries.size()
: 0)) {
return false;
}
@@ -1695,7 +1699,9 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
}
void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
for (auto& elem_segment : module_->elem_segments) {
for (uint32_t segment_index = 0;
segment_index < module_->elem_segments.size(); ++segment_index) {
auto& elem_segment = instance->module()->elem_segments[segment_index];
// Passive segments are not copied during instantiation.
if (!elem_segment.active) continue;
@@ -1703,14 +1709,17 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
uint32_t src = 0;
size_t count = elem_segment.entries.size();
if (enabled_.has_bulk_memory() && count == 0) continue;
bool success = LoadElemSegmentImpl(
isolate_, instance,
handle(WasmTableObject::cast(
instance->tables().get(elem_segment.table_index)),
isolate_),
table_index, elem_segment, dst, src, count);
table_index, segment_index, dst, src, count);
// Mark the active segments as already dropped, since table.init on
// a dropped passive segment and on an active segment have the same
// behavior.
instance->dropped_elem_segments()[segment_index] = 1;
if (enabled_.has_bulk_memory()) {
if (!success) {
thrower_->RuntimeError("table initializer is out of bounds");
@@ -1751,12 +1760,11 @@ void InstanceBuilder::InitializeExceptions(
bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
auto& elem_segment = instance->module()->elem_segments[segment_index];
return LoadElemSegmentImpl(
isolate, instance,
handle(WasmTableObject::cast(instance->tables().get(table_index)),
isolate),
table_index, elem_segment, dst, src, count);
table_index, segment_index, dst, src, count);
}
} // namespace wasm
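LoadTableSegments now marks every active element segment as dropped once
it has been loaded, so a later elem.drop on it is just a double drop,
which the updated proposal allows. A sketch under the same harness
assumptions (the addElementSegment signature used here is an assumption
about this era's harness):

const builder = new WasmModuleBuilder();
builder.setTableBounds(3, 3);
builder.addElementSegment(0, false, []);  // active element segment 0
builder.addFunction('drop', kSig_v_v)
    .addBody([kNumericPrefix, kExprElemDrop, 0])
    .exportAs('drop');
const instance = builder.instantiate();
// The active segment was implicitly dropped during instantiation;
// dropping it again (even twice) no longer traps.
instance.exports.drop();
instance.exports.drop();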


@@ -1725,24 +1725,6 @@ class ThreadImpl {
return true;
}
bool CheckDataSegmentIsPassiveAndNotDropped(uint32_t index, pc_t pc) {
DCHECK_LT(index, module()->num_declared_data_segments);
if (instance_object_->dropped_data_segments()[index]) {
DoTrap(kTrapDataSegmentDropped, pc);
return false;
}
return true;
}
bool CheckElemSegmentIsPassiveAndNotDropped(uint32_t index, pc_t pc) {
DCHECK_LT(index, module()->elem_segments.size());
if (instance_object_->dropped_elem_segments()[index]) {
DoTrap(kTrapElemSegmentDropped, pc);
return false;
}
return true;
}
template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
Address* address, pc_t pc, int* const len,
@@ -1790,18 +1772,13 @@ class ThreadImpl {
return true;
case kExprMemoryInit: {
MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
*len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
if (size == 0) {
return true;
}
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.data_segment_index,
pc)) {
return false;
}
Address dst_addr;
auto src_max =
instance_object_->data_segment_sizes()[imm.data_segment_index];
@@ -1818,11 +1795,11 @@
}
case kExprDataDrop: {
DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.index, module()->num_declared_data_segments);
*len += imm.length;
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
instance_object_->dropped_data_segments()[imm.index] = 1;
instance_object_->data_segment_sizes()[imm.index] = 0;
return true;
}
case kExprMemoryCopy: {
@@ -1831,9 +1808,6 @@
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
if (size == 0) {
return true;
}
Address dst_addr;
Address src_addr;
if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
@@ -1852,9 +1826,6 @@
auto size = Pop().to<uint32_t>();
auto value = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
if (size == 0) {
return true;
}
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
if (!ok) {
@@ -1870,13 +1841,6 @@
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
if (size == 0) {
return true;
}
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.elem_segment_index,
pc)) {
return false;
}
HandleScope scope(isolate_); // Avoid leaking handles.
bool ok = WasmInstanceObject::InitTableEntries(
instance_object_->GetIsolate(), instance_object_, imm.table.index,
@@ -1887,9 +1851,6 @@
case kExprElemDrop: {
ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
*len += imm.length;
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
instance_object_->dropped_elem_segments()[imm.index] = 1;
return true;
}


@@ -223,8 +223,6 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_starts, Address*,
kDataSegmentStartsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_sizes, uint32_t*,
kDataSegmentSizesOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_data_segments, byte*,
kDroppedDataSegmentsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_elem_segments, byte*,
kDroppedElemSegmentsOffset)


@@ -70,8 +70,6 @@ class WasmInstanceNativeAllocations {
std::make_unique<Address[]>(num_data_segments));
SET(instance, data_segment_sizes,
std::make_unique<uint32_t[]>(num_data_segments));
SET(instance, dropped_data_segments,
std::make_unique<uint8_t[]>(num_data_segments));
SET(instance, dropped_elem_segments,
std::make_unique<uint8_t[]>(num_elem_segments));
}
@@ -121,7 +119,6 @@ class WasmInstanceNativeAllocations {
std::unique_ptr<Address[]> imported_mutable_globals_;
std::unique_ptr<Address[]> data_segment_starts_;
std::unique_ptr<uint32_t[]> data_segment_sizes_;
std::unique_ptr<uint8_t[]> dropped_data_segments_;
std::unique_ptr<uint8_t[]> dropped_elem_segments_;
#undef SET
};
@@ -1379,17 +1376,16 @@ void WasmInstanceObject::InitDataSegmentArrays(
num_data_segments == module->data_segments.size());
for (size_t i = 0; i < num_data_segments; ++i) {
const wasm::WasmDataSegment& segment = module->data_segments[i];
// Set the active segments to being already dropped, since memory.init on
// a dropped passive segment and an active segment have the same
// behavior.
instance->dropped_data_segments()[i] = segment.active ? 1 : 0;
// Initialize the pointer and size of passive segments.
auto source_bytes = wire_bytes.SubVector(segment.source.offset(),
segment.source.end_offset());
instance->data_segment_starts()[i] =
reinterpret_cast<Address>(source_bytes.begin());
instance->data_segment_sizes()[i] = source_bytes.length();
// Set the active segments to being already dropped, since memory.init on
// a dropped passive segment and an active segment have the same
// behavior.
instance->data_segment_sizes()[i] =
segment.active ? 0 : source_bytes.length();
}
}
@@ -1399,11 +1395,7 @@ void WasmInstanceObject::InitElemSegmentArrays(
auto module = module_object->module();
auto num_elem_segments = module->elem_segments.size();
for (size_t i = 0; i < num_elem_segments; ++i) {
const wasm::WasmElemSegment& segment = module->elem_segments[i];
// Set the active segments to being already dropped, since table.init on
// a dropped passive segment and an active segment have the same
// behavior.
instance->dropped_elem_segments()[i] = segment.active ? 1 : 0;
instance->dropped_elem_segments()[i] = 0;
}
}
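With dropped_data_segments gone, data_segment_sizes doubles as the drop
marker: active data segments start out with size 0, so memory.init on them
behaves exactly like memory.init on a dropped passive segment. A sketch
under the same harness assumptions:

const builder = new WasmModuleBuilder();
builder.addMemory(1, 1);
builder.addDataSegment(0, [1, 2, 3]);  // active data segment 0
builder.addFunction('init', kSig_v_iii)
    .addBody([
      kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2,
      kNumericPrefix, kExprMemoryInit, 0, 0,
    ])
    .exportAs('init');
const instance = builder.instantiate();
// The segment's recorded size is 0, so only zero-size inits succeed.
instance.exports.init(0, 0, 0);
assertTraps(kTrapMemOutOfBounds, () => instance.exports.init(0, 0, 1));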
@@ -1435,8 +1427,6 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
uint32_t table_src_index,
uint32_t dst, uint32_t src,
uint32_t count) {
// Copying 0 elements is a no-op.
if (count == 0) return true;
CHECK_LT(table_dst_index, instance->tables().length());
CHECK_LT(table_src_index, instance->tables().length());
auto table_dst = handle(
@@ -1471,8 +1461,6 @@ bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
uint32_t table_index,
uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
// Copying 0 elements is a no-op.
if (count == 0) return true;
// Note that this implementation just calls through to module instantiation.
// This is intentional, so that the runtime only depends on the object
// methods, and not the module instantiation logic.


@@ -412,7 +412,6 @@ class WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address)
DECL_PRIMITIVE_ACCESSORS(data_segment_starts, Address*)
DECL_PRIMITIVE_ACCESSORS(data_segment_sizes, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(dropped_data_segments, byte*)
DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
// Clear uninitialized padding space. This ensures that the snapshot content
@@ -459,7 +458,6 @@ class WasmInstanceObject : public JSObject {
V(kRealStackLimitAddressOffset, kSystemPointerSize) \
V(kDataSegmentStartsOffset, kSystemPointerSize) \
V(kDataSegmentSizesOffset, kSystemPointerSize) \
V(kDroppedDataSegmentsOffset, kSystemPointerSize) \
V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
V(kHeaderSize, 0)


@@ -128,9 +128,9 @@ WASM_EXEC_TEST(MemoryInitOutOfBounds) {
CHECK_EQ(0xDEADBEEF, r.Call(1000, 0, kWasmPageSize));
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, 0, 1));
// Copy 0 out-of-bounds succeeds.
CHECK_EQ(0, r.Call(kWasmPageSize + 1, 0, 0));
CHECK_EQ(0, r.Call(0, kWasmPageSize + 1, 0));
// Copy 0 out-of-bounds fails if target is invalid.
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, 0, 0));
CHECK_EQ(0xDEADBEEF, r.Call(0, kWasmPageSize + 1, 0));
// Make sure bounds aren't checked with 32-bit wrapping.
CHECK_EQ(0xDEADBEEF, r.Call(1, 1, 0xFFFFFFFF));
@@ -239,9 +239,9 @@ WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
CHECK_EQ(0xDEADBEEF, r.Call(1000, 0, kWasmPageSize));
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, 0, 1));
// Copy 0 out-of-bounds always succeeds.
CHECK_EQ(0, r.Call(kWasmPageSize + 1, 0, 0));
CHECK_EQ(0, r.Call(0, kWasmPageSize + 1, 0));
// Copy 0 out-of-bounds fails if target is invalid.
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, 0, 0));
CHECK_EQ(0xDEADBEEF, r.Call(0, kWasmPageSize + 1, 0));
// Make sure bounds aren't checked with 32-bit wrapping.
CHECK_EQ(0xDEADBEEF, r.Call(1, 1, 0xFFFFFFFF));
@@ -314,8 +314,8 @@ WASM_EXEC_TEST(MemoryFillOutOfBounds) {
CHECK_EQ(0xDEADBEEF, r.Call(1000, v, kWasmPageSize));
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, v, 1));
// Fill 0 out-of-bounds succeeds.
CHECK_EQ(0, r.Call(kWasmPageSize + 1, v, 0));
// Fill 0 out-of-bounds still fails.
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, v, 0));
// Make sure bounds aren't checked with 32-bit wrapping.
CHECK_EQ(0xDEADBEEF, r.Call(1, v, 0xFFFFFFFF));
@@ -330,7 +330,7 @@ WASM_EXEC_TEST(DataDropTwice) {
BUILD(r, WASM_DATA_DROP(0), kExprI32Const, 0);
CHECK_EQ(0, r.Call());
CHECK_EQ(0xDEADBEEF, r.Call());
CHECK_EQ(0, r.Call());
}
WASM_EXEC_TEST(DataDropThenMemoryInit) {
@@ -533,9 +533,9 @@ void TestTableInitOob(ExecutionTier execution_tier, int table_index) {
r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
// 0-count is never oob.
r.CheckCallViaJS(0, kTableSize + 1, 0, 0);
r.CheckCallViaJS(0, 0, kTableSize + 1, 0);
// 0-count is still oob if target is invalid.
r.CheckCallViaJS(0xDEADBEEF, kTableSize + 1, 0, 0);
r.CheckCallViaJS(0xDEADBEEF, 0, kTableSize + 1, 0);
r.CheckCallViaJS(0xDEADBEEF, 0, 0, 6);
r.CheckCallViaJS(0xDEADBEEF, 0, 1, 5);
@@ -821,8 +821,8 @@ void TestTableCopyOob1(ExecutionTier execution_tier, int table_dst,
{
const uint32_t big = 1000000;
r.CheckCallViaJS(0, big, 0, 0);
r.CheckCallViaJS(0, 0, big, 0);
r.CheckCallViaJS(0xDEADBEEF, big, 0, 0);
r.CheckCallViaJS(0xDEADBEEF, 0, big, 0);
}
for (uint32_t big = 4294967295; big > 1000; big >>= 1) {
@@ -865,7 +865,7 @@ WASM_EXEC_TEST(ElemDropTwice) {
BUILD(r, WASM_ELEM_DROP(0), kExprI32Const, 0);
r.CheckCallViaJS(0);
r.CheckCallViaJS(0xDEADBEEF);
r.CheckCallViaJS(0);
}
WASM_EXEC_TEST(ElemDropThenTableInit) {


@@ -241,7 +241,6 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment(Vector<const byte> bytes) {
DCHECK_EQ(index, test_module_->data_segments.size());
DCHECK_EQ(index, data_segment_starts_.size());
DCHECK_EQ(index, data_segment_sizes_.size());
DCHECK_EQ(index, dropped_data_segments_.size());
// Add a passive data segment. This isn't used by function compilation,
// but it keeps the index in sync. The data segment's source will not be
@@ -268,12 +267,10 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment(Vector<const byte> bytes) {
}
data_segment_starts_.push_back(new_data_address + old_data_size);
data_segment_sizes_.push_back(bytes.length());
dropped_data_segments_.push_back(0);
// The vector pointers may have moved, so update the instance object.
instance_object_->set_data_segment_starts(data_segment_starts_.data());
instance_object_->set_data_segment_sizes(data_segment_sizes_.data());
instance_object_->set_dropped_data_segments(dropped_data_segments_.data());
return index;
}


@@ -249,7 +249,6 @@ class TestingModuleBuilder {
std::vector<byte> data_segment_data_;
std::vector<Address> data_segment_starts_;
std::vector<uint32_t> data_segment_sizes_;
std::vector<byte> dropped_data_segments_;
std::vector<byte> dropped_elem_segments_;
const WasmGlobal* AddGlobal(ValueType type);


@@ -79,11 +79,12 @@ function getMemoryInit(mem, segment_data) {
// is a trap, not a validation error.
const instance = builder.instantiate();
// Initialization succeeds, because the size is 0 which is always valid.
// Initialization succeeds, because source address and size are 0
// which is valid on a dropped segment.
instance.exports.init(0);
// Initialization fails, because the size > 0 on dropped segment
assertTraps(kTrapDataSegmentDropped, () => instance.exports.init(1));
// Initialization fails, because the segment is implicitly dropped.
assertTraps(kTrapMemOutOfBounds, () => instance.exports.init(1));
})();
(function TestDataDropOnActiveSegment() {
@@ -99,7 +100,8 @@ function getMemoryInit(mem, segment_data) {
.exportAs('drop');
const instance = builder.instantiate();
assertTraps(kTrapDataSegmentDropped, () => instance.exports.drop());
// The active segment was implicitly dropped at instantiation, so this
// is a double drop, which is allowed.
instance.exports.drop();
})();
function getMemoryCopy(mem) {
@@ -167,7 +169,9 @@ function getMemoryFill(mem) {
.exportAs('drop');
const instance = builder.instantiate();
assertTraps(kTrapElemSegmentDropped, () => instance.exports.drop());
// The segment was already dropped after initialization and is therefore
// no longer active.
instance.exports.drop();
})();
(function TestLazyDataSegmentBoundsCheck() {
@@ -180,10 +184,10 @@ function getMemoryFill(mem) {
assertEquals(0, view[kPageSize - 1]);
// Instantiation fails, but still modifies memory.
// Instantiation fails and memory remains unmodified.
assertThrows(() => builder.instantiate({m: {memory}}), WebAssembly.RuntimeError);
assertEquals(42, view[kPageSize - 1]);
assertEquals(0, view[kPageSize - 1]);
// The second segment is not initialized.
assertEquals(0, view[0]);
})();
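
The lazy bounds-check test above reflects the other instantiation change:
the out-of-bounds check now runs before any bytes are copied, so a failing
active segment leaves an imported memory untouched. A standalone sketch,
same harness assumptions:

const memory = new WebAssembly.Memory({initial: 1});
const view = new Uint8Array(memory.buffer);
const builder = new WasmModuleBuilder();
builder.addImportedMemory('m', 'memory', 1);
// Two bytes starting at the last valid address: one byte out of bounds.
builder.addDataSegment(kPageSize - 1, [42, 42]);
assertThrows(() => builder.instantiate({m: {memory}}),
             WebAssembly.RuntimeError);
// The bounds check ran before the copy, so nothing was written.
assertEquals(0, view[kPageSize - 1]);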


@@ -6,6 +6,7 @@
[ALWAYS, {
#TODO(ahaas): Add additional stack checks on mips.
'skip-stack-guard-page': [PASS, ['arch == mipsel or arch == mips64el or ((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
# TODO(v8:9144): The MVP behavior when bounds-checking segments changed in
# the bulk-memory proposal. Since we've enabled bulk-memory by default, we
# need to update to use its testsuite.
@@ -13,24 +14,16 @@
'binary-leb128': [FAIL],
'elem': [FAIL],
'data': [FAIL],
# TODO(mstarzinger): Roll newest tests into "js-types" repository.
'proposals/js-types/exports': [FAIL],
'proposals/js-types/globals': [FAIL],
'proposals/js-types/linking': [FAIL],
# TODO(thibaudm): Spec tests do not check multi-return functions correctly.
'proposals/multi-value/call': [FAIL],
'proposals/multi-value/if': [FAIL],
'proposals/multi-value/func': [FAIL],
# TODO(ahaas): Most recent changes to the bulk-memory proposal.
'proposals/bulk-memory-operations/memory_copy': [FAIL],
'proposals/bulk-memory-operations/table_copy': [FAIL],
'proposals/bulk-memory-operations/table_init': [FAIL],
'proposals/bulk-memory-operations/memory_init': [FAIL],
'proposals/bulk-memory-operations/memory_fill': [FAIL],
'proposals/bulk-memory-operations/elem': [FAIL],
'proposals/bulk-memory-operations/data': [FAIL],
'proposals/bulk-memory-operations/bulk': [FAIL],
}], # ALWAYS
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {