// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "src/base/macros.h"
#include "src/base/v8-fallthrough.h"
#include "src/execution/isolate.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes-inl.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/fuzzer/wasm-fuzzer-common.h"

namespace v8 {
namespace internal {
namespace wasm {
namespace fuzzer {

namespace {

constexpr int kMaxArrays = 4;
constexpr int kMaxStructs = 4;
constexpr int kMaxStructFields = 4;
constexpr int kMaxFunctions = 4;
constexpr int kMaxGlobals = 64;
constexpr int kMaxParameters = 15;
constexpr int kMaxReturns = 15;
constexpr int kMaxExceptions = 4;
constexpr int kMaxTableSize = 32;
constexpr int kMaxTables = 4;
constexpr int kMaxArraySize = 20;

class DataRange {
  base::Vector<const uint8_t> data_;

 public:
  explicit DataRange(base::Vector<const uint8_t> data) : data_(data) {}
  DataRange(const DataRange&) = delete;
  DataRange& operator=(const DataRange&) = delete;

  // Don't accidentally pass DataRange by value. This will reuse bytes and
  // might lead to OOM because the end might not be reached.
  // Define move constructor and move assignment, disallow copy constructor
  // and copy assignment (below).
  DataRange(DataRange&& other) V8_NOEXCEPT : DataRange(other.data_) {
    other.data_ = {};
  }
  DataRange& operator=(DataRange&& other) V8_NOEXCEPT {
    data_ = other.data_;
    other.data_ = {};
    return *this;
  }

  size_t size() const { return data_.size(); }

  DataRange split() {
    uint16_t num_bytes = get<uint16_t>() % std::max(size_t{1}, data_.size());
    DataRange split(data_.SubVector(0, num_bytes));
    data_ += num_bytes;
    return split;
  }

  template <typename T, size_t max_bytes = sizeof(T)>
  T get() {
    // Bool needs special handling (see template specialization below).
    static_assert(!std::is_same<T, bool>::value,
                  "bool needs special handling");
    static_assert(max_bytes <= sizeof(T));
    // We want to support the case where we have less than sizeof(T) bytes
    // remaining in the slice. For example, if we emit an i32 constant, it's
    // okay if we don't have a full four bytes available, we'll just use what
    // we have. We aren't concerned about endianness because we are generating
    // arbitrary expressions.
    const size_t num_bytes = std::min(max_bytes, data_.size());
    T result = T();
    memcpy(&result, data_.begin(), num_bytes);
    data_ += num_bytes;
    return result;
  }
};

// Explicit specialization must be defined outside of class body.
template <>
bool DataRange::get<bool>() {
  // The general implementation above is not instantiable for bool, as that
  // would cause undefined behaviour when memcpy'ing random bytes to the
  // bool. This can result in different observable side effects when invoking
  // get<bool> between debug and release version, which eventually makes the
  // code output different as well as raising various unrecoverable errors on
  // runtime.
  // Hence we specialize get<bool> to consume a full byte and use the least
  // significant bit only (0 == false, 1 == true).
  return get<uint8_t>() % 2;
}

enum NonNullables { kAllowNonNullables, kDisallowNonNullables };
enum PackedTypes { kIncludePackedTypes, kExcludePackedTypes };
enum Generics { kIncludeGenerics, kExcludeGenerics };

ValueType GetValueTypeHelper(DataRange* data, bool liftoff_as_reference,
                             uint32_t num_nullable_types,
                             uint32_t num_non_nullable_types,
                             NonNullables allow_non_nullable,
                             PackedTypes include_packed_types,
                             Generics include_generics) {
  // Non wasm-gc types.
  std::vector<ValueType> types{kWasmI32, kWasmI64, kWasmF32, kWasmF64,
                               kWasmS128};
  if (!liftoff_as_reference) {
    return types[data->get<uint8_t>() % types.size()];
  }
  // If {liftoff_as_reference}, include wasm-gc types.
  if (include_packed_types == kIncludePackedTypes) {
    types.insert(types.end(), {kWasmI8, kWasmI16});
  }
  // Decide if the return type will be nullable or not.
  const bool nullable =
      (allow_non_nullable == kAllowNonNullables) ? data->get<bool>() : true;
  if (nullable) {
    types.insert(types.end(),
                 {kWasmI31Ref, kWasmFuncRef, kWasmExternRef, kWasmNullRef,
                  kWasmNullExternRef, kWasmNullFuncRef});
  }
  if (include_generics == kIncludeGenerics) {
    types.insert(types.end(), {kWasmDataRef, kWasmAnyRef, kWasmEqRef});
  }

  // The last index of user-defined types allowed is different based on the
  // nullability of the output.
  const uint32_t num_user_defined_types =
      nullable ? num_nullable_types : num_non_nullable_types;

  // Conceptually, user-defined types are added to the end of the list. Pick a
  // random one among them.
  uint32_t id = data->get<uint8_t>() % (types.size() + num_user_defined_types);

  Nullability nullability = nullable ? kNullable : kNonNullable;

  if (id >= types.size()) {
    // Return user-defined type.
    return ValueType::RefMaybeNull(id - static_cast<uint32_t>(types.size()),
                                   nullability);
  }
  // If returning a reference type, fix its nullability according to
  // {nullable}.
  if (types[id].is_reference()) {
    return ValueType::RefMaybeNull(types[id].heap_type(), nullability);
  }
  // Otherwise, just return the picked type.
  return types[id];
}

ValueType GetValueType(DataRange* data, bool liftoff_as_reference,
                       uint32_t num_types) {
  return GetValueTypeHelper(data, liftoff_as_reference, num_types, num_types,
                            kAllowNonNullables, kExcludePackedTypes,
                            kIncludeGenerics);
}

class WasmGenerator {
  template <WasmOpcode Op, ValueKind... Args>
  void op(DataRange* data) {
    Generate<Args...>(data);
    builder_->Emit(Op);
  }

  class V8_NODISCARD BlockScope {
   public:
    BlockScope(WasmGenerator* gen, WasmOpcode block_type,
               base::Vector<const ValueType> param_types,
               base::Vector<const ValueType> result_types,
               base::Vector<const ValueType> br_types, bool emit_end = true)
        : gen_(gen), emit_end_(emit_end) {
      gen->blocks_.emplace_back(br_types.begin(), br_types.end());
      gen->builder_->EmitByte(block_type);
      if (param_types.size() == 0 && result_types.size() == 0) {
        gen->builder_->EmitValueType(kWasmVoid);
        return;
      }
      if (param_types.size() == 0 && result_types.size() == 1) {
        gen->builder_->EmitValueType(result_types[0]);
        return;
      }
      // Multi-value block.
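      // A multi-value block cannot be encoded with a single shorthand block
      // type. Instead, a full signature is registered with the module builder
      // and referenced by its index (emitted as an i32v below).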
      Zone* zone = gen->builder_->builder()->zone();
      FunctionSig::Builder builder(zone, result_types.size(),
                                   param_types.size());
      for (auto& type : param_types) {
        DCHECK_NE(type, kWasmVoid);
        builder.AddParam(type);
      }
      for (auto& type : result_types) {
        DCHECK_NE(type, kWasmVoid);
        builder.AddReturn(type);
      }
      FunctionSig* sig = builder.Build();
      int sig_id = gen->builder_->builder()->AddSignature(sig);
      gen->builder_->EmitI32V(sig_id);
    }

    ~BlockScope() {
      if (emit_end_) gen_->builder_->Emit(kExprEnd);
      gen_->blocks_.pop_back();
    }

   private:
    WasmGenerator* const gen_;
    bool emit_end_;
  };

  void block(base::Vector<const ValueType> param_types,
             base::Vector<const ValueType> return_types, DataRange* data) {
    BlockScope block_scope(this, kExprBlock, param_types, return_types,
                           return_types);
    ConsumeAndGenerate(param_types, return_types, data);
  }

  template <ValueKind T>
  void block(DataRange* data) {
    block({}, base::VectorOf({ValueType::Primitive(T)}), data);
  }

  void loop(base::Vector<const ValueType> param_types,
            base::Vector<const ValueType> return_types, DataRange* data) {
    BlockScope block_scope(this, kExprLoop, param_types, return_types,
                           param_types);
    ConsumeAndGenerate(param_types, return_types, data);
  }

  template <ValueKind T>
  void loop(DataRange* data) {
    loop({}, base::VectorOf({ValueType::Primitive(T)}), data);
  }

  enum IfType { kIf, kIfElse };

  void if_(base::Vector<const ValueType> param_types,
           base::Vector<const ValueType> return_types, IfType type,
           DataRange* data) {
    // A one-armed "if" is only valid if the input and output types are the
    // same.
    DCHECK_IMPLIES(type == kIf, param_types == return_types);
    Generate(kWasmI32, data);
    BlockScope block_scope(this, kExprIf, param_types, return_types,
                           return_types);
    ConsumeAndGenerate(param_types, return_types, data);
    if (type == kIfElse) {
      builder_->Emit(kExprElse);
      ConsumeAndGenerate(param_types, return_types, data);
    }
  }

  template <ValueKind T, IfType type>
  void if_(DataRange* data) {
    static_assert(T == kVoid || type == kIfElse,
                  "if without else cannot produce a value");
    if_({},
        T == kVoid ? base::Vector<ValueType>{}
                   : base::VectorOf({ValueType::Primitive(T)}),
        type, data);
  }

  void try_block_helper(ValueType return_type, DataRange* data) {
    bool has_catch_all = data->get<bool>();
    uint8_t num_catch =
        data->get<uint8_t>() % (builder_->builder()->NumExceptions() + 1);
    bool is_delegate = num_catch == 0 && !has_catch_all && data->get<bool>();
    // Allow one more target than there are enclosing try blocks, for
    // delegating to the caller.
    base::Vector<const ValueType> return_type_vec =
        return_type.kind() == kVoid ? base::Vector<const ValueType>{}
                                    : base::VectorOf(&return_type, 1);
    BlockScope block_scope(this, kExprTry, {}, return_type_vec,
                           return_type_vec, !is_delegate);
    int control_depth = static_cast<int>(blocks_.size()) - 1;
    Generate(return_type, data);
    catch_blocks_.push_back(control_depth);
    for (int i = 0; i < num_catch; ++i) {
      const FunctionSig* exception_type =
          builder_->builder()->GetExceptionType(i);
      auto exception_type_vec =
          base::VectorOf(exception_type->parameters().begin(),
                         exception_type->parameter_count());
      builder_->EmitWithU32V(kExprCatch, i);
      ConsumeAndGenerate(exception_type_vec, return_type_vec, data);
    }
    if (has_catch_all) {
      builder_->Emit(kExprCatchAll);
      Generate(return_type, data);
    }
    if (is_delegate) {
      // The delegate target depth does not include the current try block,
      // because 'delegate' closes this scope. However it is still in the
      // {blocks_} list, so remove one to get the correct size.
int delegate_depth = data->get() % (blocks_.size() - 1); builder_->EmitWithU32V(kExprDelegate, delegate_depth); } catch_blocks_.pop_back(); } template void try_block(DataRange* data) { try_block_helper(ValueType::Primitive(T), data); } void any_block(base::Vector param_types, base::Vector return_types, DataRange* data) { uint8_t block_type = data->get() % 4; switch (block_type) { case 0: block(param_types, return_types, data); return; case 1: loop(param_types, return_types, data); return; case 2: if (param_types == return_types) { if_({}, {}, kIf, data); return; } V8_FALLTHROUGH; case 3: if_(param_types, return_types, kIfElse, data); return; } } void br(DataRange* data) { // There is always at least the block representing the function body. DCHECK(!blocks_.empty()); const uint32_t target_block = data->get() % blocks_.size(); const auto break_types = blocks_[target_block]; Generate(base::VectorOf(break_types), data); builder_->EmitWithI32V( kExprBr, static_cast(blocks_.size()) - 1 - target_block); } template void br_if(DataRange* data) { // There is always at least the block representing the function body. DCHECK(!blocks_.empty()); const uint32_t target_block = data->get() % blocks_.size(); const auto break_types = base::VectorOf(blocks_[target_block]); Generate(break_types, data); Generate(kWasmI32, data); builder_->EmitWithI32V( kExprBrIf, static_cast(blocks_.size()) - 1 - target_block); ConsumeAndGenerate( break_types, wanted_kind == kVoid ? base::Vector{} : base::VectorOf({ValueType::Primitive(wanted_kind)}), data); } template void br_on_null(DataRange* data) { DCHECK(!blocks_.empty()); const uint32_t target_block = data->get() % blocks_.size(); const auto break_types = base::VectorOf(blocks_[target_block]); if (!liftoff_as_reference_) { Generate(data); return; } Generate(break_types, data); GenerateRef(HeapType(HeapType::kAny), data); builder_->EmitWithI32V( kExprBrOnNull, static_cast(blocks_.size()) - 1 - target_block); builder_->Emit(kExprDrop); ConsumeAndGenerate( break_types, wanted_kind == kVoid ? 
base::Vector{} : base::VectorOf({ValueType::Primitive(wanted_kind)}), data); } // TODO(eholk): make this function constexpr once gcc supports it static uint8_t max_alignment(WasmOpcode memop) { switch (memop) { case kExprS128LoadMem: case kExprS128StoreMem: return 4; case kExprI64LoadMem: case kExprF64LoadMem: case kExprI64StoreMem: case kExprF64StoreMem: case kExprI64AtomicStore: case kExprI64AtomicLoad: case kExprI64AtomicAdd: case kExprI64AtomicSub: case kExprI64AtomicAnd: case kExprI64AtomicOr: case kExprI64AtomicXor: case kExprI64AtomicExchange: case kExprI64AtomicCompareExchange: case kExprS128Load8x8S: case kExprS128Load8x8U: case kExprS128Load16x4S: case kExprS128Load16x4U: case kExprS128Load32x2S: case kExprS128Load32x2U: case kExprS128Load64Splat: case kExprS128Load64Zero: return 3; case kExprI32LoadMem: case kExprI64LoadMem32S: case kExprI64LoadMem32U: case kExprF32LoadMem: case kExprI32StoreMem: case kExprI64StoreMem32: case kExprF32StoreMem: case kExprI32AtomicStore: case kExprI64AtomicStore32U: case kExprI32AtomicLoad: case kExprI64AtomicLoad32U: case kExprI32AtomicAdd: case kExprI32AtomicSub: case kExprI32AtomicAnd: case kExprI32AtomicOr: case kExprI32AtomicXor: case kExprI32AtomicExchange: case kExprI32AtomicCompareExchange: case kExprI64AtomicAdd32U: case kExprI64AtomicSub32U: case kExprI64AtomicAnd32U: case kExprI64AtomicOr32U: case kExprI64AtomicXor32U: case kExprI64AtomicExchange32U: case kExprI64AtomicCompareExchange32U: case kExprS128Load32Splat: case kExprS128Load32Zero: return 2; case kExprI32LoadMem16S: case kExprI32LoadMem16U: case kExprI64LoadMem16S: case kExprI64LoadMem16U: case kExprI32StoreMem16: case kExprI64StoreMem16: case kExprI32AtomicStore16U: case kExprI64AtomicStore16U: case kExprI32AtomicLoad16U: case kExprI64AtomicLoad16U: case kExprI32AtomicAdd16U: case kExprI32AtomicSub16U: case kExprI32AtomicAnd16U: case kExprI32AtomicOr16U: case kExprI32AtomicXor16U: case kExprI32AtomicExchange16U: case kExprI32AtomicCompareExchange16U: case kExprI64AtomicAdd16U: case kExprI64AtomicSub16U: case kExprI64AtomicAnd16U: case kExprI64AtomicOr16U: case kExprI64AtomicXor16U: case kExprI64AtomicExchange16U: case kExprI64AtomicCompareExchange16U: case kExprS128Load16Splat: return 1; case kExprI32LoadMem8S: case kExprI32LoadMem8U: case kExprI64LoadMem8S: case kExprI64LoadMem8U: case kExprI32StoreMem8: case kExprI64StoreMem8: case kExprI32AtomicStore8U: case kExprI64AtomicStore8U: case kExprI32AtomicLoad8U: case kExprI64AtomicLoad8U: case kExprI32AtomicAdd8U: case kExprI32AtomicSub8U: case kExprI32AtomicAnd8U: case kExprI32AtomicOr8U: case kExprI32AtomicXor8U: case kExprI32AtomicExchange8U: case kExprI32AtomicCompareExchange8U: case kExprI64AtomicAdd8U: case kExprI64AtomicSub8U: case kExprI64AtomicAnd8U: case kExprI64AtomicOr8U: case kExprI64AtomicXor8U: case kExprI64AtomicExchange8U: case kExprI64AtomicCompareExchange8U: case kExprS128Load8Splat: return 0; default: return 0; } } template void memop(DataRange* data) { const uint8_t align = data->get() % (max_alignment(memory_op) + 1); const uint32_t offset = data->get(); // Generate the index and the arguments, if any. 
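    // Note: the i32 index (and, for stores, the value operand) is produced by
    // the Generate call below. Atomic and SIMD memory opcodes carry a prefix
    // byte and therefore go through EmitWithPrefix, while plain loads/stores
    // are emitted directly; the alignment and offset immediates follow in
    // both cases.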
Generate(data); if (WasmOpcodes::IsPrefixOpcode(static_cast(memory_op >> 8))) { DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix); builder_->EmitWithPrefix(memory_op); } else { builder_->Emit(memory_op); } builder_->EmitU32V(align); builder_->EmitU32V(offset); } template void atomic_op(DataRange* data) { const uint8_t align = data->get() % (max_alignment(Op) + 1); const uint32_t offset = data->get(); Generate(data); builder_->EmitWithPrefix(Op); builder_->EmitU32V(align); builder_->EmitU32V(offset); } template void op_with_prefix(DataRange* data) { Generate(data); builder_->EmitWithPrefix(Op); } void simd_const(DataRange* data) { builder_->EmitWithPrefix(kExprS128Const); for (int i = 0; i < kSimd128Size; i++) { builder_->EmitByte(data->get()); } } template void simd_lane_op(DataRange* data) { Generate(data); builder_->EmitWithPrefix(Op); builder_->EmitByte(data->get() % lanes); } template void simd_lane_memop(DataRange* data) { // Simd load/store instructions that have a lane immediate. memop(data); builder_->EmitByte(data->get() % lanes); } void simd_shuffle(DataRange* data) { Generate(data); builder_->EmitWithPrefix(kExprI8x16Shuffle); for (int i = 0; i < kSimd128Size; i++) { builder_->EmitByte(static_cast(data->get() % 32)); } } void drop(DataRange* data) { Generate(GetValueType(data, liftoff_as_reference_, static_cast(functions_.size()) + num_structs_ + num_arrays_), data); builder_->Emit(kExprDrop); } enum CallKind { kCallDirect, kCallIndirect, kCallRef }; template void call(DataRange* data) { call(data, ValueType::Primitive(wanted_kind), kCallDirect); } template void call_indirect(DataRange* data) { call(data, ValueType::Primitive(wanted_kind), kCallIndirect); } template void call_ref(DataRange* data) { if (liftoff_as_reference_) { call(data, ValueType::Primitive(wanted_kind), kCallRef); } else { Generate(data); } } void Convert(ValueType src, ValueType dst) { auto idx = [](ValueType t) -> int { switch (t.kind()) { case kI32: return 0; case kI64: return 1; case kF32: return 2; case kF64: return 3; default: UNREACHABLE(); } }; static constexpr WasmOpcode kConvertOpcodes[] = { // {i32, i64, f32, f64} -> i32 kExprNop, kExprI32ConvertI64, kExprI32SConvertF32, kExprI32SConvertF64, // {i32, i64, f32, f64} -> i64 kExprI64SConvertI32, kExprNop, kExprI64SConvertF32, kExprI64SConvertF64, // {i32, i64, f32, f64} -> f32 kExprF32SConvertI32, kExprF32SConvertI64, kExprNop, kExprF32ConvertF64, // {i32, i64, f32, f64} -> f64 kExprF64SConvertI32, kExprF64SConvertI64, kExprF64ConvertF32, kExprNop}; int arr_idx = idx(dst) << 2 | idx(src); builder_->Emit(kConvertOpcodes[arr_idx]); } void call(DataRange* data, ValueType wanted_kind, CallKind call_kind) { uint8_t random_byte = data->get(); int func_index = random_byte % functions_.size(); uint32_t sig_index = functions_[func_index]; const FunctionSig* sig = builder_->builder()->GetSignature(sig_index); // Generate arguments. for (size_t i = 0; i < sig->parameter_count(); ++i) { Generate(sig->GetParam(i), data); } // Emit call. // If the return types of the callee happen to match the return types of the // caller, generate a tail call. // TODO(thibaudm): Re-enable when crbug.com/1269989 is fixed. 
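    // Tail calls are currently disabled via this constant (see the TODO
    // above), so the return-call branch below stays dead code until
    // crbug.com/1269989 is resolved.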
bool use_return_call = false; if (use_return_call && std::equal(sig->returns().begin(), sig->returns().end(), builder_->signature()->returns().begin(), builder_->signature()->returns().end())) { if (call_kind == kCallDirect) { builder_->EmitWithU32V(kExprReturnCall, func_index); } else if (call_kind == kCallIndirect) { // This will not trap because table[func_index] always contains function // func_index. builder_->EmitI32Const(func_index); builder_->EmitWithU32V(kExprReturnCallIndirect, sig_index); // TODO(11954): Use other table indices too. builder_->EmitByte(0); // Table index. } else { GenerateRef(HeapType(sig_index), data); builder_->EmitWithU32V(kExprReturnCallRef, sig_index); } return; } else { if (call_kind == kCallDirect) { builder_->EmitWithU32V(kExprCallFunction, func_index); } else if (call_kind == kCallIndirect) { // This will not trap because table[func_index] always contains function // func_index. builder_->EmitI32Const(func_index); builder_->EmitWithU32V(kExprCallIndirect, sig_index); // TODO(11954): Use other table indices too. builder_->EmitByte(0); // Table index. } else { GenerateRef(HeapType(sig_index), data); builder_->EmitWithU32V(kExprCallRef, sig_index); } } if (sig->return_count() == 0 && wanted_kind != kWasmVoid) { // The call did not generate a value. Thus just generate it here. Generate(wanted_kind, data); return; } if (wanted_kind == kWasmVoid) { // The call did generate values, but we did not want one. for (size_t i = 0; i < sig->return_count(); ++i) { builder_->Emit(kExprDrop); } return; } auto return_types = base::VectorOf(sig->returns().begin(), sig->return_count()); auto wanted_types = base::VectorOf(&wanted_kind, wanted_kind == kWasmVoid ? 0 : 1); ConsumeAndGenerate(return_types, wanted_types, data); } struct Var { uint32_t index; ValueType type = kWasmVoid; Var() = default; Var(uint32_t index, ValueType type) : index(index), type(type) {} bool is_valid() const { return type != kWasmVoid; } }; Var GetRandomLocal(DataRange* data) { uint32_t num_params = static_cast(builder_->signature()->parameter_count()); uint32_t num_locals = static_cast(locals_.size()); if (num_params + num_locals == 0) return {}; uint32_t index = data->get() % (num_params + num_locals); ValueType type = index < num_params ? builder_->signature()->GetParam(index) : locals_[index - num_params]; return {index, type}; } constexpr static bool is_convertible_kind(ValueKind kind) { return kind == kI32 || kind == kI64 || kind == kF32 || kind == kF64; } template void local_op(DataRange* data, WasmOpcode opcode) { static_assert(wanted_kind == kVoid || is_convertible_kind(wanted_kind)); Var local = GetRandomLocal(data); // If there are no locals and no parameters, just generate any value (if a // value is needed), or do nothing. 
if (!local.is_valid() || !is_convertible_kind(local.type.kind())) { if (wanted_kind == kVoid) return; return Generate(data); } if (opcode != kExprLocalGet) Generate(local.type, data); builder_->EmitWithU32V(opcode, local.index); if (wanted_kind != kVoid && local.type.kind() != wanted_kind) { Convert(local.type, ValueType::Primitive(wanted_kind)); } } template void get_local(DataRange* data) { static_assert(wanted_kind != kVoid, "illegal type"); local_op(data, kExprLocalGet); } void set_local(DataRange* data) { local_op(data, kExprLocalSet); } template void tee_local(DataRange* data) { local_op(data, kExprLocalTee); } template void i32_const(DataRange* data) { builder_->EmitI32Const(data->get()); } template void i64_const(DataRange* data) { builder_->EmitI64Const(data->get()); } Var GetRandomGlobal(DataRange* data, bool ensure_mutable) { uint32_t index; if (ensure_mutable) { if (mutable_globals_.empty()) return {}; index = mutable_globals_[data->get() % mutable_globals_.size()]; } else { if (globals_.empty()) return {}; index = data->get() % globals_.size(); } ValueType type = globals_[index]; return {index, type}; } template void global_op(DataRange* data) { static_assert(wanted_kind == kVoid || is_convertible_kind(wanted_kind)); constexpr bool is_set = wanted_kind == kVoid; Var global = GetRandomGlobal(data, is_set); // If there are no globals, just generate any value (if a value is needed), // or do nothing. if (!global.is_valid() || !is_convertible_kind(global.type.kind())) { if (wanted_kind == kVoid) return; return Generate(data); } if (is_set) Generate(global.type, data); builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet, global.index); if (!is_set && global.type.kind() != wanted_kind) { Convert(global.type, ValueType::Primitive(wanted_kind)); } } template void get_global(DataRange* data) { static_assert(wanted_kind != kVoid, "illegal type"); global_op(data); } template void select_with_type(DataRange* data) { static_assert(select_kind != kVoid, "illegal kind for select"); Generate(data); // num_types is always 1. uint8_t num_types = 1; builder_->EmitWithU8U8(kExprSelectWithType, num_types, ValueType::Primitive(select_kind).value_type_code()); } void set_global(DataRange* data) { global_op(data); } void throw_or_rethrow(DataRange* data) { bool rethrow = data->get(); if (rethrow && !catch_blocks_.empty()) { int control_depth = static_cast(blocks_.size() - 1); int catch_index = data->get() % static_cast(catch_blocks_.size()); builder_->EmitWithU32V(kExprRethrow, control_depth - catch_blocks_[catch_index]); } else { int tag = data->get() % builder_->builder()->NumExceptions(); const FunctionSig* exception_sig = builder_->builder()->GetExceptionType(tag); base::Vector exception_types( exception_sig->parameters().begin(), exception_sig->parameter_count()); Generate(exception_types, data); builder_->EmitWithU32V(kExprThrow, tag); } } template void sequence(DataRange* data) { Generate(data); } void current_memory(DataRange* data) { builder_->EmitWithU8(kExprMemorySize, 0); } void grow_memory(DataRange* data); void ref_null(HeapType type, DataRange* data) { builder_->EmitWithI32V(kExprRefNull, type.code()); } bool get_local_ref(HeapType type, DataRange* data, Nullability nullable) { Var local = GetRandomLocal(data); // TODO(manoskouk): Ideally we would check for subtyping here over type // equality, but we don't have a module. // TODO(7748): Remove this condition if non-nullable locals are allowed. 
if (nullable == kNullable && local.is_valid() && local.type.is_object_reference() && type == local.type.heap_type()) { builder_->EmitWithU32V(kExprLocalGet, local.index); return true; } return false; } bool new_object(HeapType type, DataRange* data, Nullability nullable) { DCHECK(liftoff_as_reference_ && type.is_index()); uint32_t index = type.ref_index(); bool new_default = data->get(); if (builder_->builder()->IsStructType(index)) { const StructType* struct_gen = builder_->builder()->GetStructType(index); int field_count = struct_gen->field_count(); bool can_be_defaultable = std::all_of( struct_gen->fields().begin(), struct_gen->fields().end(), [](ValueType type) -> bool { return type.is_defaultable(); }); bool is_mutable = std::all_of( struct_gen->mutabilities().begin(), struct_gen->mutabilities().end(), [](bool mutability) -> bool { return mutability; }); if (new_default && can_be_defaultable && is_mutable) { builder_->EmitWithPrefix(kExprStructNewDefault); builder_->EmitU32V(index); } else { for (int i = 0; i < field_count; i++) { Generate(struct_gen->field(i).Unpacked(), data); } builder_->EmitWithPrefix(kExprStructNew); builder_->EmitU32V(index); } } else if (builder_->builder()->IsArrayType(index)) { bool can_be_defaultable = builder_->builder() ->GetArrayType(index) ->element_type() .is_defaultable(); if (new_default && can_be_defaultable) { Generate(kWasmI32, data); builder_->EmitI32Const(kMaxArraySize); builder_->Emit(kExprI32RemS); builder_->EmitWithPrefix(kExprArrayNewDefault); builder_->EmitU32V(index); } else { Generate( builder_->builder()->GetArrayType(index)->element_type().Unpacked(), data); Generate(kWasmI32, data); builder_->EmitI32Const(kMaxArraySize); builder_->Emit(kExprI32RemS); builder_->EmitWithPrefix(kExprArrayNew); builder_->EmitU32V(index); } } else { // Map the type index to a function index. // TODO(11954. 7748): Once we have type canonicalization, choose a random // function from among those matching the signature (consider function // subtyping?). uint32_t func_index = index - (num_arrays_ + num_structs_); DCHECK_EQ(builder_->builder()->GetSignature(index), builder_->builder()->GetFunction(func_index)->signature()); builder_->EmitWithU32V(kExprRefFunc, func_index); } return true; } template void table_op(std::vector types, DataRange* data, WasmOpcode opcode) { DCHECK(opcode == kExprTableSet || opcode == kExprTableSize || opcode == kExprTableGrow || opcode == kExprTableFill); int num_tables = builder_->builder()->NumTables(); DCHECK_GT(num_tables, 0); int index = data->get() % num_tables; for (size_t i = 0; i < types.size(); i++) { // When passing the reftype by default kWasmFuncRef is used. // Then the type is changed according to its table type. 
if (types[i] == kWasmFuncRef) { types[i] = builder_->builder()->GetTableType(index); } } Generate(base::VectorOf(types), data); if (opcode == kExprTableSet) { builder_->Emit(opcode); } else { builder_->EmitWithPrefix(opcode); } builder_->EmitU32V(index); } bool table_get(HeapType type, DataRange* data, Nullability nullable) { ValueType needed_type = ValueType::RefMaybeNull(type, nullable); int table_count = builder_->builder()->NumTables(); ZoneVector table(builder_->builder()->zone()); for (int i = 0; i < table_count; i++) { if (builder_->builder()->GetTableType(i) == needed_type) { table.push_back(i); } } if (table.empty()) { return false; } int index = data->get() % static_cast(table.size()); Generate(kWasmI32, data); builder_->Emit(kExprTableGet); builder_->EmitU32V(table[index]); return true; } void table_set(DataRange* data) { table_op({kWasmI32, kWasmFuncRef}, data, kExprTableSet); } void table_size(DataRange* data) { table_op({}, data, kExprTableSize); } void table_grow(DataRange* data) { table_op({kWasmFuncRef, kWasmI32}, data, kExprTableGrow); } void table_fill(DataRange* data) { table_op({kWasmI32, kWasmFuncRef, kWasmI32}, data, kExprTableFill); } void table_copy(DataRange* data) { ValueType needed_type = data->get() ? kWasmFuncRef : kWasmExternRef; int table_count = builder_->builder()->NumTables(); ZoneVector table(builder_->builder()->zone()); for (int i = 0; i < table_count; i++) { if (builder_->builder()->GetTableType(i) == needed_type) { table.push_back(i); } } if (table.empty()) { return; } int first_index = data->get() % static_cast(table.size()); int second_index = data->get() % static_cast(table.size()); Generate(kWasmI32, data); Generate(kWasmI32, data); Generate(kWasmI32, data); builder_->EmitWithPrefix(kExprTableCopy); builder_->EmitU32V(table[first_index]); builder_->EmitU32V(table[second_index]); } bool array_get_helper(ValueType value_type, DataRange* data) { WasmModuleBuilder* builder = builder_->builder(); ZoneVector array_indices(builder->zone()); for (uint32_t i = num_structs_; i < num_arrays_ + num_structs_; i++) { DCHECK(builder->IsArrayType(i)); if (builder->GetArrayType(i)->element_type().Unpacked() == value_type) { array_indices.push_back(i); } } if (!array_indices.empty()) { int index = data->get() % static_cast(array_indices.size()); GenerateRef(HeapType(array_indices[index]), data, kNullable); Generate(kWasmI32, data); if (builder->GetArrayType(array_indices[index]) ->element_type() .is_packed()) { builder_->EmitWithPrefix(data->get() ? 
kExprArrayGetS : kExprArrayGetU); } else { builder_->EmitWithPrefix(kExprArrayGet); } builder_->EmitU32V(array_indices[index]); return true; } return false; } template void array_get(DataRange* data) { bool got_array_value = array_get_helper(ValueType::Primitive(wanted_kind), data); if (!got_array_value) { Generate(data); } } bool array_get_ref(HeapType type, DataRange* data, Nullability nullable) { ValueType needed_type = ValueType::RefMaybeNull(type, nullable); return array_get_helper(needed_type, data); } void i31_get(DataRange* data) { if (!liftoff_as_reference_) { Generate(kWasmI32, data); return; } GenerateRef(HeapType(HeapType::kI31), data); builder_->Emit(kExprRefAsNonNull); if (data->get()) { builder_->EmitWithPrefix(kExprI31GetS); } else { builder_->EmitWithPrefix(kExprI31GetU); } } void array_len(DataRange* data) { if (num_arrays_ > 1) { int array_index = (data->get() % num_arrays_) + num_structs_; DCHECK(builder_->builder()->IsArrayType(array_index)); GenerateRef(HeapType(array_index), data); builder_->EmitWithPrefix(kExprArrayLen); } else { Generate(kWasmI32, data); } } void array_set(DataRange* data) { WasmModuleBuilder* builder = builder_->builder(); ZoneVector array_indices(builder->zone()); for (uint32_t i = num_structs_; i < num_arrays_ + num_structs_; i++) { DCHECK(builder->IsArrayType(i)); if (builder->GetArrayType(i)->mutability()) { array_indices.push_back(i); } } if (array_indices.empty()) { return; } int index = data->get() % static_cast(array_indices.size()); GenerateRef(HeapType(array_indices[index]), data); Generate(kWasmI32, data); Generate( builder->GetArrayType(array_indices[index])->element_type().Unpacked(), data); builder_->EmitWithPrefix(kExprArraySet); builder_->EmitU32V(array_indices[index]); } bool struct_get_helper(ValueType value_type, DataRange* data) { WasmModuleBuilder* builder = builder_->builder(); ZoneVector field_index(builder->zone()); ZoneVector struct_index(builder->zone()); for (uint32_t i = 0; i < num_structs_; i++) { DCHECK(builder->IsStructType(i)); int field_count = builder->GetStructType(i)->field_count(); for (int index = 0; index < field_count; index++) { if (builder->GetStructType(i)->field(index) == value_type) { field_index.push_back(index); struct_index.push_back(i); } } } if (!field_index.empty()) { int index = data->get() % static_cast(field_index.size()); GenerateRef(HeapType(struct_index[index]), data, kNullable); if (builder->GetStructType(struct_index[index]) ->field(field_index[index]) .is_packed()) { builder_->EmitWithPrefix(data->get() ? 
kExprStructGetS : kExprStructGetU); } else { builder_->EmitWithPrefix(kExprStructGet); } builder_->EmitU32V(struct_index[index]); builder_->EmitU32V(field_index[index]); return true; } return false; } template void struct_get(DataRange* data) { bool got_struct_value = struct_get_helper(ValueType::Primitive(wanted_kind), data); if (!got_struct_value) { Generate(data); } } bool struct_get_ref(HeapType type, DataRange* data, Nullability nullable) { ValueType needed_type = ValueType::RefMaybeNull(type, nullable); return struct_get_helper(needed_type, data); } void struct_set(DataRange* data) { WasmModuleBuilder* builder = builder_->builder(); if (num_structs_ > 0) { int struct_index = data->get() % num_structs_; DCHECK(builder->IsStructType(struct_index)); const StructType* struct_type = builder->GetStructType(struct_index); ZoneVector field_indices(builder->zone()); for (uint32_t i = 0; i < struct_type->field_count(); i++) { if (struct_type->mutability(i)) { field_indices.push_back(i); } } if (field_indices.empty()) { return; } int field_index = field_indices[data->get() % field_indices.size()]; GenerateRef(HeapType(struct_index), data); Generate(struct_type->field(field_index).Unpacked(), data); builder_->EmitWithPrefix(kExprStructSet); builder_->EmitU32V(struct_index); builder_->EmitU32V(field_index); } } template void ref_is_null(DataRange* data) { GenerateRef(HeapType(HeapType::kAny), data); builder_->Emit(kExprRefIsNull); } void ref_eq(DataRange* data) { if (!liftoff_as_reference_) { Generate(kWasmI32, data); return; } GenerateRef(HeapType(HeapType::kEq), data); GenerateRef(HeapType(HeapType::kEq), data); builder_->Emit(kExprRefEq); } using GenerateFn = void (WasmGenerator::*const)(DataRange*); using GenerateFnWithHeap = bool (WasmGenerator::*const)(HeapType, DataRange*, Nullability); template void GenerateOneOf(GenerateFn (&alternatives)[N], DataRange* data) { static_assert(N < std::numeric_limits::max(), "Too many alternatives. Use a bigger type if needed."); const auto which = data->get(); GenerateFn alternate = alternatives[which % N]; (this->*alternate)(data); } // Returns true if it had succesfully generated the reference // and false otherwise. template bool GenerateOneOf(GenerateFnWithHeap (&alternatives)[N], HeapType type, DataRange* data, Nullability nullability) { static_assert(N < std::numeric_limits::max(), "Too many alternatives. 
Use a bigger type if needed."); int index = data->get() % (N + 1); if (nullability && index == N) { ref_null(type, data); return true; } for (int i = index; i < static_cast(N); i++) { if ((this->*alternatives[i])(type, data, nullability)) { return true; } } for (int i = 0; i < index; i++) { if ((this->*alternatives[i])(type, data, nullability)) { return true; } } if (nullability == kNullable) { ref_null(type, data); return true; } return false; } struct GeneratorRecursionScope { explicit GeneratorRecursionScope(WasmGenerator* gen) : gen(gen) { ++gen->recursion_depth; DCHECK_LE(gen->recursion_depth, kMaxRecursionDepth); } ~GeneratorRecursionScope() { DCHECK_GT(gen->recursion_depth, 0); --gen->recursion_depth; } WasmGenerator* gen; }; public: WasmGenerator(WasmFunctionBuilder* fn, const std::vector& functions, const std::vector& globals, const std::vector& mutable_globals, uint32_t num_structs, uint32_t num_arrays, DataRange* data, bool liftoff_as_reference) : builder_(fn), functions_(functions), globals_(globals), mutable_globals_(mutable_globals), num_structs_(num_structs), num_arrays_(num_arrays), liftoff_as_reference_(liftoff_as_reference) { const FunctionSig* sig = fn->signature(); blocks_.emplace_back(); for (size_t i = 0; i < sig->return_count(); ++i) { blocks_.back().push_back(sig->GetReturn(i)); } constexpr uint32_t kMaxLocals = 32; locals_.resize(data->get() % kMaxLocals); uint32_t num_types = static_cast(functions_.size()) + num_structs_ + num_arrays_; for (ValueType& local : locals_) { local = GetValueTypeHelper(data, liftoff_as_reference_, num_types, num_types, kDisallowNonNullables, kExcludePackedTypes, kIncludeGenerics); fn->AddLocal(local); } } void Generate(ValueType type, DataRange* data); template void Generate(DataRange* data); template void Generate(DataRange* data) { // TODO(clemensb): Implement a more even split. auto first_data = data->split(); Generate(&first_data); Generate(data); } void GenerateRef(HeapType type, DataRange* data, Nullability nullability = kNullable); std::vector GenerateTypes(DataRange* data); void Generate(base::Vector types, DataRange* data); void ConsumeAndGenerate(base::Vector parameter_types, base::Vector return_types, DataRange* data); bool HasSimd() { return has_simd_; } private: WasmFunctionBuilder* builder_; std::vector> blocks_; const std::vector& functions_; std::vector locals_; std::vector globals_; std::vector mutable_globals_; // indexes into {globals_}. 
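  // Current nesting depth of the generator; bounded by kMaxRecursionDepth
  // (defined below) via GeneratorRecursionScope and recursion_limit_reached().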
uint32_t recursion_depth = 0; std::vector catch_blocks_; bool has_simd_; uint32_t num_structs_; uint32_t num_arrays_; bool liftoff_as_reference_; static constexpr uint32_t kMaxRecursionDepth = 64; bool recursion_limit_reached() { return recursion_depth >= kMaxRecursionDepth; } }; template <> void WasmGenerator::block(DataRange* data) { block({}, {}, data); } template <> void WasmGenerator::loop(DataRange* data) { loop({}, {}, data); } template <> void WasmGenerator::Generate(DataRange* data) { GeneratorRecursionScope rec_scope(this); if (recursion_limit_reached() || data->size() == 0) return; constexpr GenerateFn alternatives[] = { &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::block, &WasmGenerator::loop, &WasmGenerator::if_, &WasmGenerator::if_, &WasmGenerator::br, &WasmGenerator::br_if, &WasmGenerator::br_on_null, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::simd_lane_memop, &WasmGenerator::simd_lane_memop, &WasmGenerator::simd_lane_memop, &WasmGenerator::simd_lane_memop, &WasmGenerator::drop, &WasmGenerator::call, &WasmGenerator::call_indirect, &WasmGenerator::call_ref, &WasmGenerator::set_local, &WasmGenerator::set_global, &WasmGenerator::throw_or_rethrow, &WasmGenerator::try_block, &WasmGenerator::struct_set, &WasmGenerator::array_set, &WasmGenerator::table_set, &WasmGenerator::table_fill, &WasmGenerator::table_copy}; GenerateOneOf(alternatives, data); } template <> void WasmGenerator::Generate(DataRange* data) { GeneratorRecursionScope rec_scope(this); if (recursion_limit_reached() || data->size() <= 1) { builder_->EmitI32Const(data->get()); return; } constexpr GenerateFn alternatives[] = { &WasmGenerator::i32_const<1>, &WasmGenerator::i32_const<2>, &WasmGenerator::i32_const<3>, &WasmGenerator::i32_const<4>, &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::block, &WasmGenerator::loop, &WasmGenerator::if_, &WasmGenerator::br_if, &WasmGenerator::br_on_null, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, 
&WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::current_memory, &WasmGenerator::grow_memory, &WasmGenerator::get_local, &WasmGenerator::tee_local, &WasmGenerator::get_global, &WasmGenerator::op, &WasmGenerator::select_with_type, &WasmGenerator::call, &WasmGenerator::call_indirect, &WasmGenerator::call_ref, &WasmGenerator::try_block, &WasmGenerator::i31_get, &WasmGenerator::struct_get, &WasmGenerator::array_get, &WasmGenerator::array_len, &WasmGenerator::ref_is_null, &WasmGenerator::ref_eq, &WasmGenerator::table_size, &WasmGenerator::table_grow}; GenerateOneOf(alternatives, data); } template <> void WasmGenerator::Generate(DataRange* data) { GeneratorRecursionScope rec_scope(this); if (recursion_limit_reached() || data->size() <= 1) { builder_->EmitI64Const(data->get()); return; } constexpr GenerateFn alternatives[] = { &WasmGenerator::i64_const<1>, &WasmGenerator::i64_const<2>, &WasmGenerator::i64_const<3>, &WasmGenerator::i64_const<4>, &WasmGenerator::i64_const<5>, &WasmGenerator::i64_const<6>, &WasmGenerator::i64_const<7>, &WasmGenerator::i64_const<8>, &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::block, &WasmGenerator::loop, &WasmGenerator::if_, &WasmGenerator::br_if, &WasmGenerator::br_on_null, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, 
&WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::atomic_op, &WasmGenerator::simd_lane_op, &WasmGenerator::get_local, &WasmGenerator::tee_local, &WasmGenerator::get_global, &WasmGenerator::op, &WasmGenerator::select_with_type, &WasmGenerator::call, &WasmGenerator::call_indirect, &WasmGenerator::call_ref, &WasmGenerator::try_block, &WasmGenerator::struct_get, &WasmGenerator::array_get}; GenerateOneOf(alternatives, data); } template <> void WasmGenerator::Generate(DataRange* data) { GeneratorRecursionScope rec_scope(this); if (recursion_limit_reached() || data->size() <= sizeof(float)) { builder_->EmitF32Const(data->get()); return; } constexpr GenerateFn alternatives[] = { &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::block, &WasmGenerator::loop, &WasmGenerator::if_, &WasmGenerator::br_if, &WasmGenerator::br_on_null, &WasmGenerator::memop, &WasmGenerator::simd_lane_op, &WasmGenerator::get_local, &WasmGenerator::tee_local, &WasmGenerator::get_global, &WasmGenerator::op, &WasmGenerator::select_with_type, &WasmGenerator::call, &WasmGenerator::call_indirect, &WasmGenerator::call_ref, &WasmGenerator::try_block, &WasmGenerator::struct_get, &WasmGenerator::array_get}; GenerateOneOf(alternatives, data); } template <> void WasmGenerator::Generate(DataRange* data) { GeneratorRecursionScope rec_scope(this); if (recursion_limit_reached() || data->size() <= sizeof(double)) { builder_->EmitF64Const(data->get()); return; } constexpr GenerateFn alternatives[] = { &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::sequence, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::op, &WasmGenerator::block, &WasmGenerator::loop, &WasmGenerator::if_, &WasmGenerator::br_if, &WasmGenerator::br_on_null, &WasmGenerator::memop, &WasmGenerator::simd_lane_op, &WasmGenerator::get_local, &WasmGenerator::tee_local, &WasmGenerator::get_global, &WasmGenerator::op, &WasmGenerator::select_with_type, &WasmGenerator::call, &WasmGenerator::call_indirect, &WasmGenerator::call_ref, &WasmGenerator::try_block, &WasmGenerator::struct_get, &WasmGenerator::array_get}; GenerateOneOf(alternatives, data); } template <> void WasmGenerator::Generate(DataRange* data) { GeneratorRecursionScope rec_scope(this); has_simd_ = true; if (recursion_limit_reached() || data->size() <= sizeof(int32_t)) { // TODO(v8:8460): v128.const is not implemented yet, and we need a way to // "bottom-out", so use a splat to generate this. 
builder_->EmitI32Const(data->get()); builder_->EmitWithPrefix(kExprI8x16Splat); return; } constexpr GenerateFn alternatives[] = { &WasmGenerator::simd_const, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::simd_lane_op, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, 
&WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::op_with_prefix, &WasmGenerator::simd_shuffle, &WasmGenerator::op_with_prefix, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::memop, &WasmGenerator::simd_lane_memop, &WasmGenerator::simd_lane_memop, &WasmGenerator::simd_lane_memop, &WasmGenerator::simd_lane_memop, }; GenerateOneOf(alternatives, data); } void WasmGenerator::grow_memory(DataRange* data) { Generate(data); builder_->EmitWithU8(kExprMemoryGrow, 0); } void WasmGenerator::Generate(ValueType type, DataRange* data) { switch (type.kind()) { case 
kVoid: return Generate(data); case kI32: return Generate(data); case kI64: return Generate(data); case kF32: return Generate(data); case kF64: return Generate(data); case kS128: return Generate(data); case kRefNull: return GenerateRef(type.heap_type(), data, kNullable); case kRef: return GenerateRef(type.heap_type(), data, kNonNullable); default: UNREACHABLE(); } } void WasmGenerator::GenerateRef(HeapType type, DataRange* data, Nullability nullability) { base::Optional rec_scope; if (nullability) { rec_scope.emplace(this); } if (recursion_limit_reached() || data->size() == 0) { if (nullability == kNullable) { ref_null(type, data); return; } // It is ok not to return here because the non-nullable types are not // recursive by construction, so the depth is limited already. } constexpr GenerateFnWithHeap alternatives_indexed_type[] = { &WasmGenerator::new_object, &WasmGenerator::get_local_ref, &WasmGenerator::array_get_ref, &WasmGenerator::struct_get_ref}; constexpr GenerateFnWithHeap alternatives_func_any[] = { &WasmGenerator::table_get, &WasmGenerator::get_local_ref, &WasmGenerator::array_get_ref, &WasmGenerator::struct_get_ref}; constexpr GenerateFnWithHeap alternatives_other[] = { &WasmGenerator::array_get_ref, &WasmGenerator::get_local_ref, &WasmGenerator::struct_get_ref}; switch (type.representation()) { // For abstract types, sometimes generate one of their subtypes. case HeapType::kAny: { // Note: It is possible we land here even without {liftoff_as_reference_}. // In this case, we do not support any subtyping, and just fall back to // directly generating anyref. if (!liftoff_as_reference_) { DCHECK(nullability); GenerateOneOf(alternatives_func_any, type, data, nullability); return; } // Weighed according to the types in the module: // If there are D data types and F function types, the relative // frequencies for dataref is D, for funcref F, and for i31ref and falling // back to anyref 2. const uint8_t num_data_types = num_structs_ + num_arrays_; const uint8_t emit_i31ref = 2; const uint8_t fallback_to_anyref = 2; uint8_t random = data->get() % (num_data_types + emit_i31ref + fallback_to_anyref); // We have to compute this first so in case GenerateOneOf fails // we will continue to fall back on an alternative that is guaranteed // to generate a value of the wanted type. // In order to know which alternative to fall back to in case // GenerateOneOf failed, the random variable is recomputed. if (random >= num_data_types + emit_i31ref) { DCHECK(liftoff_as_reference_); if (GenerateOneOf(alternatives_func_any, type, data, nullability)) { return; } random = data->get() % (num_data_types + emit_i31ref); } if (random < num_data_types) { GenerateRef(HeapType(HeapType::kData), data, nullability); } else { GenerateRef(HeapType(HeapType::kI31), data, nullability); } return; } case HeapType::kArray: { DCHECK(liftoff_as_reference_); constexpr uint8_t fallback_to_dataref = 1; uint8_t random = data->get() % (num_arrays_ + fallback_to_dataref); // Try generating one of the alternatives and continue to the rest of the // methods in case it fails. 
void WasmGenerator::GenerateRef(HeapType type, DataRange* data,
                                Nullability nullability) {
  base::Optional rec_scope;
  if (nullability) {
    rec_scope.emplace(this);
  }
  if (recursion_limit_reached() || data->size() == 0) {
    if (nullability == kNullable) {
      ref_null(type, data);
      return;
    }
    // It is ok not to return here because the non-nullable types are not
    // recursive by construction, so the depth is limited already.
  }

  constexpr GenerateFnWithHeap alternatives_indexed_type[] = {
      &WasmGenerator::new_object, &WasmGenerator::get_local_ref,
      &WasmGenerator::array_get_ref, &WasmGenerator::struct_get_ref};

  constexpr GenerateFnWithHeap alternatives_func_any[] = {
      &WasmGenerator::table_get, &WasmGenerator::get_local_ref,
      &WasmGenerator::array_get_ref, &WasmGenerator::struct_get_ref};

  constexpr GenerateFnWithHeap alternatives_other[] = {
      &WasmGenerator::array_get_ref, &WasmGenerator::get_local_ref,
      &WasmGenerator::struct_get_ref};

  switch (type.representation()) {
    // For abstract types, sometimes generate one of their subtypes.
    case HeapType::kAny: {
      // Note: It is possible we land here even without
      // {liftoff_as_reference_}. In this case, we do not support any
      // subtyping, and just fall back to directly generating anyref.
      if (!liftoff_as_reference_) {
        DCHECK(nullability);
        GenerateOneOf(alternatives_func_any, type, data, nullability);
        return;
      }
      // Weighted according to the types in the module:
      // If there are D data types and F function types, the relative
      // frequency for dataref is D, for funcref F, and 2 each for i31ref and
      // for falling back to anyref.
      const uint8_t num_data_types = num_structs_ + num_arrays_;
      const uint8_t emit_i31ref = 2;
      const uint8_t fallback_to_anyref = 2;
      uint8_t random =
          data->get() % (num_data_types + emit_i31ref + fallback_to_anyref);
      // {random} is computed before calling GenerateOneOf so that, if
      // GenerateOneOf fails, we can still fall back on an alternative that is
      // guaranteed to generate a value of the wanted type. In that case,
      // {random} is recomputed over the guaranteed alternatives only.
      if (random >= num_data_types + emit_i31ref) {
        DCHECK(liftoff_as_reference_);
        if (GenerateOneOf(alternatives_func_any, type, data, nullability)) {
          return;
        }
        random = data->get() % (num_data_types + emit_i31ref);
      }
      if (random < num_data_types) {
        GenerateRef(HeapType(HeapType::kData), data, nullability);
      } else {
        GenerateRef(HeapType(HeapType::kI31), data, nullability);
      }
      return;
    }
    case HeapType::kArray: {
      DCHECK(liftoff_as_reference_);
      constexpr uint8_t fallback_to_dataref = 1;
      uint8_t random = data->get() % (num_arrays_ + fallback_to_dataref);
      // Try generating one of the alternatives and continue to the rest of
      // the methods in case it fails.
      if (random >= num_arrays_) {
        if (GenerateOneOf(alternatives_other, type, data, nullability)) return;
        random = data->get() % num_arrays_;
      }
      GenerateRef(HeapType(random), data, nullability);
      return;
    }
    case HeapType::kData: {
      DCHECK(liftoff_as_reference_);
      constexpr uint8_t fallback_to_dataref = 2;
      uint8_t random =
          data->get() % (num_arrays_ + num_structs_ + fallback_to_dataref);
      // Try generating one of the alternatives and continue to the rest of
      // the methods in case it fails.
      if (random >= num_arrays_ + num_structs_) {
        if (GenerateOneOf(alternatives_other, type, data, nullability)) {
          return;
        }
        random = data->get() % (num_arrays_ + num_structs_);
      }
      GenerateRef(HeapType(random), data, nullability);
      return;
    }
    case HeapType::kEq: {
      DCHECK(liftoff_as_reference_);
      const uint8_t num_types = num_arrays_ + num_structs_;
      const uint8_t emit_i31ref = 2;
      constexpr uint8_t fallback_to_eqref = 1;
      uint8_t random =
          data->get() % (num_types + emit_i31ref + fallback_to_eqref);
      // Try generating one of the alternatives and continue to the rest of
      // the methods in case it fails.
      if (random >= num_types + emit_i31ref) {
        if (GenerateOneOf(alternatives_other, type, data, nullability)) {
          return;
        }
        random = data->get() % (num_types + emit_i31ref);
      }
      if (random < num_types) {
        GenerateRef(HeapType(random), data, nullability);
      } else {
        GenerateRef(HeapType(HeapType::kI31), data, nullability);
      }
      return;
    }
    case HeapType::kFunc: {
      uint32_t random = data->get() % (functions_.size() + 1);
      // Try generating one of the alternatives and continue to the rest of
      // the methods in case it fails.
      if (random >= functions_.size()) {
        if (GenerateOneOf(alternatives_func_any, type, data, nullability)) {
          return;
        }
        random = data->get() % functions_.size();
      }
      if (liftoff_as_reference_) {
        // Only reduce to indexed type with liftoff as reference.
        uint32_t signature_index = functions_[random];
        DCHECK(builder_->builder()->IsSignature(signature_index));
        GenerateRef(HeapType(signature_index), data, nullability);
      } else {
        // If the interpreter is used as reference, generate a ref.func
        // directly.
        builder_->EmitWithU32V(kExprRefFunc, random);
      }
      return;
    }
    case HeapType::kI31: {
      DCHECK(liftoff_as_reference_);
      // Try generating one of the alternatives and continue to the rest of
      // the methods in case it fails.
      if (data->get() &&
          GenerateOneOf(alternatives_other, type, data, nullability)) {
        return;
      }
      Generate(kWasmI32, data);
      builder_->EmitWithPrefix(kExprI31New);
      return;
    }
    case HeapType::kExtern:
    case HeapType::kNoExtern:
    case HeapType::kNoFunc:
    case HeapType::kNone:
      DCHECK(nullability == Nullability::kNullable);
      ref_null(type, data);
      return;
    default:
      // Indexed type.
      DCHECK(type.is_index());
      DCHECK(liftoff_as_reference_);
      GenerateOneOf(alternatives_indexed_type, type, data, nullability);
      return;
  }
  UNREACHABLE();
}

std::vector WasmGenerator::GenerateTypes(DataRange* data) {
  std::vector types;
  int num_params = int{data->get()} % (kMaxParameters + 1);
  for (int i = 0; i < num_params; ++i) {
    types.push_back(GetValueType(
        data, liftoff_as_reference_,
        num_structs_ + num_arrays_ + static_cast(functions_.size())));
  }
  return types;
}
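// Generates code that produces one value of each type in {types}, in order.
// Longer type vectors are handled by splitting them in two and generating
// each half recursively; e.g. for result types [i32, i64, f32], a split index
// of 1 generates the i32 from a carved-off slice of the input data and the
// remaining [i64, f32] from the rest.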
void WasmGenerator::Generate(base::Vector types, DataRange* data) {
  // Maybe emit a multi-value block with the expected return type. Use a
  // non-default value to indicate block generation to avoid recursion when we
  // reach the end of the data.
  bool generate_block = data->get() % 32 == 1;
  if (generate_block) {
    GeneratorRecursionScope rec_scope(this);
    if (!recursion_limit_reached()) {
      const auto param_types = GenerateTypes(data);
      Generate(base::VectorOf(param_types), data);
      any_block(base::VectorOf(param_types), types, data);
      return;
    }
  }

  if (types.size() == 0) {
    Generate(kWasmVoid, data);
    return;
  }
  if (types.size() == 1) {
    Generate(types[0], data);
    return;
  }

  // Split the types in two halves and recursively generate each half. Each
  // half is non-empty to ensure termination.
  size_t split_index = data->get() % (types.size() - 1) + 1;
  base::Vector lower_half = types.SubVector(0, split_index);
  base::Vector upper_half = types.SubVector(split_index, types.size());
  DataRange first_range = data->split();
  Generate(lower_half, &first_range);
  Generate(upper_half, data);
}

// Emit code to match an arbitrary signature.
// TODO(11954): Add the missing reference type conversion/upcasting.
void WasmGenerator::ConsumeAndGenerate(base::Vector param_types,
                                       base::Vector return_types,
                                       DataRange* data) {
  // This numeric conversion logic consists of picking exactly one index among
  // the primitive parameter values on the stack, dropping all the values
  // above it, folding away the values below it, and converting the remaining
  // value to the wanted first return type. If no suitable value is found, the
  // results are generated from scratch.
  auto primitive = [](ValueType t) -> bool {
    switch (t.kind()) {
      case kI32:
      case kI64:
      case kF32:
      case kF64:
        return true;
      default:
        return false;
    }
  };

  if (return_types.size() == 0 || param_types.size() == 0 ||
      !primitive(return_types[0])) {
    for (unsigned i = 0; i < param_types.size(); i++) {
      builder_->Emit(kExprDrop);
    }
    Generate(return_types, data);
    return;
  }

  int bottom_primitives = 0;
  while (static_cast(param_types.size()) > bottom_primitives &&
         primitive(param_types[bottom_primitives])) {
    bottom_primitives++;
  }
  int return_index =
      bottom_primitives > 0 ? (data->get() % bottom_primitives) : -1;

  for (int i = static_cast(param_types.size() - 1); i > return_index; --i) {
    builder_->Emit(kExprDrop);
  }
  for (int i = return_index; i > 0; --i) {
    Convert(param_types[i], param_types[i - 1]);
    builder_->EmitI32Const(0);
    builder_->Emit(kExprSelect);
  }
  DCHECK(!return_types.empty());
  if (return_index >= 0) {
    Convert(param_types[0], return_types[0]);
    Generate(return_types + 1, data);
  } else {
    Generate(return_types, data);
  }
}
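// Derives a random function or exception signature from the input data;
// exception signatures never have returns. As a worked example: with
// kMaxParameters == 15, a next input byte of 0x21 (= 33) yields
// 33 % 16 == 1 parameter.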
enum SigKind { kFunctionSig, kExceptionSig };

FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind,
                         bool liftoff_as_reference, int num_types) {
  // Generate enough parameters to spill some to the stack.
  int num_params = int{data->get()} % (kMaxParameters + 1);
  int num_returns =
      sig_kind == kFunctionSig ? int{data->get()} % (kMaxReturns + 1) : 0;

  FunctionSig::Builder builder(zone, num_returns, num_params);
  for (int i = 0; i < num_returns; ++i) {
    builder.AddReturn(GetValueType(data, liftoff_as_reference, num_types));
  }
  for (int i = 0; i < num_params; ++i) {
    builder.AddParam(GetValueType(data, liftoff_as_reference, num_types));
  }
  return builder.Build();
}

WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
                              ValueType type,
                              uint32_t num_struct_and_array_types);

WasmInitExpr GenerateStructNewInitExpr(Zone* zone, WasmModuleBuilder* builder,
                                       uint32_t index,
                                       uint32_t num_struct_and_array_types) {
  const StructType* struct_type = builder->GetStructType(index);
  ZoneVector* elements = zone->New>(zone);
  int field_count = struct_type->field_count();
  for (int field_index = 0; field_index < field_count; field_index++) {
    elements->push_back(GenerateInitExpr(zone, builder,
                                         struct_type->field(field_index),
                                         num_struct_and_array_types));
  }
  return WasmInitExpr::StructNew(index, elements);
}

WasmInitExpr GenerateInitExpr(Zone* zone, WasmModuleBuilder* builder,
                              ValueType type,
                              uint32_t num_struct_and_array_types) {
  switch (type.kind()) {
    case kRefNull:
      return WasmInitExpr::RefNullConst(type.heap_type().representation());
    case kI8:
    case kI16:
    case kI32:
      return WasmInitExpr(int32_t{0});
    case kI64:
      return WasmInitExpr(int64_t{0});
    case kF32:
      return WasmInitExpr(0.0f);
    case kF64:
      return WasmInitExpr(0.0);
    case kS128: {
      uint8_t s128_const[kSimd128Size] = {0};
      return WasmInitExpr(s128_const);
    }
    case kRef: {
      switch (type.heap_type().representation()) {
        case HeapType::kData:
        case HeapType::kAny:
        case HeapType::kEq: {
          // We materialize all these types with a struct because they are all
          // its supertypes.
          DCHECK(builder->IsStructType(0));
          return GenerateStructNewInitExpr(zone, builder, 0,
                                           num_struct_and_array_types);
        }
        case HeapType::kFunc:
          // We just pick the function at index 0.
          DCHECK_GT(builder->NumFunctions(), 0);
          return WasmInitExpr::RefFuncConst(0);
        default: {
          uint32_t index = type.ref_index();
          if (builder->IsStructType(index)) {
            return GenerateStructNewInitExpr(zone, builder, index,
                                             num_struct_and_array_types);
          }
          if (builder->IsArrayType(index)) {
            ZoneVector* elements = zone->New>(zone);
            elements->push_back(GenerateInitExpr(
                zone, builder, builder->GetArrayType(index)->element_type(),
                num_struct_and_array_types));
            return WasmInitExpr::ArrayNewFixed(index, elements);
          }
          if (builder->IsSignature(index)) {
            // Transform from signature index to function index.
            return WasmInitExpr::RefFuncConst(index -
                                              num_struct_and_array_types);
          }
          UNREACHABLE();
        }
      }
    }
    case kVoid:
    case kRtt:
    case kBottom:
      UNREACHABLE();
  }
}

}  // namespace
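// Assembles a complete module from the fuzzer input: struct/array types and
// function signatures first, then exceptions, function declarations, globals
// and tables, and finally one generated body per function. All counts and
// choices are drawn from the same DataRange that later feeds the function
// bodies.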
class WasmCompileFuzzer : public WasmExecutionFuzzer {
  bool GenerateModule(Isolate* isolate, Zone* zone,
                      base::Vector data, ZoneBuffer* buffer,
                      bool liftoff_as_reference) override {
    TestSignatures sigs;

    WasmModuleBuilder builder(zone);

    DataRange range(data);
    std::vector function_signatures;

    // Add struct and array types first so that we get a chance to generate
    // these types in function signatures.
    // Currently, WasmGenerator assumes this order for struct/array/signature
    // definitions.
    uint8_t num_structs = 0;
    uint8_t num_arrays = 0;
    static_assert(kMaxFunctions >= 1, "need min. 1 function");
    uint8_t num_functions = 1 + (range.get() % kMaxFunctions);
    uint16_t num_types = num_functions;

    if (liftoff_as_reference) {
      // We need at least one struct/array in order to support WasmInitExpr
      // for kData, kAny and kEq.
      num_structs = 1 + range.get() % kMaxStructs;
      num_arrays = range.get() % (kMaxArrays + 1);
      num_types += num_structs + num_arrays;

      for (int struct_index = 0; struct_index < num_structs; struct_index++) {
        uint8_t num_fields = range.get() % (kMaxStructFields + 1);
        StructType::Builder struct_builder(zone, num_fields);
        for (int field_index = 0; field_index < num_fields; field_index++) {
          // Notes:
          // - We only allow a type to have non-nullable fields of types that
          //   are defined earlier. This way we avoid infinite non-nullable
          //   constructions. Also relevant for arrays and functions.
          // - Currently, nullable fields may also only reference types that
          //   are defined earlier. The reason is that every type can only
          //   reference types in its own or earlier recursive groups, and we
          //   do not support recursive groups yet. Also relevant for arrays
          //   and functions. TODO(7748): Change the number of nullable types
          //   once we support rec. groups.
          // - We exclude the generic types anyref, dataref, and eqref from
          //   the fields of struct 0. This is because in GenerateInitExpr we
          //   materialize these types with (ref 0), and having such fields in
          //   struct 0 would produce an infinite recursion.
          ValueType type = GetValueTypeHelper(
              &range, true, builder.NumTypes(), builder.NumTypes(),
              kAllowNonNullables, kIncludePackedTypes,
              struct_index != 0 ? kIncludeGenerics : kExcludeGenerics);
          bool mutability = range.get();
          struct_builder.AddField(type, mutability);
        }
        StructType* struct_fuz = struct_builder.Build();
        builder.AddStructType(struct_fuz);
      }

      for (int array_index = 0; array_index < num_arrays; array_index++) {
        ValueType type = GetValueTypeHelper(
            &range, true, builder.NumTypes(), builder.NumTypes(),
            kAllowNonNullables, kIncludePackedTypes, kIncludeGenerics);
        ArrayType* array_fuz = zone->New(type, true);
        builder.AddArrayType(array_fuz);
      }
    }

    // We keep the signature for the first (main) function constant.
    function_signatures.push_back(builder.ForceAddSignature(sigs.i_iii()));

    for (uint8_t i = 1; i < num_functions; i++) {
      FunctionSig* sig = GenerateSig(zone, &range, kFunctionSig,
                                     liftoff_as_reference, builder.NumTypes());
      uint32_t signature_index = builder.ForceAddSignature(sig);
      function_signatures.push_back(signature_index);
    }

    int num_exceptions = 1 + (range.get() % kMaxExceptions);
    for (int i = 0; i < num_exceptions; ++i) {
      FunctionSig* sig = GenerateSig(zone, &range, kExceptionSig,
                                     liftoff_as_reference, num_types);
      builder.AddException(sig);
    }

    // Generate function declarations before tables. This will be needed once
    // we have typed-function tables.
    std::vector functions;
    for (uint8_t i = 0; i < num_functions; i++) {
      const FunctionSig* sig = builder.GetSignature(function_signatures[i]);
      // If we are using wasm-gc, we cannot allow the signature normalization
      // that adding a function by {FunctionSig} performs, because we emit
      // everything in one recursive group, which blocks signature
      // canonicalization.
      // TODO(7748): Relax this when we implement type canonicalization and
      // proper recursive-group support.
      functions.push_back(liftoff_as_reference
                              ? builder.AddFunction(function_signatures[i])
                              : builder.AddFunction(sig));
    }

    int num_globals = range.get() % (kMaxGlobals + 1);
    std::vector globals;
    std::vector mutable_globals;
    globals.reserve(num_globals);
    mutable_globals.reserve(num_globals);

    for (int i = 0; i < num_globals; ++i) {
      ValueType type = GetValueTypeHelper(
          &range, liftoff_as_reference, num_types, num_types,
          kAllowNonNullables, kExcludePackedTypes, kIncludeGenerics);
      // 1/8 of globals are immutable.
      const bool mutability = (range.get() % 8) != 0;
      builder.AddGlobal(
          type, mutability,
          GenerateInitExpr(zone, &builder, type,
                           static_cast(num_structs + num_arrays)));
      globals.push_back(type);
      if (mutability) mutable_globals.push_back(static_cast(i));
    }

    // Generate tables before function bodies, so they are available for table
    // operations. Always generate at least one table for call_indirect.
    int num_tables = range.get() % kMaxTables + 1;
    for (int i = 0; i < num_tables; i++) {
      // Table 0 has to reference all functions in the program. This is so
      // that all functions count as declared so they can be referenced with
      // ref.func.
      // TODO(11954): Consider removing this restriction.
      uint32_t min_size = i == 0 ? num_functions : range.get() % kMaxTableSize;
      uint32_t max_size = range.get() % (kMaxTableSize - min_size) + min_size;
      // Table 0 is always funcref.
      // TODO(11954): Remove this requirement once we support call_indirect
      // with other table indices.
      // TODO(11954): Support typed function tables.
      bool use_funcref = i == 0 || range.get();
      ValueType type = use_funcref ? kWasmFuncRef : kWasmExternRef;
      uint32_t table_index = builder.AddTable(type, min_size, max_size);
      if (type == kWasmFuncRef) {
        // For function tables, initialize them with functions from the
        // program. Currently, the fuzzer assumes that every function table
        // contains the functions in the program in the order they are
        // defined.
        // TODO(11954): Consider generalizing this.
        WasmModuleBuilder::WasmElemSegment segment(zone, kWasmFuncRef,
                                                   table_index,
                                                   WasmInitExpr(0));
        for (int entry_index = 0; entry_index < static_cast(min_size);
             entry_index++) {
          segment.entries.emplace_back(
              WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry,
              entry_index % num_functions);
        }
        builder.AddElementSegment(std::move(segment));
      }
    }
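    // Each function body is generated from its own slice of the remaining
    // input ({range.split()}), so the bytes driving one body are never reused
    // for another.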
    for (int i = 0; i < num_functions; ++i) {
      WasmFunctionBuilder* f = functions[i];
      DataRange function_range = range.split();
      WasmGenerator gen(f, function_signatures, globals, mutable_globals,
                        num_structs, num_arrays, &function_range,
                        liftoff_as_reference);
      const FunctionSig* sig = f->signature();
      base::Vector return_types(sig->returns().begin(),
                                sig->return_count());
      gen.Generate(return_types, &function_range);
      if (!CheckHardwareSupportsSimd() && gen.HasSimd()) return false;
      f->Emit(kExprEnd);
      if (i == 0) builder.AddExport(base::CStrVector("main"), f);
    }

    builder.SetMaxMemorySize(32);
    builder.WriteTo(buffer);
    return true;
  }
};

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  constexpr bool require_valid = true;
  EXPERIMENTAL_FLAG_SCOPE(typed_funcref);
  EXPERIMENTAL_FLAG_SCOPE(gc);
  EXPERIMENTAL_FLAG_SCOPE(simd);
  EXPERIMENTAL_FLAG_SCOPE(eh);
  WasmCompileFuzzer().FuzzWasmModule({data, size}, require_valid);
  return 0;
}

}  // namespace fuzzer
}  // namespace wasm
}  // namespace internal
}  // namespace v8