// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "include/v8.h"
#include "src/execution/isolate.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes-inl.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
#include "test/fuzzer/wasm-fuzzer-common.h"

namespace v8 {
namespace internal {
namespace wasm {
namespace fuzzer {

namespace {

constexpr int kMaxFunctions = 4;
constexpr int kMaxGlobals = 64;
constexpr int kMaxParameters = 15;
constexpr int kMaxReturns = 15;

class DataRange {
  Vector<const uint8_t> data_;

 public:
  explicit DataRange(Vector<const uint8_t> data) : data_(data) {}
  DataRange(const DataRange&) = delete;
  DataRange& operator=(const DataRange&) = delete;

  // Don't accidentally pass DataRange by value. This will reuse bytes and
  // might lead to OOM because the end might not be reached.
  // Define move constructor and move assignment, disallow copy constructor
  // and copy assignment (above).
  DataRange(DataRange&& other) V8_NOEXCEPT : DataRange(other.data_) {
    other.data_ = {};
  }
  DataRange& operator=(DataRange&& other) V8_NOEXCEPT {
    data_ = other.data_;
    other.data_ = {};
    return *this;
  }

  size_t size() const { return data_.size(); }

  DataRange split() {
    uint16_t num_bytes = get<uint16_t>() % std::max(size_t{1}, data_.size());
    DataRange split(data_.SubVector(0, num_bytes));
    data_ += num_bytes;
    return split;
  }

  template <typename T, size_t max_bytes = sizeof(T)>
  T get() {
    // Disabled for bool on purpose: assigning a bool via memcpy from
    // randomized bytes is undefined behavior. With -O3, release builds then
    // observe a different result of get<bool>() than debug builds, which
    // eventually makes the generated code diverge between configurations and
    // raises various unrecoverable errors at runtime.
    STATIC_ASSERT(!(std::is_same<T, bool>::value));
    STATIC_ASSERT(max_bytes <= sizeof(T));
    // We want to support the case where we have less than sizeof(T) bytes
    // remaining in the slice. For example, if we emit an i32 constant, it's
    // okay if we don't have a full four bytes available, we'll just use what
    // we have. We aren't concerned about endianness because we are generating
    // arbitrary expressions.
    const size_t num_bytes = std::min(max_bytes, data_.size());
    T result = T();
    memcpy(&result, data_.begin(), num_bytes);
    data_ += num_bytes;
    return result;
  }
};

ValueType GetValueType(DataRange* data) {
  // TODO(v8:8460): We do not add kWasmS128 here yet because this method is
  // used to generate globals, and since we do not have v128.const yet, there
  // is no way to specify an initial value for a global of this type.
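  // One input byte steers the choice; the modulo selects each of the four
  // numeric core types with (nearly) equal probability. For example, a data
  // byte of 0x07 yields 7 % 4 == 3, i.e. kWasmF64.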
  switch (data->get<uint8_t>() % 4) {
    case 0:
      return kWasmI32;
    case 1:
      return kWasmI64;
    case 2:
      return kWasmF32;
    case 3:
      return kWasmF64;
  }
  UNREACHABLE();
}

class WasmGenerator {
  template <WasmOpcode Op, ValueType::Kind... Args>
  void op(DataRange* data) {
    Generate<Args...>(data);
    builder_->Emit(Op);
  }

  class V8_NODISCARD BlockScope {
   public:
    BlockScope(WasmGenerator* gen, WasmOpcode block_type,
               Vector<const ValueType> param_types,
               Vector<const ValueType> result_types,
               Vector<const ValueType> br_types)
        : gen_(gen) {
      gen->blocks_.emplace_back(br_types.begin(), br_types.end());
      if (param_types.size() == 0 && result_types.size() == 0) {
        gen->builder_->EmitWithU8(block_type, kWasmStmt.value_type_code());
        return;
      }
      if (param_types.size() == 0 && result_types.size() == 1) {
        gen->builder_->EmitWithU8(block_type,
                                  result_types[0].value_type_code());
        return;
      }
      // Multi-value block.
      Zone* zone = gen->builder_->builder()->zone();
      FunctionSig::Builder builder(zone, result_types.size(),
                                   param_types.size());
      for (auto& type : param_types) {
        DCHECK_NE(type, kWasmStmt);
        builder.AddParam(type);
      }
      for (auto& type : result_types) {
        DCHECK_NE(type, kWasmStmt);
        builder.AddReturn(type);
      }
      FunctionSig* sig = builder.Build();
      int sig_id = gen->builder_->builder()->AddSignature(sig);
      gen->builder_->EmitWithI32V(block_type, sig_id);
    }

    ~BlockScope() {
      gen_->builder_->Emit(kExprEnd);
      gen_->blocks_.pop_back();
    }

   private:
    WasmGenerator* const gen_;
  };

  void block(Vector<const ValueType> param_types,
             Vector<const ValueType> return_types, DataRange* data) {
    BlockScope block_scope(this, kExprBlock, param_types, return_types,
                           return_types);
    ConsumeAndGenerate(param_types, return_types, data);
  }

  template <ValueType::Kind T>
  void block(DataRange* data) {
    block({}, VectorOf({ValueType::Primitive(T)}), data);
  }

  void loop(Vector<const ValueType> param_types,
            Vector<const ValueType> return_types, DataRange* data) {
    // A branch to a loop targets the loop header, so the types a branch has
    // to provide are the loop's parameter types, not its result types.
    BlockScope block_scope(this, kExprLoop, param_types, return_types,
                           param_types);
    ConsumeAndGenerate(param_types, return_types, data);
  }

  template <ValueType::Kind T>
  void loop(DataRange* data) {
    loop({}, VectorOf({ValueType::Primitive(T)}), data);
  }

  enum IfType { kIf, kIfElse };

  void if_(Vector<const ValueType> param_types,
           Vector<const ValueType> return_types, IfType type,
           DataRange* data) {
    // A one-armed "if" is only valid if the input and output types are the
    // same.
    DCHECK_IMPLIES(type == kIf, param_types == return_types);
    Generate(kWasmI32, data);
    BlockScope block_scope(this, kExprIf, param_types, return_types,
                           return_types);
    ConsumeAndGenerate(param_types, return_types, data);
    if (type == kIfElse) {
      builder_->Emit(kExprElse);
      ConsumeAndGenerate(param_types, return_types, data);
    }
  }

  template <ValueType::Kind T, IfType type>
  void if_(DataRange* data) {
    static_assert(T == ValueType::kStmt || type == kIfElse,
                  "if without else cannot produce a value");
    if_({},
        T == ValueType::kStmt ? Vector<ValueType>{}
                              : VectorOf({ValueType::Primitive(T)}),
        type, data);
  }

  void any_block(Vector<const ValueType> param_types,
                 Vector<const ValueType> return_types, DataRange* data) {
    uint8_t block_type = data->get<uint8_t>() % 4;
    switch (block_type) {
      case 0:
        block(param_types, return_types, data);
        return;
      case 1:
        loop(param_types, return_types, data);
        return;
      case 2:
        if (param_types == return_types) {
          if_({}, {}, kIf, data);
          return;
        }
        V8_FALLTHROUGH;
      case 3:
        if_(param_types, return_types, kIfElse, data);
        return;
    }
  }

  void br(DataRange* data) {
    // There is always at least the block representing the function body.
    DCHECK(!blocks_.empty());
    const uint32_t target_block = data->get<uint32_t>() % blocks_.size();
    const auto break_types = blocks_[target_block];

    Generate(VectorOf(break_types), data);
    builder_->EmitWithI32V(
        kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
  }

  template <ValueType::Kind wanted_type>
  void br_if(DataRange* data) {
    // There is always at least the block representing the function body.
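    // {target_block} is an absolute index into {blocks_}, whereas the branch
    // immediate is relative: depth 0 denotes the innermost enclosing block.
    // The arithmetic below therefore flips the index around.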
    DCHECK(!blocks_.empty());
    const uint32_t target_block = data->get<uint32_t>() % blocks_.size();
    const auto break_types = VectorOf(blocks_[target_block]);

    Generate(break_types, data);
    Generate(kWasmI32, data);
    builder_->EmitWithI32V(
        kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
    ConsumeAndGenerate(
        break_types,
        wanted_type == ValueType::kStmt
            ? Vector<ValueType>{}
            : VectorOf({ValueType::Primitive(wanted_type)}),
        data);
  }

  // TODO(eholk): make this function constexpr once gcc supports it.
  static uint8_t max_alignment(WasmOpcode memop) {
    switch (memop) {
      case kExprS128LoadMem:
      case kExprS128StoreMem:
        return 4;
      case kExprI64LoadMem:
      case kExprF64LoadMem:
      case kExprI64StoreMem:
      case kExprF64StoreMem:
      case kExprI64AtomicStore:
      case kExprI64AtomicLoad:
      case kExprI64AtomicAdd:
      case kExprI64AtomicSub:
      case kExprI64AtomicAnd:
      case kExprI64AtomicOr:
      case kExprI64AtomicXor:
      case kExprI64AtomicExchange:
      case kExprI64AtomicCompareExchange:
      case kExprS128Load8x8S:
      case kExprS128Load8x8U:
      case kExprS128Load16x4S:
      case kExprS128Load16x4U:
      case kExprS128Load32x2S:
      case kExprS128Load32x2U:
      case kExprS128Load64Splat:
      case kExprS128Load64Zero:
        return 3;
      case kExprI32LoadMem:
      case kExprI64LoadMem32S:
      case kExprI64LoadMem32U:
      case kExprF32LoadMem:
      case kExprI32StoreMem:
      case kExprI64StoreMem32:
      case kExprF32StoreMem:
      case kExprI32AtomicStore:
      case kExprI64AtomicStore32U:
      case kExprI32AtomicLoad:
      case kExprI64AtomicLoad32U:
      case kExprI32AtomicAdd:
      case kExprI32AtomicSub:
      case kExprI32AtomicAnd:
      case kExprI32AtomicOr:
      case kExprI32AtomicXor:
      case kExprI32AtomicExchange:
      case kExprI32AtomicCompareExchange:
      case kExprI64AtomicAdd32U:
      case kExprI64AtomicSub32U:
      case kExprI64AtomicAnd32U:
      case kExprI64AtomicOr32U:
      case kExprI64AtomicXor32U:
      case kExprI64AtomicExchange32U:
      case kExprI64AtomicCompareExchange32U:
      case kExprS128Load32Splat:
      case kExprS128Load32Zero:
        return 2;
      case kExprI32LoadMem16S:
      case kExprI32LoadMem16U:
      case kExprI64LoadMem16S:
      case kExprI64LoadMem16U:
      case kExprI32StoreMem16:
      case kExprI64StoreMem16:
      case kExprI32AtomicStore16U:
      case kExprI64AtomicStore16U:
      case kExprI32AtomicLoad16U:
      case kExprI64AtomicLoad16U:
      case kExprI32AtomicAdd16U:
      case kExprI32AtomicSub16U:
      case kExprI32AtomicAnd16U:
      case kExprI32AtomicOr16U:
      case kExprI32AtomicXor16U:
      case kExprI32AtomicExchange16U:
      case kExprI32AtomicCompareExchange16U:
      case kExprI64AtomicAdd16U:
      case kExprI64AtomicSub16U:
      case kExprI64AtomicAnd16U:
      case kExprI64AtomicOr16U:
      case kExprI64AtomicXor16U:
      case kExprI64AtomicExchange16U:
      case kExprI64AtomicCompareExchange16U:
      case kExprS128Load16Splat:
        return 1;
      case kExprI32LoadMem8S:
      case kExprI32LoadMem8U:
      case kExprI64LoadMem8S:
      case kExprI64LoadMem8U:
      case kExprI32StoreMem8:
      case kExprI64StoreMem8:
      case kExprI32AtomicStore8U:
      case kExprI64AtomicStore8U:
      case kExprI32AtomicLoad8U:
      case kExprI64AtomicLoad8U:
      case kExprI32AtomicAdd8U:
      case kExprI32AtomicSub8U:
      case kExprI32AtomicAnd8U:
      case kExprI32AtomicOr8U:
      case kExprI32AtomicXor8U:
      case kExprI32AtomicExchange8U:
      case kExprI32AtomicCompareExchange8U:
      case kExprI64AtomicAdd8U:
      case kExprI64AtomicSub8U:
      case kExprI64AtomicAnd8U:
      case kExprI64AtomicOr8U:
      case kExprI64AtomicXor8U:
      case kExprI64AtomicExchange8U:
      case kExprI64AtomicCompareExchange8U:
      case kExprS128Load8Splat:
        return 0;
      default:
        return 0;
    }
  }

  template <WasmOpcode memory_op, ValueType::Kind... arg_types>
  void memop(DataRange* data) {
    // The alignment immediate is the base-2 logarithm of the alignment, so
    // it is capped at the natural alignment of the access.
    const uint8_t align = data->get<uint8_t>() % (max_alignment(memory_op) + 1);
    const uint32_t offset = data->get<uint32_t>();

    // Generate the index and the arguments, if any.
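    // The i32 memory index sits on the stack below the (optional) value
    // operands, so kI32 is prepended to the requested argument types.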
    Generate<ValueType::kI32, arg_types...>(data);
    if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(memory_op >> 8))) {
      DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix);
      builder_->EmitWithPrefix(memory_op);
    } else {
      builder_->Emit(memory_op);
    }
    builder_->EmitU32V(align);
    builder_->EmitU32V(offset);
  }

  template <WasmOpcode Op, ValueType::Kind... Args>
  void atomic_op(DataRange* data) {
    const uint8_t align = data->get<uint8_t>() % (max_alignment(Op) + 1);
    const uint32_t offset = data->get<uint32_t>();

    Generate<Args...>(data);
    builder_->EmitWithPrefix(Op);

    builder_->EmitU32V(align);
    builder_->EmitU32V(offset);
  }

  template <WasmOpcode Op, ValueType::Kind... Args>
  void op_with_prefix(DataRange* data) {
    Generate<Args...>(data);
    builder_->EmitWithPrefix(Op);
  }

  void simd_const(DataRange* data) {
    builder_->EmitWithPrefix(kExprS128Const);
    for (int i = 0; i < kSimd128Size; i++) {
      builder_->EmitByte(data->get<uint8_t>());
    }
  }

  template <WasmOpcode Op, int lanes, ValueType::Kind... Args>
  void simd_lane_op(DataRange* data) {
    Generate<Args...>(data);
    builder_->EmitWithPrefix(Op);
    builder_->EmitByte(data->get<uint8_t>() % lanes);
  }

  void simd_shuffle(DataRange* data) {
    Generate<ValueType::kS128, ValueType::kS128>(data);
    builder_->EmitWithPrefix(kExprI8x16Shuffle);
    for (int i = 0; i < kSimd128Size; i++) {
      // Each shuffle lane index selects one of the 32 bytes of the two
      // input vectors.
      builder_->EmitByte(static_cast<uint8_t>(data->get<uint8_t>() % 32));
    }
  }

  void drop(DataRange* data) {
    Generate(GetValueType(data), data);
    builder_->Emit(kExprDrop);
  }

  enum CallDirect : bool { kCallDirect = true, kCallIndirect = false };

  template <ValueType::Kind wanted_type>
  void call(DataRange* data) {
    call(data, ValueType::Primitive(wanted_type), kCallDirect);
  }

  template <ValueType::Kind wanted_type>
  void call_indirect(DataRange* data) {
    call(data, ValueType::Primitive(wanted_type), kCallIndirect);
  }

  void Convert(ValueType src, ValueType dst) {
    auto idx = [](ValueType t) -> int {
      switch (t.kind()) {
        case ValueType::kI32:
          return 0;
        case ValueType::kI64:
          return 1;
        case ValueType::kF32:
          return 2;
        case ValueType::kF64:
          return 3;
        default:
          UNREACHABLE();
      }
    };
    static constexpr WasmOpcode kConvertOpcodes[] = {
        // {i32, i64, f32, f64} -> i32
        kExprNop, kExprI32ConvertI64, kExprI32SConvertF32, kExprI32SConvertF64,
        // {i32, i64, f32, f64} -> i64
        kExprI64SConvertI32, kExprNop, kExprI64SConvertF32, kExprI64SConvertF64,
        // {i32, i64, f32, f64} -> f32
        kExprF32SConvertI32, kExprF32SConvertI64, kExprNop, kExprF32ConvertF64,
        // {i32, i64, f32, f64} -> f64
        kExprF64SConvertI32, kExprF64SConvertI64, kExprF64ConvertF32,
        kExprNop};
    int arr_idx = idx(dst) << 2 | idx(src);
    builder_->Emit(kConvertOpcodes[arr_idx]);
  }

  void ConvertOrGenerate(ValueType src, ValueType dst, DataRange* data) {
    if (src == dst) return;
    if (src == kWasmStmt && dst != kWasmStmt) {
      Generate(dst, data);
    } else if (dst == kWasmStmt && src != kWasmStmt) {
      builder_->Emit(kExprDrop);
    } else {
      Convert(src, dst);
    }
  }

  void call(DataRange* data, ValueType wanted_type, CallDirect call_direct) {
    uint8_t random_byte = data->get<uint8_t>();
    int func_index = random_byte % functions_.size();
    uint32_t sig_index = functions_[func_index];
    FunctionSig* sig = builder_->builder()->GetSignature(sig_index);
    // Generate arguments.
    for (size_t i = 0; i < sig->parameter_count(); ++i) {
      Generate(sig->GetParam(i), data);
    }
    // Emit call.
    // If the return types of the callee happen to match the return types of
    // the caller, generate a tail call.
    bool use_return_call = random_byte > 127;
    if (use_return_call &&
        std::equal(sig->returns().begin(), sig->returns().end(),
                   builder_->signature()->returns().begin(),
                   builder_->signature()->returns().end())) {
      if (call_direct) {
        builder_->EmitWithU32V(kExprReturnCall, func_index);
      } else {
        builder_->EmitI32Const(func_index);
        builder_->EmitWithU32V(kExprReturnCallIndirect, sig_index);
        builder_->EmitByte(0);  // Table index.
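        // A return call transfers control out of the current frame for good,
        // so the early return below skips the result plumbing that regular
        // calls need.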
      }
      return;
    } else {
      if (call_direct) {
        builder_->EmitWithU32V(kExprCallFunction, func_index);
      } else {
        builder_->EmitI32Const(func_index);
        builder_->EmitWithU32V(kExprCallIndirect, sig_index);
        builder_->EmitByte(0);  // Table index.
      }
    }
    if (sig->return_count() == 0 && wanted_type != kWasmStmt) {
      // The call did not generate a value. Thus just generate it here.
      Generate(wanted_type, data);
      return;
    }
    if (wanted_type == kWasmStmt) {
      // The call did generate values, but we did not want one.
      for (size_t i = 0; i < sig->return_count(); ++i) {
        builder_->Emit(kExprDrop);
      }
      return;
    }
    auto return_types = VectorOf(sig->returns().begin(), sig->return_count());
    auto wanted_types =
        VectorOf(&wanted_type, wanted_type == kWasmStmt ? 0 : 1);
    ConsumeAndGenerate(return_types, wanted_types, data);
  }

  struct Var {
    uint32_t index;
    ValueType type = kWasmStmt;
    Var() = default;
    Var(uint32_t index, ValueType type) : index(index), type(type) {}

    bool is_valid() const { return type != kWasmStmt; }
  };

  Var GetRandomLocal(DataRange* data) {
    uint32_t num_params =
        static_cast<uint32_t>(builder_->signature()->parameter_count());
    uint32_t num_locals = static_cast<uint32_t>(locals_.size());
    if (num_params + num_locals == 0) return {};
    uint32_t index = data->get<uint8_t>() % (num_params + num_locals);
    ValueType type = index < num_params
                         ? builder_->signature()->GetParam(index)
                         : locals_[index - num_params];
    return {index, type};
  }

  template <ValueType::Kind wanted_type>
  void local_op(DataRange* data, WasmOpcode opcode) {
    Var local = GetRandomLocal(data);
    // If there are no locals and no parameters, just generate any value (if a
    // value is needed), or do nothing.
    if (!local.is_valid()) {
      if (wanted_type == ValueType::kStmt) return;
      return Generate<wanted_type>(data);
    }

    if (opcode != kExprLocalGet) Generate(local.type, data);
    builder_->EmitWithU32V(opcode, local.index);
    if (wanted_type != ValueType::kStmt && local.type.kind() != wanted_type) {
      Convert(local.type, ValueType::Primitive(wanted_type));
    }
  }

  template <ValueType::Kind wanted_type>
  void get_local(DataRange* data) {
    static_assert(wanted_type != ValueType::kStmt, "illegal type");
    local_op<wanted_type>(data, kExprLocalGet);
  }

  void set_local(DataRange* data) {
    local_op<ValueType::kStmt>(data, kExprLocalSet);
  }

  template <ValueType::Kind wanted_type>
  void tee_local(DataRange* data) {
    local_op<wanted_type>(data, kExprLocalTee);
  }

  template <size_t num_bytes>
  void i32_const(DataRange* data) {
    builder_->EmitI32Const(data->get<int32_t, num_bytes>());
  }

  template <size_t num_bytes>
  void i64_const(DataRange* data) {
    builder_->EmitI64Const(data->get<int64_t, num_bytes>());
  }

  Var GetRandomGlobal(DataRange* data, bool ensure_mutable) {
    uint32_t index;
    if (ensure_mutable) {
      if (mutable_globals_.empty()) return {};
      index = mutable_globals_[data->get<uint8_t>() % mutable_globals_.size()];
    } else {
      if (globals_.empty()) return {};
      index = data->get<uint8_t>() % globals_.size();
    }
    ValueType type = globals_[index];
    return {index, type};
  }

  template <ValueType::Kind wanted_type>
  void global_op(DataRange* data) {
    constexpr bool is_set = wanted_type == ValueType::kStmt;
    Var global = GetRandomGlobal(data, is_set);
    // If there are no globals, just generate any value (if a value is
    // needed), or do nothing.
    if (!global.is_valid()) {
      if (wanted_type == ValueType::kStmt) return;
      return Generate<wanted_type>(data);
    }

    if (is_set) Generate(global.type, data);
    builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet,
                           global.index);
    if (!is_set && global.type.kind() != wanted_type) {
      Convert(global.type, ValueType::Primitive(wanted_type));
    }
  }

  template <ValueType::Kind wanted_type>
  void get_global(DataRange* data) {
    static_assert(wanted_type != ValueType::kStmt, "illegal type");
    global_op<wanted_type>(data);
  }

  template <ValueType::Kind select_type>
  void select_with_type(DataRange* data) {
    static_assert(select_type != ValueType::kStmt, "illegal type for select");
    Generate<select_type, select_type, ValueType::kI32>(data);
    // num_types is always 1.
    uint8_t num_types = 1;
    builder_->EmitWithU8U8(kExprSelectWithType, num_types,
                           ValueType::Primitive(select_type).value_type_code());
  }

  void set_global(DataRange* data) { global_op<ValueType::kStmt>(data); }

  template <ValueType::Kind... Types>
  void sequence(DataRange* data) {
    Generate<Types...>(data);
  }

  void current_memory(DataRange* data) {
    builder_->EmitWithU8(kExprMemorySize, 0);
  }

  void grow_memory(DataRange* data);

  using GenerateFn = void (WasmGenerator::*const)(DataRange*);

  template <size_t N>
  void GenerateOneOf(GenerateFn (&alternatives)[N], DataRange* data) {
    static_assert(N < std::numeric_limits<uint8_t>::max(),
                  "Too many alternatives. Use a bigger type if needed.");
    const auto which = data->get<uint8_t>();

    GenerateFn alternate = alternatives[which % N];
    (this->*alternate)(data);
  }

  struct GeneratorRecursionScope {
    explicit GeneratorRecursionScope(WasmGenerator* gen) : gen(gen) {
      ++gen->recursion_depth;
      DCHECK_LE(gen->recursion_depth, kMaxRecursionDepth);
    }
    ~GeneratorRecursionScope() {
      DCHECK_GT(gen->recursion_depth, 0);
      --gen->recursion_depth;
    }
    WasmGenerator* gen;
  };

 public:
  WasmGenerator(WasmFunctionBuilder* fn,
                const std::vector<uint32_t>& functions,
                const std::vector<ValueType>& globals,
                const std::vector<uint8_t>& mutable_globals, DataRange* data)
      : builder_(fn),
        functions_(functions),
        globals_(globals),
        mutable_globals_(mutable_globals) {
    FunctionSig* sig = fn->signature();
    blocks_.emplace_back();
    for (size_t i = 0; i < sig->return_count(); ++i) {
      blocks_.back().push_back(sig->GetReturn(i));
    }

    constexpr uint32_t kMaxLocals = 32;
    locals_.resize(data->get<uint8_t>() % kMaxLocals);
    for (ValueType& local : locals_) {
      local = GetValueType(data);
      fn->AddLocal(local);
    }
  }

  void Generate(ValueType type, DataRange* data);

  template <ValueType::Kind T>
  void Generate(DataRange* data);

  template <ValueType::Kind T1, ValueType::Kind T2, ValueType::Kind... Ts>
  void Generate(DataRange* data) {
    // TODO(clemensb): Implement a more even split.
    auto first_data = data->split();
    Generate<T1>(&first_data);
    Generate<T2, Ts...>(data);
  }

  std::vector<ValueType> GenerateTypes(DataRange* data);
  void Generate(Vector<const ValueType> types, DataRange* data);
  void ConsumeAndGenerate(Vector<const ValueType> parameter_types,
                          Vector<const ValueType> return_types,
                          DataRange* data);

 private:
  WasmFunctionBuilder* builder_;
  std::vector<std::vector<ValueType>> blocks_;
  const std::vector<uint32_t>& functions_;
  std::vector<ValueType> locals_;
  std::vector<ValueType> globals_;
  std::vector<uint8_t> mutable_globals_;  // indexes into {globals_}
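  // Nesting depth of expression generation. GeneratorRecursionScope bumps it
  // on entry, and every Generate overload bottoms out once
  // kMaxRecursionDepth is reached, bounding both the generated expression
  // trees and this fuzzer's own C++ stack usage.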
  uint32_t recursion_depth = 0;
  static constexpr uint32_t kMaxRecursionDepth = 64;

  bool recursion_limit_reached() {
    return recursion_depth >= kMaxRecursionDepth;
  }
};

template <>
void WasmGenerator::block<ValueType::kStmt>(DataRange* data) {
  block({}, {}, data);
}

template <>
void WasmGenerator::loop<ValueType::kStmt>(DataRange* data) {
  loop({}, {}, data);
}

template <>
void WasmGenerator::Generate<ValueType::kStmt>(DataRange* data) {
  GeneratorRecursionScope rec_scope(this);
  if (recursion_limit_reached() || data->size() == 0) return;

  constexpr GenerateFn alternatives[] = {
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt,
                               ValueType::kStmt, ValueType::kStmt>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt,
                               ValueType::kStmt, ValueType::kStmt,
                               ValueType::kStmt, ValueType::kStmt,
                               ValueType::kStmt, ValueType::kStmt>,
      &WasmGenerator::block<ValueType::kStmt>,
      &WasmGenerator::loop<ValueType::kStmt>,
      &WasmGenerator::if_<ValueType::kStmt, kIf>,
      &WasmGenerator::if_<ValueType::kStmt, kIfElse>,
      &WasmGenerator::br,
      &WasmGenerator::br_if<ValueType::kStmt>,

      &WasmGenerator::memop<kExprI32StoreMem, ValueType::kI32>,
      &WasmGenerator::memop<kExprI32StoreMem8, ValueType::kI32>,
      &WasmGenerator::memop<kExprI32StoreMem16, ValueType::kI32>,
      &WasmGenerator::memop<kExprI64StoreMem, ValueType::kI64>,
      &WasmGenerator::memop<kExprI64StoreMem8, ValueType::kI64>,
      &WasmGenerator::memop<kExprI64StoreMem16, ValueType::kI64>,
      &WasmGenerator::memop<kExprI64StoreMem32, ValueType::kI64>,
      &WasmGenerator::memop<kExprF32StoreMem, ValueType::kF32>,
      &WasmGenerator::memop<kExprF64StoreMem, ValueType::kF64>,
      &WasmGenerator::memop<kExprI32AtomicStore, ValueType::kI32>,
      &WasmGenerator::memop<kExprI32AtomicStore8U, ValueType::kI32>,
      &WasmGenerator::memop<kExprI32AtomicStore16U, ValueType::kI32>,
      &WasmGenerator::memop<kExprI64AtomicStore, ValueType::kI64>,
      &WasmGenerator::memop<kExprI64AtomicStore8U, ValueType::kI64>,
      &WasmGenerator::memop<kExprI64AtomicStore16U, ValueType::kI64>,
      &WasmGenerator::memop<kExprI64AtomicStore32U, ValueType::kI64>,
      &WasmGenerator::memop<kExprS128StoreMem, ValueType::kS128>,

      &WasmGenerator::drop,

      &WasmGenerator::call<ValueType::kStmt>,
      &WasmGenerator::call_indirect<ValueType::kStmt>,

      &WasmGenerator::set_local,
      &WasmGenerator::set_global};

  GenerateOneOf(alternatives, data);
}

template <>
void WasmGenerator::Generate<ValueType::kI32>(DataRange* data) {
  GeneratorRecursionScope rec_scope(this);
  if (recursion_limit_reached() || data->size() <= 1) {
    builder_->EmitI32Const(data->get<uint32_t>());
    return;
  }

  constexpr GenerateFn alternatives[] = {
      &WasmGenerator::i32_const<1>,
      &WasmGenerator::i32_const<2>,
      &WasmGenerator::i32_const<3>,
      &WasmGenerator::i32_const<4>,

      &WasmGenerator::sequence<ValueType::kI32, ValueType::kStmt>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI32>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI32,
                               ValueType::kStmt>,

      &WasmGenerator::op<kExprI32Eqz, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Eq, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Ne, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32LtS, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32LtU, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32GeS, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32GeU, ValueType::kI32, ValueType::kI32>,

      &WasmGenerator::op<kExprI64Eqz, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Eq, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Ne, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64LtS, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64LtU, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64GeS, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64GeU, ValueType::kI64, ValueType::kI64>,

      &WasmGenerator::op<kExprF32Eq, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Ne, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Lt, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Ge, ValueType::kF32, ValueType::kF32>,

      &WasmGenerator::op<kExprF64Eq, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Ne, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Lt, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Ge, ValueType::kF64, ValueType::kF64>,

      &WasmGenerator::op<kExprI32Add, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Sub, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Mul, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32DivS, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32DivU, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32RemS, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32RemU, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32And, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Ior, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Xor, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Shl, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32ShrU, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32ShrS, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Ror, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Rol, ValueType::kI32, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Clz, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Ctz, ValueType::kI32>,
      &WasmGenerator::op<kExprI32Popcnt, ValueType::kI32>,
      &WasmGenerator::op<kExprI32ConvertI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI32SConvertF32, ValueType::kF32>,
      &WasmGenerator::op<kExprI32UConvertF32, ValueType::kF32>,
      &WasmGenerator::op<kExprI32SConvertF64, ValueType::kF64>,
      &WasmGenerator::op<kExprI32UConvertF64, ValueType::kF64>,
      &WasmGenerator::op<kExprI32ReinterpretF32, ValueType::kF32>,

      &WasmGenerator::op_with_prefix<kExprI32SConvertSatF32, ValueType::kF32>,
      &WasmGenerator::op_with_prefix<kExprI32UConvertSatF32, ValueType::kF32>,
      &WasmGenerator::op_with_prefix<kExprI32SConvertSatF64, ValueType::kF64>,
      &WasmGenerator::op_with_prefix<kExprI32UConvertSatF64, ValueType::kF64>,

      &WasmGenerator::block<ValueType::kI32>,
      &WasmGenerator::loop<ValueType::kI32>,
      &WasmGenerator::if_<ValueType::kI32, kIfElse>,
      &WasmGenerator::br_if<ValueType::kI32>,

      &WasmGenerator::memop<kExprI32LoadMem>,
      &WasmGenerator::memop<kExprI32LoadMem8S>,
      &WasmGenerator::memop<kExprI32LoadMem8U>,
      &WasmGenerator::memop<kExprI32LoadMem16S>,
      &WasmGenerator::memop<kExprI32LoadMem16U>,
      &WasmGenerator::memop<kExprI32AtomicLoad>,
      &WasmGenerator::memop<kExprI32AtomicLoad8U>,
      &WasmGenerator::memop<kExprI32AtomicLoad16U>,

      &WasmGenerator::atomic_op<kExprI32AtomicAdd, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicSub, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicAnd, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicOr, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicXor, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicExchange, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange,
                                ValueType::kI32, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicAdd8U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicSub8U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicAnd8U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicOr8U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicXor8U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicExchange8U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange8U,
                                ValueType::kI32, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicAdd16U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicSub16U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicAnd16U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicOr16U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicXor16U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicExchange16U, ValueType::kI32,
                                ValueType::kI32>,
      &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange16U,
                                ValueType::kI32, ValueType::kI32,
                                ValueType::kI32>,

      &WasmGenerator::op_with_prefix<kExprV8x16AnyTrue, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprV8x16AllTrue, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprV16x8AnyTrue, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprV16x8AllTrue, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprV32x4AnyTrue, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprV32x4AllTrue, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16BitMask, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8BitMask, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4BitMask, ValueType::kS128>,

      &WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneS, 16,
                                   ValueType::kS128>,
      &WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneU, 16,
                                   ValueType::kS128>,
      &WasmGenerator::simd_lane_op<kExprI16x8ExtractLaneS, 8,
                                   ValueType::kS128>,
      &WasmGenerator::simd_lane_op<kExprI16x8ExtractLaneU, 8,
                                   ValueType::kS128>,
      &WasmGenerator::simd_lane_op<kExprI32x4ExtractLane, 4,
                                   ValueType::kS128>,

      &WasmGenerator::current_memory,
      &WasmGenerator::grow_memory,

      &WasmGenerator::get_local<ValueType::kI32>,
      &WasmGenerator::tee_local<ValueType::kI32>,
      &WasmGenerator::get_global<ValueType::kI32>,
      &WasmGenerator::op<kExprSelect, ValueType::kI32, ValueType::kI32,
                         ValueType::kI32>,
      &WasmGenerator::select_with_type<ValueType::kI32>,

      &WasmGenerator::call<ValueType::kI32>,
      &WasmGenerator::call_indirect<ValueType::kI32>};

  GenerateOneOf(alternatives, data);
}

template <>
void WasmGenerator::Generate<ValueType::kI64>(DataRange* data) {
  GeneratorRecursionScope rec_scope(this);
  if (recursion_limit_reached() || data->size() <= 1) {
    builder_->EmitI64Const(data->get<int64_t>());
    return;
  }

  constexpr GenerateFn alternatives[] = {
      &WasmGenerator::i64_const<1>,
      &WasmGenerator::i64_const<2>,
      &WasmGenerator::i64_const<3>,
      &WasmGenerator::i64_const<4>,
      &WasmGenerator::i64_const<5>,
      &WasmGenerator::i64_const<6>,
      &WasmGenerator::i64_const<7>,
      &WasmGenerator::i64_const<8>,

      &WasmGenerator::sequence<ValueType::kI64, ValueType::kStmt>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI64>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI64,
                               ValueType::kStmt>,

      &WasmGenerator::op<kExprI64Add, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Sub, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Mul, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64DivS, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64DivU, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64RemS, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64RemU, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64And, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Ior, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Xor, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Shl, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64ShrU, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64ShrS, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Ror, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Rol, ValueType::kI64, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Clz, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Ctz, ValueType::kI64>,
      &WasmGenerator::op<kExprI64Popcnt, ValueType::kI64>,

      &WasmGenerator::op_with_prefix<kExprI64SConvertSatF32, ValueType::kF32>,
      &WasmGenerator::op_with_prefix<kExprI64UConvertSatF32, ValueType::kF32>,
      &WasmGenerator::op_with_prefix<kExprI64SConvertSatF64, ValueType::kF64>,
      &WasmGenerator::op_with_prefix<kExprI64UConvertSatF64, ValueType::kF64>,

      &WasmGenerator::block<ValueType::kI64>,
      &WasmGenerator::loop<ValueType::kI64>,
      &WasmGenerator::if_<ValueType::kI64, kIfElse>,
      &WasmGenerator::br_if<ValueType::kI64>,

      &WasmGenerator::memop<kExprI64LoadMem>,
      &WasmGenerator::memop<kExprI64LoadMem8S>,
      &WasmGenerator::memop<kExprI64LoadMem8U>,
      &WasmGenerator::memop<kExprI64LoadMem16S>,
      &WasmGenerator::memop<kExprI64LoadMem16U>,
      &WasmGenerator::memop<kExprI64LoadMem32S>,
      &WasmGenerator::memop<kExprI64LoadMem32U>,
      &WasmGenerator::memop<kExprI64AtomicLoad>,
      &WasmGenerator::memop<kExprI64AtomicLoad8U>,
      &WasmGenerator::memop<kExprI64AtomicLoad16U>,
      &WasmGenerator::memop<kExprI64AtomicLoad32U>,

      &WasmGenerator::atomic_op<kExprI64AtomicAdd, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicSub, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicAnd, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicOr, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicXor, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicExchange, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange,
                                ValueType::kI32, ValueType::kI64,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicAdd8U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicSub8U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicAnd8U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicOr8U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicXor8U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicExchange8U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange8U,
                                ValueType::kI32, ValueType::kI64,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicAdd16U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicSub16U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicAnd16U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicOr16U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicXor16U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicExchange16U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange16U,
                                ValueType::kI32, ValueType::kI64,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicAdd32U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicSub32U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicAnd32U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicOr32U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicXor32U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicExchange32U, ValueType::kI32,
                                ValueType::kI64>,
      &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange32U,
                                ValueType::kI32, ValueType::kI64,
                                ValueType::kI64>,

      &WasmGenerator::simd_lane_op<kExprI64x2ExtractLane, 2,
                                   ValueType::kS128>,

      &WasmGenerator::get_local<ValueType::kI64>,
      &WasmGenerator::tee_local<ValueType::kI64>,
      &WasmGenerator::get_global<ValueType::kI64>,
      &WasmGenerator::op<kExprSelect, ValueType::kI64, ValueType::kI64,
                         ValueType::kI32>,
      &WasmGenerator::select_with_type<ValueType::kI64>,

      &WasmGenerator::call<ValueType::kI64>,
      &WasmGenerator::call_indirect<ValueType::kI64>};

  GenerateOneOf(alternatives, data);
}

template <>
void WasmGenerator::Generate<ValueType::kF32>(DataRange* data) {
  GeneratorRecursionScope rec_scope(this);
  if (recursion_limit_reached() || data->size() <= sizeof(float)) {
    builder_->EmitF32Const(data->get<float>());
    return;
  }

  constexpr GenerateFn alternatives[] = {
      &WasmGenerator::sequence<ValueType::kF32, ValueType::kStmt>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF32>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF32,
                               ValueType::kStmt>,
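      // Unary and binary f32 arithmetic, plus conversions from the other
      // numeric types.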
      &WasmGenerator::op<kExprF32Abs, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Neg, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Ceil, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Floor, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Trunc, ValueType::kF32>,
      &WasmGenerator::op<kExprF32NearestInt, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Sqrt, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Add, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Sub, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Mul, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Div, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Min, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32Max, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32CopySign, ValueType::kF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF32SConvertI32, ValueType::kI32>,
      &WasmGenerator::op<kExprF32UConvertI32, ValueType::kI32>,
      &WasmGenerator::op<kExprF32SConvertI64, ValueType::kI64>,
      &WasmGenerator::op<kExprF32UConvertI64, ValueType::kI64>,
      &WasmGenerator::op<kExprF32ConvertF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF32ReinterpretI32, ValueType::kI32>,

      &WasmGenerator::block<ValueType::kF32>,
      &WasmGenerator::loop<ValueType::kF32>,
      &WasmGenerator::if_<ValueType::kF32, kIfElse>,
      &WasmGenerator::br_if<ValueType::kF32>,

      &WasmGenerator::memop<kExprF32LoadMem>,

      &WasmGenerator::simd_lane_op<kExprF32x4ExtractLane, 4,
                                   ValueType::kS128>,

      &WasmGenerator::get_local<ValueType::kF32>,
      &WasmGenerator::tee_local<ValueType::kF32>,
      &WasmGenerator::get_global<ValueType::kF32>,
      &WasmGenerator::op<kExprSelect, ValueType::kF32, ValueType::kF32,
                         ValueType::kI32>,
      &WasmGenerator::select_with_type<ValueType::kF32>,

      &WasmGenerator::call<ValueType::kF32>,
      &WasmGenerator::call_indirect<ValueType::kF32>};

  GenerateOneOf(alternatives, data);
}

template <>
void WasmGenerator::Generate<ValueType::kF64>(DataRange* data) {
  GeneratorRecursionScope rec_scope(this);
  if (recursion_limit_reached() || data->size() <= sizeof(double)) {
    builder_->EmitF64Const(data->get<double>());
    return;
  }

  constexpr GenerateFn alternatives[] = {
      &WasmGenerator::sequence<ValueType::kF64, ValueType::kStmt>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF64>,
      &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF64,
                               ValueType::kStmt>,

      &WasmGenerator::op<kExprF64Abs, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Neg, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Ceil, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Floor, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Trunc, ValueType::kF64>,
      &WasmGenerator::op<kExprF64NearestInt, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Sqrt, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Add, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Sub, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Mul, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Div, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Min, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64Max, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64CopySign, ValueType::kF64, ValueType::kF64>,
      &WasmGenerator::op<kExprF64SConvertI32, ValueType::kI32>,
      &WasmGenerator::op<kExprF64UConvertI32, ValueType::kI32>,
      &WasmGenerator::op<kExprF64SConvertI64, ValueType::kI64>,
      &WasmGenerator::op<kExprF64UConvertI64, ValueType::kI64>,
      &WasmGenerator::op<kExprF64ConvertF32, ValueType::kF32>,
      &WasmGenerator::op<kExprF64ReinterpretI64, ValueType::kI64>,

      &WasmGenerator::block<ValueType::kF64>,
      &WasmGenerator::loop<ValueType::kF64>,
      &WasmGenerator::if_<ValueType::kF64, kIfElse>,
      &WasmGenerator::br_if<ValueType::kF64>,

      &WasmGenerator::memop<kExprF64LoadMem>,

      &WasmGenerator::simd_lane_op<kExprF64x2ExtractLane, 2,
                                   ValueType::kS128>,

      &WasmGenerator::get_local<ValueType::kF64>,
      &WasmGenerator::tee_local<ValueType::kF64>,
      &WasmGenerator::get_global<ValueType::kF64>,
      &WasmGenerator::op<kExprSelect, ValueType::kF64, ValueType::kF64,
                         ValueType::kI32>,
      &WasmGenerator::select_with_type<ValueType::kF64>,

      &WasmGenerator::call<ValueType::kF64>,
      &WasmGenerator::call_indirect<ValueType::kF64>};

  GenerateOneOf(alternatives, data);
}

template <>
void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
  GeneratorRecursionScope rec_scope(this);
  if (recursion_limit_reached() || data->size() <= sizeof(int32_t)) {
    // TODO(v8:8460): v128.const is not implemented yet, and we need a way to
    // "bottom-out", so use a splat to generate this.
    builder_->EmitI32Const(data->get<uint8_t>());
    builder_->EmitWithPrefix(kExprI8x16Splat);
    return;
  }

  constexpr GenerateFn alternatives[] = {
      &WasmGenerator::simd_const,
      &WasmGenerator::simd_lane_op<kExprI8x16ReplaceLane, 16,
                                   ValueType::kS128, ValueType::kI32>,
      &WasmGenerator::simd_lane_op<kExprI16x8ReplaceLane, 8,
                                   ValueType::kS128, ValueType::kI32>,
      &WasmGenerator::simd_lane_op<kExprI32x4ReplaceLane, 4,
                                   ValueType::kS128, ValueType::kI32>,
      &WasmGenerator::simd_lane_op<kExprI64x2ReplaceLane, 2,
                                   ValueType::kS128, ValueType::kI64>,
      &WasmGenerator::simd_lane_op<kExprF32x4ReplaceLane, 4,
                                   ValueType::kS128, ValueType::kF32>,
      &WasmGenerator::simd_lane_op<kExprF64x2ReplaceLane, 2,
                                   ValueType::kS128, ValueType::kF64>,

      &WasmGenerator::op_with_prefix<kExprI8x16Splat, ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI8x16Eq, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16Ne, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16LtS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16LtU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16GtS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16GtU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16LeS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16LeU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16GeS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16GeU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16Abs, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16Neg, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16Shl, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI8x16ShrS, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI8x16ShrU, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI8x16Add, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16AddSatS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16AddSatU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16Sub, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16SubSatS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16SubSatU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16MinS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16MinU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16MaxS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16MaxU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16RoundingAverageU,
                                     ValueType::kS128, ValueType::kS128>,
      // I8x16Mul is prototyped but not in the proposal, thus omitted here.
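      // The i16x8 alternatives mirror the i8x16 set, with 16-bit lanes.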
      &WasmGenerator::op_with_prefix<kExprI16x8Splat, ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI16x8Eq, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8Ne, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8LtS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8LtU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8GtS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8GtU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8LeS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8LeU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8GeS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8GeU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8Abs, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8Neg, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8Shl, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI16x8ShrS, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI16x8ShrU, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI16x8Add, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8AddSatS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8AddSatU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8Sub, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8SubSatS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8SubSatU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8Mul, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8MinS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8MinU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8MaxS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8MaxU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8RoundingAverageU,
                                     ValueType::kS128, ValueType::kS128>,

      &WasmGenerator::op_with_prefix<kExprI32x4Splat, ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI32x4Eq, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4Ne, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4LtS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4LtU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4GtS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4GtU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4LeS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4LeU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4GeS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4GeU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4Abs, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4Neg, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4Shl, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI32x4ShrS, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI32x4ShrU, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI32x4Add, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4Sub, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4Mul, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4MinS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4MinU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4MaxS, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4MaxU, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4DotI16x8S, ValueType::kS128,
                                     ValueType::kS128>,

      &WasmGenerator::op_with_prefix<kExprI64x2Splat, ValueType::kI64>,
      &WasmGenerator::op_with_prefix<kExprI64x2Neg, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI64x2Shl, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI64x2ShrS, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI64x2ShrU, ValueType::kS128,
                                     ValueType::kI32>,
      &WasmGenerator::op_with_prefix<kExprI64x2Add, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI64x2Sub, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI64x2Mul, ValueType::kS128,
                                     ValueType::kS128>,

      &WasmGenerator::op_with_prefix<kExprF32x4Splat, ValueType::kF32>,
      &WasmGenerator::op_with_prefix<kExprF32x4Eq, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Ne, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Lt, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Gt, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Le, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Ge, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Abs, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Neg, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Sqrt, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Add, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Sub, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Mul, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Div, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Min, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Max, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Pmin, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Pmax, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Ceil, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Floor, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4Trunc, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4NearestInt, ValueType::kS128>,

      &WasmGenerator::op_with_prefix<kExprF64x2Splat, ValueType::kF64>,
      &WasmGenerator::op_with_prefix<kExprF64x2Eq, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Ne, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Lt, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Gt, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Le, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Ge, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Abs, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Neg, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Sqrt, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Add, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Sub, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Mul, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Div, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Min, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Max, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Pmin, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Pmax, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Ceil, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Floor, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2Trunc, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF64x2NearestInt, ValueType::kS128>,
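      // Conversions, narrowing/widening, and bitwise v128 operations.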
      &WasmGenerator::op_with_prefix<kExprI32x4SConvertF32x4,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4UConvertF32x4,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4SConvertI32x4,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprF32x4UConvertI32x4,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16SConvertI16x8,
                                     ValueType::kS128, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI8x16UConvertI16x8,
                                     ValueType::kS128, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8SConvertI32x4,
                                     ValueType::kS128, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8UConvertI32x4,
                                     ValueType::kS128, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8SConvertI8x16Low,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8SConvertI8x16High,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8UConvertI8x16Low,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI16x8UConvertI8x16High,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4SConvertI16x8Low,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4SConvertI16x8High,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4UConvertI16x8Low,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprI32x4UConvertI16x8High,
                                     ValueType::kS128>,

      &WasmGenerator::op_with_prefix<kExprS128Not, ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprS128And, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprS128AndNot, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprS128Or, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprS128Xor, ValueType::kS128,
                                     ValueType::kS128>,
      &WasmGenerator::op_with_prefix<kExprS128Select, ValueType::kS128,
                                     ValueType::kS128, ValueType::kS128>,

      &WasmGenerator::simd_shuffle,
      &WasmGenerator::op_with_prefix<kExprI8x16Swizzle, ValueType::kS128,
                                     ValueType::kS128>,

      &WasmGenerator::memop<kExprS128LoadMem>,
      &WasmGenerator::memop<kExprS128Load8x8S>,
      &WasmGenerator::memop<kExprS128Load8x8U>,
      &WasmGenerator::memop<kExprS128Load16x4S>,
      &WasmGenerator::memop<kExprS128Load16x4U>,
      &WasmGenerator::memop<kExprS128Load32x2S>,
      &WasmGenerator::memop<kExprS128Load32x2U>,
      &WasmGenerator::memop<kExprS128Load8Splat>,
      &WasmGenerator::memop<kExprS128Load16Splat>,
      &WasmGenerator::memop<kExprS128Load32Splat>,
      &WasmGenerator::memop<kExprS128Load64Splat>,
      &WasmGenerator::memop<kExprS128Load32Zero>,
      &WasmGenerator::memop<kExprS128Load64Zero>,
  };

  GenerateOneOf(alternatives, data);
}

void WasmGenerator::grow_memory(DataRange* data) {
  Generate<ValueType::kI32>(data);
  builder_->EmitWithU8(kExprMemoryGrow, 0);
}

void WasmGenerator::Generate(ValueType type, DataRange* data) {
  switch (type.kind()) {
    case ValueType::kStmt:
      return Generate<ValueType::kStmt>(data);
    case ValueType::kI32:
      return Generate<ValueType::kI32>(data);
    case ValueType::kI64:
      return Generate<ValueType::kI64>(data);
    case ValueType::kF32:
      return Generate<ValueType::kF32>(data);
    case ValueType::kF64:
      return Generate<ValueType::kF64>(data);
    case ValueType::kS128:
      return Generate<ValueType::kS128>(data);
    default:
      UNREACHABLE();
  }
}

std::vector<ValueType> WasmGenerator::GenerateTypes(DataRange* data) {
  std::vector<ValueType> types;
  int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
  for (int i = 0; i < num_params; ++i) {
    types.push_back(GetValueType(data));
  }
  return types;
}

void WasmGenerator::Generate(Vector<const ValueType> types, DataRange* data) {
  // Maybe emit a multi-value block with the expected return type. Use a
  // non-default value to indicate block generation to avoid recursion when we
  // reach the end of the data.
  bool generate_block = data->get<uint8_t>() % 32 == 1;
  if (generate_block) {
    GeneratorRecursionScope rec_scope(this);
    if (!recursion_limit_reached()) {
      const auto param_types = GenerateTypes(data);
      Generate(VectorOf(param_types), data);
      any_block(VectorOf(param_types), types, data);
      return;
    }
  }

  if (types.size() == 0) {
    Generate(kWasmStmt, data);
    return;
  }
  if (types.size() == 1) {
    Generate(types[0], data);
    return;
  }

  // Split the types in two halves and recursively generate each half.
  // Each half is non-empty to ensure termination.
  size_t split_index = data->get<uint8_t>() % (types.size() - 1) + 1;
  Vector<const ValueType> lower_half = types.SubVector(0, split_index);
  Vector<const ValueType> upper_half =
      types.SubVector(split_index, types.size());
  DataRange first_range = data->split();
  Generate(lower_half, &first_range);
  Generate(upper_half, data);
}

// Emit code to match an arbitrary signature.
void WasmGenerator::ConsumeAndGenerate(Vector<const ValueType> param_types,
                                       Vector<const ValueType> return_types,
                                       DataRange* data) {
  if (param_types.size() == 0) {
    Generate(return_types, data);
    return;
  }
  // Keep exactly one of the parameters on the stack with a combination of
  // drops and selects, convert this value to the first return type, and
  // generate the remaining types.
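  // For example, for parameters (i32, f32, i64) and returns (f64, i32), if
  // the f32 is chosen as the value to keep: one drop removes the i64 above
  // it, one convert-plus-select folds the i32 below it away, a final Convert
  // turns the surviving value into the f64, and the remaining i32 return
  // value is generated from scratch.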
  // TODO(thibaudm): Improve this strategy to potentially generate any
  // sequence of instructions matching the given signature.
  size_t return_index = data->get<uint8_t>() % param_types.size();
  for (size_t i = param_types.size() - 1; i > return_index; --i) {
    builder_->Emit(kExprDrop);
  }
  for (size_t i = return_index; i > 0; --i) {
    Convert(param_types[i], param_types[i - 1]);
    builder_->EmitI32Const(0);
    builder_->Emit(kExprSelect);
  }
  if (return_types.empty()) {
    builder_->Emit(kExprDrop);
  } else {
    Convert(param_types[0], return_types[0]);
    Generate(return_types + 1, data);
  }
}

FunctionSig* GenerateSig(Zone* zone, DataRange* data) {
  // Generate enough parameters to spill some to the stack.
  int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
  int num_returns = int{data->get<uint8_t>()} % (kMaxReturns + 1);

  FunctionSig::Builder builder(zone, num_returns, num_params);
  for (int i = 0; i < num_returns; ++i) builder.AddReturn(GetValueType(data));
  for (int i = 0; i < num_params; ++i) builder.AddParam(GetValueType(data));
  return builder.Build();
}

}  // namespace

class WasmCompileFuzzer : public WasmExecutionFuzzer {
  bool GenerateModule(
      Isolate* isolate, Zone* zone, Vector<const uint8_t> data,
      ZoneBuffer* buffer, int32_t* num_args,
      std::unique_ptr<WasmValue[]>* interpreter_args,
      std::unique_ptr<Handle<Object>[]>* compiler_args) override {
    TestSignatures sigs;

    WasmModuleBuilder builder(zone);

    DataRange range(data);
    std::vector<uint32_t> function_signatures;
    function_signatures.push_back(builder.AddSignature(sigs.i_iii()));

    static_assert(kMaxFunctions >= 1, "need min. 1 function");
    int num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);

    for (int i = 1; i < num_functions; ++i) {
      FunctionSig* sig = GenerateSig(zone, &range);
      uint32_t signature_index = builder.AddSignature(sig);
      function_signatures.push_back(signature_index);
    }

    int num_globals = range.get<uint8_t>() % (kMaxGlobals + 1);
    std::vector<ValueType> globals;
    std::vector<uint8_t> mutable_globals;
    globals.reserve(num_globals);
    mutable_globals.reserve(num_globals);

    for (int i = 0; i < num_globals; ++i) {
      ValueType type = GetValueType(&range);
      // 1/8 of globals are immutable.
      const bool mutability = (range.get<uint8_t>() % 8) != 0;
      builder.AddGlobal(type, mutability, WasmInitExpr());
      globals.push_back(type);
      if (mutability) mutable_globals.push_back(static_cast<uint8_t>(i));
    }

    for (int i = 0; i < num_functions; ++i) {
      // The last function gets the remaining bytes; every other function gets
      // a random prefix split off the front of the range.
      DataRange function_range =
          i == num_functions - 1 ? std::move(range) : range.split();

      FunctionSig* sig = builder.GetSignature(function_signatures[i]);
      WasmFunctionBuilder* f = builder.AddFunction(sig);

      WasmGenerator gen(f, function_signatures, globals, mutable_globals,
                        &function_range);
      Vector<const ValueType> return_types(sig->returns().begin(),
                                           sig->return_count());
      gen.Generate(return_types, &function_range);

      f->Emit(kExprEnd);
      if (i == 0) builder.AddExport(CStrVector("main"), f);
    }

    builder.AllocateIndirectFunctions(num_functions);
    for (int i = 0; i < num_functions; ++i) {
      builder.SetIndirectFunction(i, i);
    }

    builder.SetMaxMemorySize(32);
    // We enable shared memory to be able to test atomics.
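    // Note that a shared memory must declare a maximum size, which the
    // SetMaxMemorySize(32) call above provides.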
    builder.SetHasSharedMemory();
    builder.WriteTo(buffer);

    *num_args = 3;
    interpreter_args->reset(
        new WasmValue[3]{WasmValue(1), WasmValue(2), WasmValue(3)});

    compiler_args->reset(new Handle<Object>[3]{
        handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(2), isolate),
        handle(Smi::FromInt(3), isolate)});
    return true;
  }
};

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  constexpr bool require_valid = true;
  EXPERIMENTAL_FLAG_SCOPE(reftypes);
  WasmCompileFuzzer().FuzzWasmModule({data, size}, require_valid);
  return 0;
}

}  // namespace fuzzer
}  // namespace wasm
}  // namespace internal
}  // namespace v8