diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index c33231011b..f0539f1e9a 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -3228,6 +3228,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
   DCHECK_NOT_NULL(wasm_context_);
   DCHECK_NOT_NULL(*control_);
   DCHECK_NOT_NULL(*effect_);
+  // Load the memory start.
   Node* mem_start = graph()->NewNode(
       jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
@@ -3235,7 +3236,6 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
           static_cast<int32_t>(offsetof(WasmContext, mem_start))),
       *effect_, *control_);
   *effect_ = mem_start;
-
   context_cache->mem_start = mem_start;
 
   // Load the memory size.
@@ -3245,34 +3245,44 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
           static_cast<int32_t>(offsetof(WasmContext, mem_size))),
       *effect_, *control_);
   *effect_ = mem_size;
-  if (jsgraph()->machine()->Is64()) {
-    mem_size = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
-                                mem_size);
-  }
   context_cache->mem_size = mem_size;
+
+  // Load the memory mask.
+  Node* mem_mask = graph()->NewNode(
+      jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+      jsgraph()->Int32Constant(
+          static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
+      *effect_, *control_);
+  *effect_ = mem_mask;
+  context_cache->mem_mask = mem_mask;
 }
 
 void WasmGraphBuilder::PrepareContextCacheForLoop(
     WasmContextCacheNodes* context_cache, Node* control) {
-  // Introduce phis for mem_size and mem_start.
-  context_cache->mem_size =
-      Phi(MachineRepresentation::kWord32, 1, &context_cache->mem_size, control);
-  context_cache->mem_start = Phi(MachineType::PointerRepresentation(), 1,
-                                 &context_cache->mem_start, control);
+#define INTRODUCE_PHI(field, rep) \
+  context_cache->field = Phi(rep, 1, &context_cache->field, control);
+
+  INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
+  INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
+  INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
+
+#undef INTRODUCE_PHI
 }
 
 void WasmGraphBuilder::NewContextCacheMerge(WasmContextCacheNodes* to,
                                             WasmContextCacheNodes* from,
                                             Node* merge) {
-  if (to->mem_size != from->mem_size) {
-    Node* vals[] = {to->mem_size, from->mem_size};
-    to->mem_size = Phi(MachineRepresentation::kWord32, 2, vals, merge);
-  }
-  if (to->mem_start != from->mem_start) {
-    Node* vals[] = {to->mem_start, from->mem_start};
-    to->mem_start = Phi(MachineType::PointerRepresentation(), 2, vals, merge);
+#define INTRODUCE_PHI(field, rep)            \
+  if (to->field != from->field) {            \
+    Node* vals[] = {to->field, from->field}; \
+    to->field = Phi(rep, 2, vals, merge);    \
   }
+
+  INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
+  INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
+  INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
+
+#undef INTRODUCE_PHI
 }
 
 void WasmGraphBuilder::MergeContextCacheInto(WasmContextCacheNodes* to,
@@ -3282,6 +3292,8 @@ void WasmGraphBuilder::MergeContextCacheInto(WasmContextCacheNodes* to,
                                              to->mem_size, from->mem_size);
   to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
                                        merge, to->mem_start, from->mem_start);
+  to->mem_mask = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
+                                      to->mem_mask, from->mem_mask);
 }
 
 Node* WasmGraphBuilder::CreateOrMergeIntoPhi(wasm::ValueType type, Node* merge,
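Review note: the cached fields are fetched at constant byte offsets computed with `offsetof(WasmContext, ...)`. The standalone sketch below illustrates that addressing scheme with a hypothetical `ToyContext` mirror of the struct (illustrative only, not the V8 type):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical mirror of WasmContext, for illustration only.
struct ToyContext {
  uint8_t* mem_start;
  uint32_t mem_size;
  uint32_t mem_mask;
};

int main() {
  // The compiled code loads each field at a constant byte offset from the
  // context pointer, exactly as the Int32Constant(offsetof(...)) nodes do.
  printf("mem_start at +%zu\n", offsetof(ToyContext, mem_start));
  printf("mem_size  at +%zu\n", offsetof(ToyContext, mem_size));
  printf("mem_mask  at +%zu\n", offsetof(ToyContext, mem_mask));
}
```

Keeping all three fields in one context struct lets generated code reach them through a single base pointer, which is what makes caching and phi-merging them cheap.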
@@ -3483,13 +3495,24 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
   return node;
 }
 
-void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
-                                      uint32_t offset,
-                                      wasm::WasmCodePosition position) {
-  if (FLAG_wasm_no_bounds_checks) return;
+Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
+                                       uint32_t offset,
+                                       wasm::WasmCodePosition position,
+                                       EnforceBoundsCheck enforce_check) {
+  if (FLAG_wasm_no_bounds_checks) return index;
   DCHECK_NOT_NULL(context_cache_);
   Node* mem_size = context_cache_->mem_size;
+  Node* mem_mask = context_cache_->mem_mask;
   DCHECK_NOT_NULL(mem_size);
+  DCHECK_NOT_NULL(mem_mask);
+
+  auto m = jsgraph()->machine();
+  if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
+    // Simply zero out the 32-bits on 64-bit targets and let the trap handler
+    // do its job.
+    return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
+                     : index;
+  }
 
   uint32_t min_size = env_->module->initial_pages * wasm::WasmModule::kPageSize;
   uint32_t max_size =
@@ -3501,7 +3524,7 @@ void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
     // The access will be out of bounds, even for the largest memory.
     TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
               position);
-    return;
+    return jsgraph()->IntPtrConstant(0);
   }
 
   uint32_t end_offset = offset + access_size;
@@ -3522,17 +3545,21 @@ void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
   } else {
     // The end offset is within the bounds of the smallest memory, so only
     // one check is required. Check to see if the index is also a constant.
-    UintPtrMatcher m(index);
-    if (m.HasValue()) {
-      uint64_t index_val = m.Value();
+    UintPtrMatcher match(index);
+    if (match.HasValue()) {
+      uint64_t index_val = match.Value();
       if ((index_val + offset + access_size) <= min_size) {
         // The input index is a constant and everything is statically within
         // bounds of the smallest possible memory.
-        return;
+        return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
+                         : index;
       }
     }
   }
 
+  // Compute the effective size of the memory, which is the size of the memory
+  // minus the statically known offset, minus the byte size of the access minus
+  // one.
   Node* effective_size;
   if (jsgraph()->machine()->Is32()) {
     effective_size =
@@ -3544,12 +3571,14 @@ void WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
         jsgraph()->Int64Constant(static_cast<int64_t>(end_offset - 1)));
   }
 
-  const Operator* less = jsgraph()->machine()->Is32()
-                             ? jsgraph()->machine()->Uint32LessThan()
-                             : jsgraph()->machine()->Uint64LessThan();
-
-  Node* cond = graph()->NewNode(less, index, effective_size);
+  // Introduce the actual bounds check.
+  Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
   TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+
+  // In the fallthrough case, condition the index with the memory mask.
+  Node* masked_index = graph()->NewNode(m->Word32And(), index, mem_mask);
+  return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), masked_index)
+                   : masked_index;
 }
 
 const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
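In scalar terms, the rewritten `BoundsCheckMem` fallthrough path behaves roughly like the sketch below. `Trap()` stands in for the `TrapIfFalse(kTrapMemOutOfBounds, ...)` exit, and the single `end_offset > mem_size` guard is a simplification of the patch's separate static and dynamic end-offset handling:

```cpp
#include <cstdint>
#include <cstdlib>

[[noreturn]] static void Trap() { abort(); }  // stands in for the trap exit

// Sketch of the masked bounds check: returns a zero-extended, masked index.
uint64_t BoundsCheckAndMask(uint32_t access_size, uint32_t index,
                            uint32_t offset, uint32_t mem_size,
                            uint32_t mem_mask) {
  uint32_t end_offset = offset + access_size;
  if (end_offset > mem_size) Trap();  // simplification of the end_offset check
  // Effective size: the largest index for which the access stays in bounds,
  // i.e. mem_size minus the static offset minus the access width, plus one.
  uint32_t effective_size = mem_size - (end_offset - 1);
  if (!(index < effective_size)) Trap();
  // Fallthrough: condition the index with the memory mask, so a misspeculated
  // access cannot reach beyond the next power of two above mem_size.
  return static_cast<uint64_t>(index & mem_mask);
}
```

Note that `index < effective_size` is equivalent to `index + offset + access_size <= mem_size`, but avoids computing a sum that could wrap around.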
@@ -3602,15 +3631,10 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
                                 wasm::WasmCodePosition position) {
   Node* load;
 
-  if (jsgraph()->machine()->Is64()) {
-    index =
-        graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
-  }
-  // Wasm semantics throw on OOB. Introduce explicit bounds check.
-  if (!use_trap_handler()) {
-    BoundsCheckMem(wasm::WasmOpcodes::MemSize(memtype), index, offset,
-                   position);
-  }
+  // Wasm semantics throw on OOB. Introduce explicit bounds check and
+  // conditioning when not using the trap handler.
+  index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(memtype), index, offset,
+                         position, kCanOmitBoundsCheck);
 
   if (memtype.representation() == MachineRepresentation::kWord8 ||
       jsgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
@@ -3662,15 +3686,8 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
                                  wasm::ValueType type) {
   Node* store;
 
-  if (jsgraph()->machine()->Is64()) {
-    index =
-        graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
-  }
-  // Wasm semantics throw on OOB. Introduce explicit bounds check.
-  if (!use_trap_handler()) {
-    BoundsCheckMem(wasm::WasmOpcodes::MemSize(mem_rep), index, offset,
-                   position);
-  }
+  index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(mem_rep), index, offset,
+                         position, kCanOmitBoundsCheck);
 
 #if defined(V8_TARGET_BIG_ENDIAN)
   val = BuildChangeEndiannessStore(val, mem_rep, type);
@@ -4233,51 +4250,54 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
   // TODO(gdeepti): Add alignment validation, traps on misalignment
   Node* node;
   switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type)                              \
-  case wasm::kExpr##Name: {                                                    \
-    BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
-                   offset, position);                                          \
-    node = graph()->NewNode(                                                   \
-        jsgraph()->machine()->Atomic##Operation(MachineType::Type()),          \
-        MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_);         \
-    break;                                                                     \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type)                       \
+  case wasm::kExpr##Name: {                                             \
+    Node* index =                                                       \
+        BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+                       inputs[0], offset, position, kNeedsBoundsCheck); \
+    node = graph()->NewNode(                                            \
+        jsgraph()->machine()->Atomic##Operation(MachineType::Type()),   \
+        MemBuffer(offset), index, inputs[1], *effect_, *control_);      \
+    break;                                                              \
   }
     ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
 #undef BUILD_ATOMIC_BINOP
-#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type)                         \
-  case wasm::kExpr##Name: {                                                    \
-    BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
-                   offset, position);                                          \
-    node = graph()->NewNode(                                                   \
-        jsgraph()->machine()->Atomic##Operation(MachineType::Type()),          \
-        MemBuffer(offset), inputs[0], inputs[1], inputs[2], *effect_,          \
-        *control_);                                                            \
-    break;                                                                     \
+#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type)                        \
+  case wasm::kExpr##Name: {                                                   \
+    Node* index =                                                             \
+        BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()),       \
+                       inputs[0], offset, position, kNeedsBoundsCheck);       \
+    node = graph()->NewNode(                                                  \
+        jsgraph()->machine()->Atomic##Operation(MachineType::Type()),         \
+        MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
+    break;                                                                    \
   }
     ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
 #undef BUILD_ATOMIC_TERNARY_OP
-#define BUILD_ATOMIC_LOAD_OP(Name, Type)                                       \
-  case wasm::kExpr##Name: {                                                    \
-    BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
-                   offset, position);                                          \
-    node = graph()->NewNode(                                                   \
-        jsgraph()->machine()->AtomicLoad(MachineType::Type()),                 \
-        MemBuffer(offset), inputs[0], *effect_, *control_);                    \
-    break;                                                                     \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type)                                \
+  case wasm::kExpr##Name: {                                             \
+    Node* index =                                                       \
+        BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+                       inputs[0], offset, position, kNeedsBoundsCheck); \
+    node = graph()->NewNode(                                            \
+        jsgraph()->machine()->AtomicLoad(MachineType::Type()),          \
+        MemBuffer(offset), index, *effect_, *control_);                 \
+    break;                                                              \
  }
     ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
 #undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep)                                 \
-  case wasm::kExpr##Name: {                                                    \
-    BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), inputs[0], \
-                   offset, position);                                          \
-    node = graph()->NewNode(                                                   \
-        jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep),         \
-        MemBuffer(offset), inputs[0], inputs[1], *effect_, *control_);         \
-    break;                                                                     \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep)                          \
+  case wasm::kExpr##Name: {                                             \
+    Node* index =                                                       \
+        BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+                       inputs[0], offset, position, kNeedsBoundsCheck); \
+    node = graph()->NewNode(                                            \
+        jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep),  \
+        MemBuffer(offset), index, inputs[1], *effect_, *control_);      \
+    break;                                                              \
   }
     ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
 #undef BUILD_ATOMIC_STORE_OP
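Note that all the atomic cases pass `kNeedsBoundsCheck`, so the explicit check is kept even when trap handlers are enabled. The mask by itself is not a bounds check, which this small demonstration of the mask arithmetic makes concrete (the `RoundUpToPowerOfTwo32` here is a local stand-in for illustration, not V8's implementation):

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for base::bits::RoundUpToPowerOfTwo32 (illustrative, not V8's).
static uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
  v--;
  v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
  v |= v >> 8;  v |= v >> 16;
  return v + 1;
}

int main() {
  const uint32_t kPageSize = 64 * 1024;
  uint32_t mem_size = 3 * kPageSize;                        // 196608 bytes
  uint32_t mem_mask = RoundUpToPowerOfTwo32(mem_size) - 1;  // 0x3ffff
  uint32_t index = 200000;              // past the end of memory...
  assert((index & mem_mask) == index);  // ...yet unchanged by the mask
  assert(index >= mem_size);            // so masking alone misses this OOB
  // The mask only limits how far a (mis)speculated access can reach; the
  // architectural OOB trap still comes from the explicit check.
  return 0;
}
```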
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 85ac4c5b6a..8e062aa818 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -218,6 +218,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
 struct WasmContextCacheNodes {
   Node* mem_start;
   Node* mem_size;
+  Node* mem_mask;
 };
 
 // Abstracts details of building TurboFan graph nodes for wasm to separate
@@ -225,6 +226,8 @@ struct WasmContextCacheNodes {
 typedef ZoneVector<Node*> NodeVector;
 class WasmGraphBuilder {
  public:
+  enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
+
   WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
                    Handle<Code> centry_stub, wasm::FunctionSig* sig,
                    compiler::SourcePositionTable* spt = nullptr,
@@ -370,8 +373,6 @@ class WasmGraphBuilder {
   void set_effect_ptr(Node** effect) { this->effect_ = effect; }
 
-  Node* LoadMemSize();
-  Node* LoadMemStart();
   void GetGlobalBaseAndOffset(MachineType mem_type, uint32_t offset,
                               Node** base_node, Node** offset_node);
@@ -458,8 +459,9 @@ class WasmGraphBuilder {
   Node* String(const char* string);
   Node* MemBuffer(uint32_t offset);
-  void BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
-                      wasm::WasmCodePosition position);
+  // BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
+  Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
+                       wasm::WasmCodePosition, EnforceBoundsCheck);
   const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
   const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
   Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
diff --git a/src/wasm/wasm-interpreter.cc b/src/wasm/wasm-interpreter.cc
index 29aa196850..dd1c159ebc 100644
--- a/src/wasm/wasm-interpreter.cc
+++ b/src/wasm/wasm-interpreter.cc
@@ -1478,9 +1478,15 @@ class ThreadImpl {
   }
 
   template <typename mtype>
-  inline bool BoundsCheck(uint32_t mem_size, uint32_t offset, uint32_t index) {
-    return sizeof(mtype) <= mem_size && offset <= mem_size - sizeof(mtype) &&
-           index <= mem_size - sizeof(mtype) - offset;
+  inline byte* BoundsCheckMem(uint32_t offset, uint32_t index) {
+    uint32_t mem_size = wasm_context_->mem_size;
+    if (sizeof(mtype) > mem_size) return nullptr;
+    if (offset > (mem_size - sizeof(mtype))) return nullptr;
+    if (index > (mem_size - sizeof(mtype) - offset)) return nullptr;
+    // Compute the effective address of the access, making sure to condition
+    // the index even in the in-bounds case.
+    return wasm_context_->mem_start + offset +
+           (index & wasm_context_->mem_mask);
   }
 
   template <typename ctype, typename mtype>
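The interpreter's check is split into three comparisons so that no intermediate sum can wrap around `uint32_t` (a direct `offset + index + sizeof(mtype)` could overflow). A self-contained sketch of the same shape, using a hypothetical `ToyContext` in place of the interpreter's `wasm_context_`:

```cpp
#include <cstdint>

// Toy stand-ins for the interpreter's context fields (assumptions, not V8).
struct ToyContext {
  uint8_t* mem_start;
  uint32_t mem_size;
  uint32_t mem_mask;
};

// Mirrors the overflow-safe shape of the interpreter's BoundsCheckMem: each
// comparison subtracts before comparing, so nothing can wrap around.
template <typename mtype>
uint8_t* BoundsCheckMem(ToyContext* ctx, uint32_t offset, uint32_t index) {
  uint32_t mem_size = ctx->mem_size;
  if (sizeof(mtype) > mem_size) return nullptr;
  if (offset > (mem_size - sizeof(mtype))) return nullptr;
  if (index > (mem_size - sizeof(mtype) - offset)) return nullptr;
  // Condition the index even in the in-bounds case, as the patch does.
  return ctx->mem_start + offset + (index & ctx->mem_mask);
}

int main() {
  uint8_t buffer[256] = {0};
  ToyContext ctx{buffer, 256, 255};  // 256 is a power of two; mask = 255
  bool ok = BoundsCheckMem<uint32_t>(&ctx, 0, 252) != nullptr;   // in bounds
  bool oob = BoundsCheckMem<uint32_t>(&ctx, 0, 253) == nullptr;  // 253+4 > 256
  return ok && oob ? 0 : 1;
}
```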
@@ -1489,11 +1495,11 @@ class ThreadImpl {
     MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc),
                                                       sizeof(ctype));
     uint32_t index = Pop().to<uint32_t>();
-    if (!BoundsCheck<mtype>(wasm_context_->mem_size, operand.offset, index)) {
+    byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
+    if (!addr) {
       DoTrap(kTrapMemOutOfBounds, pc);
       return false;
     }
-    byte* addr = wasm_context_->mem_start + operand.offset + index;
 
     WasmValue result(
         converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
@@ -1518,11 +1524,11 @@ class ThreadImpl {
 
     ctype val = Pop().to<ctype>();
     uint32_t index = Pop().to<uint32_t>();
-    if (!BoundsCheck<mtype>(wasm_context_->mem_size, operand.offset, index)) {
+    byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
+    if (!addr) {
       DoTrap(kTrapMemOutOfBounds, pc);
       return false;
     }
-    byte* addr = wasm_context_->mem_start + operand.offset + index;
 
     WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
     len = 1 + operand.length;
@@ -1544,11 +1550,11 @@ class ThreadImpl {
                                               sizeof(type));
     if (val) *val = Pop().to<uint32_t>();
     uint32_t index = Pop().to<uint32_t>();
-    if (!BoundsCheck<type>(wasm_context_->mem_size, operand.offset, index)) {
+    address = BoundsCheckMem<type>(operand.offset, index);
+    if (!address) {
       DoTrap(kTrapMemOutOfBounds, pc);
       return false;
     }
-    address = wasm_context_->mem_start + operand.offset + index;
     len = 2 + operand.length;
     return true;
   }
@@ -2022,10 +2028,10 @@
       case kExpr##name: {                                                \
         uint32_t index = Pop().to<uint32_t>();                           \
         ctype result;                                                    \
-        if (!BoundsCheck<mtype>(wasm_context_->mem_size, 0, index)) {    \
+        byte* addr = BoundsCheckMem<mtype>(0, index);                    \
+        if (!addr) {                                                     \
           result = defval;                                               \
         } else {                                                         \
-          byte* addr = wasm_context_->mem_start + index;                 \
           /* TODO(titzer): alignment for asmjs load mem? */              \
           result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr));  \
         }                                                                \
@@ -2047,9 +2053,8 @@
       case kExpr##name: {                                                     \
         WasmValue val = Pop();                                                \
         uint32_t index = Pop().to<uint32_t>();                                \
-        if (BoundsCheck<mtype>(wasm_context_->mem_size, 0, index)) {          \
-          byte* addr = wasm_context_->mem_start + index;                      \
-          /* TODO(titzer): alignment for asmjs store mem? */                  \
+        byte* addr = BoundsCheckMem<mtype>(0, index);                         \
+        if (addr) {                                                           \
          *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
         }                                                                     \
         Push(val);                                                            \
diff --git a/src/wasm/wasm-objects.h b/src/wasm/wasm-objects.h
index 3f4c85f093..2feac25b4d 100644
--- a/src/wasm/wasm-objects.h
+++ b/src/wasm/wasm-objects.h
@@ -5,6 +5,7 @@
 #ifndef V8_WASM_OBJECTS_H_
 #define V8_WASM_OBJECTS_H_
 
+#include "src/base/bits.h"
 #include "src/debug/debug.h"
 #include "src/debug/interface-types.h"
 #include "src/managed.h"
@@ -63,6 +64,7 @@ class WasmInstanceObject;
 struct WasmContext {
   byte* mem_start = nullptr;
   uint32_t mem_size = 0;  // TODO(titzer): uintptr_t?
+  uint32_t mem_mask = 0;  // TODO(titzer): uintptr_t?
   byte* globals_start = nullptr;
 
   inline void SetRawMemory(void* mem_start, size_t mem_size) {
@@ -70,6 +72,8 @@ struct WasmContext {
                   wasm::kV8MaxWasmMemoryPages * wasm::kSpecMaxWasmMemoryPages);
     this->mem_start = static_cast<byte*>(mem_start);
     this->mem_size = static_cast<uint32_t>(mem_size);
+    this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
+    DCHECK_LE(mem_size, this->mem_mask + 1);
   }
 };
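Two properties make the masking sound: `mem_size <= mem_mask + 1` (the `DCHECK` above), and `(i & mem_mask) == i` for every in-bounds index `i`, i.e. masking never changes a valid access. A quick check of both, using C++20's `std::bit_ceil` as a stand-in for `base::bits::RoundUpToPowerOfTwo32`:

```cpp
#include <bit>  // std::bit_ceil (C++20)
#include <cassert>
#include <cstdint>

// Sketch of the SetRawMemory invariant: the mask rounds the size up to a
// power of two, so masking is a no-op for every in-bounds index.
int main() {
  for (uint32_t mem_size : {0x10000u, 0x30000u, 0x30001u}) {
    uint32_t mem_mask = std::bit_ceil(mem_size) - 1;
    assert(mem_size <= mem_mask + 1);  // the DCHECK in SetRawMemory
    for (uint32_t i = 0; i < mem_size; i++) {
      assert((i & mem_mask) == i);  // in-bounds accesses are untouched
    }
  }
  return 0;
}
```

The design consequence is that an out-of-bounds index can only be remapped to somewhere below the next power of two above `mem_size`, so the engine's guard regions need to cover at most that range for misspeculated accesses.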