// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/common/wasm/wasm-interpreter.h"

#include <atomic>
#include <type_traits>

#include "src/base/overflowing-math.h"
#include "src/base/safe_conversions.h"
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/compiler/wasm-compiler.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects-inl.h"
#include "src/utils/boxed-float.h"
#include "src/utils/identity-map.h"
#include "src/utils/utils.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes-inl.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace wasm {

using base::ReadLittleEndianValue;
using base::ReadUnalignedValue;
using base::WriteLittleEndianValue;
using base::WriteUnalignedValue;

#define TRACE(...)                                        \
  do {                                                    \
    if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
  } while (false)

#if V8_TARGET_BIG_ENDIAN
#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
#else
#define LANE(i, type) (i)
#endif
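
// On big-endian targets, LANE mirrors the lane index so that lane 0 denotes
// the same logical SIMD lane as on little-endian targets. For example, for a
// value `v` with four lanes, LANE(0, v) evaluates to 3 on big-endian and to 0
// on little-endian machines.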

#define FOREACH_SIMPLE_BINOP(V) \
  V(I32Add, uint32_t, +)        \
  V(I32Sub, uint32_t, -)        \
  V(I32Mul, uint32_t, *)        \
  V(I32And, uint32_t, &)        \
  V(I32Ior, uint32_t, |)        \
  V(I32Xor, uint32_t, ^)        \
  V(I32Eq, uint32_t, ==)        \
  V(I32Ne, uint32_t, !=)        \
  V(I32LtU, uint32_t, <)        \
  V(I32LeU, uint32_t, <=)       \
  V(I32GtU, uint32_t, >)        \
  V(I32GeU, uint32_t, >=)       \
  V(I32LtS, int32_t, <)         \
  V(I32LeS, int32_t, <=)        \
  V(I32GtS, int32_t, >)         \
  V(I32GeS, int32_t, >=)        \
  V(I64Add, uint64_t, +)        \
  V(I64Sub, uint64_t, -)        \
  V(I64Mul, uint64_t, *)        \
  V(I64And, uint64_t, &)        \
  V(I64Ior, uint64_t, |)        \
  V(I64Xor, uint64_t, ^)        \
  V(I64Eq, uint64_t, ==)        \
  V(I64Ne, uint64_t, !=)        \
  V(I64LtU, uint64_t, <)        \
  V(I64LeU, uint64_t, <=)       \
  V(I64GtU, uint64_t, >)        \
  V(I64GeU, uint64_t, >=)       \
  V(I64LtS, int64_t, <)         \
  V(I64LeS, int64_t, <=)        \
  V(I64GtS, int64_t, >)         \
  V(I64GeS, int64_t, >=)        \
  V(F32Add, float, +)           \
  V(F32Sub, float, -)           \
  V(F32Eq, float, ==)           \
  V(F32Ne, float, !=)           \
  V(F32Lt, float, <)            \
  V(F32Le, float, <=)           \
  V(F32Gt, float, >)            \
  V(F32Ge, float, >=)           \
  V(F64Add, double, +)          \
  V(F64Sub, double, -)          \
  V(F64Eq, double, ==)          \
  V(F64Ne, double, !=)          \
  V(F64Lt, double, <)           \
  V(F64Le, double, <=)          \
  V(F64Gt, double, >)           \
  V(F64Ge, double, >=)          \
  V(F32Mul, float, *)           \
  V(F64Mul, double, *)          \
  V(F32Div, float, /)           \
  V(F64Div, double, /)
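
// FOREACH_SIMPLE_BINOP is an X-macro: a user supplies a definition of V and
// expands the list once per opcode. As an illustrative (hypothetical) sketch
// of how such a list is typically consumed, one could generate a helper per
// entry like this:
//
//   #define CASE(name, ctype, op) \
//     ctype Execute##name(ctype a, ctype b) { return a op b; }
//   FOREACH_SIMPLE_BINOP(CASE)
//   #undef CASE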

#define FOREACH_OTHER_BINOP(V) \
  V(I32DivS, int32_t)          \
  V(I32DivU, uint32_t)         \
  V(I32RemS, int32_t)          \
  V(I32RemU, uint32_t)         \
  V(I32Shl, uint32_t)          \
  V(I32ShrU, uint32_t)         \
  V(I32ShrS, int32_t)          \
  V(I64DivS, int64_t)          \
  V(I64DivU, uint64_t)         \
  V(I64RemS, int64_t)          \
  V(I64RemU, uint64_t)         \
  V(I64Shl, uint64_t)          \
  V(I64ShrU, uint64_t)         \
  V(I64ShrS, int64_t)          \
  V(I32Ror, int32_t)           \
  V(I32Rol, int32_t)           \
  V(I64Ror, int64_t)           \
  V(I64Rol, int64_t)           \
  V(F32Min, float)             \
  V(F32Max, float)             \
  V(F64Min, double)            \
  V(F64Max, double)            \
  V(I32AsmjsDivS, int32_t)     \
  V(I32AsmjsDivU, uint32_t)    \
  V(I32AsmjsRemS, int32_t)     \
  V(I32AsmjsRemU, uint32_t)    \
  V(F32CopySign, Float32)      \
  V(F64CopySign, Float64)

#define FOREACH_I32CONV_FLOATOP(V)   \
  V(I32SConvertF32, int32_t, float)  \
  V(I32SConvertF64, int32_t, double) \
  V(I32UConvertF32, uint32_t, float) \
  V(I32UConvertF64, uint32_t, double)

#define FOREACH_OTHER_UNOP(V)    \
  V(I32Clz, uint32_t)            \
  V(I32Ctz, uint32_t)            \
  V(I32Popcnt, uint32_t)         \
  V(I32Eqz, uint32_t)            \
  V(I64Clz, uint64_t)            \
  V(I64Ctz, uint64_t)            \
  V(I64Popcnt, uint64_t)         \
  V(I64Eqz, uint64_t)            \
  V(F32Abs, Float32)             \
  V(F32Neg, Float32)             \
  V(F32Ceil, float)              \
  V(F32Floor, float)             \
  V(F32Trunc, float)             \
  V(F32NearestInt, float)        \
  V(F64Abs, Float64)             \
  V(F64Neg, Float64)             \
  V(F64Ceil, double)             \
  V(F64Floor, double)            \
  V(F64Trunc, double)            \
  V(F64NearestInt, double)       \
  V(I32ConvertI64, int64_t)      \
  V(I64SConvertF32, float)       \
  V(I64SConvertF64, double)      \
  V(I64UConvertF32, float)       \
  V(I64UConvertF64, double)      \
  V(I64SConvertI32, int32_t)     \
  V(I64UConvertI32, uint32_t)    \
  V(F32SConvertI32, int32_t)     \
  V(F32UConvertI32, uint32_t)    \
  V(F32SConvertI64, int64_t)     \
  V(F32UConvertI64, uint64_t)    \
  V(F32ConvertF64, double)       \
  V(F32ReinterpretI32, int32_t)  \
  V(F64SConvertI32, int32_t)     \
  V(F64UConvertI32, uint32_t)    \
  V(F64SConvertI64, int64_t)     \
  V(F64UConvertI64, uint64_t)    \
  V(F64ConvertF32, float)        \
  V(F64ReinterpretI64, int64_t)  \
  V(I32AsmjsSConvertF32, float)  \
  V(I32AsmjsUConvertF32, float)  \
  V(I32AsmjsSConvertF64, double) \
  V(I32AsmjsUConvertF64, double) \
  V(F32Sqrt, float)              \
  V(F64Sqrt, double)

namespace {

constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
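
// kFloat32SignBitMask is 0x80000000 and kFloat64SignBitMask is
// 0x8000000000000000: each has only the IEEE-754 sign bit set.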

int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}
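
// Note: per the wasm spec, signed division traps both on a zero divisor and
// on the single overflowing case INT32_MIN / -1, whose quotient 2^31 is not
// representable in int32_t; INT32_MIN % -1 is instead defined to be 0, which
// is why the remainder helpers above special-case b == -1 rather than trap.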

uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
  return a << (b & 0x1F);
}

uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
  return a >> (b & 0x1F);
}

int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
  return a >> (b & 0x1F);
}

int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}

uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
  return a << (b & 0x3F);
}

uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
  return a >> (b & 0x3F);
}

int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
  return a >> (b & 0x3F);
}
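
// Wasm defines shift (and rotate) counts modulo the bit width of the operand,
// hence the `& 0x1F` / `& 0x3F` masks: e.g. an i32 shift by 33 behaves like a
// shift by 1. Masking also keeps the C++ shifts well-defined.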

uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
  return (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F));
}

uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
  return (a << (b & 0x1F)) | (a >> ((32 - b) & 0x1F));
}

uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
  return (a >> (b & 0x3F)) | (a << ((64 - b) & 0x3F));
}

uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
  return (a << (b & 0x3F)) | (a >> ((64 - b) & 0x3F));
}

float ExecuteF32Min(float a, float b, TrapReason* trap) { return JSMin(a, b); }

float ExecuteF32Max(float a, float b, TrapReason* trap) { return JSMax(a, b); }

Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
  return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
                           (b.get_bits() & kFloat32SignBitMask));
}

double ExecuteF64Min(double a, double b, TrapReason* trap) {
  return JSMin(a, b);
}

double ExecuteF64Max(double a, double b, TrapReason* trap) {
  return JSMax(a, b);
}

Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
  return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
                           (b.get_bits() & kFloat64SignBitMask));
}
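
// Min/max delegate to JSMin/JSMax, which implement the semantics shared with
// JS Math.min/max: any NaN operand yields NaN, and -0 orders before +0.
// CopySign works on the raw bit patterns so NaN payloads pass through intact.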

int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::min();
  }
  return a / b;
}

uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  return a / b;
}

int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1) return 0;
  return a % b;
}

uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  return a % b;
}

int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
  return DoubleToInt32(a);
}

uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
  return DoubleToUint32(a);
}

int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
  return DoubleToInt32(a);
}

uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
  return DoubleToUint32(a);
}
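
// The I32Asmjs* variants never write to *trap: asm.js semantics require that
// division and remainder by zero produce 0, that INT32_MIN / -1 wraps to
// INT32_MIN, and that out-of-range float-to-int conversions truncate like the
// JS ToInt32/ToUint32 operations (via DoubleToInt32/DoubleToUint32).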

int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros(val);
}

uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros(val);
}

uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
  return base::bits::CountPopulation(val);
}

uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros(val);
}

uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros(val);
}

int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
  return base::bits::CountPopulation(val);
}

int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
  return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
}

Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
  return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
}

float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }

float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }

float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }

float ExecuteF32NearestInt(float a, TrapReason* trap) { return nearbyintf(a); }

float ExecuteF32Sqrt(float a, TrapReason* trap) {
  float result = sqrtf(a);
  return result;
}

Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
  return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
}

Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
  return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
}

double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }

double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }

double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }

double ExecuteF64NearestInt(double a, TrapReason* trap) { return nearbyint(a); }

double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }

template <typename int_type, typename float_type>
int_type ExecuteConvert(float_type a, TrapReason* trap) {
  if (is_inbounds<int_type>(a)) {
    return static_cast<int_type>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}

template <typename dst_type, typename src_type, void (*fn)(Address)>
dst_type CallExternalIntToFloatFunction(src_type input) {
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  fn(data_addr);
  return ReadUnalignedValue<dst_type>(data_addr);
}

template <typename dst_type, typename src_type>
dst_type ConvertFloatToIntOrTrap(src_type input, TrapReason* trap) {
  if (base::IsValueInRangeForNumericType<dst_type>(input)) {
    return static_cast<dst_type>(input);
  } else {
    *trap = kTrapFloatUnrepresentable;
    return 0;
  }
}
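
// CallExternalIntToFloatFunction marshals the input through a small on-stack
// buffer because the external-reference wrappers (e.g.
// uint64_to_float32_wrapper below) take a single untyped Address: the input
// is written into the buffer, the wrapper converts it in place, and the
// result is read back with the destination type.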

uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<uint32_t>(a & 0xFFFFFFFF);
}

int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
  return ConvertFloatToIntOrTrap<int64_t, float>(a, trap);
}

int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
  return ConvertFloatToIntOrTrap<int64_t, double>(a, trap);
}

uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
  return ConvertFloatToIntOrTrap<uint64_t, float>(a, trap);
}

uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
  return ConvertFloatToIntOrTrap<uint64_t, double>(a, trap);
}

int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<int64_t>(a);
}

int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<uint64_t>(a);
}

float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
  return CallExternalIntToFloatFunction<float, uint64_t,
                                        uint64_to_float32_wrapper>(a);
}

float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return DoubleToFloat32(a);
}

Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return Float32::FromBits(a);
}

double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
  return CallExternalIntToFloatFunction<double, uint64_t,
                                        uint64_to_float64_wrapper>(a);
}

double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}

Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return Float64::FromBits(a);
}

int32_t ExecuteI32ReinterpretF32(WasmValue a) {
  return a.to_f32_boxed().get_bits();
}

int64_t ExecuteI64ReinterpretF64(WasmValue a) {
  return a.to_f64_boxed().get_bits();
}

constexpr int32_t kCatchAllExceptionIndex = -1;
constexpr int32_t kRethrowOrDelegateExceptionIndex = -2;

}  // namespace

class SideTable;

// Code and metadata needed to execute a function.
struct InterpreterCode {
  const WasmFunction* function;  // wasm function
  BodyLocalDecls locals;         // local declarations
  const byte* start;             // start of code
  const byte* end;               // end of code
  SideTable* side_table;         // precomputed side table for control flow

  const byte* at(pc_t pc) { return start + pc; }
};

// A helper class to compute the control transfers for each bytecode offset.
// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
// be directly executed without the need to dynamically track blocks.
class SideTable : public ZoneObject {
 public:
  ControlTransferMap map_;
  // Map rethrow instructions to the catch block index they target.
  ZoneMap<pc_t, int> rethrow_map_;
  int32_t max_stack_height_ = 0;
  int32_t max_control_stack_height = 0;

  SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
      : map_(zone), rethrow_map_(zone) {
    // Create a zone for all temporary objects.
    Zone control_transfer_zone(zone->allocator(), ZONE_NAME);

    // Represents a control flow label.
    class CLabel : public ZoneObject {
      friend Zone;

      explicit CLabel(Zone* zone, int32_t target_stack_height, uint32_t arity)
          : catch_targets(zone),
            target_stack_height(target_stack_height),
            arity(arity),
            refs(zone) {
        DCHECK_LE(0, target_stack_height);
      }

     public:
      struct Ref {
        const byte* from_pc;
        const int32_t stack_height;
      };
      struct CatchTarget {
        int exception_index;
        int target_control_index;
        const byte* pc;
      };
      const byte* target = nullptr;
      ZoneVector<CatchTarget> catch_targets;
      int32_t target_stack_height;
      // Arity when branching to this label.
      const uint32_t arity;
      ZoneVector<Ref> refs;

      static CLabel* New(Zone* zone, int32_t stack_height, uint32_t arity) {
        return zone->New<CLabel>(zone, stack_height, arity);
      }

      // Bind this label to the given PC.
      void Bind(const byte* pc) {
        DCHECK_NULL(target);
        target = pc;
      }

      void Bind(const byte* pc, int exception_index,
                int target_control_index) {
        catch_targets.push_back({exception_index, target_control_index, pc});
      }

      // Reference this label from the given location.
      void Ref(const byte* from_pc, int32_t stack_height) {
        // Target being bound before a reference means this is a loop.
        DCHECK_IMPLIES(target, *target == kExprLoop);
        refs.push_back({from_pc, stack_height});
      }

      void Finish(ControlTransferMap* map, const byte* start) {
        DCHECK_EQ(!!target, catch_targets.empty());
        for (auto ref : refs) {
          size_t offset = static_cast<size_t>(ref.from_pc - start);
          DCHECK_GE(ref.stack_height, target_stack_height);
          spdiff_t spdiff =
              static_cast<spdiff_t>(ref.stack_height - target_stack_height);
          if (target) {
            auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
            TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n",
                  offset, pcdiff, ref.stack_height, target_stack_height,
                  spdiff);
            ControlTransferEntry& entry = (map->map)[offset];
            entry.pc_diff = pcdiff;
            entry.sp_diff = spdiff;
            entry.target_arity = arity;
          } else {
            Zone* zone = map->catch_map.get_allocator().zone();
            auto p = map->catch_map.emplace(
                offset, ZoneVector<CatchControlTransferEntry>(zone));
            auto& catch_entries = p.first->second;
            for (auto& p : catch_targets) {
              auto pcdiff = static_cast<pcdiff_t>(p.pc - ref.from_pc);
              TRACE(
                  "control transfer @%zu: Δpc %d, stack %u->%u, exn: %d = "
                  "-%u\n",
                  offset, pcdiff, ref.stack_height, target_stack_height,
                  p.exception_index, spdiff);
              CatchControlTransferEntry entry;
              entry.pc_diff = pcdiff;
              entry.sp_diff = spdiff;
              entry.target_arity = arity;
              entry.exception_index = p.exception_index;
              entry.target_control_index = p.target_control_index;
              catch_entries.emplace_back(entry);
            }
          }
        }
      }
    };

    // An entry in the control stack.
    struct Control {
      const byte* pc;
      CLabel* end_label;
      CLabel* else_label;
      // Arity (number of values on the stack) when exiting this control
      // structure via |end|.
      uint32_t exit_arity;
      // Track whether this block was already left, i.e. all further
      // instructions are unreachable.
      bool unreachable = false;
      // Whether this is a try...unwind...end block. Needed to handle the
      // implicit rethrow when we reach the end of the block.
      bool unwind = false;

      Control(const byte* pc, CLabel* end_label, CLabel* else_label,
              uint32_t exit_arity)
          : pc(pc),
            end_label(end_label),
            else_label(else_label),
            exit_arity(exit_arity) {}
      Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
          : Control(pc, end_label, nullptr, exit_arity) {}

      void Finish(ControlTransferMap* map, const byte* start) {
        end_label->Finish(map, start);
        if (else_label) else_label->Finish(map, start);
      }
    };

    // Compute the ControlTransfer map.
    // This algorithm maintains a stack of control constructs similar to the
    // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
    // bytecodes with their target, as well as determining whether the current
    // bytecodes are within the true or false block of an else.
    ZoneVector<Control> control_stack(&control_transfer_zone);
    // It also maintains a stack of all nested {try} blocks to resolve local
    // handler targets for potentially throwing operations. This stack contains
    // indices into the above control stack.
    ZoneVector<size_t> exception_stack(zone);
    int32_t stack_height = 0;
    uint32_t func_arity =
        static_cast<uint32_t>(code->function->sig->return_count());
    CLabel* func_label =
        CLabel::New(&control_transfer_zone, stack_height, func_arity);
    control_stack.emplace_back(code->start, func_label, func_arity);
    auto control_parent = [&]() -> Control& {
      DCHECK_LE(2, control_stack.size());
      return control_stack[control_stack.size() - 2];
    };
    auto copy_unreachable = [&] {
      control_stack.back().unreachable = control_parent().unreachable;
    };
    int max_exception_arity = 0;
    if (module) {
      for (auto& exception : module->exceptions) {
        max_exception_arity =
            std::max(max_exception_arity,
                     static_cast<int>(exception.sig->parameter_count()));
      }
    }
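
    // max_exception_arity bounds how many values a caught exception can push;
    // in the loop below it is added to the exceptional stack height so that
    // max_stack_height_ reserves room for unpacking the largest exception.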
    for (BytecodeIterator i(code->start, code->end, &code->locals);
         i.has_next(); i.next()) {
      WasmOpcode opcode = i.current();
      int32_t exceptional_stack_height = 0;
      if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
      bool unreachable = control_stack.back().unreachable;
      if (unreachable) {
        TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode));
      } else {
        auto stack_effect =
            StackEffect(module, code->function->sig, i.pc(), i.end());
        TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
              stack_effect.second);
        DCHECK_GE(stack_height, stack_effect.first);
        DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
                                  stack_effect.first + stack_effect.second);
        exceptional_stack_height = stack_height - stack_effect.first;
        stack_height = stack_height - stack_effect.first + stack_effect.second;
        if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
      }
      if (!exception_stack.empty() && WasmOpcodes::IsThrowingOpcode(opcode)) {
        // Record exceptional control flow from potentially throwing opcodes to
        // the local handler if one is present. The stack height at the throw
        // point is assumed to have popped all operands and not pushed any yet.
        DCHECK_GE(control_stack.size() - 1, exception_stack.back());
        const Control* c = &control_stack[exception_stack.back()];
        if (!unreachable) c->else_label->Ref(i.pc(), exceptional_stack_height);
        if (exceptional_stack_height + max_exception_arity >
            max_stack_height_) {
          max_stack_height_ = exceptional_stack_height + max_exception_arity;
        }
        TRACE("handler @%u: %s -> try @%u\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode),
              static_cast<uint32_t>(c->pc - code->start));
      }
      max_control_stack_height = std::max(
          max_control_stack_height, static_cast<int>(control_stack.size()));
      switch (opcode) {
        case kExprBlock:
        case kExprLoop: {
          bool is_loop = opcode == kExprLoop;
          BlockTypeImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &i, i.pc() + 1, module);
          if (imm.type == kWasmBottom) {
            imm.sig = module->signature(imm.sig_index);
          }
          TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
                is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= static_cast<int32_t>(imm.in_arity()));
          int32_t target_stack_height = stack_height - imm.in_arity();
          // The stack may underflow in unreachable code. In this case the
          // stack height is clamped at 0.
          if (V8_UNLIKELY(target_stack_height < 0)) target_stack_height = 0;
          CLabel* label =
              CLabel::New(&control_transfer_zone, target_stack_height,
                          is_loop ? imm.in_arity() : imm.out_arity());
          control_stack.emplace_back(i.pc(), label, imm.out_arity());
          copy_unreachable();
          if (is_loop) label->Bind(i.pc());
          break;
        }
        case kExprIf: {
          BlockTypeImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &i, i.pc() + 1, module);
          if (imm.type == kWasmBottom) {
            imm.sig = module->signature(imm.sig_index);
          }
          TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
                imm.in_arity(), imm.out_arity());
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= static_cast<int32_t>(imm.in_arity()));
          int32_t target_stack_height = stack_height - imm.in_arity();
          // The stack may underflow in unreachable code. In this case the
          // stack height is clamped at 0.
          if (V8_UNLIKELY(target_stack_height < 0)) target_stack_height = 0;
          CLabel* end_label = CLabel::New(&control_transfer_zone,
                                          target_stack_height,
                                          imm.out_arity());
          CLabel* else_label =
              CLabel::New(&control_transfer_zone, stack_height, 0);
          control_stack.emplace_back(i.pc(), end_label, else_label,
                                     imm.out_arity());
          copy_unreachable();
          if (!unreachable) else_label->Ref(i.pc(), stack_height);
          break;
        }
        case kExprElse: {
          TRACE("control @%u: Else\n", i.pc_offset());
          Control* c = &control_stack.back();
          DCHECK_EQ(*c->pc, kExprIf);
          copy_unreachable();
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
          DCHECK_NOT_NULL(c->else_label);
          c->else_label->Bind(i.pc() + 1);
          c->else_label->Finish(&map_, code->start);
          stack_height = c->else_label->target_stack_height;
          c->else_label = nullptr;
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
          break;
        }
        case kExprCatchAll: {
          TRACE("control @%u: CatchAll\n", i.pc_offset());
          Control* c = &control_stack.back();
          DCHECK_EQ(*c->pc, kExprTry);
          if (!exception_stack.empty() &&
              exception_stack.back() == control_stack.size() - 1) {
            // Only pop the exception stack if this is the only catch handler.
            exception_stack.pop_back();
          }
          copy_unreachable();
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
          DCHECK_NOT_NULL(c->else_label);
          int control_index = static_cast<int>(control_stack.size()) - 1;
          c->else_label->Bind(i.pc() + 1, kCatchAllExceptionIndex,
                              control_index);
          c->else_label->Finish(&map_, code->start);
          c->else_label = nullptr;
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
          stack_height = c->end_label->target_stack_height;
          break;
        }
        case kExprUnwind: {
          TRACE("control @%u: Unwind\n", i.pc_offset());
          Control* c = &control_stack.back();
          DCHECK_EQ(*c->pc, kExprTry);
          DCHECK(!exception_stack.empty());
          DCHECK_EQ(exception_stack.back(), control_stack.size() - 1);
          exception_stack.pop_back();
          copy_unreachable();
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
          DCHECK_NOT_NULL(c->else_label);
          int control_index = static_cast<int>(control_stack.size()) - 1;
          c->else_label->Bind(i.pc() + 1, kCatchAllExceptionIndex,
                              control_index);
          c->else_label->Finish(&map_, code->start);
          c->else_label = nullptr;
          c->unwind = true;
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
          stack_height = c->end_label->target_stack_height;
          break;
        }
        case kExprTry: {
          BlockTypeImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &i, i.pc() + 1, module);
          if (imm.type == kWasmBottom) {
            imm.sig = module->signature(imm.sig_index);
          }
          TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
                imm.in_arity(), imm.out_arity());
          CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
                                          imm.out_arity());
          CLabel* catch_label =
              CLabel::New(&control_transfer_zone, stack_height, 0);
          control_stack.emplace_back(i.pc(), end_label, catch_label,
                                     imm.out_arity());
          exception_stack.push_back(control_stack.size() - 1);
          copy_unreachable();
          break;
        }
        case kExprRethrow: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
          int index = static_cast<int>(control_stack.size()) - 1 - imm.depth;
          rethrow_map_.emplace(i.pc() - i.start(), index);
          break;
        }
        case kExprCatch: {
          if (!exception_stack.empty() &&
              exception_stack.back() == control_stack.size() - 1) {
            // Only pop the exception stack once when we enter the first catch.
            exception_stack.pop_back();
          }
          ExceptionIndexImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
          Control* c = &control_stack.back();
          copy_unreachable();
          TRACE("control @%u: Catch\n", i.pc_offset());
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);

          DCHECK_NOT_NULL(c->else_label);
          int control_index = static_cast<int>(control_stack.size()) - 1;
          c->else_label->Bind(i.pc() + imm.length + 1, imm.index,
                              control_index);

          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
          const FunctionSig* exception_sig = module->exceptions[imm.index].sig;
          int catch_in_arity =
              static_cast<int>(exception_sig->parameter_count());
          stack_height = c->end_label->target_stack_height + catch_in_arity;
          break;
        }
        case kExprEnd: {
          Control* c = &control_stack.back();
          TRACE("control @%u: End\n", i.pc_offset());
          // Only loops have bound labels.
          DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
          if (!c->end_label->target) {
            if (c->else_label) {
              if (*c->pc == kExprIf) {
                // Bind else label for one-armed if.
                c->else_label->Bind(i.pc());
              } else if (!exception_stack.empty()) {
                // No catch_all block, prepare for implicit rethrow.
                DCHECK_EQ(*c->pc, kExprTry);
                Control* next_try_block =
                    &control_stack[exception_stack.back()];
                constexpr int kUnusedControlIndex = -1;
                c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
                                    kUnusedControlIndex);
                if (!unreachable) {
                  next_try_block->else_label->Ref(
                      i.pc(), c->else_label->target_stack_height);
                }
              }
            } else if (c->unwind) {
              DCHECK_EQ(*c->pc, kExprTry);
              rethrow_map_.emplace(i.pc() - i.start(),
                                   static_cast<int>(control_stack.size()) - 1);
              if (!exception_stack.empty()) {
                Control* next_try_block =
                    &control_stack[exception_stack.back()];
                if (!unreachable) {
                  next_try_block->else_label->Ref(i.pc(), stack_height);
                }
              }
            }
            c->end_label->Bind(i.pc() + 1);
          }
          c->Finish(&map_, code->start);

          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
          stack_height = c->end_label->target_stack_height + c->exit_arity;
          control_stack.pop_back();
          break;
        }
        case kExprDelegate: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
          TRACE("control @%u: Delegate[depth=%u]\n", i.pc_offset(), imm.depth);
          Control* c = &control_stack.back();
          const size_t new_stack_size = control_stack.size() - 1;
          const size_t max_depth = new_stack_size - 1;
          if (imm.depth < max_depth) {
            constexpr int kUnusedControlIndex = -1;
            c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
                                kUnusedControlIndex);
            c->else_label->Finish(&map_, code->start);
            Control* target = &control_stack[max_depth - imm.depth];
            DCHECK_EQ(*target->pc, kExprTry);
            DCHECK_NOT_NULL(target->else_label);
            if (!unreachable) {
              target->else_label->Ref(i.pc(),
                                      c->end_label->target_stack_height);
            }
          }
          c->else_label = nullptr;
          c->end_label->Bind(i.pc() + imm.length + 1);
          c->Finish(&map_, code->start);

          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
          stack_height = c->end_label->target_stack_height + c->exit_arity;
          control_stack.pop_back();
          exception_stack.pop_back();
          break;
        }
        case kExprBr: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
          TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
          Control* c = &control_stack[control_stack.size() - imm.depth - 1];
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
          break;
        }
        case kExprBrIf: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
          TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
          Control* c = &control_stack[control_stack.size() - imm.depth - 1];
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
          break;
        }
        case kExprBrTable: {
          BranchTableImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
          BranchTableIterator<Decoder::kNoValidation> iterator(&i, imm);
          TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
                imm.table_count);
          if (!unreachable) {
            while (iterator.has_next()) {
              uint32_t j = iterator.cur_index();
              uint32_t target = iterator.next();
              Control* c = &control_stack[control_stack.size() - target - 1];
              c->end_label->Ref(i.pc() + j, stack_height);
            }
          }
          break;
        }
        default:
          break;
      }
      if (WasmOpcodes::IsUnconditionalJump(opcode)) {
        control_stack.back().unreachable = true;
      }
    }
    DCHECK_EQ(0, control_stack.size());
    DCHECK_EQ(func_arity, stack_height);
  }
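
  // Lookup helpers over the computed side table: {HasEntryAt} and {Lookup}
  // query the branch-target map, {HasCatchEntryAt} the catch handler map.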
  bool HasEntryAt(pc_t from) {
    auto result = map_.map.find(from);
    return result != map_.map.end();
  }

  bool HasCatchEntryAt(pc_t from) {
    auto result = map_.catch_map.find(from);
    return result != map_.catch_map.end();
  }

  ControlTransferEntry& Lookup(pc_t from) {
    auto result = map_.map.find(from);
    DCHECK(result != map_.map.end());
    return result->second;
  }
};
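
// How the interpreter consults the side table at runtime (a sketch; see
// DoBreak() and HandleException() further below):
//   ControlTransferEntry& entry = code->side_table->Lookup(pc);
//   pc += entry.pc_diff;  // after dropping entry.sp_diff stack values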

// The main storage for interpreter code. It maps {WasmFunction} to the
// metadata needed to execute each function.
class CodeMap {
  Zone* zone_;
  const WasmModule* module_;
  ZoneVector<InterpreterCode> interpreter_code_;

 public:
  CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
      : zone_(zone), module_(module), interpreter_code_(zone) {
    if (module == nullptr) return;
    interpreter_code_.reserve(module->functions.size());
    for (const WasmFunction& function : module->functions) {
      if (function.imported) {
        DCHECK(!function.code.is_set());
        AddFunction(&function, nullptr, nullptr);
      } else {
        AddFunction(&function, module_start + function.code.offset(),
                    module_start + function.code.end_offset());
      }
    }
  }

  const WasmModule* module() const { return module_; }

  InterpreterCode* GetCode(const WasmFunction* function) {
    InterpreterCode* code = GetCode(function->func_index);
    DCHECK_EQ(function, code->function);
    return code;
  }

  InterpreterCode* GetCode(uint32_t function_index) {
    DCHECK_LT(function_index, interpreter_code_.size());
    return Preprocess(&interpreter_code_[function_index]);
  }

  InterpreterCode* Preprocess(InterpreterCode* code) {
    DCHECK_EQ(code->function->imported, code->start == nullptr);
    if (!code->side_table && code->start) {
      // Compute the control targets map and the local declarations.
      code->side_table = zone_->New<SideTable>(zone_, module_, code);
    }
    return code;
  }

  void AddFunction(const WasmFunction* function, const byte* code_start,
                   const byte* code_end) {
    InterpreterCode code = {function, BodyLocalDecls(zone_), code_start,
                            code_end, nullptr};

    DCHECK_EQ(interpreter_code_.size(), function->func_index);
    interpreter_code_.push_back(code);
  }

  void SetFunctionCode(const WasmFunction* function, const byte* start,
                       const byte* end) {
    DCHECK_LT(function->func_index, interpreter_code_.size());
    InterpreterCode* code = &interpreter_code_[function->func_index];
    DCHECK_EQ(function, code->function);
    code->start = const_cast<byte*>(start);
    code->end = const_cast<byte*>(end);
    code->side_table = nullptr;
    Preprocess(code);
  }
};
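
// Typical usage (a sketch, assuming a decoded module and its wire bytes):
//   CodeMap codemap(module, wire_bytes_start, zone);
//   InterpreterCode* code = codemap.GetCode(func_index);
// The side table of a function is built lazily by Preprocess() on first use.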

namespace {

struct CallResult {
  enum Type {
    // The function should be executed inside this interpreter.
    INTERNAL,
    // For indirect calls: Table or function does not exist.
    INVALID_FUNC,
    // For indirect calls: Signature does not match expected signature.
    SIGNATURE_MISMATCH
  };

  Type type;
  // If type is INTERNAL, this field holds the function to call internally.
  InterpreterCode* interpreter_code;

  CallResult(Type type) : type(type) {  // NOLINT
    DCHECK_NE(INTERNAL, type);
  }
  CallResult(Type type, InterpreterCode* code)
      : type(type), interpreter_code(code) {
    DCHECK_EQ(INTERNAL, type);
  }
};

// Like a static_cast from src to dst, but specialized for boxed floats.
template <typename dst, typename src>
struct converter {
  dst operator()(src val) const { return static_cast<dst>(val); }
};
template <>
struct converter<Float64, uint64_t> {
  Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
};
template <>
struct converter<Float32, uint32_t> {
  Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
};
template <>
struct converter<uint64_t, Float64> {
  uint64_t operator()(Float64 val) const { return val.get_bits(); }
};
template <>
struct converter<uint32_t, Float32> {
  uint32_t operator()(Float32 val) const { return val.get_bits(); }
};

template <typename T>
V8_INLINE bool has_nondeterminism(T val) {
  static_assert(!std::is_floating_point<T>::value, "missing specialization");
  return false;
}
template <>
V8_INLINE bool has_nondeterminism<float>(float val) {
  return std::isnan(val);
}
template <>
V8_INLINE bool has_nondeterminism<double>(double val) {
  return std::isnan(val);
}

}  // namespace
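
// Note: has_nondeterminism() is what the execution loop (not shown here) uses
// to set the possible_nondeterminism_ flag queried below; NaN results from
// float operations are the source it tracks.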

//============================================================================
// The implementation details of the interpreter.
//============================================================================
class WasmInterpreterInternals {
 public:
  WasmInterpreterInternals(Zone* zone, const WasmModule* module,
                           const ModuleWireBytes& wire_bytes,
                           Handle<WasmInstanceObject> instance_object)
      : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
        codemap_(module, module_bytes_.data(), zone),
        isolate_(instance_object->GetIsolate()),
        instance_object_(instance_object),
        reference_stack_(isolate_->global_handles()->Create(
            ReadOnlyRoots(isolate_).empty_fixed_array())),
        frames_(zone) {}

  ~WasmInterpreterInternals() {
    isolate_->global_handles()->Destroy(reference_stack_.location());
  }

  WasmInterpreter::State state() { return state_; }

  void InitFrame(const WasmFunction* function, WasmValue* args) {
    DCHECK(frames_.empty());
    InterpreterCode* code = codemap_.GetCode(function);
    size_t num_params = function->sig->parameter_count();
    EnsureStackSpace(num_params);
    Push(args, num_params);
    PushFrame(code);
  }

  WasmInterpreter::State Run(int num_steps = -1) {
    DCHECK(state_ == WasmInterpreter::STOPPED ||
           state_ == WasmInterpreter::PAUSED);
    DCHECK(num_steps == -1 || num_steps > 0);
    if (num_steps == -1) {
      TRACE("  => Run()\n");
    } else if (num_steps == 1) {
      TRACE("  => Step()\n");
    } else {
      TRACE("  => Run(%d)\n", num_steps);
    }
    state_ = WasmInterpreter::RUNNING;
    Execute(frames_.back().code, frames_.back().pc, num_steps);
    // If state_ is STOPPED, the stack must be fully unwound.
    DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED, frames_.empty());
    return state_;
  }
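
  // A minimal driving sketch (assuming an initialized instance; the exact
  // state transitions depend on Execute(), which is not shown here):
  //   internals.InitFrame(&module->functions[index], args);
  //   WasmInterpreter::State state = internals.Run();  // or Run(1) to step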

  void Pause() { UNIMPLEMENTED(); }

  void Reset() {
    TRACE("----- RESET -----\n");
    ResetStack(0);
    frames_.clear();
    state_ = WasmInterpreter::STOPPED;
    trap_reason_ = kTrapCount;
    possible_nondeterminism_ = false;
  }

  WasmValue GetReturnValue(uint32_t index) {
    if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
    DCHECK_EQ(WasmInterpreter::FINISHED, state_);
    return GetStackValue(index);
  }

  WasmValue GetStackValue(sp_t index) {
    DCHECK_GT(StackHeight(), index);
    return stack_[index].ExtractValue(this, index);
  }

  void SetStackValue(sp_t index, WasmValue value) {
    DCHECK_GT(StackHeight(), index);
    stack_[index] = StackValue(value, this, index);
  }

  TrapReason GetTrapReason() { return trap_reason_; }

  bool PossibleNondeterminism() const { return possible_nondeterminism_; }

  uint64_t NumInterpretedCalls() const { return num_interpreted_calls_; }

  CodeMap* codemap() { return &codemap_; }

 private:
  // Handle a thrown exception. Returns whether the exception was handled
  // inside of wasm. Unwinds the interpreted stack accordingly.
  WasmInterpreter::ExceptionHandlingResult HandleException(Isolate* isolate) {
    DCHECK(isolate->has_pending_exception());
    bool catchable =
        isolate->is_catchable_by_wasm(isolate->pending_exception());
    while (!frames_.empty()) {
      Frame& frame = frames_.back();
      InterpreterCode* code = frame.code;
      if (catchable && code->side_table->HasCatchEntryAt(frame.pc)) {
        TRACE("----- HANDLE -----\n");
        HandleScope scope(isolate_);
        Handle<Object> exception =
            handle(isolate->pending_exception(), isolate);
        if (JumpToHandlerDelta(code, exception, &frame.pc)) {
          isolate->clear_pending_exception();
          TRACE("  => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
                code->function->func_index, frame.pc);
          return WasmInterpreter::HANDLED;
        } else {
          TRACE("  => no handler #%zu (#%u @%zu)\n", frames_.size() - 1,
                code->function->func_index, frame.pc);
        }
      }
      TRACE("  => drop frame #%zu (#%u @%zu)\n", frames_.size() - 1,
            code->function->func_index, frame.pc);
      ResetStack(frame.sp);
      if (!frame.caught_exception_stack.is_null()) {
        isolate_->global_handles()->Destroy(
            frame.caught_exception_stack.location());
      }
      frames_.pop_back();
    }
    TRACE("----- UNWIND -----\n");
    DCHECK(frames_.empty());
    DCHECK_EQ(sp_, stack_.get());
    state_ = WasmInterpreter::STOPPED;
    return WasmInterpreter::UNWOUND;
  }
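
  // Note: a frame's {caught_exception_stack} (see {Frame} below) is created
  // lazily in JumpToHandlerDelta() and destroyed either above during
  // unwinding or in DoReturn().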

  // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;
    pc_t pc;
    sp_t sp;

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
    sp_t llimit() { return plimit() + code->locals.type_list.size(); }

    Handle<FixedArray> caught_exception_stack;
  };

  // Safety wrapper for values on the operand stack represented as {WasmValue}.
  // Most values are stored directly on the stack, only reference values are
  // kept in a separate on-heap reference stack to make the GC trace them.
  // TODO(wasm): Optimize simple stack operations (like "get_local",
  // "set_local", and "tee_local") so that they don't require a handle scope.
  class StackValue {
   public:
    StackValue() = default;  // Only needed for resizing the stack.
    StackValue(WasmValue v, WasmInterpreterInternals* impl, sp_t index)
        : value_(v) {
      if (IsReferenceValue()) {
        value_ = WasmValue(Handle<Object>::null(), value_.type());
        int ref_index = static_cast<int>(index);
        impl->reference_stack_->set(ref_index, *v.to_ref());
      }
    }

    WasmValue ExtractValue(WasmInterpreterInternals* impl, sp_t index) {
      if (!IsReferenceValue()) return value_;
      DCHECK(value_.to_ref().is_null());
      int ref_index = static_cast<int>(index);
      Isolate* isolate = impl->isolate_;
      Handle<Object> ref(impl->reference_stack_->get(ref_index), isolate);
      DCHECK(!ref->IsTheHole(isolate));
      return WasmValue(ref, value_.type());
    }

    bool IsReferenceValue() const { return value_.type().is_reference(); }

    void ClearValue(WasmInterpreterInternals* impl, sp_t index) {
      if (!IsReferenceValue()) return;
      int ref_index = static_cast<int>(index);
      Isolate* isolate = impl->isolate_;
      impl->reference_stack_->set_the_hole(isolate, ref_index);
    }

    static void ClearValues(WasmInterpreterInternals* impl, sp_t index,
                            int count) {
      int ref_index = static_cast<int>(index);
      impl->reference_stack_->FillWithHoles(ref_index, ref_index + count);
    }

    static bool IsClearedValue(WasmInterpreterInternals* impl, sp_t index) {
      int ref_index = static_cast<int>(index);
      Isolate* isolate = impl->isolate_;
      return impl->reference_stack_->is_the_hole(isolate, ref_index);
    }

   private:
    WasmValue value_;
  };
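
  // Invariant maintained by StackValue: for a reference value in stack slot
  // {i}, stack_[i] holds a null handle of the correct type while
  // reference_stack_->get(i) holds the actual object, so the GC traces every
  // live reference through that FixedArray.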

  const WasmModule* module() const { return codemap_.module(); }

  void DoTrap(TrapReason trap, pc_t pc) {
    TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }

  // Check if there is room for a function's activation.
  void EnsureStackSpaceForCall(InterpreterCode* code) {
    EnsureStackSpace(code->side_table->max_stack_height_ +
                     code->locals.type_list.size());
    DCHECK_GE(StackHeight(), code->function->sig->parameter_count());
  }

  // Push a frame with arguments already on the stack.
  void PushFrame(InterpreterCode* code) {
    DCHECK_NOT_NULL(code);
    DCHECK_NOT_NULL(code->side_table);
    EnsureStackSpaceForCall(code);

    ++num_interpreted_calls_;
    size_t arity = code->function->sig->parameter_count();
    // The parameters will overlap the arguments already on the stack.
    DCHECK_GE(StackHeight(), arity);

    frames_.push_back(
        {code, 0, StackHeight() - arity, Handle<FixedArray>::null()});
    frames_.back().pc = InitLocals(code);
    TRACE("  => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
          code->function->func_index, frames_.back().pc);
  }

  pc_t InitLocals(InterpreterCode* code) {
    for (ValueType p : code->locals.type_list) {
      WasmValue val;
      switch (p.kind()) {
#define CASE_TYPE(valuetype, ctype) \
  case valuetype:                   \
    val = WasmValue(ctype{});       \
    break;
        FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
        case kOptRef: {
          val = WasmValue(isolate_->factory()->null_value(), p);
          break;
        }
        case kRef:  // TODO(7748): Implement.
        case kRtt:
        case kRttWithDepth:
        case kStmt:
        case kBottom:
        case kI8:
        case kI16:
          UNREACHABLE();
          break;
      }
      Push(val);
    }
    return code->locals.encoded_size;
  }

  void CommitPc(pc_t pc) {
    DCHECK(!frames_.empty());
    frames_.back().pc = pc;
  }

  void ReloadFromFrameOnException(Decoder* decoder, InterpreterCode** code,
                                  pc_t* pc, pc_t* limit) {
    Frame* top = &frames_.back();
    *code = top->code;
    *pc = top->pc;
    *limit = top->code->end - top->code->start;
    decoder->Reset(top->code->start, top->code->end);
  }

  int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
    return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
  }

  bool JumpToHandlerDelta(InterpreterCode* code,
                          Handle<Object> exception_object, pc_t* pc) {
    auto it = code->side_table->map_.catch_map.find(*pc);
    if (it == code->side_table->map_.catch_map.end()) {
      // No handler in this frame means that we should rethrow to the caller.
      return false;
    }
    CatchControlTransferEntry* handler = nullptr;
    for (auto& entry : it->second) {
      if (entry.exception_index < 0) {
        ResetStack(StackHeight() - entry.sp_diff);
        *pc += entry.pc_diff;
        if (entry.exception_index == kRethrowOrDelegateExceptionIndex) {
          // Recursively try to find a handler in the next enclosing try block
          // (for the implicit rethrow) or in the delegate target.
          return JumpToHandlerDelta(code, exception_object, pc);
        }
        handler = &entry;
        break;
      } else if (MatchingExceptionTag(exception_object,
                                      entry.exception_index)) {
        handler = &entry;
        const WasmException* exception =
            &module()->exceptions[entry.exception_index];
        const FunctionSig* sig = exception->sig;
        int catch_in_arity = static_cast<int>(sig->parameter_count());
        DoUnpackException(exception, exception_object);
        DoStackTransfer(entry.sp_diff + catch_in_arity, catch_in_arity);
        *pc += handler->pc_diff;
        break;
      }
    }
    if (!handler) return false;
    if (frames_.back().caught_exception_stack.is_null()) {
      Handle<FixedArray> caught_exception_stack =
          isolate_->factory()->NewFixedArray(
              code->side_table->max_control_stack_height);
      caught_exception_stack->FillWithHoles(
          0, code->side_table->max_control_stack_height);
      frames_.back().caught_exception_stack =
          isolate_->global_handles()->Create(*caught_exception_stack);
    }
    frames_.back().caught_exception_stack->set(handler->target_control_index,
                                               *exception_object);
    return true;
  }
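
  // Note on the catch map: each protected source position maps to an ordered
  // list of handler entries. Entries with a negative {exception_index}
  // represent catch_all/unwind handlers (or, for
  // kRethrowOrDelegateExceptionIndex, an implicit rethrow or delegate), while
  // non-negative indices are matched against the thrown exception's tag via
  // MatchingExceptionTag().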

  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
    ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
    DoStackTransfer(control_transfer_entry.sp_diff,
                    control_transfer_entry.target_arity);
    return control_transfer_entry.pc_diff;
  }

  pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
    switch (code->start[pc]) {
      case kExprCallFunction: {
        CallFunctionImmediate<Decoder::kNoValidation> imm(decoder,
                                                          code->at(pc + 1));
        return pc + 1 + imm.length;
      }
      case kExprCallIndirect: {
        CallIndirectImmediate<Decoder::kNoValidation> imm(
            WasmFeatures::All(), decoder, code->at(pc + 1));
        return pc + 1 + imm.length;
      }
      default:
        UNREACHABLE();
    }
  }
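
  // Pop the current frame and transfer {arity} return values to the caller.
  // Returns false if this was the last frame (execution terminates), true if
  // execution continues in the caller frame.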
  bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
                size_t arity) {
    DCHECK_GT(frames_.size(), 0);
    spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - frames_.back().sp);
    if (!frames_.back().caught_exception_stack.is_null()) {
      isolate_->global_handles()->Destroy(
          frames_.back().caught_exception_stack.location());
    }
    frames_.pop_back();
    if (frames_.empty()) {
      // A return from the last frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      DoStackTransfer(sp_diff, arity);
      TRACE("  => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      decoder->Reset((*code)->start, (*code)->end);
      *pc = ReturnPc(decoder, *code, top->pc);
      *limit = top->code->end - top->code->start;
      TRACE("  => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
            (*code)->function->func_index, *pc);
      DoStackTransfer(sp_diff, arity);
      return true;
    }
  }

  // Returns true if the call was successful, false if the stack check failed
  // and the stack was fully unwound.
  bool DoCall(Decoder* decoder, InterpreterCode** target, pc_t* pc,
              pc_t* limit) V8_WARN_UNUSED_RESULT {
    frames_.back().pc = *pc;
    PushFrame(*target);
    return DoStackCheck(decoder, target, pc, limit);
  }

  // Returns true if the tail call was successful, false if the stack check
  // failed.
  bool DoReturnCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
                    pc_t* limit) V8_WARN_UNUSED_RESULT {
    DCHECK_NOT_NULL(target);
    DCHECK_NOT_NULL(target->side_table);
    EnsureStackSpaceForCall(target);

    ++num_interpreted_calls_;

    Frame* top = &frames_.back();

    // Drop everything except current parameters.
    spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - top->sp);
    size_t arity = target->function->sig->parameter_count();

    DoStackTransfer(sp_diff, arity);

    *limit = target->end - target->start;
    decoder->Reset(target->start, target->end);

    // Rebuild current frame to look like a call to callee.
    top->code = target;
    top->pc = 0;
    top->sp = StackHeight() - arity;
    top->pc = InitLocals(target);

    *pc = top->pc;

    TRACE("  => ReturnCall #%zu (#%u @%zu)\n", frames_.size() - 1,
          target->function->func_index, top->pc);

    return true;
  }

  // Copies {arity} values on the top of the stack down the stack while also
  // dropping {sp_diff} many stack values in total from the stack.
  void DoStackTransfer(spdiff_t sp_diff, size_t arity) {
    // before: |---------------| pop_count | arity |
    //         ^ 0             ^ dest      ^ src   ^ StackHeight()
    //                         ^----< sp_diff >----^
    //
    // after:  |---------------| arity |
    //         ^ 0                     ^ StackHeight()
    sp_t stack_height = StackHeight();
    sp_t dest = stack_height - sp_diff;
    sp_t src = stack_height - arity;
    DCHECK_LE(dest, stack_height);
    DCHECK_LE(dest, src);
    if (arity && (dest != src)) {
      StackValue* stack = stack_.get();
      memmove(stack + dest, stack + src, arity * sizeof(StackValue));
      // Also move elements on the reference stack accordingly.
      reference_stack_->MoveElements(
          isolate_, static_cast<int>(dest), static_cast<int>(src),
          static_cast<int>(arity), UPDATE_WRITE_BARRIER);
    }
    ResetStack(dest + arity);
  }

  Address EffectiveAddress(uint64_t index) {
    DCHECK_GE(std::numeric_limits<uintptr_t>::max(),
              instance_object_->memory_size());
    DCHECK_GE(instance_object_->memory_size(), index);
    // Compute the effective address of the access, making sure to condition
    // the index even in the in-bounds case.
    return reinterpret_cast<Address>(instance_object_->memory_start()) +
           (index & instance_object_->memory_mask());
  }

  template <typename mtype>
  Address BoundsCheckMem(uint64_t offset, uint64_t index) {
    uint64_t effective_index = offset + index;
    if (effective_index < index) {
      return kNullAddress;  // wraparound => oob
    }
    if (!base::IsInBounds<uint64_t>(effective_index, sizeof(mtype),
                                    instance_object_->memory_size())) {
      return kNullAddress;  // oob
    }
    return EffectiveAddress(effective_index);
  }

  bool BoundsCheckMemRange(uint64_t index, uint64_t* size,
                           Address* out_address) {
    DCHECK_GE(std::numeric_limits<uintptr_t>::max(),
              instance_object_->memory_size());
    if (!base::ClampToBounds<uint64_t>(index, size,
                                       instance_object_->memory_size())) {
      return false;
    }
    *out_address = EffectiveAddress(index);
    return true;
  }

  uint64_t ToMemType(WasmValue value) {
    return module()->is_memory64 ? value.to<uint64_t>() : value.to<uint32_t>();
  }
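
  // The memory access pattern shared by the load/store handlers below:
  //   uint64_t index = ToMemType(Pop());
  //   Address addr = BoundsCheckMem<mtype>(imm.offset, index);
  //   if (!addr) { DoTrap(kTrapMemOutOfBounds, pc); return false; }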

  template <typename ctype, typename mtype>
  bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
                   int* const len, MachineRepresentation rep,
                   uint32_t prefix_len = 1) {
    // prefix_len is the length of the opcode, before the immediate. We don't
    // increment pc at the caller, because we want to keep pc to the start of
    // the operation to keep trap reporting and tracing accurate, otherwise
    // those will report at the middle of an opcode.
    MemoryAccessImmediate<Decoder::kNoValidation> imm(
        decoder, code->at(pc + prefix_len), sizeof(ctype),
        module()->is_memory64);
    uint64_t index = ToMemType(Pop());
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WasmValue result(
        converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));

    Push(result);
    *len += imm.length;

    if (FLAG_trace_wasm_memory) {
      MemoryTracingInfo info(imm.offset + index, false, rep);
      TraceMemoryOperation({}, &info, code->function->func_index,
                           static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }

  template <typename ctype, typename mtype>
  bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
                    int* const len, MachineRepresentation rep,
                    uint32_t prefix_len = 1) {
    // prefix_len is the length of the opcode, before the immediate. We don't
    // increment pc at the caller, because we want to keep pc to the start of
    // the operation to keep trap reporting and tracing accurate, otherwise
    // those will report at the middle of an opcode.
    MemoryAccessImmediate<Decoder::kNoValidation> imm(
        decoder, code->at(pc + prefix_len), sizeof(ctype),
        module()->is_memory64);
    ctype val = Pop().to<ctype>();

    uint64_t index = ToMemType(Pop());
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
    *len += imm.length;

    if (FLAG_trace_wasm_memory) {
      MemoryTracingInfo info(imm.offset + index, true, rep);
      TraceMemoryOperation({}, &info, code->function->func_index,
                           static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }

  template <typename type, typename op_type>
  bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
                             Address* address, pc_t pc, int* const len,
                             type* val = nullptr, type* val2 = nullptr) {
    MemoryAccessImmediate<Decoder::kNoValidation> imm(
        decoder, code->at(pc + *len), sizeof(type), module()->is_memory64);
    if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
    if (val) *val = static_cast<type>(Pop().to<op_type>());
    uint64_t index = ToMemType(Pop());
    *address = BoundsCheckMem<type>(imm.offset, index);
    if (!*address) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    if (!IsAligned(*address, sizeof(type))) {
      DoTrap(kTrapUnalignedAccess, pc);
      return false;
    }
    *len += imm.length;
    return true;
  }

  template <typename type>
  bool ExtractAtomicWaitNotifyParams(Decoder* decoder, InterpreterCode* code,
                                     pc_t pc, int* const len,
                                     uint64_t* buffer_offset, type* val,
                                     int64_t* timeout = nullptr) {
    // TODO(manoskouk): Introduce test which exposes wrong pc offset below.
    MemoryAccessImmediate<Decoder::kFullValidation> imm(
        decoder, code->at(pc + *len), sizeof(type), module()->is_memory64);
    if (timeout) {
      *timeout = Pop().to<int64_t>();
    }
    *val = Pop().to<type>();
    uint64_t index = ToMemType(Pop());
    // Check bounds.
    Address address = BoundsCheckMem<uint64_t>(imm.offset, index);
    *buffer_offset = index + imm.offset;
    if (!address) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    // Check alignment.
    const uint32_t align_mask = sizeof(type) - 1;
    if ((*buffer_offset & align_mask) != 0) {
      DoTrap(kTrapUnalignedAccess, pc);
      return false;
    }
    *len += imm.length;
    return true;
  }

  bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
                        InterpreterCode* code, pc_t pc, int* const len) {
    switch (opcode) {
      case kExprI32SConvertSatF32:
        Push(WasmValue(base::saturated_cast<int32_t>(Pop().to<float>())));
        return true;
      case kExprI32UConvertSatF32:
        Push(WasmValue(base::saturated_cast<uint32_t>(Pop().to<float>())));
        return true;
      case kExprI32SConvertSatF64:
        Push(WasmValue(base::saturated_cast<int32_t>(Pop().to<double>())));
        return true;
      case kExprI32UConvertSatF64:
        Push(WasmValue(base::saturated_cast<uint32_t>(Pop().to<double>())));
        return true;
      case kExprI64SConvertSatF32:
        Push(WasmValue(base::saturated_cast<int64_t>(Pop().to<float>())));
        return true;
      case kExprI64UConvertSatF32:
        Push(WasmValue(base::saturated_cast<uint64_t>(Pop().to<float>())));
        return true;
      case kExprI64SConvertSatF64:
        Push(WasmValue(base::saturated_cast<int64_t>(Pop().to<double>())));
        return true;
      case kExprI64UConvertSatF64:
        Push(WasmValue(base::saturated_cast<uint64_t>(Pop().to<double>())));
        return true;
2019-03-13 19:22:40 +00:00
|
|
|
case kExprMemoryInit: {
|
2020-09-30 09:37:50 +00:00
|
|
|
MemoryInitImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-12-02 17:47:12 +00:00
|
|
|
// The data segment index must be in bounds since it is required by
|
|
|
|
// validation.
|
2019-03-13 19:22:40 +00:00
|
|
|
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
|
2019-07-09 16:07:55 +00:00
|
|
|
*len += imm.length;
|
2020-11-23 18:42:18 +00:00
|
|
|
uint64_t size = ToMemType(Pop());
|
|
|
|
uint64_t src = ToMemType(Pop());
|
|
|
|
uint64_t dst = ToMemType(Pop());
|
2019-03-13 19:22:40 +00:00
|
|
|
Address dst_addr;
|
2020-11-23 18:42:18 +00:00
|
|
|
uint64_t src_max =
|
2019-03-13 19:22:40 +00:00
|
|
|
instance_object_->data_segment_sizes()[imm.data_segment_index];
|
2019-11-20 14:22:39 +00:00
|
|
|
if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
|
|
|
|
!base::IsInBounds(src, size, src_max)) {
|
|
|
|
DoTrap(kTrapMemOutOfBounds, pc);
|
|
|
|
return false;
|
|
|
|
}
|
2019-03-13 19:22:40 +00:00
|
|
|
Address src_addr =
|
|
|
|
instance_object_->data_segment_starts()[imm.data_segment_index] +
|
|
|
|
src;
|
2020-03-11 14:02:53 +00:00
|
|
|
std::memmove(reinterpret_cast<void*>(dst_addr),
|
|
|
|
reinterpret_cast<void*>(src_addr), size);
|
2019-11-20 14:22:39 +00:00
|
|
|
return true;
|
2019-03-13 19:22:40 +00:00
|
|
|
}
|
|
|
|
case kExprDataDrop: {
|
2020-09-30 09:37:50 +00:00
|
|
|
DataDropImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-12-02 17:47:12 +00:00
|
|
|
// The data segment index must be in bounds since it is required by
|
|
|
|
// validation.
|
|
|
|
DCHECK_LT(imm.index, module()->num_declared_data_segments);
|
2019-07-09 16:07:55 +00:00
|
|
|
*len += imm.length;
|
2019-12-02 17:47:12 +00:00
|
|
|
instance_object_->data_segment_sizes()[imm.index] = 0;
|
2019-03-13 19:22:40 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
case kExprMemoryCopy: {
|
2020-09-30 09:37:50 +00:00
|
|
|
MemoryCopyImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-07-17 15:55:23 +00:00
|
|
|
*len += imm.length;
|
2020-11-23 18:42:18 +00:00
|
|
|
uint64_t size = ToMemType(Pop());
|
|
|
|
uint64_t src = ToMemType(Pop());
|
|
|
|
uint64_t dst = ToMemType(Pop());
|
2019-03-13 19:22:40 +00:00
|
|
|
Address dst_addr;
|
2019-11-20 14:22:39 +00:00
|
|
|
Address src_addr;
|
|
|
|
if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
|
|
|
|
!BoundsCheckMemRange(src, &size, &src_addr)) {
|
|
|
|
DoTrap(kTrapMemOutOfBounds, pc);
|
|
|
|
return false;
|
2019-03-13 19:22:40 +00:00
|
|
|
}
|
2019-11-20 14:22:39 +00:00
|
|
|
|
2020-03-11 14:02:53 +00:00
|
|
|
std::memmove(reinterpret_cast<void*>(dst_addr),
|
|
|
|
reinterpret_cast<void*>(src_addr), size);
|
2019-11-20 14:22:39 +00:00
|
|
|
return true;
|
2019-03-13 19:22:40 +00:00
|
|
|
}
|
|
|
|
case kExprMemoryFill: {
|
2020-09-30 09:37:50 +00:00
|
|
|
MemoryIndexImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-07-18 16:19:47 +00:00
|
|
|
*len += imm.length;
|
2020-11-23 18:42:18 +00:00
|
|
|
uint64_t size = ToMemType(Pop());
|
|
|
|
uint32_t value = Pop().to<uint32_t>();
|
|
|
|
uint64_t dst = ToMemType(Pop());
|
2019-03-13 19:22:40 +00:00
|
|
|
Address dst_addr;
|
2020-11-23 18:42:18 +00:00
|
|
|
if (!BoundsCheckMemRange(dst, &size, &dst_addr)) {
|
2019-11-21 11:02:27 +00:00
|
|
|
DoTrap(kTrapMemOutOfBounds, pc);
|
|
|
|
return false;
|
|
|
|
}
|
2020-03-13 10:53:01 +00:00
|
|
|
std::memset(reinterpret_cast<void*>(dst_addr), value, size);
|
2019-11-21 11:02:27 +00:00
|
|
|
return true;
|
2019-03-13 19:22:40 +00:00
|
|
|
}
|
2019-04-05 19:04:37 +00:00
|
|
|
case kExprTableInit: {
|
2020-09-30 09:37:50 +00:00
|
|
|
TableInitImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-07-09 16:07:55 +00:00
|
|
|
*len += imm.length;
|
2019-11-21 11:02:27 +00:00
|
|
|
auto size = Pop().to<uint32_t>();
|
|
|
|
auto src = Pop().to<uint32_t>();
|
|
|
|
auto dst = Pop().to<uint32_t>();
|
2019-04-05 19:04:37 +00:00
|
|
|
HandleScope scope(isolate_); // Avoid leaking handles.
|
|
|
|
bool ok = WasmInstanceObject::InitTableEntries(
|
|
|
|
instance_object_->GetIsolate(), instance_object_, imm.table.index,
|
|
|
|
imm.elem_segment_index, dst, src, size);
|
|
|
|
if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
|
|
|
|
return ok;
|
|
|
|
}
|
|
|
|
case kExprElemDrop: {
|
2020-09-30 09:37:50 +00:00
|
|
|
ElemDropImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-07-09 16:07:55 +00:00
|
|
|
*len += imm.length;
|
2019-04-05 19:04:37 +00:00
|
|
|
instance_object_->dropped_elem_segments()[imm.index] = 1;
|
|
|
|
return true;
|
|
|
|
}
|
2019-03-21 15:55:09 +00:00
|
|
|
case kExprTableCopy: {
|
2020-09-30 09:37:50 +00:00
|
|
|
TableCopyImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-03-21 15:55:09 +00:00
|
|
|
auto size = Pop().to<uint32_t>();
|
|
|
|
auto src = Pop().to<uint32_t>();
|
|
|
|
auto dst = Pop().to<uint32_t>();
|
2019-04-04 11:22:49 +00:00
|
|
|
HandleScope handle_scope(isolate_); // Avoid leaking handles.
|
2019-03-21 15:55:09 +00:00
|
|
|
bool ok = WasmInstanceObject::CopyTableEntries(
|
2019-04-04 11:22:49 +00:00
|
|
|
isolate_, instance_object_, imm.table_dst.index,
|
|
|
|
imm.table_src.index, dst, src, size);
|
2019-03-21 15:55:09 +00:00
|
|
|
if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
|
2019-07-09 16:07:55 +00:00
|
|
|
*len += imm.length;
|
2019-03-21 15:55:09 +00:00
|
|
|
return ok;
|
|
|
|
}
|
2019-07-09 10:29:20 +00:00
|
|
|
case kExprTableGrow: {
|
2020-09-30 09:37:50 +00:00
|
|
|
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-07-09 10:29:20 +00:00
|
|
|
HandleScope handle_scope(isolate_);
|
|
|
|
auto table = handle(
|
|
|
|
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
|
|
|
|
isolate_);
|
|
|
|
auto delta = Pop().to<uint32_t>();
|
2021-03-04 17:37:44 +00:00
|
|
|
auto value = Pop().to_ref();
|
2019-07-09 10:29:20 +00:00
|
|
|
int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
|
|
|
|
Push(WasmValue(result));
|
2019-07-09 16:07:55 +00:00
|
|
|
*len += imm.length;
|
2019-07-09 10:29:20 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
case kExprTableSize: {
|
2020-09-30 09:37:50 +00:00
|
|
|
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
|
2021-03-09 20:47:09 +00:00
|
|
|
code->at(pc + *len));
|
2019-07-09 10:29:20 +00:00
|
|
|
HandleScope handle_scope(isolate_);
|
|
|
|
auto table = handle(
|
|
|
|
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
|
|
|
|
isolate_);
|
|
|
|
uint32_t table_size = table->current_length();
|
|
|
|
Push(WasmValue(table_size));
|
2019-07-09 16:07:55 +00:00
|
|
|
*len += imm.length;
|
2019-07-09 10:29:20 +00:00
|
|
|
return true;
|
|
|
|
}
      case kExprTableFill: {
        TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
                                                        code->at(pc + *len));
        HandleScope handle_scope(isolate_);
        auto count = Pop().to<uint32_t>();
        auto value = Pop().to_ref();
        auto start = Pop().to<uint32_t>();
        auto table = handle(
            WasmTableObject::cast(instance_object_->tables().get(imm.index)),
            isolate_);
        uint32_t table_size = table->current_length();
        if (start > table_size) {
          DoTrap(kTrapTableOutOfBounds, pc);
          return false;
        }
        // Compute how many entries would fit; if the requested range does
        // not fit completely, trap without writing anything to the table.
        uint32_t fill_count = std::min(count, table_size - start);
        if (fill_count < count) {
          DoTrap(kTrapTableOutOfBounds, pc);
          return false;
        }
        WasmTableObject::Fill(isolate_, table, start, value, fill_count);
        *len += imm.length;
        return true;
      }
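      // Illustrative example: with table_size == 10, start == 8 and
      // count == 5 only 2 entries would fit, so fill_count < count and the
      // interpreter traps without touching the table.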
      default:
        FATAL(
            "Unknown or unimplemented opcode #%d:%s", code->start[pc],
            WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(code->start[pc])));
        UNREACHABLE();
    }
    return false;
  }

  template <typename type, typename op_type, typename func>
  op_type ExecuteAtomicBinopBE(type val, Address addr, func op) {
    type old_val;
    type new_val;
    old_val = ReadUnalignedValue<type>(addr);
    do {
      new_val =
          ByteReverse(static_cast<type>(op(ByteReverse<type>(old_val), val)));
    } while (!(std::atomic_compare_exchange_strong(
        reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val)));
    return static_cast<op_type>(ByteReverse<type>(old_val));
  }
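  // Sketch of one iteration for an add of 1 on a big-endian target:
  //   old_val = raw (big-endian) bytes at addr
  //   new_val = ByteReverse(ByteReverse(old_val) + 1)  // compute natively
  //   CAS(addr, old_val, new_val)                      // retry until success
  // The returned old value is byte-reversed back to native order.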

  template <typename type>
  type AdjustByteOrder(type param) {
#if V8_TARGET_BIG_ENDIAN
    return ByteReverse(param);
#else
    return param;
#endif
  }

  bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
                       InterpreterCode* code, pc_t pc, int* const len) {
#if V8_TARGET_BIG_ENDIAN
    constexpr bool kBigEndian = true;
#else
    constexpr bool kBigEndian = false;
#endif
    WasmValue result;
    switch (opcode) {
#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op)                \
  case kExpr##name: {                                                        \
    type val;                                                                \
    Address addr;                                                            \
    op_type result;                                                          \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
                                              &val)) {                       \
      return false;                                                          \
    }                                                                        \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                  "Size mismatch for types std::atomic<" #type               \
                  ">, and " #type);                                          \
    if (kBigEndian) {                                                        \
      auto oplambda = [](type a, type b) { return a op b; };                 \
      result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda);     \
    } else {                                                                 \
      result = static_cast<op_type>(                                         \
          std::operation(reinterpret_cast<std::atomic<type>*>(addr), val));  \
    }                                                                        \
    Push(WasmValue(result));                                                 \
    break;                                                                   \
  }
      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add,
                        +);
      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub,
                        -);
      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t,
                        atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor,
                        ^);
      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange,
                        =);
      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t, atomic_exchange,
                        =);
      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
                        atomic_exchange, =);
      ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add,
                        +);
      ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add,
                        +);
      ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub,
                        -);
      ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub,
                        -);
      ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t,
                        atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t,
                        atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor,
                        ^);
      ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor,
                        ^);
      ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange,
                        =);
      ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t, atomic_exchange,
                        =);
      ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
                        atomic_exchange, =);
      ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
                        atomic_exchange, =);
#undef ATOMIC_BINOP_CASE
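      // Note on the exchange cases: passing `=` as the `op` argument turns
      // the big-endian lambda body into `a = b`, which evaluates to the new
      // value, so the generic binop path degenerates into an exchange while
      // still returning the old value.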
#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type)                    \
  case kExpr##name: {                                                        \
    type old_val;                                                            \
    type new_val;                                                            \
    Address addr;                                                            \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
                                              &old_val, &new_val)) {         \
      return false;                                                          \
    }                                                                        \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                  "Size mismatch for types std::atomic<" #type               \
                  ">, and " #type);                                          \
    old_val = AdjustByteOrder<type>(old_val);                                \
    new_val = AdjustByteOrder<type>(new_val);                                \
    std::atomic_compare_exchange_strong(                                     \
        reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val);      \
    Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val))));   \
    break;                                                                   \
  }
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
                                   uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
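      // std::atomic_compare_exchange_strong stores the value found in memory
      // back into old_val on failure, so pushing old_val always yields the
      // loaded value, which is exactly the result wasm's cmpxchg requires.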
#define ATOMIC_LOAD_CASE(name, type, op_type, operation)                \
  case kExpr##name: {                                                   \
    Address addr;                                                       \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
                                              len)) {                   \
      return false;                                                     \
    }                                                                   \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),            \
                  "Size mismatch for types std::atomic<" #type          \
                  ">, and " #type);                                     \
    result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>(      \
        std::operation(reinterpret_cast<std::atomic<type>*>(addr)))));  \
    Push(result);                                                       \
    break;                                                              \
  }
      ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
#define ATOMIC_STORE_CASE(name, type, op_type, operation)                    \
  case kExpr##name: {                                                        \
    type val;                                                                \
    Address addr;                                                            \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
                                              &val)) {                       \
      return false;                                                          \
    }                                                                        \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                  "Size mismatch for types std::atomic<" #type               \
                  ">, and " #type);                                          \
    std::operation(reinterpret_cast<std::atomic<type>*>(addr),               \
                   AdjustByteOrder<type>(val));                              \
    break;                                                                   \
  }
      ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
      case kExprAtomicFence:
        std::atomic_thread_fence(std::memory_order_seq_cst);
        *len += 1;
        break;
      case kExprI32AtomicWait: {
        if (!module()->has_shared_memory) {
          DoTrap(kTrapUnreachable, pc);
          return false;
        }
        int32_t val;
        int64_t timeout;
        uint64_t buffer_offset;
        if (!ExtractAtomicWaitNotifyParams<int32_t>(
                decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
          return false;
        }
        HandleScope handle_scope(isolate_);
        Handle<JSArrayBuffer> array_buffer(
            instance_object_->memory_object().array_buffer(), isolate_);
        auto result = FutexEmulation::WaitWasm32(isolate_, array_buffer,
                                                 buffer_offset, val, timeout);
        Push(WasmValue(result.ToSmi().value()));
        break;
      }
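      // The pushed Smi encodes the outcome of the wait; per the threads
      // proposal this is expected to be 0 ("ok"), 1 ("not-equal") or
      // 2 ("timed-out").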
      case kExprI64AtomicWait: {
        if (!module()->has_shared_memory) {
          DoTrap(kTrapUnreachable, pc);
          return false;
        }
        int64_t val;
        int64_t timeout;
        uint64_t buffer_offset;
        if (!ExtractAtomicWaitNotifyParams<int64_t>(
                decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
          return false;
        }
        HandleScope handle_scope(isolate_);
        Handle<JSArrayBuffer> array_buffer(
            instance_object_->memory_object().array_buffer(), isolate_);
        auto result = FutexEmulation::WaitWasm64(isolate_, array_buffer,
                                                 buffer_offset, val, timeout);
        Push(WasmValue(result.ToSmi().value()));
        break;
      }
      case kExprAtomicNotify: {
        int32_t val;
        uint64_t buffer_offset;
        if (!ExtractAtomicWaitNotifyParams<int32_t>(decoder, code, pc, len,
                                                    &buffer_offset, &val)) {
          return false;
        }
        if (!module()->has_shared_memory) {
          Push(WasmValue(0));
          break;
        }
        HandleScope handle_scope(isolate_);
        Handle<JSArrayBuffer> array_buffer(
            instance_object_->memory_object().array_buffer(), isolate_);
        auto result = FutexEmulation::Wake(array_buffer, buffer_offset, val);
        Push(WasmValue(result.ToSmi().value()));
        break;
      }
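      // Note that, unlike the wait cases above, the immediates are decoded
      // before the shared-memory check: notify on non-shared memory is a
      // no-op that pushes 0 and continues, so *len must still be advanced.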
      default:
        UNREACHABLE();
        return false;
    }
    return true;
  }

  template <typename T, T (*float_round_op)(T)>
  T AixFpOpWorkaround(T input) {
#if V8_OS_AIX
    return FpOpWorkaround<T>(input, float_round_op(input));
#else
    return float_round_op(input);
#endif
  }

  bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
                     pc_t pc, int* const len) {
    switch (opcode) {
#define SPLAT_CASE(format, sType, valType, num) \
  case kExpr##format##Splat: {                  \
    WasmValue val = Pop();                      \
    valType v = val.to<valType>();              \
    sType s;                                    \
    for (int i = 0; i < num; i++) s.val[i] = v; \
    Push(WasmValue(Simd128(s)));                \
    return true;                                \
  }
      SPLAT_CASE(F64x2, float2, double, 2)
      SPLAT_CASE(F32x4, float4, float, 4)
      SPLAT_CASE(I64x2, int2, int64_t, 2)
      SPLAT_CASE(I32x4, int4, int32_t, 4)
      SPLAT_CASE(I16x8, int8, int32_t, 8)
      SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
#define EXTRACT_LANE_CASE(format, name)                                 \
  case kExpr##format##ExtractLane: {                                    \
    SimdLaneImmediate<Decoder::kNoValidation> imm(decoder,              \
                                                  code->at(pc + *len)); \
    *len += 1;                                                          \
    WasmValue val = Pop();                                              \
    Simd128 s = val.to_s128();                                          \
    auto ss = s.to_##name();                                            \
    Push(WasmValue(ss.val[LANE(imm.lane, ss)]));                        \
    return true;                                                        \
  }
      EXTRACT_LANE_CASE(F64x2, f64x2)
      EXTRACT_LANE_CASE(F32x4, f32x4)
      EXTRACT_LANE_CASE(I64x2, i64x2)
      EXTRACT_LANE_CASE(I32x4, i32x4)
#undef EXTRACT_LANE_CASE
      // Unsigned extracts require a bit more care. The underlying array in
      // Simd128 is signed (see wasm-value.h), so when cast to uint32_t it
      // will be sign extended, e.g. int8_t -> int32_t -> uint32_t. So for
      // unsigned extracts, we will cast it int8_t -> uint8_t -> uint32_t. We
      // add the DCHECK to ensure that if the array type changes, we know to
      // change this function.
#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, extended_type)      \
  case kExpr##format##ExtractLane##sign: {                               \
    SimdLaneImmediate<Decoder::kNoValidation> imm(decoder,               \
                                                  code->at(pc + *len));  \
    *len += 1;                                                           \
    WasmValue val = Pop();                                               \
    Simd128 s = val.to_s128();                                           \
    auto ss = s.to_##name();                                             \
    auto res = ss.val[LANE(imm.lane, ss)];                               \
    DCHECK(std::is_signed<decltype(res)>::value);                        \
    if (std::is_unsigned<extended_type>::value) {                        \
      using unsigned_type = std::make_unsigned<decltype(res)>::type;     \
      Push(WasmValue(                                                    \
          static_cast<extended_type>(static_cast<unsigned_type>(res)))); \
    } else {                                                             \
      Push(WasmValue(static_cast<extended_type>(res)));                  \
    }                                                                    \
    return true;                                                         \
  }
      EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
      EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
      EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t)
      EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t)
#undef EXTRACT_LANE_EXTEND_CASE
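      // Illustrative example: a lane holding int8_t(-1) extracts as
      // 0xFFFFFFFF with the S variant but as 0xFF (255) with the U variant,
      // thanks to the int8_t -> uint8_t -> uint32_t cast chain.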
#define BINOP_CASE(op, name, stype, count, expr)              \
  case kExpr##op: {                                           \
    WasmValue v2 = Pop();                                     \
    WasmValue v1 = Pop();                                     \
    stype s1 = v1.to_s128().to_##name();                      \
    stype s2 = v2.to_s128().to_##name();                      \
    stype res;                                                \
    for (size_t i = 0; i < count; ++i) {                      \
      auto a = s1.val[LANE(i, s1)];                           \
      auto b = s2.val[LANE(i, s2)];                           \
      auto result = expr;                                     \
      possible_nondeterminism_ |= has_nondeterminism(result); \
      res.val[LANE(i, res)] = result;                         \
    }                                                         \
    Push(WasmValue(Simd128(res)));                            \
    return true;                                              \
  }
      BINOP_CASE(F64x2Add, f64x2, float2, 2, a + b)
      BINOP_CASE(F64x2Sub, f64x2, float2, 2, a - b)
      BINOP_CASE(F64x2Mul, f64x2, float2, 2, a * b)
      BINOP_CASE(F64x2Div, f64x2, float2, 2, base::Divide(a, b))
      BINOP_CASE(F64x2Min, f64x2, float2, 2, JSMin(a, b))
      BINOP_CASE(F64x2Max, f64x2, float2, 2, JSMax(a, b))
      BINOP_CASE(F64x2Pmin, f64x2, float2, 2, std::min(a, b))
      BINOP_CASE(F64x2Pmax, f64x2, float2, 2, std::max(a, b))
      BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
      BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
      BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
      BINOP_CASE(F32x4Div, f32x4, float4, 4, a / b)
      BINOP_CASE(F32x4Min, f32x4, float4, 4, JSMin(a, b))
      BINOP_CASE(F32x4Max, f32x4, float4, 4, JSMax(a, b))
      BINOP_CASE(F32x4Pmin, f32x4, float4, 4, std::min(a, b))
      BINOP_CASE(F32x4Pmax, f32x4, float4, 4, std::max(a, b))
      BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
      BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
      BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
      BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
      BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
      BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
      BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
      BINOP_CASE(I32x4MinU, i32x4, int4, 4,
                 static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
      BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
      BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
                 static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
      BINOP_CASE(S128And, i32x4, int4, 4, a & b)
      BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
      BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
      BINOP_CASE(S128AndNot, i32x4, int4, 4, a & ~b)
      BINOP_CASE(I16x8Add, i16x8, int8, 8, base::AddWithWraparound(a, b))
      BINOP_CASE(I16x8Sub, i16x8, int8, 8, base::SubWithWraparound(a, b))
      BINOP_CASE(I16x8Mul, i16x8, int8, 8, base::MulWithWraparound(a, b))
      BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
      BINOP_CASE(I16x8MinU, i16x8, int8, 8,
                 static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
      BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
      BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
                 static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
      BINOP_CASE(I16x8AddSatS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
      BINOP_CASE(I16x8AddSatU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
      BINOP_CASE(I16x8SubSatS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
      BINOP_CASE(I16x8SubSatU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
      BINOP_CASE(I16x8RoundingAverageU, i16x8, int8, 8,
                 base::RoundingAverageUnsigned<uint16_t>(a, b))
      BINOP_CASE(I16x8Q15MulRSatS, i16x8, int8, 8,
                 SaturateRoundingQMul<int16_t>(a, b))
      BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
      BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
      BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
      BINOP_CASE(I8x16MinU, i8x16, int16, 16,
                 static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
      BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
      BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
                 static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
      BINOP_CASE(I8x16AddSatS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
      BINOP_CASE(I8x16AddSatU, i8x16, int16, 16, SaturateAdd<uint8_t>(a, b))
      BINOP_CASE(I8x16SubSatS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
      BINOP_CASE(I8x16SubSatU, i8x16, int16, 16, SaturateSub<uint8_t>(a, b))
      BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
                 base::RoundingAverageUnsigned<uint8_t>(a, b))
#undef BINOP_CASE
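      // Note: has_nondeterminism(result) taints the execution via
      // possible_nondeterminism_; for float lanes this presumably tracks NaN
      // outputs, whose exact bit patterns are not fully determined by the
      // wasm spec.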
#define UNOP_CASE(op, name, stype, count, expr)               \
  case kExpr##op: {                                           \
    WasmValue v = Pop();                                      \
    stype s = v.to_s128().to_##name();                        \
    stype res;                                                \
    for (size_t i = 0; i < count; ++i) {                      \
      auto a = s.val[LANE(i, s)];                             \
      auto result = expr;                                     \
      possible_nondeterminism_ |= has_nondeterminism(result); \
      res.val[LANE(i, res)] = result;                         \
    }                                                         \
    Push(WasmValue(Simd128(res)));                            \
    return true;                                              \
  }
      UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
      UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
      UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a))
      UNOP_CASE(F64x2Ceil, f64x2, float2, 2,
                (AixFpOpWorkaround<double, &ceil>(a)))
      UNOP_CASE(F64x2Floor, f64x2, float2, 2,
                (AixFpOpWorkaround<double, &floor>(a)))
      UNOP_CASE(F64x2Trunc, f64x2, float2, 2,
                (AixFpOpWorkaround<double, &trunc>(a)))
      UNOP_CASE(F64x2NearestInt, f64x2, float2, 2,
                (AixFpOpWorkaround<double, &nearbyint>(a)))
      UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
      UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
      UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a))
      UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
      UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
      UNOP_CASE(F32x4Ceil, f32x4, float4, 4,
                (AixFpOpWorkaround<float, &ceilf>(a)))
      UNOP_CASE(F32x4Floor, f32x4, float4, 4,
                (AixFpOpWorkaround<float, &floorf>(a)))
      UNOP_CASE(F32x4Trunc, f32x4, float4, 4,
                (AixFpOpWorkaround<float, &truncf>(a)))
      UNOP_CASE(F32x4NearestInt, f32x4, float4, 4,
                (AixFpOpWorkaround<float, &nearbyintf>(a)))
      UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
      UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
      // Use llabs which will work correctly on both 64-bit and 32-bit.
      UNOP_CASE(I64x2Abs, i64x2, int2, 2, std::llabs(a))
      UNOP_CASE(I32x4Abs, i32x4, int4, 4, std::abs(a))
      UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
      UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
      UNOP_CASE(I16x8Abs, i16x8, int8, 8, std::abs(a))
      UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
      UNOP_CASE(I8x16Abs, i8x16, int16, 16, std::abs(a))
      UNOP_CASE(I8x16Popcnt, i8x16, int16, 16,
                base::bits::CountPopulation<uint8_t>(a))
#undef UNOP_CASE
      // Cast to double in call to signbit is due to MSVC issue, see
      // https://github.com/microsoft/STL/issues/519.
#define BITMASK_CASE(op, name, stype, count)                            \
  case kExpr##op: {                                                     \
    WasmValue v = Pop();                                                \
    stype s = v.to_s128().to_##name();                                  \
    int32_t res = 0;                                                    \
    for (size_t i = 0; i < count; ++i) {                                \
      bool sign = std::signbit(static_cast<double>(s.val[LANE(i, s)])); \
      res |= (sign << i);                                               \
    }                                                                   \
    Push(WasmValue(res));                                               \
    return true;                                                        \
  }
      BITMASK_CASE(I8x16BitMask, i8x16, int16, 16)
      BITMASK_CASE(I16x8BitMask, i16x8, int8, 8)
      BITMASK_CASE(I32x4BitMask, i32x4, int4, 4)
      BITMASK_CASE(I64x2BitMask, i64x2, int2, 2)
#undef BITMASK_CASE
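      // Illustrative example: for i32x4 lanes {-1, 0, -5, 7} the sign bits
      // are 1,0,1,0 (lane 0 lands in bit 0), so i32x4.bitmask pushes 0b0101.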
#define CMPOP_CASE(op, name, stype, out_stype, count, expr)   \
  case kExpr##op: {                                           \
    WasmValue v2 = Pop();                                     \
    WasmValue v1 = Pop();                                     \
    stype s1 = v1.to_s128().to_##name();                      \
    stype s2 = v2.to_s128().to_##name();                      \
    out_stype res;                                            \
    for (size_t i = 0; i < count; ++i) {                      \
      auto a = s1.val[LANE(i, s1)];                           \
      auto b = s2.val[LANE(i, s2)];                           \
      auto result = expr;                                     \
      possible_nondeterminism_ |= has_nondeterminism(result); \
      res.val[LANE(i, res)] = result ? -1 : 0;                \
    }                                                         \
    Push(WasmValue(Simd128(res)));                            \
    return true;                                              \
  }
      CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
      CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
      CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
      CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
      CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
      CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
      CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
      CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
      CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
      CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
      CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
      CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
      CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
      CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
      CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
      CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
      CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
      CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
      CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
      CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
      CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
      CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
      CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
      CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
      CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
      CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
      CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
      CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
      CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
      CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
      CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
      CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
      CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
      CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
      CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
      CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
      CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
      CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
      CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
      CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
      CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
      CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
      CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
      CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
      CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
      CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
      CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
      CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
#undef CMPOP_CASE
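      // Each comparison lane is all-ones (-1) on true and all-zeros on
      // false, producing a mask directly usable by v128.bitselect.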
#define REPLACE_LANE_CASE(format, name, stype, ctype)                   \
  case kExpr##format##ReplaceLane: {                                    \
    SimdLaneImmediate<Decoder::kNoValidation> imm(decoder,              \
                                                  code->at(pc + *len)); \
    *len += 1;                                                          \
    WasmValue new_val = Pop();                                          \
    WasmValue simd_val = Pop();                                         \
    stype s = simd_val.to_s128().to_##name();                           \
    s.val[LANE(imm.lane, s)] = new_val.to<ctype>();                     \
    Push(WasmValue(Simd128(s)));                                        \
    return true;                                                        \
  }
      REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
      REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
      REPLACE_LANE_CASE(I64x2, i64x2, int2, int64_t)
      REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
      REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
      REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
#undef REPLACE_LANE_CASE
      case kExprS128LoadMem:
        return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
                                             MachineRepresentation::kSimd128,
                                             /*prefix_len=*/*len);
      case kExprS128StoreMem:
        return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
                                              MachineRepresentation::kSimd128,
                                              /*prefix_len=*/*len);
#define SHIFT_CASE(op, name, stype, count, expr) \
  case kExpr##op: {                              \
    uint32_t shift = Pop().to<uint32_t>();       \
    WasmValue v = Pop();                         \
    stype s = v.to_s128().to_##name();           \
    stype res;                                   \
    for (size_t i = 0; i < count; ++i) {         \
      auto a = s.val[LANE(i, s)];                \
      res.val[LANE(i, res)] = expr;              \
    }                                            \
    Push(WasmValue(Simd128(res)));               \
    return true;                                 \
  }
      SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
                 static_cast<uint64_t>(a) << (shift % 64))
      SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> (shift % 64))
      SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
                 static_cast<uint64_t>(a) >> (shift % 64))
      SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
                 static_cast<uint32_t>(a) << (shift % 32))
      SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> (shift % 32))
      SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
                 static_cast<uint32_t>(a) >> (shift % 32))
      SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
                 static_cast<uint16_t>(a) << (shift % 16))
      SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> (shift % 16))
      SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
                 static_cast<uint16_t>(a) >> (shift % 16))
      SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
                 static_cast<uint8_t>(a) << (shift % 8))
      SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8))
      SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
                 static_cast<uint8_t>(a) >> (shift % 8))
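      // Shift counts are taken modulo the lane width per the wasm SIMD spec,
      // so e.g. i32x4.shl with a shift of 33 behaves like a shift by 1.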
      case kExprI16x8ExtMulLowI8x16S: {
        return DoSimdExtMul<int16, int8, int8_t, int16_t>(0);
      }
      case kExprI16x8ExtMulHighI8x16S: {
        return DoSimdExtMul<int16, int8, int8_t, int16_t>(8);
      }
      case kExprI16x8ExtMulLowI8x16U: {
        return DoSimdExtMul<int16, int8, uint8_t, uint16_t>(0);
      }
      case kExprI16x8ExtMulHighI8x16U: {
        return DoSimdExtMul<int16, int8, uint8_t, uint16_t>(8);
      }
      case kExprI32x4ExtMulLowI16x8S: {
        return DoSimdExtMul<int8, int4, int16_t, int32_t>(0);
      }
      case kExprI32x4ExtMulHighI16x8S: {
        return DoSimdExtMul<int8, int4, int16_t, int32_t>(4);
      }
      case kExprI32x4ExtMulLowI16x8U: {
        return DoSimdExtMul<int8, int4, uint16_t, uint32_t>(0);
      }
      case kExprI32x4ExtMulHighI16x8U: {
        return DoSimdExtMul<int8, int4, uint16_t, uint32_t>(4);
      }
      case kExprI64x2ExtMulLowI32x4S: {
        return DoSimdExtMul<int4, int2, int32_t, int64_t>(0);
      }
      case kExprI64x2ExtMulHighI32x4S: {
        return DoSimdExtMul<int4, int2, int32_t, int64_t>(2);
      }
      case kExprI64x2ExtMulLowI32x4U: {
        return DoSimdExtMul<int4, int2, uint32_t, uint64_t>(0);
      }
      case kExprI64x2ExtMulHighI32x4U: {
        return DoSimdExtMul<int4, int2, uint32_t, uint64_t>(2);
      }
#undef SHIFT_CASE
#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
                     expr)                                                    \
  case kExpr##op: {                                                          \
    WasmValue v = Pop();                                                     \
    src_type s = v.to_s128().to_##name();                                    \
    dst_type res = {0};                                                      \
    for (size_t i = 0; i < count; ++i) {                                     \
      ctype a = s.val[LANE(start_index + i, s)];                             \
      auto result = expr;                                                    \
      possible_nondeterminism_ |= has_nondeterminism(result);                \
      res.val[LANE(i, res)] = result;                                        \
    }                                                                        \
    Push(WasmValue(Simd128(res)));                                           \
    return true;                                                             \
  }
      CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
                   static_cast<float>(a))
      CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
                   static_cast<float>(a))
      CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
                   std::isnan(a)  ? 0
                   : a < kMinInt  ? kMinInt
                   : a > kMaxInt  ? kMaxInt
                                  : static_cast<int32_t>(a))
      CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
                   std::isnan(a)    ? 0
                   : a < 0          ? 0
                   : a > kMaxUInt32 ? kMaxUInt32
                                    : static_cast<uint32_t>(a))
      CONVERT_CASE(I64x2SConvertI32x4Low, int4, i32x4, int2, 2, 0, int32_t, a)
      CONVERT_CASE(I64x2SConvertI32x4High, int4, i32x4, int2, 2, 2, int32_t, a)
      CONVERT_CASE(I64x2UConvertI32x4Low, int4, i32x4, int2, 2, 0, uint32_t, a)
      CONVERT_CASE(I64x2UConvertI32x4High, int4, i32x4, int2, 2, 2, uint32_t,
                   a)
      CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t, a)
      CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
                   a)
      CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
      CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t, a)
      CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t, a)
      CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
                   a)
      CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
      CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t, a)
      CONVERT_CASE(F64x2ConvertLowI32x4S, int4, i32x4, float2, 2, 0, int32_t,
                   static_cast<double>(a))
      CONVERT_CASE(F64x2ConvertLowI32x4U, int4, i32x4, float2, 2, 0, uint32_t,
                   static_cast<double>(a))
      CONVERT_CASE(I32x4TruncSatF64x2SZero, float2, f64x2, int4, 2, 0, double,
                   base::saturated_cast<int32_t>(a))
      CONVERT_CASE(I32x4TruncSatF64x2UZero, float2, f64x2, int4, 2, 0, double,
                   base::saturated_cast<uint32_t>(a))
      CONVERT_CASE(F32x4DemoteF64x2Zero, float2, f64x2, float4, 2, 0, float,
                   DoubleToFloat32(a))
      CONVERT_CASE(F64x2PromoteLowF32x4, float4, f32x4, float2, 2, 0, float,
                   static_cast<double>(a))
#undef CONVERT_CASE
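      // res is zero-initialized in the macro because the *Zero conversions
      // (e.g. I32x4TruncSatF64x2SZero) only write the low count lanes and
      // must leave the remaining lanes zero.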
#define PACK_CASE(op, src_type, name, dst_type, count, dst_ctype)  \
  case kExpr##op: {                                                \
    WasmValue v2 = Pop();                                          \
    WasmValue v1 = Pop();                                          \
    src_type s1 = v1.to_s128().to_##name();                        \
    src_type s2 = v2.to_s128().to_##name();                        \
    dst_type res;                                                  \
    for (size_t i = 0; i < count; ++i) {                           \
      int64_t v = i < count / 2 ? s1.val[LANE(i, s1)]              \
                                : s2.val[LANE(i - count / 2, s2)]; \
      res.val[LANE(i, res)] = base::saturated_cast<dst_ctype>(v);  \
    }                                                              \
    Push(WasmValue(Simd128(res)));                                 \
    return true;                                                   \
  }
      PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t)
      PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t)
      PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t)
      PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t)
#undef PACK_CASE
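      // Illustrative example: I16x8SConvertI32x4 (i16x8.narrow_i32x4_s)
      // packs an i32 lane of 70000 to 32767 and -70000 to -32768 via
      // base::saturated_cast<int16_t>.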
      case kExprS128Select: {
        int4 bool_val = Pop().to_s128().to_i32x4();
        int4 v2 = Pop().to_s128().to_i32x4();
        int4 v1 = Pop().to_s128().to_i32x4();
        int4 res;
        for (size_t i = 0; i < 4; ++i) {
          res.val[LANE(i, res)] = v2.val[LANE(i, v2)] ^
                                  ((v1.val[LANE(i, v1)] ^ v2.val[LANE(i, v2)]) &
                                   bool_val.val[LANE(i, bool_val)]);
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
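      // The loop above uses the branch-free identity b ^ ((a ^ b) & mask):
      // mask bits set to 1 select bits from v1, bits set to 0 select from v2.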
      case kExprI32x4DotI16x8S: {
        int8 v2 = Pop().to_s128().to_i16x8();
        int8 v1 = Pop().to_s128().to_i16x8();
        int4 res;
        for (size_t i = 0; i < 4; i++) {
          int32_t lo = (v1.val[LANE(i * 2, v1)] * v2.val[LANE(i * 2, v2)]);
          int32_t hi =
              (v1.val[LANE(i * 2 + 1, v1)] * v2.val[LANE(i * 2 + 1, v2)]);
          res.val[LANE(i, res)] = base::AddWithWraparound(lo, hi);
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
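      // Each output lane is v1[2i]*v2[2i] + v1[2i+1]*v2[2i+1]. The only sum
      // that can overflow int32 is two (-32768 * -32768) products, which is
      // why the final addition uses AddWithWraparound.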
      case kExprS128Const: {
        Simd128Immediate<Decoder::kNoValidation> imm(decoder,
                                                     code->at(pc + *len));
        int16 res;
        for (size_t i = 0; i < kSimd128Size; ++i) {
          res.val[LANE(i, res)] = imm.value[i];
        }
        Push(WasmValue(Simd128(res)));
        *len += 16;
        return true;
      }
      case kExprI8x16Swizzle: {
        int16 v2 = Pop().to_s128().to_i8x16();
        int16 v1 = Pop().to_s128().to_i8x16();
        int16 res;
        for (size_t i = 0; i < kSimd128Size; ++i) {
          int lane = v2.val[LANE(i, v2)];
          res.val[LANE(i, res)] =
              lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
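      // Swizzle indices are runtime data, so out-of-range indices (including
      // negative bytes) select 0 instead of trapping.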
      case kExprI8x16Shuffle: {
        Simd128Immediate<Decoder::kNoValidation> imm(decoder,
                                                     code->at(pc + *len));
        *len += 16;
        int16 v2 = Pop().to_s128().to_i8x16();
        int16 v1 = Pop().to_s128().to_i8x16();
        int16 res;
        for (size_t i = 0; i < kSimd128Size; ++i) {
          int lane = imm.value[i];
          res.val[LANE(i, res)] = lane < kSimd128Size
                                      ? v1.val[LANE(lane, v1)]
                                      : v2.val[LANE(lane - kSimd128Size, v2)];
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
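      // Unlike swizzle, shuffle lane indices are compile-time immediates in
      // [0, 32): indices 0-15 select from v1, 16-31 select from v2.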
      case kExprV128AnyTrue: {
        int4 s = Pop().to_s128().to_i32x4();
        bool res = s.val[LANE(0, s)] | s.val[LANE(1, s)] | s.val[LANE(2, s)] |
                   s.val[LANE(3, s)];
        Push(WasmValue((res)));
        return true;
      }
#define REDUCTION_CASE(op, name, stype, count, operation) \
  case kExpr##op: {                                       \
    stype s = Pop().to_s128().to_##name();                \
    bool res = true;                                      \
    for (size_t i = 0; i < count; ++i) {                  \
      res = res & static_cast<bool>(s.val[LANE(i, s)]);   \
    }                                                     \
    Push(WasmValue(res));                                 \
    return true;                                          \
  }
      REDUCTION_CASE(I64x2AllTrue, i64x2, int2, 2, &)
      REDUCTION_CASE(I32x4AllTrue, i32x4, int4, 4, &)
      REDUCTION_CASE(I16x8AllTrue, i16x8, int8, 8, &)
      REDUCTION_CASE(I8x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
#define QFM_CASE(op, name, stype, count, operation)                         \
  case kExpr##op: {                                                         \
    stype c = Pop().to_s128().to_##name();                                  \
    stype b = Pop().to_s128().to_##name();                                  \
    stype a = Pop().to_s128().to_##name();                                  \
    stype res;                                                              \
    for (size_t i = 0; i < count; i++) {                                    \
      res.val[LANE(i, res)] =                                               \
          a.val[LANE(i, a)] operation(b.val[LANE(i, b)] * c.val[LANE(i, c)]); \
    }                                                                       \
    Push(WasmValue(Simd128(res)));                                          \
    return true;                                                            \
  }
      QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
      QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
      QFM_CASE(F64x2Qfma, f64x2, float2, 2, +)
      QFM_CASE(F64x2Qfms, f64x2, float2, 2, -)
#undef QFM_CASE
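      // Qfma/Qfms compute a + b * c and a - b * c per lane. "Quasi" fused:
      // the result may be fused (single rounding) or unfused depending on
      // hardware support, so platforms may legitimately differ here.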
      case kExprS128Load8Splat: {
        return DoSimdLoadSplat<int16, int32_t, int8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord8);
      }
      case kExprS128Load16Splat: {
        return DoSimdLoadSplat<int8, int32_t, int16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord16);
      }
      case kExprS128Load32Splat: {
        return DoSimdLoadSplat<int4, int32_t, int32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord32);
      }
      case kExprS128Load64Splat: {
        return DoSimdLoadSplat<int2, int64_t, int64_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load8x8S: {
        return DoSimdLoadExtend<int8, int16_t, int8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load8x8U: {
        return DoSimdLoadExtend<int8, uint16_t, uint8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load16x4S: {
        return DoSimdLoadExtend<int4, int32_t, int16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load16x4U: {
        return DoSimdLoadExtend<int4, uint32_t, uint16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load32x2S: {
        return DoSimdLoadExtend<int2, int64_t, int32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load32x2U: {
        return DoSimdLoadExtend<int2, uint64_t, uint32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load32Zero: {
        return DoSimdLoadZeroExtend<int4, uint32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord32);
      }
      case kExprS128Load64Zero: {
        return DoSimdLoadZeroExtend<int2, uint64_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Load8Lane: {
        return DoSimdLoadLane<int16, int32_t, int8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord8);
      }
      case kExprS128Load16Lane: {
        return DoSimdLoadLane<int8, int32_t, int16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord16);
      }
      case kExprS128Load32Lane: {
        return DoSimdLoadLane<int4, int32_t, int32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord32);
      }
      case kExprS128Load64Lane: {
        return DoSimdLoadLane<int2, int64_t, int64_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprS128Store8Lane: {
        return DoSimdStoreLane<int16, int32_t, int8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord8);
      }
      case kExprS128Store16Lane: {
        return DoSimdStoreLane<int8, int32_t, int16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord16);
      }
      case kExprS128Store32Lane: {
        return DoSimdStoreLane<int4, int32_t, int32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord32);
      }
      case kExprS128Store64Lane: {
        return DoSimdStoreLane<int2, int64_t, int64_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprI32x4ExtAddPairwiseI16x8S: {
        return DoSimdExtAddPairwise<int4, int8, int32_t, int16_t>();
      }
      case kExprI32x4ExtAddPairwiseI16x8U: {
        return DoSimdExtAddPairwise<int4, int8, uint32_t, uint16_t>();
      }
      case kExprI16x8ExtAddPairwiseI8x16S: {
        return DoSimdExtAddPairwise<int8, int16, int16_t, int8_t>();
      }
      case kExprI16x8ExtAddPairwiseI8x16U: {
        return DoSimdExtAddPairwise<int8, int16, uint16_t, uint8_t>();
      }
      case kExprPrefetchT:
      case kExprPrefetchNT: {
        // Max alignment doesn't matter, use an arbitrary value.
        MemoryAccessImmediate<Decoder::kNoValidation> imm(
            decoder, code->at(pc + *len), 4, module()->is_memory64);
        // Pop address and do nothing.
        Pop().to<uint32_t>();
        *len += imm.length;
        return true;
      }
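      // Prefetch hints have no observable semantics, so the interpreter only
      // decodes the immediate and discards the address operand.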
      default:
        return false;
    }
  }

  template <typename s_type, typename result_type, typename load_type>
  bool DoSimdLoadSplat(Decoder* decoder, InterpreterCode* code, pc_t pc,
                       int* const len, MachineRepresentation rep) {
    // len is the number of bytes that make up this op, including prefix byte,
    // so the prefix_len for ExecuteLoad is len, minus the prefix byte itself.
    // Think of prefix_len as: number of extra bytes that make up this op.
    if (!ExecuteLoad<result_type, load_type>(decoder, code, pc, len, rep,
                                             /*prefix_len=*/*len)) {
      return false;
    }
    result_type v = Pop().to<result_type>();
    s_type s;
    for (size_t i = 0; i < arraysize(s.val); i++) s.val[LANE(i, s)] = v;
    Push(WasmValue(Simd128(s)));
    return true;
  }
  template <typename s_type, typename wide_type, typename narrow_type>
  bool DoSimdLoadExtend(Decoder* decoder, InterpreterCode* code, pc_t pc,
                        int* const len, MachineRepresentation rep) {
    static_assert(sizeof(wide_type) == sizeof(narrow_type) * 2,
                  "size mismatch for wide and narrow types");
    if (!ExecuteLoad<uint64_t, uint64_t>(decoder, code, pc, len, rep,
                                         /*prefix_len=*/*len)) {
      return false;
    }
    constexpr int lanes = kSimd128Size / sizeof(wide_type);
    uint64_t v = Pop().to_u64();
    s_type s;
    for (int i = 0; i < lanes; i++) {
      uint8_t shift = i * (sizeof(narrow_type) * 8);
      narrow_type el = static_cast<narrow_type>(v >> shift);
      s.val[LANE(i, s)] = static_cast<wide_type>(el);
    }
    Push(WasmValue(Simd128(s)));
    return true;
  }

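  // Loads a single scalar into lane 0 and zeroes all remaining lanes.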
  template <typename s_type, typename load_type>
  bool DoSimdLoadZeroExtend(Decoder* decoder, InterpreterCode* code, pc_t pc,
                            int* const len, MachineRepresentation rep) {
    if (!ExecuteLoad<load_type, load_type>(decoder, code, pc, len, rep,
                                           /*prefix_len=*/*len)) {
      return false;
    }
    load_type v = Pop().to<load_type>();
    s_type s;
    // All lanes are 0.
    for (size_t i = 0; i < arraysize(s.val); i++) s.val[LANE(i, s)] = 0;
    // Lane 0 is set to the loaded value.
    s.val[LANE(0, s)] = v;
    Push(WasmValue(Simd128(s)));
    return true;
  }

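  // Loads a scalar from memory and replaces a single lane of the s128 value
  // on top of the stack; the lane index is an immediate that follows the
  // memory access immediate.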
  template <typename s_type, typename result_type, typename load_type>
  bool DoSimdLoadLane(Decoder* decoder, InterpreterCode* code, pc_t pc,
                      int* const len, MachineRepresentation rep) {
    s_type value = Pop().to_s128().to<s_type>();
    if (!ExecuteLoad<result_type, load_type>(decoder, code, pc, len, rep,
                                             /*prefix_len=*/*len)) {
      return false;
    }

    SimdLaneImmediate<Decoder::kNoValidation> lane_imm(decoder,
                                                       code->at(pc + *len));
    *len += lane_imm.length;
    result_type loaded = Pop().to<result_type>();
    value.val[LANE(lane_imm.lane, value)] = loaded;
    Push(WasmValue(Simd128(value)));
    return true;
  }

  template <typename s_type, typename result_type, typename load_type>
  bool DoSimdStoreLane(Decoder* decoder, InterpreterCode* code, pc_t pc,
                       int* const len, MachineRepresentation rep) {
    // Extract a single lane, push it onto the stack, then store the lane.
    s_type value = Pop().to_s128().to<s_type>();

    MemoryAccessImmediate<Decoder::kNoValidation> imm(
        decoder, code->at(pc + *len), sizeof(load_type),
        module()->is_memory64);

    SimdLaneImmediate<Decoder::kNoValidation> lane_imm(
        decoder, code->at(pc + *len + imm.length));

    Push(WasmValue(
        static_cast<result_type>(value.val[LANE(lane_imm.lane, value)])));

    // ExecuteStore will update the len, so pass it unchanged here.
    if (!ExecuteStore<result_type, load_type>(decoder, code, pc, len, rep,
                                              /*prefix_len=*/*len)) {
      return false;
    }

    *len += lane_imm.length;
    return true;
  }

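  // Pairwise widening multiply: starting at lane {start}, multiplies matching
  // narrow lanes of the two inputs and stores each widened product in the
  // next destination lane, starting at lane 0.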
  template <typename s_type, typename d_type, typename narrow, typename wide>
  bool DoSimdExtMul(unsigned start) {
    WasmValue v2 = Pop();
    WasmValue v1 = Pop();
    auto s1 = v1.to_s128().to<s_type>();
    auto s2 = v2.to_s128().to<s_type>();
    auto end = start + (kSimd128Size / sizeof(wide));
    d_type res;
    for (size_t dst = 0; start < end; ++start, ++dst) {
      // Need static_cast for unsigned narrow types.
      res.val[LANE(dst, res)] =
          MultiplyLong<wide>(static_cast<narrow>(s1.val[LANE(start, s1)]),
                             static_cast<narrow>(s2.val[LANE(start, s2)]));
    }
    Push(WasmValue(Simd128(res)));
    return true;
  }

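  // Extended pairwise addition: each destination lane is the widened sum of
  // two adjacent narrow source lanes.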
  template <typename DstSimdType, typename SrcSimdType, typename Wide,
            typename Narrow>
  bool DoSimdExtAddPairwise() {
    constexpr int lanes = kSimd128Size / sizeof(DstSimdType::val[0]);
    auto v = Pop().to_s128().to<SrcSimdType>();
    DstSimdType res;
    for (int i = 0; i < lanes; ++i) {
      res.val[LANE(i, res)] =
          AddLong<Wide>(static_cast<Narrow>(v.val[LANE(i * 2, v)]),
                        static_cast<Narrow>(v.val[LANE(i * 2 + 1, v)]));
    }
    Push(WasmValue(Simd128(res)));
    return true;
  }

  // Check if our control stack (frames_) exceeds the limit. Trigger a stack
  // overflow if it does, and unwind the current frame.
  // Returns true if execution can continue, false if the stack was fully
  // unwound. Do call this function immediately *after* pushing a new frame.
  // The pc of the top frame will be reset to 0 if the stack check fails.
  bool DoStackCheck(Decoder* decoder, InterpreterCode** target, pc_t* pc,
                    pc_t* limit) V8_WARN_UNUSED_RESULT {
    // The goal of this stack check is not to prevent actual stack overflows,
    // but to simulate stack overflows during the execution of compiled code.
    // That is why this function uses FLAG_stack_size, even though the value
    // stack actually lies in zone memory.
    const size_t stack_size_limit = FLAG_stack_size * KB;
    // Sum up the value stack size and the control stack size.
    const size_t current_stack_size = (sp_ - stack_.get()) * sizeof(*sp_) +
                                      frames_.size() * sizeof(frames_[0]);
    if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
      *pc = frames_.back().pc;
      *limit = (*target)->end - (*target)->start;
      decoder->Reset((*target)->start, (*target)->end);
      return true;
    }
    // The pc of the top frame is initialized to the first instruction. We
    // reset it to 0 here such that we report the same position as in compiled
    // code.
    frames_.back().pc = 0;
    isolate_->StackOverflow();
    if (HandleException(isolate_) == WasmInterpreter::HANDLED) {
      ReloadFromFrameOnException(decoder, target, pc, limit);
      return true;
    }
    return false;
  }

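  // A 32-bit exception value is split into two 16-bit halves, each stored as
  // a Smi in the encoded-values array; a 64-bit value is stored as four.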
  void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint32_t value) {
    encoded_values->set((*encoded_index)++, Smi::FromInt(value >> 16));
    encoded_values->set((*encoded_index)++, Smi::FromInt(value & 0xffff));
  }

  void EncodeI64ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint64_t value) {
    EncodeI32ExceptionValue(encoded_values, encoded_index,
                            static_cast<uint32_t>(value >> 32));
    EncodeI32ExceptionValue(encoded_values, encoded_index,
                            static_cast<uint32_t>(value));
  }

  // Allocate, initialize and throw a new exception. The exception values are
  // popped off the operand stack. Returns true if the exception is being
  // handled locally by the interpreter, false otherwise (interpreter exits).
  bool DoThrowException(const WasmException* exception,
                        uint32_t index) V8_WARN_UNUSED_RESULT {
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
    Handle<WasmExceptionTag> exception_tag(
        WasmExceptionTag::cast(
            instance_object_->exceptions_table().get(index)),
        isolate_);
    uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
    Handle<WasmExceptionPackage> exception_object =
        WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
    Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
        WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
    // Encode the exception values on the operand stack into the exception
    // package allocated above. This encoding has to be in sync with other
    // backends so that exceptions can be passed between them.
    const WasmExceptionSig* sig = exception->sig;
    uint32_t encoded_index = 0;
    sp_t base_index = StackHeight() - sig->parameter_count();
    for (size_t i = 0; i < sig->parameter_count(); ++i) {
      WasmValue value = GetStackValue(base_index + i);
      switch (sig->GetParam(i).kind()) {
        case kI32: {
          uint32_t u32 = value.to_u32();
          EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
          break;
        }
        case kF32: {
          uint32_t f32 = value.to_f32_boxed().get_bits();
          EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
          break;
        }
        case kI64: {
          uint64_t u64 = value.to_u64();
          EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
          break;
        }
        case kF64: {
          uint64_t f64 = value.to_f64_boxed().get_bits();
          EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
          break;
        }
        case kS128: {
          int4 s128 = value.to_s128().to_i32x4();
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[0]);
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[1]);
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[2]);
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
          break;
        }
        case kRef:
        case kOptRef: {
          switch (sig->GetParam(i).heap_representation()) {
            case HeapType::kExtern:
            case HeapType::kFunc:
            case HeapType::kAny: {
              Handle<Object> ref = value.to_ref();
              encoded_values->set(encoded_index++, *ref);
              break;
            }
            case HeapType::kBottom:
              UNREACHABLE();
            case HeapType::kEq:
            case HeapType::kData:
            case HeapType::kI31:
            default:
              // TODO(7748): Implement these.
              UNIMPLEMENTED();
              break;
          }
          break;
        }
        case kRtt:  // TODO(7748): Implement.
        case kRttWithDepth:
        case kI8:
        case kI16:
        case kStmt:
        case kBottom:
          UNREACHABLE();
      }
    }
    DCHECK_EQ(encoded_size, encoded_index);
    Drop(static_cast<int>(sig->parameter_count()));
    // Now that the exception is ready, set it as pending.
    isolate_->Throw(*exception_object);
    return HandleException(isolate_) == WasmInterpreter::HANDLED;
  }

  // Throw a given existing exception. Returns true if the exception is being
  // handled locally by the interpreter, false otherwise (interpreter exits).
  bool DoRethrowException(Handle<Object> exception) {
    isolate_->ReThrow(*exception);
    return HandleException(isolate_) == WasmInterpreter::HANDLED;
  }

  // Determines whether the given exception has a tag matching the expected
  // tag for the given index within the exception table of the current
  // instance.
  bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) {
    if (!exception_object->IsWasmExceptionPackage(isolate_)) return false;
    Handle<Object> caught_tag = WasmExceptionPackage::GetExceptionTag(
        isolate_, Handle<WasmExceptionPackage>::cast(exception_object));
    Handle<Object> expected_tag =
        handle(instance_object_->exceptions_table().get(index), isolate_);
    DCHECK(expected_tag->IsWasmExceptionTag());
    return expected_tag.is_identical_to(caught_tag);
  }

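  // Inverse of EncodeI32ExceptionValue: reassembles a 32-bit value from the
  // two 16-bit Smi halves stored in the encoded-values array.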
  void DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint32_t* value) {
    uint32_t msb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
    uint32_t lsb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
    *value = (msb << 16) | (lsb & 0xffff);
  }

  void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint64_t* value) {
    uint32_t lsb = 0, msb = 0;
    DecodeI32ExceptionValue(encoded_values, encoded_index, &msb);
    DecodeI32ExceptionValue(encoded_values, encoded_index, &lsb);
    *value = (static_cast<uint64_t>(msb) << 32) | static_cast<uint64_t>(lsb);
  }

  // Unpack the values encoded in the given exception. The exception values
  // are pushed onto the operand stack. Callers must perform a tag check to
  // ensure the encoded values match the expected signature of the exception.
  void DoUnpackException(const WasmException* exception,
                         Handle<Object> exception_object) {
    Handle<FixedArray> encoded_values =
        Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues(
            isolate_, Handle<WasmExceptionPackage>::cast(exception_object)));
    // Decode the exception values from the given exception package and push
    // them onto the operand stack. This encoding has to be in sync with other
    // backends so that exceptions can be passed between them.
    const WasmExceptionSig* sig = exception->sig;
    uint32_t encoded_index = 0;
    for (size_t i = 0; i < sig->parameter_count(); ++i) {
      WasmValue value;
      switch (sig->GetParam(i).kind()) {
        case kI32: {
          uint32_t u32 = 0;
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &u32);
          value = WasmValue(u32);
          break;
        }
        case kF32: {
          uint32_t f32_bits = 0;
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &f32_bits);
          value = WasmValue(Float32::FromBits(f32_bits));
          break;
        }
        case kI64: {
          uint64_t u64 = 0;
          DecodeI64ExceptionValue(encoded_values, &encoded_index, &u64);
          value = WasmValue(u64);
          break;
        }
        case kF64: {
          uint64_t f64_bits = 0;
          DecodeI64ExceptionValue(encoded_values, &encoded_index, &f64_bits);
          value = WasmValue(Float64::FromBits(f64_bits));
          break;
        }
        case kS128: {
          int4 s128 = {0, 0, 0, 0};
          uint32_t* vals = reinterpret_cast<uint32_t*>(s128.val);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[0]);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[1]);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[2]);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[3]);
          value = WasmValue(Simd128(s128));
          break;
        }
        case kRef:
        case kOptRef: {
          switch (sig->GetParam(i).heap_representation()) {
            case HeapType::kExtern:
            case HeapType::kFunc:
            case HeapType::kAny: {
              Handle<Object> ref(encoded_values->get(encoded_index++),
                                 isolate_);
              value = WasmValue(ref, sig->GetParam(i));
              break;
            }
            default:
              // TODO(7748): Implement these.
              UNIMPLEMENTED();
              break;
          }
          break;
        }
        case kRtt:  // TODO(7748): Implement.
        case kRttWithDepth:
        case kI8:
        case kI16:
        case kStmt:
        case kBottom:
          UNREACHABLE();
      }
      Push(value);
    }
    DCHECK_EQ(WasmExceptionPackage::GetEncodedSize(exception), encoded_index);
  }

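  // Main interpreter loop: decodes and executes opcodes in {code} starting at
  // {pc}. A non-negative {max} bounds the number of breakable instructions
  // executed before control is given back to the caller.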
  void Execute(InterpreterCode* code, pc_t pc, int max) {
    DCHECK_NOT_NULL(code->side_table);
    DCHECK(!frames_.empty());
    // There must be enough space on the stack to hold the arguments, locals,
    // and the value stack.
    DCHECK_LE(code->function->sig->parameter_count() +
                  code->locals.type_list.size() +
                  code->side_table->max_stack_height_,
              stack_limit_ - stack_.get() - frames_.back().sp);
    // Seal the surrounding {HandleScope} to ensure that all cases within the
    // interpreter switch below which deal with handles open their own scope.
    // This avoids leaking / accumulating handles in the surrounding scope.
    SealHandleScope shs(isolate_);

    Decoder decoder(code->start, code->end);
    pc_t limit = code->end - code->start;

    while (true) {
      DCHECK_GT(limit, pc);
      DCHECK_NOT_NULL(code->start);

      int len = 1;
      byte orig = code->start[pc];
      WasmOpcode opcode = static_cast<WasmOpcode>(orig);

      if (WasmOpcodes::IsPrefixOpcode(opcode)) {
        uint32_t prefixed_opcode_length = 0;
        opcode = decoder.read_prefixed_opcode<Decoder::kNoValidation>(
            code->at(pc), &prefixed_opcode_length);
        // read_prefixed_opcode includes the prefix byte; overwrite len.
        len = prefixed_opcode_length;
      }

      // If max is 0, break. If max is positive (a limit is set), decrement
      // it.
      if (max >= 0 && WasmOpcodes::IsBreakable(opcode)) {
        if (max == 0) break;
        --max;
      }

      TRACE("@%-3zu: %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
      TraceValueStack();
      TRACE("\n");

#ifdef DEBUG
      // Compute the stack effect of this opcode, and verify later that the
      // stack was modified accordingly.
      std::pair<uint32_t, uint32_t> stack_effect =
          StackEffect(codemap_.module(), frames_.back().code->function->sig,
                      code->start + pc, code->end);
      sp_t expected_new_stack_height =
          StackHeight() - stack_effect.first + stack_effect.second;
#endif

      switch (orig) {
        case kExprNop:
          break;
        case kExprBlock:
        case kExprLoop:
        case kExprTry: {
          BlockTypeImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1), module());
          len = 1 + imm.length;
          break;
        }
        case kExprIf: {
          BlockTypeImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1), module());
          WasmValue cond = Pop();
          bool is_true = cond.to<uint32_t>() != 0;
          if (is_true) {
            // Fall through to the true block.
            len = 1 + imm.length;
            TRACE(" true => fallthrough\n");
          } else {
            len = LookupTargetDelta(code, pc);
            TRACE(" false => @%zu\n", pc + len);
          }
          break;
        }
        case kExprElse:
        case kExprUnwind:
        case kExprCatch:
        case kExprCatchAll: {
          len = LookupTargetDelta(code, pc);
          TRACE(" end => @%zu\n", pc + len);
          break;
        }
        case kExprThrow: {
          ExceptionIndexImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          CommitPc(pc);  // Needed for local unwinding.
          const WasmException* exception = &module()->exceptions[imm.index];
          if (!DoThrowException(exception, imm.index)) return;
          ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
          continue;  // Do not bump pc.
        }
        case kExprRethrow: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          HandleScope scope(isolate_);  // Avoid leaking handles.
          DCHECK(!frames_.back().caught_exception_stack.is_null());
          int index = code->side_table->rethrow_map_[pc];
          DCHECK_LE(0, index);
          DCHECK_LT(index, frames_.back().caught_exception_stack->Size());
          Handle<Object> exception = handle(
              frames_.back().caught_exception_stack->get(index), isolate_);
          DCHECK(!exception->IsTheHole());
          CommitPc(pc);  // Needed for local unwinding.
          if (!DoRethrowException(exception)) return;
          ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
          continue;  // Do not bump pc.
        }
        case kExprSelectWithType: {
          SelectTypeImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1), module());
          len = 1 + imm.length;
          V8_FALLTHROUGH;
        }
        case kExprSelect: {
          HandleScope scope(isolate_);  // Avoid leaking handles.
          WasmValue cond = Pop();
          WasmValue fval = Pop();
          WasmValue tval = Pop();
          Push(cond.to<int32_t>() != 0 ? tval : fval);
          break;
        }
        case kExprBr: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          len = DoBreak(code, pc, imm.depth);
          TRACE(" br => @%zu\n", pc + len);
          break;
        }
        case kExprBrIf: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          WasmValue cond = Pop();
          bool is_true = cond.to<uint32_t>() != 0;
          if (is_true) {
            len = DoBreak(code, pc, imm.depth);
            TRACE(" br_if => @%zu\n", pc + len);
          } else {
            TRACE(" false => fallthrough\n");
            len = 1 + imm.length;
          }
          break;
        }
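        // A key beyond the table selects the default target, which the
        // iterator yields after the {table_count} regular entries.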
        case kExprBrTable: {
          BranchTableImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          BranchTableIterator<Decoder::kNoValidation> iterator(&decoder, imm);
          uint32_t key = Pop().to<uint32_t>();
          uint32_t depth = 0;
          if (key >= imm.table_count) key = imm.table_count;
          for (uint32_t i = 0; i <= key; i++) {
            DCHECK(iterator.has_next());
            depth = iterator.next();
          }
          len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
          TRACE(" br[%u] => @%zu\n", key, pc + key + len);
          break;
        }
        case kExprReturn: {
          size_t arity = code->function->sig->return_count();
          if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
          continue;  // Do not bump pc.
        }
        case kExprUnreachable: {
          return DoTrap(kTrapUnreachable, pc);
        }
        case kExprDelegate: {
          BranchDepthImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          len = 1 + imm.length;
          break;
        }
        case kExprEnd: {
          if (code->side_table->rethrow_map_.count(pc)) {
            // Implicit rethrow after unwind.
            HandleScope scope(isolate_);
            DCHECK(!frames_.back().caught_exception_stack.is_null());
            int index = code->side_table->rethrow_map_[pc];
            Handle<Object> exception = handle(
                frames_.back().caught_exception_stack->get(index), isolate_);
            DCHECK(!exception->IsTheHole());
            CommitPc(pc);  // Needed for local unwinding.
            if (!DoRethrowException(exception)) return;
            ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
            continue;  // Do not bump pc.
          }
          break;
        }
        case kExprI32Const: {
          ImmI32Immediate<Decoder::kNoValidation> imm(&decoder,
                                                      code->at(pc + 1));
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
          break;
        }
        case kExprI64Const: {
          ImmI64Immediate<Decoder::kNoValidation> imm(&decoder,
                                                      code->at(pc + 1));
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
          break;
        }
        case kExprF32Const: {
          ImmF32Immediate<Decoder::kNoValidation> imm(&decoder,
                                                      code->at(pc + 1));
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
          break;
        }
        case kExprF64Const: {
          ImmF64Immediate<Decoder::kNoValidation> imm(&decoder,
                                                      code->at(pc + 1));
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
          break;
        }
        case kExprRefNull: {
          HeapTypeImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1), module());
          len = 1 + imm.length;
          Push(WasmValue(isolate_->factory()->null_value(),
                         ValueType::Ref(imm.type, kNullable)));
          break;
        }
        case kExprRefFunc: {
          FunctionIndexImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.

          Handle<WasmExternalFunction> function =
              WasmInstanceObject::GetOrCreateWasmExternalFunction(
                  isolate_, instance_object_, imm.index);
          Push(WasmValue(function, kWasmFuncRef));
          len = 1 + imm.length;
          break;
        }
        case kExprLocalGet: {
          LocalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
                                                          code->at(pc + 1));
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
          Push(GetStackValue(frames_.back().sp + imm.index));
          len = 1 + imm.length;
          break;
        }
        case kExprLocalSet: {
          LocalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
                                                          code->at(pc + 1));
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
          WasmValue val = Pop();
          SetStackValue(frames_.back().sp + imm.index, val);
          len = 1 + imm.length;
          break;
        }
        case kExprLocalTee: {
          LocalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
                                                          code->at(pc + 1));
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
          WasmValue val = Pop();
          SetStackValue(frames_.back().sp + imm.index, val);
          Push(val);
          len = 1 + imm.length;
          break;
        }
        case kExprDrop: {
          Drop();
          break;
        }
        case kExprCallFunction: {
          CallFunctionImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          InterpreterCode* target = codemap_.GetCode(imm.index);
          CHECK(!target->function->imported);
          // Execute an internal call.
          if (!DoCall(&decoder, &target, &pc, &limit)) return;
          code = target;
          continue;  // Do not bump pc.
        } break;

        case kExprCallIndirect: {
          CallIndirectImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1));
          uint32_t entry_index = Pop().to<uint32_t>();
          CommitPc(pc);  // TODO(wasm): Be more disciplined about committing
                         // PC.
          CallResult result =
              CallIndirectFunction(imm.table_index, entry_index,
                                   imm.sig_index);
          switch (result.type) {
            case CallResult::INTERNAL:
              // The target is a function of this instance. Call it directly.
              if (!DoCall(&decoder, &result.interpreter_code, &pc, &limit))
                return;
              code = result.interpreter_code;
              continue;  // Do not bump pc.
            case CallResult::INVALID_FUNC:
              return DoTrap(kTrapTableOutOfBounds, pc);
            case CallResult::SIGNATURE_MISMATCH:
              return DoTrap(kTrapFuncSigMismatch, pc);
          }
        } break;
        case kExprReturnCall: {
          // Make return calls more expensive, so that return call recursions
          // don't cause a timeout.
          if (max > 0) max = std::max(0, max - 100);
          CallFunctionImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          InterpreterCode* target = codemap_.GetCode(imm.index);

          CHECK(!target->function->imported);
          // Enter the found internal function.
          if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
          code = target;
          continue;  // Do not bump pc.
        } break;

        case kExprReturnCallIndirect: {
          // Make return calls more expensive, so that return call recursions
          // don't cause a timeout.
          if (max > 0) max = std::max(0, max - 100);
          CallIndirectImmediate<Decoder::kNoValidation> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1));
          uint32_t entry_index = Pop().to<uint32_t>();
          CommitPc(pc);  // TODO(wasm): Be more disciplined about committing
                         // PC.

          // TODO(wasm): Calling functions needs some refactoring to avoid
          // multi-exit code like this.
          CallResult result =
              CallIndirectFunction(imm.table_index, entry_index,
                                   imm.sig_index);
          switch (result.type) {
            case CallResult::INTERNAL: {
              InterpreterCode* target = result.interpreter_code;

              DCHECK(!target->function->imported);

              // The function belongs to this instance. Enter it directly.
              if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
              code = result.interpreter_code;
              continue;  // Do not bump pc.
            }
            case CallResult::INVALID_FUNC:
              return DoTrap(kTrapTableOutOfBounds, pc);
            case CallResult::SIGNATURE_MISMATCH:
              return DoTrap(kTrapFuncSigMismatch, pc);
          }
        } break;
        case kExprGlobalGet: {
          GlobalIndexImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          HandleScope handle_scope(isolate_);
          Push(WasmInstanceObject::GetGlobalValue(
              instance_object_, module()->globals[imm.index]));
          len = 1 + imm.length;
          break;
        }
        case kExprGlobalSet: {
          GlobalIndexImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          auto& global = module()->globals[imm.index];
          switch (global.type.kind()) {
#define CASE_TYPE(valuetype, ctype)                                     \
  case valuetype: {                                                     \
    uint8_t* ptr =                                                      \
        WasmInstanceObject::GetGlobalStorage(instance_object_, global); \
    WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr),       \
                                  Pop().to<ctype>());                   \
    break;                                                              \
  }
            FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
            case kRef:
            case kOptRef: {
              // TODO(7748): Type checks or DCHECKs for ref types?
              HandleScope handle_scope(isolate_);  // Avoid leaking handles.
              Handle<FixedArray> global_buffer;  // The buffer of the global.
              uint32_t global_index;             // The index into the buffer.
              std::tie(global_buffer, global_index) =
                  WasmInstanceObject::GetGlobalBufferAndIndex(
                      instance_object_, global);
              Handle<Object> ref = Pop().to_ref();
              global_buffer->set(global_index, *ref);
              break;
            }
            case kRtt:  // TODO(7748): Implement.
            case kRttWithDepth:
            case kI8:
            case kI16:
            case kStmt:
            case kBottom:
              UNREACHABLE();
          }
          len = 1 + imm.length;
          break;
        }
        case kExprTableGet: {
          TableIndexImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          HandleScope handle_scope(isolate_);
          auto table = handle(
              WasmTableObject::cast(
                  instance_object_->tables().get(imm.index)),
              isolate_);
          uint32_t table_size = table->current_length();
          uint32_t entry_index = Pop().to<uint32_t>();
          if (entry_index >= table_size) {
            return DoTrap(kTrapTableOutOfBounds, pc);
          }
          Handle<Object> value =
              WasmTableObject::Get(isolate_, table, entry_index);
          Push(WasmValue(value, table->type()));
          len = 1 + imm.length;
          break;
        }
        case kExprTableSet: {
          TableIndexImmediate<Decoder::kNoValidation> imm(
              &decoder, code->at(pc + 1));
          HandleScope handle_scope(isolate_);
          auto table = handle(
              WasmTableObject::cast(
                  instance_object_->tables().get(imm.index)),
              isolate_);
          uint32_t table_size = table->current_length();
          Handle<Object> value = Pop().to_ref();
          uint32_t entry_index = Pop().to<uint32_t>();
          if (entry_index >= table_size) {
            return DoTrap(kTrapTableOutOfBounds, pc);
          }
          WasmTableObject::Set(isolate_, table, entry_index, value);
          len = 1 + imm.length;
          break;
        }
#define LOAD_CASE(name, ctype, mtype, rep)                      \
  case kExpr##name: {                                           \
    if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, &len,    \
                                   MachineRepresentation::rep)) \
      return;                                                   \
    break;                                                      \
  }

        LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
        LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
        LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
        LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
        LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
        LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8);
        LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
        LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
        LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
        LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
        LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
        LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
        LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
        LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
#undef LOAD_CASE

#define STORE_CASE(name, ctype, mtype, rep)                      \
  case kExpr##name: {                                            \
    if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, &len,    \
                                    MachineRepresentation::rep)) \
      return;                                                    \
    break;                                                       \
  }

        STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
        STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
        STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
        STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
        STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
        STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
        STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
        STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
        STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
#undef STORE_CASE

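// asm.js memory accesses do not trap: an out-of-bounds load yields a default
// value (0 or NaN) and an out-of-bounds store skips the memory write.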
#define ASMJS_LOAD_CASE(name, ctype, mtype, defval)                 \
  case kExpr##name: {                                               \
    uint32_t index = Pop().to<uint32_t>();                          \
    ctype result;                                                   \
    Address addr = BoundsCheckMem<mtype>(0, index);                 \
    if (!addr) {                                                    \
      result = defval;                                              \
    } else {                                                        \
      /* TODO(titzer): alignment for asmjs load mem? */             \
      result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
    }                                                               \
    Push(WasmValue(result));                                        \
    break;                                                          \
  }
        ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
        ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
        ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
        ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
        ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
        ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
                        std::numeric_limits<float>::quiet_NaN());
        ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
                        std::numeric_limits<double>::quiet_NaN());
#undef ASMJS_LOAD_CASE

#define ASMJS_STORE_CASE(name, ctype, mtype)                                  \
  case kExpr##name: {                                                         \
    WasmValue val = Pop();                                                    \
    uint32_t index = Pop().to<uint32_t>();                                    \
    Address addr = BoundsCheckMem<mtype>(0, index);                           \
    if (addr) {                                                               \
      *(reinterpret_cast<mtype*>(addr)) =                                     \
          static_cast<mtype>(val.to<ctype>());                                \
    }                                                                         \
    Push(val);                                                                \
    break;                                                                    \
  }

        ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
        ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
        ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
        ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
        ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
2018-10-26 17:28:37 +00:00
|
|
|
      case kExprMemoryGrow: {
        MemoryIndexImmediate<Decoder::kNoValidation> imm(&decoder,
                                                         code->at(pc + 1));
        // TODO(clemensb): Fix this for memory64.
        uint32_t delta_pages = Pop().to<uint32_t>();
        HandleScope handle_scope(isolate_);  // Avoid leaking handles.
        Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
                                        isolate_);
        int32_t result = WasmMemoryObject::Grow(isolate_, memory, delta_pages);
        Push(WasmValue(result));
        len = 1 + imm.length;
        // Treat one memory.grow instruction like 1000 other instructions,
        // because it is a really expensive operation.
        if (max > 0) max = std::max(0, max - 1000);
        break;
      }
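      // (The value pushed by memory.grow above is the previous memory size
      // in pages on success, or -1 on failure.)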
      case kExprMemorySize: {
        MemoryIndexImmediate<Decoder::kNoValidation> imm(&decoder,
                                                         code->at(pc + 1));
        uint64_t num_pages = instance_object_->memory_size() / kWasmPageSize;
        Push(module()->is_memory64
                 ? WasmValue(num_pages)
                 : WasmValue(static_cast<uint32_t>(num_pages)));
        len = 1 + imm.length;
        break;
      }
      // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
      // specially to guarantee that the quiet bit of a NaN is preserved on
      // ia32 by the reinterpret casts.
      case kExprI32ReinterpretF32: {
        WasmValue val = Pop();
        Push(WasmValue(ExecuteI32ReinterpretF32(val)));
        break;
      }
      case kExprI64ReinterpretF64: {
        WasmValue val = Pop();
        Push(WasmValue(ExecuteI64ReinterpretF64(val)));
        break;
      }
#define SIGN_EXTENSION_CASE(name, wtype, ntype)        \
  case kExpr##name: {                                  \
    ntype val = static_cast<ntype>(Pop().to<wtype>()); \
    Push(WasmValue(static_cast<wtype>(val)));          \
    break;                                             \
  }
      SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
      SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
      SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
      SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
      SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
#undef SIGN_EXTENSION_CASE
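      // Example for the double cast above: i32.extend8_s applied to 0x80
      // first truncates to int8_t (-128), then sign-extends back to int32_t,
      // producing 0xFFFFFF80 (-128).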
      case kExprRefIsNull: {
        len = 1;
        HandleScope handle_scope(isolate_);  // Avoid leaking handles.
        uint32_t result = Pop().to_ref()->IsNull() ? 1 : 0;
        Push(WasmValue(result));
        break;
      }
      case kNumericPrefix: {
        if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
        break;
      }
      case kAtomicPrefix: {
        if (!ExecuteAtomicOp(opcode, &decoder, code, pc, &len)) return;
        break;
      }
      case kSimdPrefix: {
        if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len)) return;
        break;
      }

#define EXECUTE_SIMPLE_BINOP(name, ctype, op)               \
  case kExpr##name: {                                       \
    WasmValue rval = Pop();                                 \
    WasmValue lval = Pop();                                 \
    auto result = lval.to<ctype>() op rval.to<ctype>();     \
    possible_nondeterminism_ |= has_nondeterminism(result); \
    Push(WasmValue(result));                                \
    break;                                                  \
  }
      FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP

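      // The integer entries in the simple-binop table use unsigned C types
      // so that wrap-around (e.g. for i32.add) is well-defined modular
      // arithmetic in C++ rather than signed-overflow undefined behavior.
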
#define EXECUTE_OTHER_BINOP(name, ctype)                    \
  case kExpr##name: {                                       \
    TrapReason trap = kTrapCount;                           \
    ctype rval = Pop().to<ctype>();                         \
    ctype lval = Pop().to<ctype>();                         \
    auto result = Execute##name(lval, rval, &trap);         \
    possible_nondeterminism_ |= has_nondeterminism(result); \
    if (trap != kTrapCount) return DoTrap(trap, pc);        \
    Push(WasmValue(result));                                \
    break;                                                  \
  }
      FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP

#define EXECUTE_UNOP(name, ctype, exec_fn)                  \
  case kExpr##name: {                                       \
    TrapReason trap = kTrapCount;                           \
    ctype val = Pop().to<ctype>();                          \
    auto result = exec_fn(val, &trap);                      \
    possible_nondeterminism_ |= has_nondeterminism(result); \
    if (trap != kTrapCount) return DoTrap(trap, pc);        \
    Push(WasmValue(result));                                \
    break;                                                  \
  }

#define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
      FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP

#define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
  EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
      FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
#undef EXECUTE_I32CONV_FLOATOP
#undef EXECUTE_UNOP

      default:
        FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
              WasmOpcodes::OpcodeName(
                  static_cast<WasmOpcode>(code->start[pc])));
        UNREACHABLE();
    }

#ifdef DEBUG
    if (!WasmOpcodes::IsControlOpcode(opcode)) {
      DCHECK_EQ(expected_new_stack_height, StackHeight());
    }
#endif

    pc += len;
    if (pc == limit) {
      // Fell off end of code; do an implicit return.
      TRACE("@%-3zu: ImplicitReturn\n", pc);
      size_t arity = code->function->sig->return_count();
      DCHECK_EQ(StackHeight() - arity, frames_.back().llimit());
      if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
    }
  }

  state_ = WasmInterpreter::PAUSED;
  CommitPc(pc);
  }

  WasmValue Pop() {
    DCHECK_GT(frames_.size(), 0);
    DCHECK_GT(StackHeight(), frames_.back().llimit());  // can't pop into locals
    StackValue stack_value = *--sp_;
    // Note that {StackHeight} depends on the current {sp} value, hence this
    // operation is split into two statements to ensure proper evaluation
    // order.
    WasmValue val = stack_value.ExtractValue(this, StackHeight());
    stack_value.ClearValue(this, StackHeight());
    return val;
  }
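  // ({ClearValue} above also releases the corresponding slot of the on-heap
  // {reference_stack_}, so a popped reference does not keep its object
  // alive.)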

  void Drop(int n = 1) {
    DCHECK_GE(StackHeight(), n);
    DCHECK_GT(frames_.size(), 0);
    // Check that we don't pop into locals.
    DCHECK_GE(StackHeight() - n, frames_.back().llimit());
    StackValue::ClearValues(this, StackHeight() - n, n);
    sp_ -= n;
  }

  WasmValue PopArity(size_t arity) {
    if (arity == 0) return WasmValue();
    CHECK_EQ(1, arity);
    return Pop();
  }

  void Push(WasmValue val) {
    DCHECK_NE(kWasmStmt, val.type());
    DCHECK_NE(kWasmI8, val.type());
    DCHECK_NE(kWasmI16, val.type());
    DCHECK_LE(1, stack_limit_ - sp_);
    DCHECK(StackValue::IsClearedValue(this, StackHeight()));
    StackValue stack_value(val, this, StackHeight());
    // Note that {StackHeight} depends on the current {sp} value, hence this
    // operation is split into two statements to ensure proper evaluation
    // order.
    *sp_++ = stack_value;
  }

  void Push(WasmValue* vals, size_t arity) {
    DCHECK_LE(arity, stack_limit_ - sp_);
    for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
      DCHECK_NE(kWasmStmt, val->type());
      Push(*val);
    }
  }

  void ResetStack(sp_t new_height) {
    DCHECK_LE(new_height, StackHeight());  // Only allowed to shrink.
    int count = static_cast<int>(StackHeight() - new_height);
    StackValue::ClearValues(this, new_height, count);
    sp_ = stack_.get() + new_height;
  }

  void EnsureStackSpace(size_t size) {
    if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
    size_t old_size = stack_limit_ - stack_.get();
    size_t requested_size =
        base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
    size_t new_size =
        std::max(size_t{8}, std::max(2 * old_size, requested_size));
    std::unique_ptr<StackValue[]> new_stack(new StackValue[new_size]);
    if (old_size > 0) {
      memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
    }
    sp_ = new_stack.get() + (sp_ - stack_.get());
    stack_ = std::move(new_stack);
    stack_limit_ = stack_.get() + new_size;
    // Also resize the reference stack to the same size.
    int grow_by = static_cast<int>(new_size - old_size);
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
    Handle<FixedArray> new_ref_stack =
        isolate_->factory()->CopyFixedArrayAndGrow(reference_stack_, grow_by);
    new_ref_stack->FillWithHoles(static_cast<int>(old_size),
                                 static_cast<int>(new_size));
    isolate_->global_handles()->Destroy(reference_stack_.location());
    reference_stack_ = isolate_->global_handles()->Create(*new_ref_stack);
  }
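  // (The growth policy above, at least double the old size rounded up to a
  // power of two, keeps the amortized cost of stack growth per pushed value
  // constant.)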

  sp_t StackHeight() { return sp_ - stack_.get(); }

  void TraceValueStack() {
#ifdef DEBUG
    if (!FLAG_trace_wasm_interpreter) return;
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
    Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
    sp_t sp = top ? top->sp : 0;
    sp_t plimit = top ? top->plimit() : 0;
    sp_t llimit = top ? top->llimit() : 0;
    for (size_t i = sp; i < StackHeight(); ++i) {
      if (i < plimit) {
        PrintF(" p%zu:", i);
      } else if (i < llimit) {
        PrintF(" l%zu:", i);
      } else {
        PrintF(" s%zu:", i);
      }
      WasmValue val = GetStackValue(i);
      switch (val.type().kind()) {
        case kI32:
          PrintF("i32:%d", val.to<int32_t>());
          break;
        case kI64:
          PrintF("i64:%" PRId64 "", val.to<int64_t>());
          break;
        case kF32:
          PrintF("f32:%a", val.to<float>());
          break;
        case kF64:
          PrintF("f64:%la", val.to<double>());
          break;
        case kS128: {
          // This defaults to tracing all S128 values as i32x4 values for now;
          // once there is more state to know what type of values are on the
          // stack, the right format should be printed here.
          int4 s = val.to_s128().to_i32x4();
          PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
          break;
        }
        case kStmt:
          PrintF("void");
          break;
        case kRef:
        case kOptRef: {
          if (val.type().is_reference_to(HeapType::kExtern)) {
            Handle<Object> ref = val.to_ref();
            if (ref->IsNull()) {
              PrintF("ref:null");
            } else {
              PrintF("ref:0x%" V8PRIxPTR, ref->ptr());
            }
          } else {
            // TODO(7748): Implement this properly.
            PrintF("ref/ref null");
          }
          break;
        }
        case kRtt:
        case kRttWithDepth:
          // TODO(7748): Implement properly.
          PrintF("rtt");
          break;
        case kI8:
        case kI16:
        case kBottom:
          UNREACHABLE();
          break;
      }
    }
#endif  // DEBUG
  }

  CallResult CallIndirectFunction(uint32_t table_index, uint32_t entry_index,
                                  uint32_t sig_index) {
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
    uint32_t expected_sig_id = module()->canonicalized_type_ids[sig_index];
    DCHECK_EQ(expected_sig_id,
              module()->signature_map.Find(*module()->signature(sig_index)));
    // Bounds check against table size.
    if (entry_index >=
        static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
            isolate_, instance_object_, table_index))) {
      return {CallResult::INVALID_FUNC};
    }

    IndirectFunctionTableEntry entry(instance_object_, table_index,
                                     entry_index);
    // Signature check.
    if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
      return {CallResult::SIGNATURE_MISMATCH};
    }
Revert "[wasm] Introduce jump table"
This reverts commit 733b7c8258872dbbb44222831694c5f6b69424ab.
Reason for revert: breaks arm64 gc-stress: https://ci.chromium.org/buildbot/client.v8.ports/V8%20Linux%20-%20arm64%20-%20sim%20-%20gc%20stress/11659
Original change's description:
> [wasm] Introduce jump table
>
> This introduces the concept of a jump table for WebAssembly, which is
> used for every direct and indirect call to any WebAssembly function.
> For lazy compilation, it will initially contain code to call the
> WasmCompileLazy builtin, where it passes the function index to be
> called.
> For non-lazy-compilation, it will contain a jump to the actual code.
> The jump table allows to easily redirect functions for lazy
> compilation, tier-up, debugging and (in the future) code aging. After
> this CL, we will not need to patch existing code any more for any of
> these operations.
>
> R=mstarzinger@chromium.org, titzer@chromium.org
>
> Bug: v8:7758
> Change-Id: I45f9983c2b06ae81bf5ce9847f4542fb48844a4f
> Reviewed-on: https://chromium-review.googlesource.com/1097075
> Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Ben Titzer <titzer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#53805}
TBR=mstarzinger@chromium.org,titzer@chromium.org,clemensh@chromium.org,sreten.kovacevic@mips.com
Change-Id: Iea358db2cf13656a65cf69a6d82cbbc10d3e7e1c
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7758
Reviewed-on: https://chromium-review.googlesource.com/1105157
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53807}
2018-06-18 20:37:10 +00:00
|
|
|
|
2019-04-04 11:22:49 +00:00
|
|
|
    Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
    // Check that this is an internal call (within the same instance).
    CHECK(object_ref->IsWasmInstanceObject() &&
          instance_object_.is_identical_to(object_ref));

    NativeModule* native_module =
        instance_object_->module_object().native_module();
#ifdef DEBUG
    {
      WasmCodeRefScope code_ref_scope;
      WasmCode* wasm_code = native_module->Lookup(entry.target());
      DCHECK_EQ(native_module, wasm_code->native_module());
      DCHECK_EQ(WasmCode::kJumpTable, wasm_code->kind());
    }
#endif
    uint32_t func_index =
        native_module->GetFunctionIndexFromJumpTableSlot(entry.target());

    return {CallResult::INTERNAL, codemap_.GetCode(func_index)};
  }
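  // (Note the check order above, which matches call_indirect semantics: the
  // table bounds check is reported before the signature check, and both
  // before any call is made.)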

  // Create a copy of the module bytes for the interpreter, since the passed
  // pointer might be invalidated after constructing the interpreter.
  const ZoneVector<uint8_t> module_bytes_;
  CodeMap codemap_;
  Isolate* isolate_;
  Handle<WasmInstanceObject> instance_object_;
  std::unique_ptr<StackValue[]> stack_;
  StackValue* stack_limit_ = nullptr;  // End of allocated stack space.
  StackValue* sp_ = nullptr;           // Current stack pointer.
  // References are on an on-heap stack.
  Handle<FixedArray> reference_stack_;
  ZoneVector<Frame> frames_;
  WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
  TrapReason trap_reason_ = kTrapCount;
  bool possible_nondeterminism_ = false;
  uint64_t num_interpreted_calls_ = 0;
};

namespace {
void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
  Address* global_handle_location =
      reinterpret_cast<Address*>(data.GetParameter());
  GlobalHandles::Destroy(global_handle_location);
}

Handle<WasmInstanceObject> MakeWeak(
    Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
  Handle<WasmInstanceObject> weak_instance =
      isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
  Address* global_handle_location = weak_instance.location();
  GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
                          &NopFinalizer, v8::WeakCallbackType::kParameter);
  return weak_instance;
}
}  // namespace
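
// {MakeWeak} wraps the instance in a weak global handle so that the
// interpreter does not keep the Wasm instance object alive; {NopFinalizer}
// merely destroys the handle itself once the instance dies.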

//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
                                 const ModuleWireBytes& wire_bytes,
                                 Handle<WasmInstanceObject> instance_object)
    : zone_(isolate->allocator(), ZONE_NAME),
      internals_(new WasmInterpreterInternals(
          &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}

// The destructor is here so we can forward declare {WasmInterpreterInternals}
// used in the {unique_ptr} in the header.
WasmInterpreter::~WasmInterpreter() = default;

WasmInterpreter::State WasmInterpreter::state() const {
  return internals_->state();
}

void WasmInterpreter::InitFrame(const WasmFunction* function, WasmValue* args) {
  internals_->InitFrame(function, args);
}

WasmInterpreter::State WasmInterpreter::Run(int num_steps) {
  return internals_->Run(num_steps);
}

void WasmInterpreter::Pause() { internals_->Pause(); }

void WasmInterpreter::Reset() { internals_->Reset(); }

WasmValue WasmInterpreter::GetReturnValue(int index) const {
  return internals_->GetReturnValue(index);
}

TrapReason WasmInterpreter::GetTrapReason() const {
  return internals_->GetTrapReason();
}

bool WasmInterpreter::PossibleNondeterminism() const {
  return internals_->PossibleNondeterminism();
}

uint64_t WasmInterpreter::NumInterpretedCalls() const {
  return internals_->NumInterpretedCalls();
}
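
// A typical driver for this interface, sketched (the {FINISHED} state and
// running unbounded with num_steps = -1 are assumed from the header):
//   WasmInterpreter interpreter(isolate, module, wire_bytes, instance);
//   interpreter.InitFrame(function, args);
//   if (interpreter.Run(-1) == WasmInterpreter::FINISHED) {
//     WasmValue result = interpreter.GetReturnValue(0);
//   }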

void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  internals_->codemap()->AddFunction(function, nullptr, nullptr);
}

void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  internals_->codemap()->SetFunctionCode(function, start, end);
}

ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
  // Create some dummy structures, to avoid special-casing the implementation
  // just for testing.
  FunctionSig sig(0, 0, nullptr);
  WasmFunction function{&sig,    // sig
                        0,       // func_index
                        0,       // sig_index
                        {0, 0},  // code
                        false,   // imported
                        false,   // exported
                        false};  // declared
  InterpreterCode code{&function, BodyLocalDecls(zone), start, end, nullptr};

  // Now compute and return the control transfers.
  SideTable side_table(zone, module, &code);
  return side_table.map_;
}

#undef TRACE
#undef LANE
#undef FOREACH_SIMPLE_BINOP
#undef FOREACH_OTHER_BINOP
#undef FOREACH_I32CONV_FLOATOP
#undef FOREACH_OTHER_UNOP

}  // namespace wasm
}  // namespace internal
}  // namespace v8