[wasm] Implement wasm sign extension opcodes

- Shift opcode numbers for asmjs-compat opcodes
- Add --experimental-wasm-se flag to gate sign extension opcodes
- Fix codegen for ia32 movsx instructions

Bug: v8:6532
Change-Id: If7c9eff5ac76d24496effb2314ae2601bb8bba85
Reviewed-on: https://chromium-review.googlesource.com/838403
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50875}
Deepti Gandluri 2018-01-25 14:40:32 -08:00 committed by Commit Bot
parent 8f4407fbd6
commit 1abeb5a3b1
26 changed files with 326 additions and 38 deletions
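
For context, a minimal sketch (not part of this change) of the sign-extension semantics the five new opcodes are meant to have: each one sign-extends the low 8, 16, or 32 bits of its operand, mirroring the kExprI32SExtendI8 through kExprI64SExtendI32 names introduced below.

#include <cassert>
#include <cstdint>

// Illustrative helpers only, not V8 code: each models one new opcode by
// truncating to the narrow width and letting C++ sign-extend back up.
int32_t I32SExtendI8(int32_t x)  { return static_cast<int8_t>(x);  }
int32_t I32SExtendI16(int32_t x) { return static_cast<int16_t>(x); }
int64_t I64SExtendI8(int64_t x)  { return static_cast<int8_t>(x);  }
int64_t I64SExtendI16(int64_t x) { return static_cast<int16_t>(x); }
int64_t I64SExtendI32(int64_t x) { return static_cast<int32_t>(x); }

int main() {
  assert(I32SExtendI8(0x100) == 0);  // only the low 8 bits are kept
  assert(I32SExtendI16(-1) == -1);   // negative values round-trip
  assert(I64SExtendI32(1) == 1);
  return 0;
}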

View File

@ -194,6 +194,7 @@
'../test/cctest/wasm/test-run-wasm-js.cc',
'../test/cctest/wasm/test-run-wasm-module.cc',
'../test/cctest/wasm/test-run-wasm-relocation.cc',
'../test/cctest/wasm/test-run-wasm-sign-extension.cc',
'../test/cctest/wasm/test-run-wasm-simd.cc',
'../test/cctest/wasm/test-wasm-breakpoints.cc',
"../test/cctest/wasm/test-wasm-codegen.cc",

View File

@ -2631,6 +2631,18 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmSxtb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0));
}
void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmSxth, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0));
}
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}

View File

@ -1159,6 +1159,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Sxth32:
__ Sxth(i.OutputRegister32(), i.InputRegister32(0));
break;
case kArm64Sxtb:
__ Sxtb(i.OutputRegister(), i.InputRegister32(0));
break;
case kArm64Sxth:
__ Sxth(i.OutputRegister(), i.InputRegister32(0));
break;
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;

View File

@ -67,6 +67,8 @@ namespace compiler {
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
V(Arm64Sxtb) \
V(Arm64Sxth) \
V(Arm64Sxtw) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \

View File

@ -67,7 +67,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ror:
case kArm64Ror32:
case kArm64Mov32:
case kArm64Sxtb:
case kArm64Sxtb32:
case kArm64Sxth:
case kArm64Sxth32:
case kArm64Sxtw:
case kArm64Sbfx32:

View File

@ -3132,6 +3132,26 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
VisitRR(this, kArm64Sxtb32, node);
}
void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
VisitRR(this, kArm64Sxth32, node);
}
void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
VisitRR(this, kArm64Sxtb, node);
}
void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
VisitRR(this, kArm64Sxth, node);
}
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
VisitRR(this, kArm64Sxtw, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {

View File

@ -372,6 +372,17 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ j(not_equal, &binop); \
} while (false)
#define ASSEMBLE_MOVX(mov_instr) \
do { \
if (instr->addressing_mode() != kMode_None) { \
__ mov_instr(i.OutputRegister(), i.MemoryOperand()); \
} else if (instr->InputAt(0)->IsRegister()) { \
__ mov_instr(i.OutputRegister(), i.InputRegister(0)); \
} else { \
__ mov_instr(i.OutputRegister(), i.InputOperand(0)); \
} \
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@ -1427,10 +1438,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
ASSEMBLE_MOVX(movsx_b);
break;
case kIA32Movzxbl:
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
ASSEMBLE_MOVX(movzx_b);
break;
case kIA32Movb: {
size_t index = 0;
@ -1443,10 +1454,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Movsxwl:
__ movsx_w(i.OutputRegister(), i.MemoryOperand());
ASSEMBLE_MOVX(movsx_w);
break;
case kIA32Movzxwl:
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
ASSEMBLE_MOVX(movzx_w);
break;
case kIA32Movw: {
size_t index = 0;
@ -3475,6 +3486,13 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
#undef __
#undef kScratchDoubleReg
#undef ASSEMBLE_COMPARE
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_MOVX
} // namespace compiler
} // namespace internal

View File

@ -710,27 +710,29 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
#define RO_OP_LIST(V) \
V(Word32Clz, kIA32Lzcnt) \
V(Word32Ctz, kIA32Tzcnt) \
V(Word32Popcnt, kIA32Popcnt) \
V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
V(BitcastFloat32ToInt32, kIA32BitcastFI) \
V(BitcastInt32ToFloat32, kIA32BitcastIF) \
V(Float32Sqrt, kSSEFloat32Sqrt) \
V(Float64Sqrt, kSSEFloat64Sqrt) \
V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
#define RO_OP_LIST(V) \
V(Word32Clz, kIA32Lzcnt) \
V(Word32Ctz, kIA32Tzcnt) \
V(Word32Popcnt, kIA32Popcnt) \
V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
V(BitcastFloat32ToInt32, kIA32BitcastFI) \
V(BitcastInt32ToFloat32, kIA32BitcastIF) \
V(Float32Sqrt, kSSEFloat32Sqrt) \
V(Float64Sqrt, kSSEFloat64Sqrt) \
V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
V(SignExtendWord8ToInt32, kIA32Movsxbl) \
V(SignExtendWord16ToInt32, kIA32Movsxwl)
#define RR_OP_LIST(V) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
@ -766,6 +768,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
#undef RO_OP_LIST
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@ -773,6 +776,7 @@ RO_OP_LIST(RO_VISITOR)
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#undef RR_OP_LIST
#define RRO_FLOAT_VISITOR(Name, avx, sse) \
void InstructionSelector::Visit##Name(Node* node) { \
@ -780,6 +784,7 @@ RR_OP_LIST(RR_VISITOR)
}
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR
#undef RRO_FLOAT_OP_LIST
#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
void InstructionSelector::Visit##Name(Node* node) { \
@ -787,6 +792,7 @@ RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
}
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
#undef FLOAT_UNOP_LIST
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
@ -1928,6 +1934,7 @@ SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#undef SIMD_INT_TYPES
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@ -1942,6 +1949,7 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
#define VISIT_SIMD_INT_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@ -1950,6 +1958,7 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
}
SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
#undef VISIT_SIMD_INT_UNOP
#undef SIMD_INT_UNOP_LIST
#define VISIT_SIMD_OTHER_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@ -1959,6 +1968,7 @@ SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
}
SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
#undef VISIT_SIMD_OTHER_UNOP
#undef SIMD_OTHER_UNOP_LIST
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@ -1966,6 +1976,7 @@ SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();

View File

@ -1538,6 +1538,16 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
}
case IrOpcode::kSignExtendWord8ToInt32:
return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
case IrOpcode::kSignExtendWord16ToInt32:
return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
case IrOpcode::kSignExtendWord8ToInt64:
return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
case IrOpcode::kSignExtendWord16ToInt64:
return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
case IrOpcode::kSignExtendWord32ToInt64:
return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
@ -2078,6 +2088,18 @@ void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
UNIMPLEMENTED();
}
#endif // V8_TARGET_ARCH_32_BIT
// 64 bit targets do not implement the following instructions.

View File

@ -169,6 +169,11 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(SignExtendWord8ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(SignExtendWord16ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(SignExtendWord8ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(SignExtendWord16ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(SignExtendWord32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \

View File

@ -350,6 +350,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* BitcastInt32ToFloat32();
const Operator* BitcastInt64ToFloat64();
// These operators sign-extend to Int32/Int64
const Operator* SignExtendWord8ToInt32();
const Operator* SignExtendWord16ToInt32();
const Operator* SignExtendWord8ToInt64();
const Operator* SignExtendWord16ToInt64();
const Operator* SignExtendWord32ToInt64();
// Floating point operators always operate with IEEE 754 round-to-nearest
// (single-precision).
const Operator* Float32Add();

View File

@ -2259,6 +2259,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {

View File

@ -2921,6 +2921,26 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {

View File

@ -622,6 +622,11 @@
V(AtomicOr) \
V(AtomicXor) \
V(SpeculationFence) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \
V(SignExtendWord8ToInt64) \
V(SignExtendWord16ToInt64) \
V(SignExtendWord32ToInt64) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \

View File

@ -1653,6 +1653,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kAtomicOr:
case IrOpcode::kAtomicXor:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
case IrOpcode::kSignExtendWord8ToInt64:
case IrOpcode::kSignExtendWord16ToInt64:
case IrOpcode::kSignExtendWord32ToInt64:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)

View File

@ -780,6 +780,21 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
}
op = m->RoundUint64ToFloat64();
break;
case wasm::kExprI32SExtendI8:
op = m->SignExtendWord8ToInt32();
break;
case wasm::kExprI32SExtendI16:
op = m->SignExtendWord16ToInt32();
break;
case wasm::kExprI64SExtendI8:
op = m->SignExtendWord8ToInt64();
break;
case wasm::kExprI64SExtendI16:
op = m->SignExtendWord16ToInt64();
break;
case wasm::kExprI64SExtendI32:
op = m->SignExtendWord32ToInt64();
break;
case wasm::kExprI64SConvertF32:
return BuildI64SConvertF32(input, position);
case wasm::kExprI64SConvertF64:

View File

@ -1244,7 +1244,12 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(BitcastInt32ToFloat32, kX64BitcastIF) \
V(BitcastInt64ToFloat64, kX64BitcastLD) \
V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
V(SignExtendWord8ToInt32, kX64Movsxbl) \
V(SignExtendWord16ToInt32, kX64Movsxwl) \
V(SignExtendWord8ToInt64, kX64Movsxbq) \
V(SignExtendWord16ToInt64, kX64Movsxwq) \
V(SignExtendWord32ToInt64, kX64Movsxlq)
#define RR_OP_LIST(V) \
V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \

View File

@ -559,6 +559,8 @@ DEFINE_BOOL(experimental_wasm_threads, false,
"enable prototype threads for wasm")
DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
"enable non-trapping float-to-int conversions for wasm")
DEFINE_BOOL(experimental_wasm_se, false,
"enable prototype sign extension opcodes for wasm")
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,

View File

@ -37,6 +37,12 @@ struct WasmException;
return true; \
}())
#define RET_ON_PROTOTYPE_OPCODE(flag) \
DCHECK(!this->module_ || !this->module_->is_asm_js()); \
if (!FLAG_experimental_wasm_##flag) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
}
#define CHECK_PROTOTYPE_OPCODE(flag) \
DCHECK(!this->module_ || !this->module_->is_asm_js()); \
if (!FLAG_experimental_wasm_##flag) { \
@ -2344,6 +2350,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
if (WasmOpcodes::IsSignExtensionOpcode(opcode)) {
RET_ON_PROTOTYPE_OPCODE(se);
}
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));

View File

@ -116,6 +116,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
CASE_INT_OP(SExtendI8, "sign_extend8")
CASE_INT_OP(SExtendI16, "sign_extend16")
CASE_I64_OP(SExtendI32, "sign_extend32")
CASE_OP(Unreachable, "unreachable")
CASE_OP(Nop, "nop")
CASE_OP(Block, "block")
@ -320,6 +323,19 @@ bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
}
}
bool WasmOpcodes::IsSignExtensionOpcode(WasmOpcode opcode) {
switch (opcode) {
case kExprI32SExtendI8:
case kExprI32SExtendI16:
case kExprI64SExtendI8:
case kExprI64SExtendI16:
case kExprI64SExtendI32:
return true;
default:
return false;
}
}
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {

View File

@ -225,21 +225,26 @@ using WasmName = Vector<const char>;
V(I32ReinterpretF32, 0xbc, i_f) \
V(I64ReinterpretF64, 0xbd, l_d) \
V(F32ReinterpretI32, 0xbe, f_i) \
V(F64ReinterpretI64, 0xbf, d_l)
V(F64ReinterpretI64, 0xbf, d_l) \
V(I32SExtendI8, 0xc0, i_i) \
V(I32SExtendI16, 0xc1, i_i) \
V(I64SExtendI8, 0xc2, l_l) \
V(I64SExtendI16, 0xc3, l_l) \
V(I64SExtendI32, 0xc4, l_l)
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
V(F64Acos, 0xc2, d_d) \
V(F64Asin, 0xc3, d_d) \
V(F64Atan, 0xc4, d_d) \
V(F64Cos, 0xc5, d_d) \
V(F64Sin, 0xc6, d_d) \
V(F64Tan, 0xc7, d_d) \
V(F64Exp, 0xc8, d_d) \
V(F64Log, 0xc9, d_d) \
V(F64Atan2, 0xca, d_dd) \
V(F64Pow, 0xcb, d_dd) \
V(F64Mod, 0xcc, d_dd) \
V(F64Acos, 0xc5, d_d) \
V(F64Asin, 0xc6, d_d) \
V(F64Atan, 0xc7, d_d) \
V(F64Cos, 0xc8, d_d) \
V(F64Sin, 0xc9, d_d) \
V(F64Tan, 0xca, d_d) \
V(F64Exp, 0xcb, d_d) \
V(F64Log, 0xcc, d_d) \
V(F64Atan2, 0xcd, d_dd) \
V(F64Pow, 0xce, d_dd) \
V(F64Mod, 0xcf, d_dd) \
V(I32AsmjsDivS, 0xd0, i_ii) \
V(I32AsmjsDivU, 0xd1, i_ii) \
V(I32AsmjsRemS, 0xd2, i_ii) \
@ -647,6 +652,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
static bool IsPrefixOpcode(WasmOpcode opcode);
static bool IsControlOpcode(WasmOpcode opcode);
static bool IsSignExtensionOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);

View File

@ -227,6 +227,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-module.cc",
"wasm/test-run-wasm-relocation.cc",
"wasm/test-run-wasm-sign-extension.cc",
"wasm/test-run-wasm-simd.cc",
"wasm/test-run-wasm.cc",
"wasm/test-streaming-compilation.cc",

View File

@ -0,0 +1,77 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/cctest/cctest.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
namespace wasm {
#if !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
WASM_COMPILED_EXEC_TEST(I32SExtendI8) {
EXPERIMENTAL_FLAG_SCOPE(se);
WasmRunner<int32_t, int32_t> r(execution_mode);
BUILD(r, WASM_I32_SIGN_EXT_I8(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
CHECK_EQ(0x7a, r.Call(0x7a));
CHECK_EQ(-0x80, r.Call(0x80));
}
WASM_COMPILED_EXEC_TEST(I32SExtendI16) {
EXPERIMENTAL_FLAG_SCOPE(se);
WasmRunner<int32_t, int32_t> r(execution_mode);
BUILD(r, WASM_I32_SIGN_EXT_I16(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
CHECK_EQ(0x7afa, r.Call(0x7afa));
CHECK_EQ(-0x8000, r.Call(0x8000));
}
#endif // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
// TODO(gdeepti): Enable tests to run in the interpreter, and on 32 bit
// platforms after int64 lowering support. Add JS tests once all ops can be run
// on 32 bit platforms.
#if V8_TARGET_ARCH_64_BIT && !V8_TARGET_ARCH_MIPS64
WASM_COMPILED_EXEC_TEST(I64SExtendI8) {
EXPERIMENTAL_FLAG_SCOPE(se);
WasmRunner<int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_SIGN_EXT_I8(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
CHECK_EQ(0x7a, r.Call(0x7a));
CHECK_EQ(-0x80, r.Call(0x80));
}
WASM_COMPILED_EXEC_TEST(I64SExtendI16) {
EXPERIMENTAL_FLAG_SCOPE(se);
WasmRunner<int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_SIGN_EXT_I16(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
CHECK_EQ(0x7afa, r.Call(0x7afa));
CHECK_EQ(-0x8000, r.Call(0x8000));
}
WASM_COMPILED_EXEC_TEST(I64SExtendI32) {
EXPERIMENTAL_FLAG_SCOPE(se);
WasmRunner<int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_SIGN_EXT_I32(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
CHECK_EQ(-1, r.Call(-1));
CHECK_EQ(0x7fffffff, r.Call(0x7fffffff));
CHECK_EQ(-0x80000000LL, r.Call(0x80000000));
}
#endif // V8_TARGET_ARCH_64_BIT && !V8_TARGET_ARCH_MIPS64
} // namespace wasm
} // namespace internal
} // namespace v8
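
A note on the constants checked above, as a sketch assuming ordinary two's-complement sign extension: 0x80 has the sign bit of the low byte set, so extending it to 32 bits yields 0xFFFFFF80, i.e. -0x80; the 16- and 32-bit cases behave the same way at 0x8000 and 0x80000000.

#include <cstdint>
#include <cstdio>

int main() {
  // Low byte 0x80 has its sign bit set, so it extends to -128 (-0x80).
  std::printf("%d\n", static_cast<int32_t>(static_cast<int8_t>(0x80)));
  // Likewise 0x8000 extends to -32768 (-0x8000) in the 16-bit case.
  std::printf("%d\n", static_cast<int32_t>(static_cast<int16_t>(0x8000)));
  return 0;
}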

View File

@ -1949,6 +1949,9 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
// TODO(gdeepti): Enable this test for sign extension opcodes when lowering
// is enabled.
if (WasmOpcodes::IsSignExtensionOpcode(opcode)) return;
// Enable all optional operators.
compiler::CommonOperatorBuilder common(&zone);
compiler::MachineOperatorBuilder machine(

View File

@ -616,4 +616,13 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
x, y, WASM_ATOMICS_OP(op), \
static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
//------------------------------------------------------------------------------
// Sign Extension Operations.
//------------------------------------------------------------------------------
#define WASM_I32_SIGN_EXT_I8(x) x, kExprI32SExtendI8
#define WASM_I32_SIGN_EXT_I16(x) x, kExprI32SExtendI16
#define WASM_I64_SIGN_EXT_I8(x) x, kExprI64SExtendI8
#define WASM_I64_SIGN_EXT_I16(x) x, kExprI64SExtendI16
#define WASM_I64_SIGN_EXT_I32(x) x, kExprI64SExtendI32
#endif // V8_WASM_MACRO_GEN_H_
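
For readers unfamiliar with these test macros, an illustration of the bytes WASM_I32_SIGN_EXT_I8(WASM_GET_LOCAL(0)) should produce, assuming WASM_GET_LOCAL(0) expands to {kExprGetLocal, 0} as elsewhere in wasm-macro-gen.h; the opcode value 0xc0 comes from the table added above.

#include <cstdint>

// Illustrative only: the function-body bytes the macro pair is expected
// to emit for the I32SExtendI8 tests.
const uint8_t kSignExtendBody[] = {
    0x20, 0x00,  // get_local 0
    0xc0,        // kExprI32SExtendI8
};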

View File

@ -1228,6 +1228,7 @@ TEST_F(FunctionBodyDecoderTest, MacrosInt64) {
}
TEST_F(FunctionBodyDecoderTest, AllSimpleExpressions) {
EXPERIMENTAL_FLAG_SCOPE(se);
// Test all simple expressions which are described by a signature.
#define DECODE_TEST(name, opcode, sig) \
{ \