[wasm] Add I64 Atomic binary operations for x64

Bug: v8:6532
Change-Id: I6fde1fd2cc5776628af4e8a92e9b9ec030b398f7
Reviewed-on: https://chromium-review.googlesource.com/923718
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51675}
This commit is contained in:
Deepti Gandluri 2018-02-28 14:17:39 -08:00 committed by Commit Bot
parent 658af9dd7a
commit ad3d0ba83c
18 changed files with 603 additions and 105 deletions

View File

@ -1546,19 +1546,24 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kWord32AtomicStore:
return VisitWord32AtomicStore(node);
#define ATOMIC_CASE(name) \
case IrOpcode::kWord32Atomic##name: { \
#define ATOMIC_CASE(name, rep) \
case IrOpcode::k##rep##Atomic##name: { \
MachineType type = AtomicOpRepresentationOf(node->op()); \
MarkAsRepresentation(type.representation(), node); \
return VisitWord32Atomic##name(node); \
return Visit##rep##Atomic##name(node); \
}
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
ATOMIC_CASE(Add)
ATOMIC_CASE(Sub)
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
ATOMIC_CASE(Add, Word32)
ATOMIC_CASE(Add, Word64)
ATOMIC_CASE(Sub, Word32)
ATOMIC_CASE(Sub, Word64)
ATOMIC_CASE(And, Word32)
ATOMIC_CASE(And, Word64)
ATOMIC_CASE(Or, Word32)
ATOMIC_CASE(Or, Word64)
ATOMIC_CASE(Xor, Word32)
ATOMIC_CASE(Xor, Word64)
ATOMIC_CASE(Exchange, Word32)
ATOMIC_CASE(CompareExchange, Word32)
#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
@ -2166,6 +2171,18 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_X64
void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }

View File

@ -442,6 +442,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitAtomicBinaryOperation(Node* node, ArchOpcode int8_op,
ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op);
void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
ArchOpcode uint16_op,
ArchOpcode uint32_op,
ArchOpcode uint64_op);
// ===========================================================================

View File

@ -401,6 +401,12 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Int32) \
V(Uint32)
#define ATOMIC64_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
V(Uint32) \
V(Uint64)
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
@ -606,6 +612,14 @@ struct MachineOperatorGlobalCache {
ATOMIC_OP(Word32AtomicXor, type)
ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
#undef ATOMIC_OP_LIST
#define ATOMIC64_OP_LIST(type) \
ATOMIC_OP(Word64AtomicAdd, type) \
ATOMIC_OP(Word64AtomicSub, type) \
ATOMIC_OP(Word64AtomicAnd, type) \
ATOMIC_OP(Word64AtomicOr, type) \
ATOMIC_OP(Word64AtomicXor, type)
ATOMIC64_TYPE_LIST(ATOMIC64_OP_LIST)
#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
#define ATOMIC_COMPARE_EXCHANGE(Type) \
@ -944,6 +958,56 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType rep) {
UNREACHABLE();
}
// Returns the cached Word64AtomicAdd operator for memory type |rep|.
// Only the unsigned 8/16/32/64-bit types in ATOMIC64_TYPE_LIST are valid;
// any other type is a bug.
const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType rep) {
#define ADD(kRep) \
if (rep == MachineType::kRep()) { \
return &cache_.kWord64AtomicAdd##kRep; \
}
ATOMIC64_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
// Returns the cached Word64AtomicSub operator for memory type |rep|.
// Valid types are the unsigned widths in ATOMIC64_TYPE_LIST.
const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType rep) {
#define SUB(kRep) \
if (rep == MachineType::kRep()) { \
return &cache_.kWord64AtomicSub##kRep; \
}
ATOMIC64_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
// Returns the cached Word64AtomicAnd operator for memory type |rep|.
// Valid types are the unsigned widths in ATOMIC64_TYPE_LIST.
const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType rep) {
#define AND(kRep) \
if (rep == MachineType::kRep()) { \
return &cache_.kWord64AtomicAnd##kRep; \
}
ATOMIC64_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
// Returns the cached Word64AtomicOr operator for memory type |rep|.
// Valid types are the unsigned widths in ATOMIC64_TYPE_LIST.
const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType rep) {
#define OR(kRep) \
if (rep == MachineType::kRep()) { \
return &cache_.kWord64AtomicOr##kRep; \
}
ATOMIC64_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
// Returns the cached Word64AtomicXor operator for memory type |rep|.
// Valid types are the unsigned widths in ATOMIC64_TYPE_LIST.
const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType rep) {
#define XOR(kRep) \
if (rep == MachineType::kRep()) { \
return &cache_.kWord64AtomicXor##kRep; \
}
ATOMIC64_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
}
const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
return OptionalOperator(flags_ & kSpeculationFence,
&cache_.kSpeculationFence);
@ -1006,6 +1070,7 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
#undef MACHINE_TYPE_LIST
#undef MACHINE_REPRESENTATION_LIST
#undef ATOMIC_TYPE_LIST
#undef ATOMIC64_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
#undef SIMD_FORMAT_LIST

View File

@ -627,6 +627,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word32AtomicOr(MachineType rep);
// atomic-xor [base + index], value
const Operator* Word32AtomicXor(MachineType rep);
// atomic-load [base + index]
const Operator* Word64AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
const Operator* Word64AtomicSub(MachineType rep);
// atomic-and [base + index], value
const Operator* Word64AtomicAnd(MachineType rep);
// atomic-or [base + index], value
const Operator* Word64AtomicOr(MachineType rep);
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType rep);
const OptionalOperator SpeculationFence();

View File

@ -629,6 +629,11 @@
V(Word32AtomicAnd) \
V(Word32AtomicOr) \
V(Word32AtomicXor) \
V(Word64AtomicAdd) \
V(Word64AtomicSub) \
V(Word64AtomicAnd) \
V(Word64AtomicOr) \
V(Word64AtomicXor) \
V(SpeculationFence) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \

View File

@ -1688,6 +1688,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32AtomicAnd:
case IrOpcode::kWord32AtomicOr:
case IrOpcode::kWord32AtomicXor:
case IrOpcode::kWord64AtomicAdd:
case IrOpcode::kWord64AtomicSub:
case IrOpcode::kWord64AtomicAnd:
case IrOpcode::kWord64AtomicOr:
case IrOpcode::kWord64AtomicXor:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:

View File

@ -4542,25 +4542,45 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
inputs[0], inputs[1]);
}
#define ATOMIC_BINOP_LIST(V) \
V(I32AtomicAdd, Add, Uint32) \
V(I32AtomicAdd8U, Add, Uint8) \
V(I32AtomicAdd16U, Add, Uint16) \
V(I32AtomicSub, Sub, Uint32) \
V(I32AtomicSub8U, Sub, Uint8) \
V(I32AtomicSub16U, Sub, Uint16) \
V(I32AtomicAnd, And, Uint32) \
V(I32AtomicAnd8U, And, Uint8) \
V(I32AtomicAnd16U, And, Uint16) \
V(I32AtomicOr, Or, Uint32) \
V(I32AtomicOr8U, Or, Uint8) \
V(I32AtomicOr16U, Or, Uint16) \
V(I32AtomicXor, Xor, Uint32) \
V(I32AtomicXor8U, Xor, Uint8) \
V(I32AtomicXor16U, Xor, Uint16) \
V(I32AtomicExchange, Exchange, Uint32) \
V(I32AtomicExchange8U, Exchange, Uint8) \
V(I32AtomicExchange16U, Exchange, Uint16)
#define ATOMIC_BINOP_LIST(V) \
V(I32AtomicAdd, Add, Uint32, Word32) \
V(I64AtomicAdd, Add, Uint64, Word64) \
V(I32AtomicAdd8U, Add, Uint8, Word32) \
V(I32AtomicAdd16U, Add, Uint16, Word32) \
V(I64AtomicAdd8U, Add, Uint8, Word64) \
V(I64AtomicAdd16U, Add, Uint16, Word64) \
V(I64AtomicAdd32U, Add, Uint32, Word64) \
V(I32AtomicSub, Sub, Uint32, Word32) \
V(I64AtomicSub, Sub, Uint64, Word64) \
V(I32AtomicSub8U, Sub, Uint8, Word32) \
V(I32AtomicSub16U, Sub, Uint16, Word32) \
V(I64AtomicSub8U, Sub, Uint8, Word64) \
V(I64AtomicSub16U, Sub, Uint16, Word64) \
V(I64AtomicSub32U, Sub, Uint32, Word64) \
V(I32AtomicAnd, And, Uint32, Word32) \
V(I64AtomicAnd, And, Uint64, Word64) \
V(I32AtomicAnd8U, And, Uint8, Word32) \
V(I64AtomicAnd16U, And, Uint16, Word64) \
V(I32AtomicAnd16U, And, Uint16, Word32) \
V(I64AtomicAnd8U, And, Uint8, Word64) \
V(I64AtomicAnd32U, And, Uint32, Word64) \
V(I32AtomicOr, Or, Uint32, Word32) \
V(I64AtomicOr, Or, Uint64, Word64) \
V(I32AtomicOr8U, Or, Uint8, Word32) \
V(I32AtomicOr16U, Or, Uint16, Word32) \
V(I64AtomicOr8U, Or, Uint8, Word64) \
V(I64AtomicOr16U, Or, Uint16, Word64) \
V(I64AtomicOr32U, Or, Uint32, Word64) \
V(I32AtomicXor, Xor, Uint32, Word32) \
V(I64AtomicXor, Xor, Uint64, Word64) \
V(I32AtomicXor8U, Xor, Uint8, Word32) \
V(I32AtomicXor16U, Xor, Uint16, Word32) \
V(I64AtomicXor8U, Xor, Uint8, Word64) \
V(I64AtomicXor16U, Xor, Uint16, Word64) \
V(I64AtomicXor32U, Xor, Uint32, Word64) \
V(I32AtomicExchange, Exchange, Uint32, Word32) \
V(I32AtomicExchange8U, Exchange, Uint8, Word32) \
V(I32AtomicExchange16U, Exchange, Uint16, Word32)
#define ATOMIC_TERNARY_LIST(V) \
V(I32AtomicCompareExchange, CompareExchange, Uint32) \
@ -4583,15 +4603,15 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
case wasm::kExpr##Name: { \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], *effect_, *control_); \
break; \
#define BUILD_ATOMIC_BINOP(Name, Operation, Type, Prefix) \
case wasm::kExpr##Name: { \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], *effect_, *control_); \
break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP

View File

@ -482,6 +482,18 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
// Emits a compare-and-swap retry loop for a 64-bit-family atomic binop:
// load the current memory value into rax, compute (old <bin_inst> value)
// into a temp register, then lock-cmpxchg the temp back. If another thread
// changed memory between the load and the cmpxchg, ZF is clear and the
// loop retries. On exit rax holds the old memory value.
// The operand layout matches VisitWord64AtomicBinaryOperation: the value
// operand is InputRegister(0) and the memory operand starts at input 1.
#define ASSEMBLE_ATOMIC64_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
do { \
Label binop; \
__ bind(&binop); \
__ mov_inst(rax, i.MemoryOperand(1)); \
__ movq(i.TempRegister(0), rax); \
__ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
__ lock(); \
__ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
__ j(not_equal, &binop); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
@ -2724,6 +2736,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orl)
ATOMIC_BINOP_CASE(Xor, xorl)
#undef ATOMIC_BINOP_CASE
// Expands to the four width variants (Uint8/16/32/64) of one 64-bit atomic
// binop case. The sub-word variants explicitly zero-extend the old value
// that the cmpxchg loop leaves in rax (movzxbq/movzxwq); the 32-bit variant
// relies on x64's implicit zero-extension of 32-bit register writes, and
// the 64-bit variant needs no extension.
#define ATOMIC64_BINOP_CASE(op, inst) \
case kX64Word64Atomic##op##Uint8: \
ASSEMBLE_ATOMIC64_BINOP(inst, movb, cmpxchgb); \
__ movzxbq(rax, rax); \
break; \
case kX64Word64Atomic##op##Uint16: \
ASSEMBLE_ATOMIC64_BINOP(inst, movw, cmpxchgw); \
__ movzxwq(rax, rax); \
break; \
case kX64Word64Atomic##op##Uint32: \
ASSEMBLE_ATOMIC64_BINOP(inst, movl, cmpxchgl); \
break; \
case kX64Word64Atomic##op##Uint64: \
ASSEMBLE_ATOMIC64_BINOP(inst, movq, cmpxchgq); \
break;
ATOMIC64_BINOP_CASE(Add, addq)
ATOMIC64_BINOP_CASE(Sub, subq)
ATOMIC64_BINOP_CASE(And, andq)
ATOMIC64_BINOP_CASE(Or, orq)
ATOMIC64_BINOP_CASE(Xor, xorq)
#undef ATOMIC64_BINOP_CASE
case kWord32AtomicLoadInt8:
case kWord32AtomicLoadUint8:
case kWord32AtomicLoadInt16:
@ -2736,7 +2769,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
} // NOLINT(readability/fn_size)
#undef ASSEMBLE_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_COMPARE
#undef ASSEMBLE_MULT
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SSE_BINOP
#undef ASSEMBLE_SSE_UNOP
#undef ASSEMBLE_AVX_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC64_BINOP
namespace {
@ -2774,19 +2821,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
#undef ASSEMBLE_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_COMPARE
#undef ASSEMBLE_MULT
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SSE_BINOP
#undef ASSEMBLE_SSE_UNOP
#undef ASSEMBLE_AVX_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_ATOMIC_BINOP
} // namespace
// Assembles branches after this instruction.

View File

@ -234,7 +234,27 @@ namespace compiler {
V(X64S128Xor) \
V(X64S128Not) \
V(X64S128Select) \
V(X64S128Zero)
V(X64S128Zero) \
V(X64Word64AtomicAddUint8) \
V(X64Word64AtomicAddUint16) \
V(X64Word64AtomicAddUint32) \
V(X64Word64AtomicAddUint64) \
V(X64Word64AtomicSubUint8) \
V(X64Word64AtomicSubUint16) \
V(X64Word64AtomicSubUint32) \
V(X64Word64AtomicSubUint64) \
V(X64Word64AtomicAndUint8) \
V(X64Word64AtomicAndUint16) \
V(X64Word64AtomicAndUint32) \
V(X64Word64AtomicAndUint64) \
V(X64Word64AtomicOrUint8) \
V(X64Word64AtomicOrUint16) \
V(X64Word64AtomicOrUint32) \
V(X64Word64AtomicOrUint64) \
V(X64Word64AtomicXorUint8) \
V(X64Word64AtomicXorUint16) \
V(X64Word64AtomicXorUint32) \
V(X64Word64AtomicXorUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes

View File

@ -267,6 +267,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kLFence:
return kHasSideEffect;
case kX64Word64AtomicAddUint8:
case kX64Word64AtomicAddUint16:
case kX64Word64AtomicAddUint32:
case kX64Word64AtomicAddUint64:
case kX64Word64AtomicSubUint8:
case kX64Word64AtomicSubUint16:
case kX64Word64AtomicSubUint32:
case kX64Word64AtomicSubUint64:
case kX64Word64AtomicAndUint8:
case kX64Word64AtomicAndUint16:
case kX64Word64AtomicAndUint32:
case kX64Word64AtomicAndUint64:
case kX64Word64AtomicOrUint8:
case kX64Word64AtomicOrUint16:
case kX64Word64AtomicOrUint32:
case kX64Word64AtomicOrUint64:
case kX64Word64AtomicXorUint8:
case kX64Word64AtomicXorUint16:
case kX64Word64AtomicXorUint32:
case kX64Word64AtomicXorUint64:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE

View File

@ -2339,6 +2339,61 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
// Instruction selection for x64 Word64Atomic{Add,Sub,And,Or,Xor}: maps the
// atomic op's access width to the matching arch opcode and emits a single
// instruction. The result is fixed to rax because the code generator's
// cmpxchg retry loop leaves the old memory value there.
// NOTE(review): the declaration in instruction-selector.h names the last
// parameter uint64_op; consider aligning the two names.
void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode word64_op) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
// Select the opcode by the memory access width of this atomic op; only the
// unsigned widths exist for the 64-bit atomics.
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
opcode = uint8_op;
} else if (type == MachineType::Uint16()) {
opcode = uint16_op;
} else if (type == MachineType::Uint32()) {
opcode = uint32_op;
} else if (type == MachineType::Uint64()) {
opcode = word64_op;
} else {
UNREACHABLE();
return;
}
InstructionOperand outputs[1];
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
// Operand order matters: ASSEMBLE_ATOMIC64_BINOP reads the value from
// InputRegister(0) and the memory operand starting at input 1. Unique
// registers keep the inputs distinct from rax and the temp, which the
// retry loop clobbers.
inputs[input_count++] = g.UseUniqueRegister(value);
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
// cmpxchg implicitly uses rax for the old value, so the node's result is
// fixed there; the temp holds the computed new value.
outputs[0] = g.DefineAsFixed(node, rax);
InstructionOperand temp[1];
temp[0] = g.TempRegister();
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
// Defines InstructionSelector::VisitWord64Atomic{Add,Sub,And,Or,Xor} by
// forwarding to VisitWord64AtomicBinaryOperation with the four
// width-specific x64 opcodes for that operation.
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
VisitWord64AtomicBinaryOperation( \
node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
V(F32x4) \
V(I32x4) \

View File

@ -61,18 +61,38 @@ struct WasmException;
V(I32AtomicAdd, Uint32) \
V(I32AtomicAdd8U, Uint8) \
V(I32AtomicAdd16U, Uint16) \
V(I64AtomicAdd, Uint64) \
V(I64AtomicAdd8U, Uint8) \
V(I64AtomicAdd16U, Uint16) \
V(I64AtomicAdd32U, Uint32) \
V(I32AtomicSub, Uint32) \
V(I64AtomicSub, Uint64) \
V(I32AtomicSub8U, Uint8) \
V(I32AtomicSub16U, Uint16) \
V(I64AtomicSub8U, Uint8) \
V(I64AtomicSub16U, Uint16) \
V(I64AtomicSub32U, Uint32) \
V(I32AtomicAnd, Uint32) \
V(I64AtomicAnd, Uint64) \
V(I32AtomicAnd8U, Uint8) \
V(I32AtomicAnd16U, Uint16) \
V(I64AtomicAnd8U, Uint8) \
V(I64AtomicAnd16U, Uint16) \
V(I64AtomicAnd32U, Uint32) \
V(I32AtomicOr, Uint32) \
V(I64AtomicOr, Uint64) \
V(I32AtomicOr8U, Uint8) \
V(I32AtomicOr16U, Uint16) \
V(I64AtomicOr8U, Uint8) \
V(I64AtomicOr16U, Uint16) \
V(I64AtomicOr32U, Uint32) \
V(I32AtomicXor, Uint32) \
V(I64AtomicXor, Uint64) \
V(I32AtomicXor8U, Uint8) \
V(I32AtomicXor16U, Uint16) \
V(I64AtomicXor8U, Uint8) \
V(I64AtomicXor16U, Uint16) \
V(I64AtomicXor32U, Uint32) \
V(I32AtomicExchange, Uint32) \
V(I32AtomicExchange8U, Uint8) \
V(I32AtomicExchange16U, Uint16) \

View File

@ -60,6 +60,12 @@ namespace wasm {
CASE_I32_OP(name, str "32") \
CASE_UNSIGNED_OP(I32, name##8, str "8") \
CASE_UNSIGNED_OP(I32, name##16, str "16")
#define CASE_UNSIGNED_ALL_OP(name, str) \
CASE_U32_OP(name, str) \
CASE_I64_OP(name, str "64") \
CASE_UNSIGNED_OP(I64, name##8, str "8") \
CASE_UNSIGNED_OP(I64, name##16, str "16") \
CASE_UNSIGNED_OP(I64, name##32, str "32")
const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
switch (opcode) {
@ -247,13 +253,13 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
CASE_U32_OP(AtomicLoad, "atomic_load")
CASE_U32_OP(AtomicStore, "atomic_store")
CASE_U32_OP(AtomicAdd, "atomic_add")
CASE_U32_OP(AtomicSub, "atomic_sub")
CASE_U32_OP(AtomicAnd, "atomic_and")
CASE_U32_OP(AtomicOr, "atomic_or")
CASE_U32_OP(AtomicXor, "atomic_xor")
CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic_load")
CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic_store")
CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic_add")
CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic_sub")
CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic_and")
CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic_or")
CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic_xor")
CASE_U32_OP(AtomicExchange, "atomic_xchng")
CASE_U32_OP(AtomicCompareExchange, "atomic_cmpxchng")
@ -285,6 +291,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_SIMDI_OP
#undef CASE_SIGN_OP
#undef CASE_UNSIGNED_OP
#undef CASE_UNSIGNED_ALL_OP
#undef CASE_ALL_SIGN_OP
#undef CASE_CONVERT_OP
#undef CASE_CONVERT_SAT_OP

View File

@ -416,26 +416,54 @@ using WasmName = Vector<const char>;
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I64AtomicLoad, 0xfe11, l_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
V(I32AtomicLoad16U, 0xfe13, i_i) \
V(I64AtomicLoad8U, 0xfe14, l_i) \
V(I64AtomicLoad16U, 0xfe15, l_i) \
V(I64AtomicLoad32U, 0xfe16, l_i) \
V(I32AtomicStore, 0xfe17, v_ii) \
V(I64AtomicStore, 0xfe18, v_il) \
V(I32AtomicStore8U, 0xfe19, v_ii) \
V(I32AtomicStore16U, 0xfe1a, v_ii) \
V(I64AtomicStore8U, 0xfe1b, v_il) \
V(I64AtomicStore16U, 0xfe1c, v_il) \
V(I64AtomicStore32U, 0xfe1d, v_il) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I64AtomicAdd, 0xfe1f, l_il) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
V(I64AtomicAdd8U, 0xfe22, l_il) \
V(I64AtomicAdd16U, 0xfe23, l_il) \
V(I64AtomicAdd32U, 0xfe24, l_il) \
V(I32AtomicSub, 0xfe25, i_ii) \
V(I64AtomicSub, 0xfe26, l_il) \
V(I32AtomicSub8U, 0xfe27, i_ii) \
V(I32AtomicSub16U, 0xfe28, i_ii) \
V(I64AtomicSub8U, 0xfe29, l_il) \
V(I64AtomicSub16U, 0xfe2a, l_il) \
V(I64AtomicSub32U, 0xfe2b, l_il) \
V(I32AtomicAnd, 0xfe2c, i_ii) \
V(I64AtomicAnd, 0xfe2d, l_il) \
V(I32AtomicAnd8U, 0xfe2e, i_ii) \
V(I32AtomicAnd16U, 0xfe2f, i_ii) \
V(I64AtomicAnd8U, 0xfe30, l_il) \
V(I64AtomicAnd16U, 0xfe31, l_il) \
V(I64AtomicAnd32U, 0xfe32, l_il) \
V(I32AtomicOr, 0xfe33, i_ii) \
V(I64AtomicOr, 0xfe34, l_il) \
V(I32AtomicOr8U, 0xfe35, i_ii) \
V(I32AtomicOr16U, 0xfe36, i_ii) \
V(I64AtomicOr8U, 0xfe37, l_il) \
V(I64AtomicOr16U, 0xfe38, l_il) \
V(I64AtomicOr32U, 0xfe39, l_il) \
V(I32AtomicXor, 0xfe3a, i_ii) \
V(I64AtomicXor, 0xfe3b, l_il) \
V(I32AtomicXor8U, 0xfe3c, i_ii) \
V(I32AtomicXor16U, 0xfe3d, i_ii) \
V(I64AtomicXor8U, 0xfe3e, l_il) \
V(I64AtomicXor16U, 0xfe3f, l_il) \
V(I64AtomicXor32U, 0xfe40, l_il) \
V(I32AtomicExchange, 0xfe41, i_ii) \
V(I32AtomicExchange8U, 0xfe43, i_ii) \
V(I32AtomicExchange16U, 0xfe44, i_ii) \
@ -491,7 +519,8 @@ using WasmName = Vector<const char>;
V(d_id, kWasmF64, kWasmI32, kWasmF64) \
V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
V(f_if, kWasmF32, kWasmI32, kWasmF32) \
V(v_il, kWasmI64, kWasmI32, kWasmI64) \
V(v_il, kWasmStmt, kWasmI32, kWasmI64) \
V(l_il, kWasmI64, kWasmI32, kWasmI64) \
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32)
#define FOREACH_SIMD_SIGNATURE(V) \

View File

@ -237,6 +237,7 @@ v8_source_set("cctest_sources") {
"wasm/test-wasm-interpreter-entry.cc",
"wasm/test-wasm-stack.cc",
"wasm/test-wasm-trap-position.cc",
"wasm/wasm-atomics-utils.h",
"wasm/wasm-run-utils.cc",
"wasm/wasm-run-utils.h",
]
@ -337,6 +338,7 @@ v8_source_set("cctest_sources") {
"test-log-stack-tracer.cc",
"test-macro-assembler-x64.cc",
"test-run-wasm-relocation-x64.cc",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###

View File

@ -2,56 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/cctest/wasm/wasm-atomics-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
namespace wasm {
typedef uint32_t (*Uint32BinOp)(uint32_t, uint32_t);
typedef uint16_t (*Uint16BinOp)(uint16_t, uint16_t);
typedef uint8_t (*Uint8BinOp)(uint8_t, uint8_t);
template <typename T>
T Add(T a, T b) {
return a + b;
}
template <typename T>
T Sub(T a, T b) {
return a - b;
}
template <typename T>
T And(T a, T b) {
return a & b;
}
template <typename T>
T Or(T a, T b) {
return a | b;
}
template <typename T>
T Xor(T a, T b) {
return a ^ b;
}
template <typename T>
T Exchange(T a, T b) {
return b;
}
template <typename T>
T CompareExchange(T initial, T a, T b) {
if (initial == a) return b;
return a;
}
void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);

View File

@ -0,0 +1,164 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/cctest/wasm/wasm-atomics-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace atomics_64 {
// Runs the full-width 64-bit wasm atomic binop |wasm_op| against shared
// memory for every pair of uint64 inputs, checking both the returned old
// value and the resulting memory contents against |expected_op|.
void RunU64BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint64BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint64_t, uint64_t> r(execution_mode);
uint64_t* memory = r.builder().AddMemoryElems<uint64_t>(8);
r.builder().SetHasSharedMemory();
BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
MachineRepresentation::kWord64));
FOR_UINT64_INPUTS(i) {
uint64_t initial = *i;
FOR_UINT64_INPUTS(j) {
// Memory is re-seeded with |initial| before every call, so both checks
// below are against a known starting state.
r.builder().WriteMemory(&memory[0], initial);
// An atomic binop returns the value memory held before the update.
CHECK_EQ(initial, r.Call(*j));
uint64_t expected = expected_op(*i, *j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
}
// Full-width (64-bit) I64 atomic binop tests.
WASM_COMPILED_EXEC_TEST(I64AtomicAdd) {
RunU64BinOp(execution_mode, kExprI64AtomicAdd, Add);
}
WASM_COMPILED_EXEC_TEST(I64AtomicSub) {
RunU64BinOp(execution_mode, kExprI64AtomicSub, Sub);
}
WASM_COMPILED_EXEC_TEST(I64AtomicAnd) {
RunU64BinOp(execution_mode, kExprI64AtomicAnd, And);
}
WASM_COMPILED_EXEC_TEST(I64AtomicOr) {
RunU64BinOp(execution_mode, kExprI64AtomicOr, Or);
}
WASM_COMPILED_EXEC_TEST(I64AtomicXor) {
RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
}
// Runs the 32-bit ("32U") variant of an I64 atomic binop. The wasm-level
// result type is still i64 (WasmRunner<uint64_t, uint64_t>), but memory is
// laid out and compared as uint32 elements.
void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint64_t, uint64_t> r(execution_mode);
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
r.builder().SetHasSharedMemory();
BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
uint32_t initial = *i;
FOR_UINT32_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
// The returned old value must equal the seeded initial value.
CHECK_EQ(initial, r.Call(*j));
uint32_t expected = expected_op(*i, *j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
}
// 32-bit-wide I64 atomic binop tests.
WASM_COMPILED_EXEC_TEST(I64AtomicAdd32U) {
RunU32BinOp(execution_mode, kExprI64AtomicAdd32U, Add);
}
WASM_COMPILED_EXEC_TEST(I64AtomicSub32U) {
RunU32BinOp(execution_mode, kExprI64AtomicSub32U, Sub);
}
WASM_COMPILED_EXEC_TEST(I64AtomicAnd32U) {
RunU32BinOp(execution_mode, kExprI64AtomicAnd32U, And);
}
WASM_COMPILED_EXEC_TEST(I64AtomicOr32U) {
RunU32BinOp(execution_mode, kExprI64AtomicOr32U, Or);
}
WASM_COMPILED_EXEC_TEST(I64AtomicXor32U) {
RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
}
// Runs the 16-bit ("16U") variant of an I64 atomic binop. The wasm-level
// result type is still i64 (WasmRunner<uint64_t, uint64_t>), but memory is
// laid out and compared as uint16 elements.
// Parameter renamed from |mode| to |execution_mode| for consistency with
// the sibling helpers RunU64BinOp/RunU32BinOp/RunU8BinOp.
void RunU16BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
                 Uint16BinOp expected_op) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t, uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
  BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
                              MachineRepresentation::kWord16));
  FOR_UINT16_INPUTS(i) {
    uint16_t initial = *i;
    FOR_UINT16_INPUTS(j) {
      // Re-seed memory so each (i, j) pair starts from a known state.
      r.builder().WriteMemory(&memory[0], initial);
      // The atomic binop returns the old memory value.
      CHECK_EQ(initial, r.Call(*j));
      uint16_t expected = expected_op(*i, *j);
      CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
    }
  }
}
// 16-bit-wide I64 atomic binop tests.
WASM_COMPILED_EXEC_TEST(I64AtomicAdd16U) {
RunU16BinOp(execution_mode, kExprI64AtomicAdd16U, Add);
}
WASM_COMPILED_EXEC_TEST(I64AtomicSub16U) {
RunU16BinOp(execution_mode, kExprI64AtomicSub16U, Sub);
}
WASM_COMPILED_EXEC_TEST(I64AtomicAnd16U) {
RunU16BinOp(execution_mode, kExprI64AtomicAnd16U, And);
}
WASM_COMPILED_EXEC_TEST(I64AtomicOr16U) {
RunU16BinOp(execution_mode, kExprI64AtomicOr16U, Or);
}
WASM_COMPILED_EXEC_TEST(I64AtomicXor16U) {
RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
}
// Runs the 8-bit ("8U") variant of an I64 atomic binop. The wasm-level
// result type is still i64 (WasmRunner<uint64_t, uint64_t>), but memory is
// laid out and compared as uint8 elements.
void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint64_t, uint64_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
BUILD(r, WASM_ATOMICS_BINOP(wasm_op, WASM_I32V_1(0), WASM_GET_LOCAL(0),
MachineRepresentation::kWord8));
FOR_UINT8_INPUTS(i) {
uint8_t initial = *i;
FOR_UINT8_INPUTS(j) {
r.builder().WriteMemory(&memory[0], initial);
// The returned old value must equal the seeded initial value.
CHECK_EQ(initial, r.Call(*j));
uint8_t expected = expected_op(*i, *j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
}
// 8-bit-wide I64 atomic binop tests.
WASM_COMPILED_EXEC_TEST(I64AtomicAdd8U) {
RunU8BinOp(execution_mode, kExprI64AtomicAdd8U, Add);
}
WASM_COMPILED_EXEC_TEST(I64AtomicSub8U) {
RunU8BinOp(execution_mode, kExprI64AtomicSub8U, Sub);
}
WASM_COMPILED_EXEC_TEST(I64AtomicAnd8U) {
RunU8BinOp(execution_mode, kExprI64AtomicAnd8U, And);
}
WASM_COMPILED_EXEC_TEST(I64AtomicOr8U) {
RunU8BinOp(execution_mode, kExprI64AtomicOr8U, Or);
}
WASM_COMPILED_EXEC_TEST(I64AtomicXor8U) {
RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
}
} // namespace atomics_64
} // namespace wasm
} // namespace internal
} // namespace v8

View File

@ -0,0 +1,62 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef WASM_ATOMICOP_UTILS_H
#define WASM_ATOMICOP_UTILS_H
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
namespace v8 {
namespace internal {
namespace wasm {
typedef uint64_t (*Uint64BinOp)(uint64_t, uint64_t);
typedef uint32_t (*Uint32BinOp)(uint32_t, uint32_t);
typedef uint16_t (*Uint16BinOp)(uint16_t, uint16_t);
typedef uint8_t (*Uint8BinOp)(uint8_t, uint8_t);
// Scalar reference implementations of the wasm atomic binary operations.
// The atomics tests use these to compute the expected result and memory
// values for each operation.
template <typename T>
T Add(T lhs, T rhs) {
  return lhs + rhs;
}

template <typename T>
T Sub(T lhs, T rhs) {
  return lhs - rhs;
}

template <typename T>
T And(T lhs, T rhs) {
  return lhs & rhs;
}

template <typename T>
T Or(T lhs, T rhs) {
  return lhs | rhs;
}

template <typename T>
T Xor(T lhs, T rhs) {
  return lhs ^ rhs;
}

// An atomic exchange stores the incoming value unconditionally, so the
// reference result is simply |new_value|.
template <typename T>
T Exchange(T old_value, T new_value) {
  return new_value;
}

// Reference for compare-exchange: yields |replacement| when |initial|
// matches |expected|, otherwise |expected|.
template <typename T>
T CompareExchange(T initial, T expected, T replacement) {
  return initial == expected ? replacement : expected;
}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif