Revert "Revert of [Atomics] Implement ldaxr/stlxr instructions in ARM64 simulator (patchset #8 id:140001 of https://codereview.chromium.org/2711473002/ )"

This reverts commit 2362f869a4.

BUG=v8:4614

Review-Url: https://codereview.chromium.org/2720133004
Cr-Commit-Position: refs/heads/master@{#43467}
Author: aseemgarg (2017-02-27 19:31:39 -08:00), committed by Commit bot
Parent: 6543519977
Commit: 048a0a13e7
7 changed files with 771 additions and 6 deletions
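For context, the re-landed patch teaches the simulator the load-acquire-exclusive / store-release-exclusive pairing that atomics lower to. Below is a minimal, illustrative sketch (not part of this commit) of that pattern, written against V8's ARM64 MacroAssembler in the same style as the new cctest further down; it assumes a MacroAssembler named masm and "#define __ masm." are in scope, and the register choices and the increment are arbitrary.

Label retry;
__ Bind(&retry);
__ ldaxr(w1, x0);            // load-acquire exclusive from [x0]
__ Add(w1, w1, Operand(1));  // modify the loaded value
__ stlxr(w2, w1, x0);        // store-release exclusive; writes 0 to w2 on success
__ Cbnz(w2, &retry);         // retry if the reservation was lost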


@@ -199,7 +199,14 @@ const unsigned kFloatExponentBits = 8;
V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits) \
V_(CRn, 15, 12, Bits) \
V_(CRm, 11, 8, Bits)
V_(CRm, 11, 8, Bits) \
\
/* Load-/store-exclusive */ \
V_(LoadStoreXLoad, 22, 22, Bits) \
V_(LoadStoreXNotExclusive, 23, 23, Bits) \
V_(LoadStoreXAcquireRelease, 15, 15, Bits) \
V_(LoadStoreXSizeLog2, 31, 30, Bits) \
V_(LoadStoreXPair, 21, 21, Bits)
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
/* NZCV */ \
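The new field definitions above record where the exclusive-access information lives in the instruction word; the simulator later turns the size field into a byte count via 1 << instr->LoadStoreXSizeLog2(). A self-contained worked example of that decode (the encoding constant is my own and is believed to be LDAXR w1, [x1], whose size bits [31:30] are 0b10):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t instr = 0x885FFC21;               // hypothetical raw word for LDAXR w1, [x1]
  unsigned size_log2 = (instr >> 30) & 0x3;  // LoadStoreXSizeLog2, bits [31:30]
  unsigned access_size = 1u << size_log2;    // 4 bytes for the W-register form
  std::printf("access size: %u bytes\n", access_size);
  return 0;
}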


@@ -55,6 +55,9 @@ TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
// static
base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
LAZY_INSTANCE_INITIALIZER;
// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
void Simulator::TraceSim(const char* format, ...) {
@@ -429,6 +432,7 @@ void Simulator::ResetState() {
Simulator::~Simulator() {
global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
delete[] reinterpret_cast<byte*>(stack_);
if (FLAG_log_instruction_stats) {
delete instrument_;
@@ -1628,6 +1632,15 @@ void Simulator::LoadStoreHelper(Instruction* instr,
uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t stack = 0;
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad(address);
} else {
local_monitor_.NotifyStore(address);
global_monitor_.Pointer()->NotifyStore_Locked(address,
&global_monitor_processor_);
}
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
// to be interrupted in between. The simulator is not thread safe and V8 does
@@ -1730,6 +1743,19 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
uintptr_t address2 = address + access_size;
uintptr_t stack = 0;
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad(address);
local_monitor_.NotifyLoad(address2);
} else {
local_monitor_.NotifyStore(address);
local_monitor_.NotifyStore(address2);
global_monitor_.Pointer()->NotifyStore_Locked(address,
&global_monitor_processor_);
global_monitor_.Pointer()->NotifyStore_Locked(address2,
&global_monitor_processor_);
}
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
// to be interrupted in between. The simulator is not thread safe and V8 does
@@ -1853,6 +1879,9 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt();
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(address);
switch (instr->Mask(LoadLiteralMask)) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
// LOG_FP_REGS), then print a more detailed log.
@@ -1906,8 +1935,81 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
}
}
Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
switch (size) {
case 0:
return TransactionSize::None;
case 1:
return TransactionSize::Byte;
case 2:
return TransactionSize::HalfWord;
case 4:
return TransactionSize::Word;
default:
UNREACHABLE();
}
return TransactionSize::None;
}
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
// TODO(binji)
unsigned rs = instr->Rs();
unsigned rt = instr->Rt();
unsigned rn = instr->Rn();
LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
instr->Mask(LoadStoreAcquireReleaseMask));
int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
int32_t is_not_exclusive = instr->LoadStoreXNotExclusive();
int32_t is_load = instr->LoadStoreXLoad();
int32_t is_pair = instr->LoadStoreXPair();
DCHECK_NE(is_acquire_release, 0);
DCHECK_EQ(is_not_exclusive, 0); // Non exclusive unimplemented.
DCHECK_EQ(is_pair, 0); // Pair unimplemented.
unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
DCHECK(address % access_size == 0);
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (is_load != 0) {
local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
global_monitor_.Pointer()->NotifyLoadExcl_Locked(
address, &global_monitor_processor_);
switch (op) {
case LDAXR_b:
set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
break;
case LDAXR_h:
set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
break;
case LDAXR_w:
set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
break;
default:
UNIMPLEMENTED();
}
LogRead(address, access_size, rt);
} else {
if (local_monitor_.NotifyStoreExcl(address,
get_transaction_size(access_size)) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
address, &global_monitor_processor_)) {
switch (op) {
case STLXR_b:
MemoryWrite<uint8_t>(address, wreg(rt));
break;
case STLXR_h:
MemoryWrite<uint16_t>(address, wreg(rt));
break;
case STLXR_w:
MemoryWrite<uint32_t>(address, wreg(rt));
break;
default:
UNIMPLEMENTED();
}
LogWrite(address, access_size, rt);
set_wreg(rs, 0);
} else {
set_wreg(rs, 1);
}
}
}
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
@@ -3877,6 +3979,186 @@ void Simulator::DoPrintf(Instruction* instr) {
delete[] format;
}
Simulator::LocalMonitor::LocalMonitor()
: access_state_(MonitorAccess::Open),
tagged_addr_(0),
size_(TransactionSize::None) {}
void Simulator::LocalMonitor::Clear() {
access_state_ = MonitorAccess::Open;
tagged_addr_ = 0;
size_ = TransactionSize::None;
}
void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) {
if (access_state_ == MonitorAccess::Exclusive) {
// A non exclusive load could clear the local monitor. As a result, it's
// most strict to unconditionally clear the local monitor on load.
Clear();
}
}
void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
TransactionSize size) {
access_state_ = MonitorAccess::Exclusive;
tagged_addr_ = addr;
size_ = size;
}
void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) {
if (access_state_ == MonitorAccess::Exclusive) {
// A non exclusive store could clear the local monitor. As a result, it's
// most strict to unconditionally clear the local monitor on store.
Clear();
}
}
bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr,
TransactionSize size) {
if (access_state_ == MonitorAccess::Exclusive) {
// It is allowed for a processor to require that the address matches
// exactly (B2.10.1), so this comparison does not mask addr.
if (addr == tagged_addr_ && size_ == size) {
Clear();
return true;
} else {
// It is implementation-defined whether an exclusive store to a
// non-tagged address will update memory. As a result, it's most strict
// to unconditionally clear the local monitor.
Clear();
return false;
}
} else {
DCHECK(access_state_ == MonitorAccess::Open);
return false;
}
}
Simulator::GlobalMonitor::Processor::Processor()
: access_state_(MonitorAccess::Open),
tagged_addr_(0),
next_(nullptr),
prev_(nullptr),
failure_counter_(0) {}
void Simulator::GlobalMonitor::Processor::Clear_Locked() {
access_state_ = MonitorAccess::Open;
tagged_addr_ = 0;
}
void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
uintptr_t addr) {
access_state_ = MonitorAccess::Exclusive;
tagged_addr_ = addr;
}
void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
uintptr_t addr, bool is_requesting_processor) {
if (access_state_ == MonitorAccess::Exclusive) {
// A non exclusive store could clear the global monitor. As a result, it's
// most strict to unconditionally clear global monitors on store.
Clear_Locked();
}
}
bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
uintptr_t addr, bool is_requesting_processor) {
if (access_state_ == MonitorAccess::Exclusive) {
if (is_requesting_processor) {
// It is allowed for a processor to require that the address matches
// exactly (B2.10.2), so this comparison does not mask addr.
if (addr == tagged_addr_) {
Clear_Locked();
// Introduce occasional stxr failures. This is to simulate the
// behavior of hardware, which can randomly fail due to background
// cache evictions.
if (failure_counter_++ >= kMaxFailureCounter) {
failure_counter_ = 0;
return false;
} else {
return true;
}
}
} else if ((addr & kExclusiveTaggedAddrMask) ==
(tagged_addr_ & kExclusiveTaggedAddrMask)) {
// Check the masked addresses when responding to a successful lock by
// another processor so the implementation is more conservative (i.e. the
// granularity of locking is as large as possible.)
Clear_Locked();
return false;
}
}
return false;
}
Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
Processor* processor) {
processor->NotifyLoadExcl_Locked(addr);
PrependProcessor_Locked(processor);
}
void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr,
Processor* processor) {
// Notify each processor of the store operation.
for (Processor* iter = head_; iter; iter = iter->next_) {
bool is_requesting_processor = iter == processor;
iter->NotifyStore_Locked(addr, is_requesting_processor);
}
}
bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr,
Processor* processor) {
DCHECK(IsProcessorInLinkedList_Locked(processor));
if (processor->NotifyStoreExcl_Locked(addr, true)) {
// Notify the other processors that this StoreExcl succeeded.
for (Processor* iter = head_; iter; iter = iter->next_) {
if (iter != processor) {
iter->NotifyStoreExcl_Locked(addr, false);
}
}
return true;
} else {
return false;
}
}
bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
Processor* processor) const {
return head_ == processor || processor->next_ || processor->prev_;
}
void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
if (IsProcessorInLinkedList_Locked(processor)) {
return;
}
if (head_) {
head_->prev_ = processor;
}
processor->prev_ = nullptr;
processor->next_ = head_;
head_ = processor;
}
void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
base::LockGuard<base::Mutex> lock_guard(&mutex);
if (!IsProcessorInLinkedList_Locked(processor)) {
return;
}
if (processor->prev_) {
processor->prev_->next_ = processor->next_;
} else {
head_ = processor->next_;
}
if (processor->next_) {
processor->next_->prev_ = processor->prev_;
}
processor->prev_ = nullptr;
processor->next_ = nullptr;
}
#endif // USE_SIMULATOR
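To make the monitor interplay above concrete, here is a minimal standalone model (not V8 code, and it ignores transaction sizes) of the local monitor's state machine, reduced to the scenario the new tests exercise: an ordinary store between the exclusive load and the exclusive store drops the reservation, so the store-exclusive fails and the simulator writes 1 into Rs.

#include <cstdint>
#include <cstdio>

enum class Access { Open, Exclusive };

struct LocalMonitorModel {
  Access state = Access::Open;
  uintptr_t tagged_addr = 0;

  void NotifyLoadExcl(uintptr_t addr) { state = Access::Exclusive; tagged_addr = addr; }
  void NotifyStore(uintptr_t) { state = Access::Open; }  // any store clears the reservation
  bool NotifyStoreExcl(uintptr_t addr) {
    bool ok = state == Access::Exclusive && addr == tagged_addr;
    state = Access::Open;
    return ok;
  }
};

int main() {
  LocalMonitorModel m;
  m.NotifyLoadExcl(0x1000);  // ldaxr
  m.NotifyStore(0x2000);     // unrelated str breaks the reservation
  std::printf("stlxr %s\n", m.NotifyStoreExcl(0x1000) ? "succeeds" : "fails");
  return 0;
}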


@@ -865,6 +865,97 @@ class Simulator : public DecoderVisitor {
char* last_debugger_input() { return last_debugger_input_; }
char* last_debugger_input_;
// Synchronization primitives. See ARM DDI 0487A.a, B2.10. Pair types not
// implemented.
enum class MonitorAccess {
Open,
Exclusive,
};
enum class TransactionSize {
None = 0,
Byte = 1,
HalfWord = 2,
Word = 4,
};
TransactionSize get_transaction_size(unsigned size);
// The least-significant bits of the address are ignored. The number of bits
// is implementation-defined, between 3 and 11. See ARM DDI 0487A.a, B2.10.3.
static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
class LocalMonitor {
public:
LocalMonitor();
// These functions manage the state machine for the local monitor, but do
// not actually perform loads and stores. NotifyStoreExcl only returns
// true if the exclusive store is allowed; the global monitor will still
// have to be checked to see whether the memory should be updated.
void NotifyLoad(uintptr_t addr);
void NotifyLoadExcl(uintptr_t addr, TransactionSize size);
void NotifyStore(uintptr_t addr);
bool NotifyStoreExcl(uintptr_t addr, TransactionSize size);
private:
void Clear();
MonitorAccess access_state_;
uintptr_t tagged_addr_;
TransactionSize size_;
};
class GlobalMonitor {
public:
GlobalMonitor();
class Processor {
public:
Processor();
private:
friend class GlobalMonitor;
// These functions manage the state machine for the global monitor, but do
// not actually perform loads and stores.
void Clear_Locked();
void NotifyLoadExcl_Locked(uintptr_t addr);
void NotifyStore_Locked(uintptr_t addr, bool is_requesting_processor);
bool NotifyStoreExcl_Locked(uintptr_t addr, bool is_requesting_processor);
MonitorAccess access_state_;
uintptr_t tagged_addr_;
Processor* next_;
Processor* prev_;
// A stxr can fail due to background cache evictions. Rather than
// simulating this, we'll just occasionally introduce cases where an
// exclusive store fails. This will happen once after every
// kMaxFailureCounter exclusive stores.
static const int kMaxFailureCounter = 5;
int failure_counter_;
};
// Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
base::Mutex mutex;
void NotifyLoadExcl_Locked(uintptr_t addr, Processor* processor);
void NotifyStore_Locked(uintptr_t addr, Processor* processor);
bool NotifyStoreExcl_Locked(uintptr_t addr, Processor* processor);
// Called when the simulator is destroyed.
void RemoveProcessor(Processor* processor);
private:
bool IsProcessorInLinkedList_Locked(Processor* processor) const;
void PrependProcessor_Locked(Processor* processor);
Processor* head_;
};
LocalMonitor local_monitor_;
GlobalMonitor::Processor global_monitor_processor_;
static base::LazyInstance<GlobalMonitor>::type global_monitor_;
private:
void Init(FILE* stream);
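One detail from the header worth spelling out: kExclusiveTaggedAddrMask discards the low 11 address bits, so when another processor completes an exclusive store, any address in the same 2KB-aligned region invalidates this processor's reservation. A small self-contained illustration with made-up addresses:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kMask = ~((uintptr_t{1} << 11) - 1);  // same form as kExclusiveTaggedAddrMask
  uintptr_t reserved = 0x12340;  // address tagged by an ldaxr
  uintptr_t other = 0x12344;     // different word, same 2KB granule
  std::printf("conflicts: %d\n", (reserved & kMask) == (other & kMask));  // prints 1
  return 0;
}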


@@ -224,6 +224,7 @@ v8_executable("cctest") {
"test-javascript-arm64.cc",
"test-js-arm64-variables.cc",
"test-run-wasm-relocation-arm64.cc",
"test-simulator-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
]


@@ -264,6 +264,7 @@
'test-javascript-arm64.cc',
'test-js-arm64-variables.cc',
'test-run-wasm-relocation-arm64.cc',
'test-simulator-arm64.cc',
],
'cctest_sources_s390': [ ### gcmole(arch:s390) ###
'test-assembler-s390.cc',


@@ -67,10 +67,10 @@ struct MemoryAccess {
MemoryAccess(Kind kind, Size size, size_t offset, int value = 0)
: kind(kind), size(size), offset(offset), value(value) {}
Kind kind;
Size size;
size_t offset;
int value;
Kind kind = Kind::None;
Size size = Size::Byte;
size_t offset = 0;
int value = 0;
};
struct TestData {
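The existing ARM32 test gets the same member-initializer treatment as the new ARM64 test below, presumably so that a default-constructed MemoryAccess (used for the cases with no intervening access) has fully defined contents rather than just a known kind. A trivial self-contained illustration of the difference, with hypothetical struct names:

#include <cstdio>

struct WithoutDefaults { int value; };   // default-constructed: value is indeterminate
struct WithDefaults { int value = 0; };  // default-constructed: value is zero

int main() {
  WithDefaults a;  // a.value == 0 regardless of how it is constructed
  std::printf("%d\n", a.value);
  return 0;
}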


@@ -0,0 +1,383 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "src/arm64/simulator-arm64.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#if defined(USE_SIMULATOR)
#ifndef V8_TARGET_LITTLE_ENDIAN
#error Expected ARM to be little-endian
#endif
using namespace v8::base;
using namespace v8::internal;
#define __ masm.
struct MemoryAccess {
enum class Kind {
None,
Load,
LoadExcl,
Store,
StoreExcl,
};
enum class Size {
Byte,
HalfWord,
Word,
};
MemoryAccess() : kind(Kind::None) {}
MemoryAccess(Kind kind, Size size, size_t offset, int value = 0)
: kind(kind), size(size), offset(offset), value(value) {}
Kind kind = Kind::None;
Size size = Size::Byte;
size_t offset = 0;
int value = 0;
};
struct TestData {
explicit TestData(int w) : w(w) {}
union {
int32_t w;
int16_t h;
int8_t b;
};
int dummy;
};
static void AssembleMemoryAccess(MacroAssembler* assembler, MemoryAccess access,
Register dest_reg, Register value_reg,
Register addr_reg) {
MacroAssembler& masm = *assembler;
__ Add(addr_reg, x0, Operand(access.offset));
switch (access.kind) {
case MemoryAccess::Kind::None:
break;
case MemoryAccess::Kind::Load:
switch (access.size) {
case MemoryAccess::Size::Byte:
__ ldrb(value_reg, MemOperand(addr_reg));
break;
case MemoryAccess::Size::HalfWord:
__ ldrh(value_reg, MemOperand(addr_reg));
break;
case MemoryAccess::Size::Word:
__ ldr(value_reg, MemOperand(addr_reg));
break;
}
break;
case MemoryAccess::Kind::LoadExcl:
switch (access.size) {
case MemoryAccess::Size::Byte:
__ ldaxrb(value_reg, addr_reg);
break;
case MemoryAccess::Size::HalfWord:
__ ldaxrh(value_reg, addr_reg);
break;
case MemoryAccess::Size::Word:
__ ldaxr(value_reg, addr_reg);
break;
}
break;
case MemoryAccess::Kind::Store:
switch (access.size) {
case MemoryAccess::Size::Byte:
__ Mov(value_reg, Operand(access.value));
__ strb(value_reg, MemOperand(addr_reg));
break;
case MemoryAccess::Size::HalfWord:
__ Mov(value_reg, Operand(access.value));
__ strh(value_reg, MemOperand(addr_reg));
break;
case MemoryAccess::Size::Word:
__ Mov(value_reg, Operand(access.value));
__ str(value_reg, MemOperand(addr_reg));
break;
}
break;
case MemoryAccess::Kind::StoreExcl:
switch (access.size) {
case MemoryAccess::Size::Byte:
__ Mov(value_reg, Operand(access.value));
__ stlxrb(dest_reg, value_reg, addr_reg);
break;
case MemoryAccess::Size::HalfWord:
__ Mov(value_reg, Operand(access.value));
__ stlxrh(dest_reg, value_reg, addr_reg);
break;
case MemoryAccess::Size::Word:
__ Mov(value_reg, Operand(access.value));
__ stlxr(dest_reg, value_reg, addr_reg);
break;
}
break;
}
}
static void AssembleLoadExcl(MacroAssembler* assembler, MemoryAccess access,
Register value_reg, Register addr_reg) {
DCHECK(access.kind == MemoryAccess::Kind::LoadExcl);
AssembleMemoryAccess(assembler, access, no_reg, value_reg, addr_reg);
}
static void AssembleStoreExcl(MacroAssembler* assembler, MemoryAccess access,
Register dest_reg, Register value_reg,
Register addr_reg) {
DCHECK(access.kind == MemoryAccess::Kind::StoreExcl);
AssembleMemoryAccess(assembler, access, dest_reg, value_reg, addr_reg);
}
static void TestInvalidateExclusiveAccess(
TestData initial_data, MemoryAccess access1, MemoryAccess access2,
MemoryAccess access3, int expected_res, TestData expected_data) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler masm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
AssembleLoadExcl(&masm, access1, w1, x1);
AssembleMemoryAccess(&masm, access2, w3, w2, x1);
AssembleStoreExcl(&masm, access3, w0, w3, x1);
__ br(lr);
CodeDesc desc;
masm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
TestData t = initial_data;
Simulator::CallArgument args[] = {
Simulator::CallArgument(reinterpret_cast<uintptr_t>(&t)),
Simulator::CallArgument::End()};
Simulator::current(isolate)->CallVoid(code->entry(), args);
int res = Simulator::current(isolate)->wreg(0);
CHECK_EQ(expected_res, res);
switch (access3.size) {
case MemoryAccess::Size::Byte:
CHECK_EQ(expected_data.b, t.b);
break;
case MemoryAccess::Size::HalfWord:
CHECK_EQ(expected_data.h, t.h);
break;
case MemoryAccess::Size::Word:
CHECK_EQ(expected_data.w, t.w);
break;
}
}
TEST(simulator_invalidate_exclusive_access) {
using Kind = MemoryAccess::Kind;
using Size = MemoryAccess::Size;
MemoryAccess ldaxr_w(Kind::LoadExcl, Size::Word, offsetof(TestData, w));
MemoryAccess stlxr_w(Kind::StoreExcl, Size::Word, offsetof(TestData, w), 7);
// Address mismatch.
TestInvalidateExclusiveAccess(
TestData(1), ldaxr_w,
MemoryAccess(Kind::LoadExcl, Size::Word, offsetof(TestData, dummy)),
stlxr_w, 1, TestData(1));
// Size mismatch.
TestInvalidateExclusiveAccess(
TestData(1), ldaxr_w, MemoryAccess(),
MemoryAccess(Kind::StoreExcl, Size::HalfWord, offsetof(TestData, w), 7),
1, TestData(1));
// Load between ldaxr/stlxr.
TestInvalidateExclusiveAccess(
TestData(1), ldaxr_w,
MemoryAccess(Kind::Load, Size::Word, offsetof(TestData, dummy)), stlxr_w,
1, TestData(1));
// Store between ldaxr/stlxr.
TestInvalidateExclusiveAccess(
TestData(1), ldaxr_w,
MemoryAccess(Kind::Store, Size::Word, offsetof(TestData, dummy)), stlxr_w,
1, TestData(1));
// Match
TestInvalidateExclusiveAccess(TestData(1), ldaxr_w, MemoryAccess(), stlxr_w,
0, TestData(7));
}
static int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
MemoryAccess access) {
HandleScope scope(isolate);
MacroAssembler masm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
AssembleMemoryAccess(&masm, access, w0, w2, x1);
__ br(lr);
CodeDesc desc;
masm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
Simulator::CallArgument args[] = {
Simulator::CallArgument(reinterpret_cast<uintptr_t>(test_data)),
Simulator::CallArgument::End()};
Simulator::current(isolate)->CallVoid(code->entry(), args);
return Simulator::current(isolate)->wreg(0);
}
class MemoryAccessThread : public v8::base::Thread {
public:
MemoryAccessThread()
: Thread(Options("MemoryAccessThread")),
test_data_(NULL),
is_finished_(false),
has_request_(false),
did_request_(false) {}
virtual void Run() {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
v8::Isolate::Scope scope(isolate);
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
while (!is_finished_) {
while (!(has_request_ || is_finished_)) {
has_request_cv_.Wait(&mutex_);
}
if (is_finished_) {
break;
}
ExecuteMemoryAccess(i_isolate, test_data_, access_);
has_request_ = false;
did_request_ = true;
did_request_cv_.NotifyOne();
}
}
void NextAndWait(TestData* test_data, MemoryAccess access) {
DCHECK(!has_request_);
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
test_data_ = test_data;
access_ = access;
has_request_ = true;
has_request_cv_.NotifyOne();
while (!did_request_) {
did_request_cv_.Wait(&mutex_);
}
did_request_ = false;
}
void Finish() {
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
is_finished_ = true;
has_request_cv_.NotifyOne();
}
private:
TestData* test_data_;
MemoryAccess access_;
bool is_finished_;
bool has_request_;
bool did_request_;
v8::base::Mutex mutex_;
v8::base::ConditionVariable has_request_cv_;
v8::base::ConditionVariable did_request_cv_;
};
TEST(simulator_invalidate_exclusive_access_threaded) {
using Kind = MemoryAccess::Kind;
using Size = MemoryAccess::Size;
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
TestData test_data(1);
MemoryAccessThread thread;
thread.Start();
MemoryAccess ldaxr_w(Kind::LoadExcl, Size::Word, offsetof(TestData, w));
MemoryAccess stlxr_w(Kind::StoreExcl, Size::Word, offsetof(TestData, w), 7);
// Exclusive store completed by another thread first.
test_data = TestData(1);
thread.NextAndWait(&test_data, MemoryAccess(Kind::LoadExcl, Size::Word,
offsetof(TestData, w)));
ExecuteMemoryAccess(isolate, &test_data, ldaxr_w);
thread.NextAndWait(&test_data, MemoryAccess(Kind::StoreExcl, Size::Word,
offsetof(TestData, w), 5));
CHECK_EQ(1, ExecuteMemoryAccess(isolate, &test_data, stlxr_w));
CHECK_EQ(5, test_data.w);
// Exclusive store completed by another thread; different address, but masked
// to same
test_data = TestData(1);
ExecuteMemoryAccess(isolate, &test_data, ldaxr_w);
thread.NextAndWait(&test_data, MemoryAccess(Kind::LoadExcl, Size::Word,
offsetof(TestData, dummy)));
thread.NextAndWait(&test_data, MemoryAccess(Kind::StoreExcl, Size::Word,
offsetof(TestData, dummy), 5));
CHECK_EQ(1, ExecuteMemoryAccess(isolate, &test_data, stlxr_w));
CHECK_EQ(1, test_data.w);
// Test failure when store between ldaxr/stlxr.
test_data = TestData(1);
ExecuteMemoryAccess(isolate, &test_data, ldaxr_w);
thread.NextAndWait(&test_data, MemoryAccess(Kind::Store, Size::Word,
offsetof(TestData, dummy)));
CHECK_EQ(1, ExecuteMemoryAccess(isolate, &test_data, stlxr_w));
CHECK_EQ(1, test_data.w);
thread.Finish();
thread.Join();
}
#undef __
#endif // USE_SIMULATOR