Fix data race in TypedArray::copyWithin

Just like many other operations implemented in elements.cc, copyWithin
also needs to use relaxed atomics if operating on a shared array buffer
to avoid races with other threads.
Since the ranges can overlap, this CL also adds a {Relaxed_Memmove}
function that either copies forwards (like {Relaxed_Memcpy}) or
backwards depending on the ordering of source and destination.

R=leszeks@chromium.org

Bug: chromium:1221035
Change-Id: I76b7e43810ac9b85f4ff9abbc5a0406618771c25
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3032084
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75752}
This commit is contained in:
Clemens Backes 2021-07-16 12:59:10 +02:00 committed by V8 LUCI CQ
parent 200fd550f5
commit 974190b484
5 changed files with 73 additions and 1 deletions

View File

@ -316,6 +316,43 @@ inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
}
}
// Relaxed-atomic equivalent of std::memmove: copies {bytes} bytes from {src}
// to {dst} where the two ranges may overlap. All accesses use relaxed atomic
// loads/stores so the copy may race with accesses from other threads (e.g. on
// a SharedArrayBuffer) without undefined behavior; no memory ordering beyond
// per-access atomicity is provided.
inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src,
                            size_t bytes) {
  // Use Relaxed_Memcpy if copying forwards is safe. This is the case if there
  // is no overlap, or {dst} lies before {src}.
  // The unsigned subtraction covers both cases with a single comparison: when
  // {dst} is below {src} the difference wraps around to a huge value that is
  // always >= {bytes}.
  if (reinterpret_cast<uintptr_t>(dst) - reinterpret_cast<uintptr_t>(src) >=
      bytes) {
    Relaxed_Memcpy(dst, src, bytes);
    return;
  }
  // Otherwise copy backwards: start past the end of both ranges and walk down
  // so already-copied destination bytes never clobber yet-to-be-read source
  // bytes.
  dst += bytes;
  src += bytes;
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  // Copy trailing bytes one at a time until {dst} is word-aligned.
  while (bytes > 0 &&
         !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
  // If {src} now shares {dst}'s word alignment, copy whole words; otherwise
  // the byte loop below handles everything that remains.
  if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    while (bytes >= kAtomicWordSize) {
      dst -= kAtomicWordSize;
      src -= kAtomicWordSize;
      bytes -= kAtomicWordSize;
      Relaxed_Store(
          reinterpret_cast<volatile AtomicWord*>(dst),
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
    }
  }
  // Copy any remaining leading bytes.
  while (bytes > 0) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
}
} // namespace base
} // namespace v8

View File

@ -99,7 +99,12 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
count = count * element_size;
uint8_t* data = static_cast<uint8_t*>(array->DataPtr());
std::memmove(data + to, data + from, count);
if (array->buffer().is_shared()) {
base::Relaxed_Memmove(reinterpret_cast<base::Atomic8*>(data + to),
reinterpret_cast<base::Atomic8*>(data + from), count);
} else {
std::memmove(data + to, data + from, count);
}
return *array;
}

View File

@ -286,5 +286,22 @@ TEST(Load) {
TestLoad<AtomicWord>();
}
// Exercises Relaxed_Memmove on overlapping ranges in both directions: once
// with the destination before the source (forward copy is safe) and once
// with the destination after the source (requires the backward copy path).
TEST(Relaxed_Memmove) {
  constexpr size_t kArraySize = 6;
  Atomic8 buffer[kArraySize];

  // Destination precedes source.
  for (size_t idx = 0; idx < kArraySize; ++idx) buffer[idx] = idx;
  Relaxed_Memmove(buffer + 2, buffer + 3, 2);
  constexpr uint8_t kExpectedForward[kArraySize] = {0, 1, 3, 4, 4, 5};
  for (size_t idx = 0; idx < kArraySize; ++idx) {
    CHECK_EQ(buffer[idx], kExpectedForward[idx]);
  }

  // Destination follows source.
  for (size_t idx = 0; idx < kArraySize; ++idx) buffer[idx] = idx;
  Relaxed_Memmove(buffer + 3, buffer + 2, 2);
  constexpr uint8_t kExpectedBackward[kArraySize] = {0, 1, 2, 2, 3, 5};
  for (size_t idx = 0; idx < kArraySize; ++idx) {
    CHECK_EQ(buffer[idx], kExpectedBackward[idx]);
  }
}
} // namespace base
} // namespace v8

View File

@ -1093,6 +1093,7 @@
'wasm/futex': [SKIP],
'regress/regress-1205290': [SKIP],
'regress/regress-1212404': [SKIP],
'regress/regress-1221035': [SKIP],
'regress/wasm/regress-1067621': [SKIP],
# BUG(v8:9975).

View File

@ -0,0 +1,12 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Regression test for chromium:1221035: TypedArray.prototype.copyWithin on a
// SharedArrayBuffer raced with concurrent accesses from another thread. The
// main thread copies within the shared buffer while a worker busy-reads it.

let sab = new SharedArrayBuffer(40);
let i32arr = new Int32Array(sab);
// d8 worker whose source is passed inline ({type: 'string'}); it spins
// reading element 1 of the posted array until it becomes non-zero.
let worker = new Worker(
    'onmessage = function(memory) { while (memory[1] == 0) {} };',
    {type: 'string'});
worker.postMessage(i32arr);
// Copy within the shared buffer while the worker is reading it.
// NOTE(review): the first argument is an array, not a number — presumably its
// string conversion yields index 0 and/or exercises a slow path that
// triggered the race; confirm against the original bug report.
i32arr.copyWithin(Array(0x8000).fill("a"), 0);
// Release the spinning worker so the test terminates.
i32arr[1] = 1;