[ubsan] Replace AtomicElement with UB-safe alternative

The previous AtomicElement wrapper fundamentally relied on
reinterpret_casting a raw heap address to a pointer to a C++
object that was never constructed at that address, which is
undefined behavior. This patch replaces that pattern with an
ObjectSlot-based alternative that does not rely on UB.

Bug: v8:3770
Change-Id: I62fb3c7589ac59e9e18139b525174de77e0e2149
Reviewed-on: https://chromium-review.googlesource.com/c/1309297
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57197}
This commit is contained in:
Jakob Kummerow 2018-10-31 21:35:22 -07:00 committed by Commit Bot
parent 2ef0aa662f
commit 6b226ea2ff
6 changed files with 141 additions and 61 deletions

View File

@ -2325,6 +2325,7 @@ v8_source_set("v8_base") {
"src/objects/script.h",
"src/objects/shared-function-info-inl.h",
"src/objects/shared-function-info.h",
"src/objects/slots-atomic-inl.h",
"src/objects/slots-inl.h",
"src/objects/slots.h",
"src/objects/stack-frame-info-inl.h",

View File

@ -341,42 +341,6 @@ class AsAtomicPointer {
}
};
// This class is intended to be used as a wrapper for elements of an array
// that is passed in to STL functions such as std::sort. It ensures that
// element accesses are atomic.
// Usage example:
//   Object** given_array;
//   AtomicElement<Object*>* wrapped =
//       reinterpret_cast<AtomicElement<Object*>*>(given_array);
//   std::sort(wrapped, wrapped + given_length, cmp);
// where the cmp function uses the value() accessor to compare the elements.
// NOTE(review): the reinterpret_cast in the example treats raw array memory
// as AtomicElement objects that were never constructed there; that cast is
// undefined behavior, which is why this class is being removed in favor of
// the AtomicSlot iterator.
template <typename T>
class AtomicElement {
public:
// Copy constructor: reads the source and writes this element with relaxed
// atomics so concurrent readers/writers only ever observe whole values.
AtomicElement(const AtomicElement<T>& other) {
AsAtomicPointer::Relaxed_Store(
&value_, AsAtomicPointer::Relaxed_Load(&other.value_));
}
// Assignment mirrors the copy constructor: relaxed atomic load then store.
// (Returns void rather than AtomicElement&, unlike canonical operator=.)
void operator=(const AtomicElement<T>& other) {
AsAtomicPointer::Relaxed_Store(
&value_, AsAtomicPointer::Relaxed_Load(&other.value_));
}
// Atomically (relaxed) reads the wrapped value; comparators should use this.
T value() const { return AsAtomicPointer::Relaxed_Load(&value_); }
// Comparisons are defined on the atomically-loaded values so STL algorithms
// such as std::sort can order elements directly.
bool operator<(const AtomicElement<T>& other) const {
return value() < other.value();
}
bool operator==(const AtomicElement<T>& other) const {
return value() == other.value();
}
private:
T value_;
};
template <typename T,
typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
inline void CheckedIncrement(std::atomic<T>* number, T amount) {

View File

@ -17,6 +17,7 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots.h"
#include "src/utils.h"
@ -459,16 +460,13 @@ static void TraceTopFrame(Isolate* isolate) {
static void SortIndices(
Isolate* isolate, Handle<FixedArray> indices, uint32_t sort_size,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
base::AtomicElement<Object*>* start =
reinterpret_cast<base::AtomicElement<Object*>*>(
indices->GetFirstElementAddress().address());
AtomicSlot start(indices->GetFirstElementAddress());
std::sort(start, start + sort_size,
[isolate](const base::AtomicElement<Object*>& elementA,
const base::AtomicElement<Object*>& elementB) {
const Object* a = elementA.value();
const Object* b = elementB.value();
[isolate](Address elementA, Address elementB) {
const Object* a = reinterpret_cast<Object*>(elementA);
const Object* b = reinterpret_cast<Object*>(elementB);
if (a->IsSmi() || !a->IsUndefined(isolate)) {
if (!b->IsSmi() && b->IsUndefined(isolate)) {
return true;

View File

@ -89,6 +89,7 @@
#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/property-descriptor.h"
@ -17938,10 +17939,9 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
template <typename Dictionary>
struct EnumIndexComparator {
explicit EnumIndexComparator(Dictionary* dict) : dict(dict) {}
bool operator()(const base::AtomicElement<Smi*>& a,
const base::AtomicElement<Smi*>& b) {
PropertyDetails da(dict->DetailsAt(a.value()->value()));
PropertyDetails db(dict->DetailsAt(b.value()->value()));
bool operator()(Address a, Address b) {
PropertyDetails da(dict->DetailsAt(reinterpret_cast<Smi*>(a)->value()));
PropertyDetails db(dict->DetailsAt(reinterpret_cast<Smi*>(b)->value()));
return da.dictionary_index() < db.dictionary_index();
}
Dictionary* dict;
@ -17984,11 +17984,9 @@ void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
Derived* raw_dictionary = *dictionary;
FixedArray* raw_storage = *storage;
EnumIndexComparator<Derived> cmp(raw_dictionary);
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
base::AtomicElement<Smi*>* start =
reinterpret_cast<base::AtomicElement<Smi*>*>(
storage->GetFirstElementAddress().address());
AtomicSlot start(storage->GetFirstElementAddress());
std::sort(start, start + length, cmp);
for (int i = 0; i < length; i++) {
int index = Smi::ToInt(raw_storage->get(i));
@ -18016,11 +18014,9 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
DCHECK_EQ(array_size, length);
EnumIndexComparator<Derived> cmp(raw_dictionary);
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
base::AtomicElement<Smi*>* start =
reinterpret_cast<base::AtomicElement<Smi*>*>(
array->GetFirstElementAddress().address());
AtomicSlot start(array->GetFirstElementAddress());
std::sort(start, start + array_size, cmp);
}
return FixedArray::ShrinkOrEmpty(isolate, array, array_size);
@ -18058,11 +18054,9 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
}
EnumIndexComparator<Derived> cmp(raw_dictionary);
// Use AtomicElement wrapper to ensure that std::sort uses atomic load and
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
base::AtomicElement<Smi*>* start =
reinterpret_cast<base::AtomicElement<Smi*>*>(
array->GetFirstElementAddress().address());
AtomicSlot start(array->GetFirstElementAddress());
std::sort(start, start + array_size, cmp);
}

View File

@ -0,0 +1,101 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_SLOTS_ATOMIC_INL_H_
#define V8_OBJECTS_SLOTS_ATOMIC_INL_H_
#include "src/base/atomic-utils.h"
#include "src/objects/slots.h"
namespace v8 {
namespace internal {
// This class is intended to be used as a wrapper for elements of an array
// that is passed in to STL functions such as std::sort. It ensures that
// element accesses are atomic.
// Usage example:
//   FixedArray array;
//   AtomicSlot start(array->GetFirstElementAddress());
//   std::sort(start, start + given_length,
//             [](Address a, Address b) {
//               return my_comparison(a, b);
//             });
// Note how the comparator operates on Address values, representing the raw
// data found at the given heap location, so you probably want to construct
// an Object from it.
class AtomicSlot : public SlotBase<AtomicSlot> {
public:
// This class is a stand-in for "Address&" that uses custom atomic
// read/write operations for the actual memory accesses. Dereferencing an
// AtomicSlot yields a Reference (a proxy), never a real Address&.
class Reference {
public:
explicit Reference(Address* address) : address_(address) {}
// Copying a Reference copies the proxy (the pointed-to location), not the
// pointed-to value; this is what iterator semantics require.
Reference(const Reference& other) : address_(other.address_) {}
// Assigning one Reference to another copies the *value*: relaxed atomic
// load from the source location, relaxed atomic store to this location.
Reference& operator=(const Reference& other) {
base::AsAtomicWord::Relaxed_Store(
address_, base::AsAtomicWord::Relaxed_Load(other.address_));
return *this;
}
// Atomically (relaxed) stores a raw Address into this location.
Reference& operator=(Address value) {
base::AsAtomicWord::Relaxed_Store(address_, value);
return *this;
}
// Values of type AtomicSlot::reference must be implicitly convertible
// to AtomicSlot::value_type.
operator Address() const {
return base::AsAtomicWord::Relaxed_Load(address_);
}
// Swaps the two pointed-to values. Each access is individually atomic,
// but the swap as a whole is not; this matches the guarantees the old
// AtomicElement wrapper provided for concurrent marking.
void swap(Reference& other) {
Address tmp = value();
base::AsAtomicWord::Relaxed_Store(address_, other.value());
base::AsAtomicWord::Relaxed_Store(other.address_, tmp);
}
// Comparisons compare the atomically-loaded pointed-to values.
bool operator<(const Reference& other) const {
return value() < other.value();
}
bool operator==(const Reference& other) const {
return value() == other.value();
}
private:
// Relaxed atomic read of the word stored at address_.
Address value() const { return base::AsAtomicWord::Relaxed_Load(address_); }
Address* address_;
};
// The rest of this class follows C++'s "RandomAccessIterator" requirements.
// Most of the heavy lifting is inherited from SlotBase.
typedef int difference_type;
typedef Address value_type;
typedef Reference reference;
typedef void* pointer;  // Must be present, but should not be used.
typedef std::random_access_iterator_tag iterator_category;
AtomicSlot() : SlotBase(kNullAddress) {}
explicit AtomicSlot(Address address) : SlotBase(address) {}
explicit AtomicSlot(ObjectSlot slot) : SlotBase(slot.address()) {}
// Dereferencing produces a proxy Reference bound to this slot's address.
Reference operator*() const {
return Reference(reinterpret_cast<Address*>(address()));
}
// Random access: element i lives kPointerSize bytes per step from here.
Reference operator[](difference_type i) const {
return Reference(reinterpret_cast<Address*>(address() + i * kPointerSize));
}
// ADL-found swap for proxy references; takes them by value since References
// are cheap pointer-sized proxies and std::sort may pass temporaries.
friend void swap(Reference lhs, Reference rhs) { lhs.swap(rhs); }
// Iterator distance in elements (not bytes).
friend difference_type operator-(AtomicSlot a, AtomicSlot b) {
return static_cast<int>(a.address() - b.address()) / kPointerSize;
}
};
} // namespace internal
} // namespace v8
#endif // V8_OBJECTS_SLOTS_ATOMIC_INL_H_

View File

@ -13,10 +13,24 @@ namespace internal {
template <typename Subclass>
class SlotBase {
public:
Subclass& operator++() {
Subclass& operator++() { // Prefix increment.
ptr_ += kPointerSize;
return *static_cast<Subclass*>(this);
}
Subclass operator++(int) { // Postfix increment.
Subclass result = *static_cast<Subclass*>(this);
ptr_ += kPointerSize;
return result;
}
Subclass& operator--() { // Prefix decrement.
ptr_ -= kPointerSize;
return *static_cast<Subclass*>(this);
}
Subclass operator--(int) { // Postfix decrement.
Subclass result = *static_cast<Subclass*>(this);
ptr_ -= kPointerSize;
return result;
}
bool operator<(const SlotBase& other) const { return ptr_ < other.ptr_; }
bool operator<=(const SlotBase& other) const { return ptr_ <= other.ptr_; }
@ -30,10 +44,18 @@ class SlotBase {
}
Subclass operator-(int i) const { return Subclass(ptr_ - i * kPointerSize); }
Subclass operator+(int i) const { return Subclass(ptr_ + i * kPointerSize); }
friend Subclass operator+(int i, const Subclass& slot) {
return Subclass(slot.ptr_ + i * kPointerSize);
}
Subclass& operator+=(int i) {
ptr_ += i * kPointerSize;
return *static_cast<Subclass*>(this);
}
Subclass operator-(int i) { return Subclass(ptr_ - i * kPointerSize); }
Subclass& operator-=(int i) {
ptr_ -= i * kPointerSize;
return *static_cast<Subclass*>(this);
}
void* ToVoidPtr() const { return reinterpret_cast<void*>(address()); }