[base] Remove OffsetFrom and AddressFrom

Uses of these two functions are spread across the code base, and their
purpose is often unclear. Historically, they were used to turn pointers
into integers in order to do computations on them. Today we have {Address},
which is a uintptr_t, so we can compute on it directly.

This also restricts the {RoundUp} and {RoundDown} function templates to
integral values (including {Address}).
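
A rough before/after sketch of the pattern this removes (hypothetical caller
code, not from this CL; only {OffsetFrom} and {Address} are real names):

  #include <cstdint>

  using Address = uintptr_t;  // v8::internal::Address is a plain uintptr_t.

  // Old style: detour through OffsetFrom to get an arithmetic-friendly value:
  //   intptr_t offset = OffsetFrom(addr);
  //   bool aligned = (offset & (page_size - 1)) == 0;
  // New style: Address is already an integer, so compute on it directly.
  bool IsPageAlignedDemo(Address addr, uintptr_t page_size) {
    return (addr & (page_size - 1)) == 0;  // page_size must be a power of two
  }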

R=mlippautz@chromium.org

Bug: v8:8015
Change-Id: Ia98fb826793ee5d3a2a5b18c09c329d088443772
Reviewed-on: https://chromium-review.googlesource.com/1233914
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56048}
Author: Clemens Hammacher <clemensh@chromium.org>
Date: 2018-09-19 17:24:17 +02:00 (committed by Commit Bot)
Parent: d333075033
Commit: 60d6f7c240
10 changed files with 29 additions and 44 deletions


@@ -347,47 +347,32 @@ V8_INLINE A implicit_cast(A x) {
 // write V8_2PART_UINT64_C(0x12345678,90123456);
 #define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
 
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-constexpr inline intptr_t OffsetFrom(T x) {
-  return x - static_cast<T>(0);
-}
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-constexpr inline T AddressFrom(intptr_t x) {
-  return static_cast<T>(static_cast<T>(0) + x);
-}
-
 // Return the largest multiple of m which is <= x.
 template <typename T>
 inline T RoundDown(T x, intptr_t m) {
+  STATIC_ASSERT(std::is_integral<T>::value);
   // m must be a power of two.
   DCHECK(m != 0 && ((m & (m - 1)) == 0));
-  return AddressFrom<T>(OffsetFrom(x) & -m);
+  return x & -m;
 }
 template <intptr_t m, typename T>
 constexpr inline T RoundDown(T x) {
+  STATIC_ASSERT(std::is_integral<T>::value);
   // m must be a power of two.
   STATIC_ASSERT(m != 0 && ((m & (m - 1)) == 0));
-  return AddressFrom<T>(OffsetFrom(x) & -m);
+  return x & -m;
 }
 
 // Return the smallest multiple of m which is >= x.
 template <typename T>
 inline T RoundUp(T x, intptr_t m) {
+  STATIC_ASSERT(std::is_integral<T>::value);
   return RoundDown<T>(static_cast<T>(x + m - 1), m);
 }
 template <intptr_t m, typename T>
 constexpr inline T RoundUp(T x) {
-  return RoundDown<m, T>(static_cast<T>(x + m - 1));
+  STATIC_ASSERT(std::is_integral<T>::value);
+  return RoundDown<m, T>(static_cast<T>(x + (m - 1)));
 }
 
 template <typename T, typename U>
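
(Aside, not part of the diff: the {x & -m} trick works because, for a
power-of-two m, -m in two's complement equals ~(m - 1), i.e. a mask with the
low bits cleared, so the AND rounds down to a multiple of m. A minimal
standalone check:)

  #include <cstdint>

  static_assert(static_cast<uint32_t>(-8) == ~uint32_t{7}, "-m == ~(m - 1)");
  static_assert((13u & ~uint32_t{7}) == 8u, "RoundDown(13, 8) == 8");
  static_assert(((13u + 7u) & ~uint32_t{7}) == 16u, "RoundUp(13, 8) == 16");

The heap code further down uses the equivalent {addr & ~mask} spelling, with
mask = m - 1.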


@@ -67,7 +67,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   }
 
   uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
-  uint8_t* aligned_base = RoundUp(base, alignment);
+  uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+      RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
 
   // Unmap extra memory reserved before and after the desired block.
   if (aligned_base != base) {
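
Since {RoundUp} no longer accepts pointers, each platform caller now rounds
the pointer's integral value and casts back, as above. The same pattern as a
self-contained sketch (hypothetical helper, not V8 code):

  #include <cstdint>

  // Round a pointer up to the next alignment boundary (power-of-two alignment).
  inline uint8_t* AlignPointerUpDemo(uint8_t* p, uintptr_t alignment) {
    uintptr_t value = reinterpret_cast<uintptr_t>(p);
    uintptr_t aligned = (value + alignment - 1) & ~(alignment - 1);
    return reinterpret_cast<uint8_t*>(aligned);
  }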


@@ -309,7 +309,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
 
   // Unmap memory allocated before the aligned base address.
   uint8_t* base = static_cast<uint8_t*>(result);
-  uint8_t* aligned_base = RoundUp(base, alignment);
+  uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+      RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
   if (aligned_base != base) {
     DCHECK_LT(base, aligned_base);
     size_t prefix_size = static_cast<size_t>(aligned_base - base);


@@ -822,7 +822,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   if (base == nullptr) return nullptr;  // Can't allocate, we're OOM.
 
   // If address is suitably aligned, we're done.
-  uint8_t* aligned_base = RoundUp(base, alignment);
+  uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
+      RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
   if (base == aligned_base) return reinterpret_cast<void*>(base);
 
   // Otherwise, free it and try a larger allocation.
@@ -843,7 +844,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
   // Try to trim the allocation by freeing the padded allocation and then
   // calling VirtualAlloc at the aligned base.
   CHECK(Free(base, padded_size));
-  aligned_base = RoundUp(base, alignment);
+  aligned_base = reinterpret_cast<uint8_t*>(
+      RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
   base = reinterpret_cast<uint8_t*>(
       VirtualAlloc(aligned_base, size, flags, protect));
   // We might not get the reduced allocation due to a race. In that case,


@@ -289,7 +289,7 @@ class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
 #undef FEEDBACK_VECTOR_FIELDS
 
   static const int kHeaderSize =
-      RoundUp<kPointerAlignment>(kUnalignedHeaderSize);
+      RoundUp<kPointerAlignment>(int{kUnalignedHeaderSize});
   static const int kFeedbackSlotsOffset = kHeaderSize;
 
   class BodyDescriptor;
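
(The {int{...}} wrapper is presumably needed because {kUnalignedHeaderSize} is
an enumerator: enum types fail the new std::is_integral check in {RoundUp},
while braced-init conversion to int satisfies it. A standalone illustration
with demo names, not V8's:)

  #include <type_traits>

  enum FieldOffsetsDemo { kUnalignedHeaderSizeDemo = 12 };

  // An unscoped enum is not an integral type, so it would trip
  // STATIC_ASSERT(std::is_integral<T>::value) inside RoundUp.
  static_assert(!std::is_integral<FieldOffsetsDemo>::value,
                "enums are not integral");
  // int{...} converts the enumerator to a genuine int.
  static_assert(std::is_integral<decltype(int{kUnalignedHeaderSizeDemo})>::value,
                "int{...} yields an integral type");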


@@ -261,7 +261,7 @@ class StackFrame {
   }
 
   // Get the id of this stack frame.
-  Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
+  Id id() const { return static_cast<Id>(caller_sp()); }
 
   // Get the top handler from the current stack iterator.
   inline StackHandler* top_handler() const;


@@ -2589,10 +2589,9 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
 
 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
-  intptr_t offset = OffsetFrom(address);
-  if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
+  if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
     return kPointerSize;
-  if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
+  if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
     return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
   return 0;
 }
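
A worked example of the logic above (demo constants assuming kPointerSize == 4,
kDoubleSize == 8, kDoubleAlignmentMask == 7; not taken from this CL):

  #include <cstdint>

  constexpr uintptr_t kDoubleAlignmentMaskDemo = 7;
  constexpr int kPointerSizeDemo = 4;
  constexpr int kDoubleSizeDemo = 8;

  enum AlignmentDemo { kDoubleAlignedDemo, kDoubleUnalignedDemo };

  constexpr int GetFillToAlignDemo(uintptr_t address, AlignmentDemo alignment) {
    if (alignment == kDoubleAlignedDemo &&
        (address & kDoubleAlignmentMaskDemo) != 0)
      return kPointerSizeDemo;  // pad up to the next 8-byte boundary
    if (alignment == kDoubleUnalignedDemo &&
        (address & kDoubleAlignmentMaskDemo) == 0)
      return kDoubleSizeDemo - kPointerSizeDemo;  // force misalignment
    return 0;
  }

  static_assert(GetFillToAlignDemo(0x1004, kDoubleAlignedDemo) == 4,
                "unaligned address needs one pointer word of filler");
  static_assert(GetFillToAlignDemo(0x1000, kDoubleAlignedDemo) == 0,
                "already 8-byte aligned");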


@@ -110,11 +110,10 @@ class Space;
 
 // Some assertion macros used in the debugging mode.
-#define DCHECK_PAGE_ALIGNED(address) \
-  DCHECK((OffsetFrom(address) & kPageAlignmentMask) == 0)
+#define DCHECK_PAGE_ALIGNED(address) DCHECK_EQ(0, (address)&kPageAlignmentMask)
 
 #define DCHECK_OBJECT_ALIGNED(address) \
-  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
+  DCHECK_EQ(0, (address)&kObjectAlignmentMask)
 
 #define DCHECK_OBJECT_SIZE(size) \
   DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
@@ -411,7 +410,7 @@ class MemoryChunk {
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
-    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+    return reinterpret_cast<MemoryChunk*>(a & ~kAlignmentMask);
   }
 
   // Only works if the object is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromHeapObject(const HeapObject* o) {
@@ -777,7 +776,7 @@ class Page : public MemoryChunk {
   // from [page_addr .. page_addr + kPageSize[. This only works if the object
   // is in fact in a page.
   static Page* FromAddress(Address addr) {
-    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
+    return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
   }
   static Page* FromHeapObject(const HeapObject* o) {
     return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
@@ -799,7 +798,7 @@ class Page : public MemoryChunk {
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address addr) {
-    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
+    return (addr & kPageAlignmentMask) == 0;
   }
 
   static bool IsAtObjectStart(Address addr) {
@@ -1127,7 +1126,7 @@ class SkipList {
   }
 
   static inline int RegionNumber(Address addr) {
-    return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
+    return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
   }
 
   static void Update(Address addr, int size) {


@@ -2299,11 +2299,11 @@ bool Foreign::IsNormalized(Object* value) {
 }
 
 Address Foreign::foreign_address() {
-  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
+  return READ_UINTPTR_FIELD(this, kForeignAddressOffset);
 }
 
 void Foreign::set_foreign_address(Address value) {
-  WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
+  WRITE_UINTPTR_FIELD(this, kForeignAddressOffset, value);
 }
 
 template <class Derived>


@@ -166,13 +166,11 @@ inline bool IsAligned(T value, U alignment) {
   return (value & (alignment - 1)) == 0;
 }
 
-// Returns true if (addr + offset) is aligned.
+// Returns true if {addr + offset} is aligned.
 inline bool IsAddressAligned(Address addr,
                              intptr_t alignment,
                              int offset = 0) {
-  intptr_t offs = OffsetFrom(addr + offset);
-  return IsAligned(offs, alignment);
+  return IsAligned(addr + offset, alignment);
 }