diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 55dfd1aaf6..51d4c9c908 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1934,7 +1934,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
AllocateRaw(size, type, AllocationOrigin::kRuntime, align);
#else
if (space == NEW_SPACE) {
- allocation = new_space()->AllocateRawUnaligned(size);
+ allocation = new_space()->AllocateRaw(
+ size, AllocationAlignment::kWordAligned);
} else if (space == RO_SPACE) {
allocation = read_only_space()->AllocateRaw(
size, AllocationAlignment::kWordAligned);
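The heap.cc change above is purely mechanical: the deserializer's reservation path stops calling the unaligned allocator directly and instead goes through the single public entry point, stating its alignment explicitly. A minimal sketch of the new call-site pattern (the surrounding locals here are placeholders, not the real ReserveSpace body; AllocationResult::To is the checked accessor the tests below use):

    // Hypothetical call site, assuming a NewSpace* reachable via new_space().
    AllocationResult allocation = new_space()->AllocateRaw(
        size, AllocationAlignment::kWordAligned);
    HeapObject obj;
    if (!allocation.To(&obj)) {
      // Retry result: the caller must create room (e.g. trigger a GC).
    }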
diff --git a/src/heap/new-spaces-inl.h b/src/heap/new-spaces-inl.h
index 8020c0dfdd..6b5677755c 100644
--- a/src/heap/new-spaces-inl.h
+++ b/src/heap/new-spaces-inl.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_NEW_SPACES_INL_H_
#define V8_HEAP_NEW_SPACES_INL_H_
+#include "src/common/globals.h"
+#include "src/heap/heap.h"
#include "src/heap/new-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/tagged-impl.h"
@@ -82,23 +84,54 @@ HeapObject SemiSpaceObjectIterator::Next() {
// -----------------------------------------------------------------------------
// NewSpace
-AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ AllocationResult result;
+
+ if (alignment != kWordAligned) {
+ result = AllocateFastAligned(size_in_bytes, alignment, origin);
+ } else {
+ result = AllocateFastUnaligned(size_in_bytes, origin);
+ }
+
+ if (!result.IsRetry()) {
+ return result;
+ } else {
+ return AllocateRawSlow(size_in_bytes, alignment, origin);
+ }
+}
+
+AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
+ Address top = allocation_info_.top();
+ if (allocation_info_.limit() < top + size_in_bytes) {
+ return AllocationResult::Retry();
+ }
+
+ HeapObject obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return obj;
+}
+
+AllocationResult NewSpace::AllocateFastAligned(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
Address top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (allocation_info_.limit() - top <
static_cast<uintptr_t>(aligned_size_in_bytes)) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- filler_size = Heap::GetFillToAlign(top, alignment);
- aligned_size_in_bytes = size_in_bytes + filler_size;
+ return AllocationResult::Retry();
}
HeapObject obj = HeapObject::FromAddress(top);
@@ -118,55 +151,6 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return obj;
}
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- Address top = allocation_info_.top();
- if (allocation_info_.limit() < top + size_in_bytes) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- }
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + size_in_bytes);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return obj;
-}
-
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (top() < top_on_previous_step_) {
- // Generated code decreased the top() pointer to do folded allocations
- DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
- Page::FromAllocationAreaAddress(top_on_previous_step_));
- top_on_previous_step_ = top();
- }
-#ifdef V8_HOST_ARCH_32_BIT
- return alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
-#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
-}
-
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
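The heart of the patch is this split: AllocateRaw now tries an inlined fast path that does nothing but a bounds check and a pointer bump, and only falls through to an out-of-line AllocateRawSlow when the linear allocation area is exhausted. A self-contained sketch of the pattern, with illustrative names (BumpSpace, AllocateFast) that are not V8 API:

    #include <cstddef>
    #include <cstdlib>

    struct AllocationResult {
      void* ptr;  // nullptr plays the role of AllocationResult::Retry().
      bool IsRetry() const { return ptr == nullptr; }
    };

    class BumpSpace {
     public:
      explicit BumpSpace(size_t capacity)
          : base_(static_cast<char*>(std::malloc(capacity))),
            top_(base_),
            limit_(base_ + capacity) {}
      ~BumpSpace() { std::free(base_); }

      // Fast path: a bounds check and a pointer bump, nothing else, so the
      // compiler can inline it at every allocation site.
      AllocationResult AllocateFast(size_t size) {
        if (static_cast<size_t>(limit_ - top_) < size) return {nullptr};
        void* result = top_;
        top_ += size;
        return {result};
      }

      // Slow path: kept out of line; this is where a real heap would try to
      // create room (cf. EnsureAllocation) before giving up. This sketch
      // simply retries the bounds check and fails.
      AllocationResult AllocateSlow(size_t size) { return AllocateFast(size); }

      // Single entry point, mirroring the NewSpace::AllocateRaw structure.
      AllocationResult AllocateRaw(size_t size) {
        AllocationResult result = AllocateFast(size);
        if (!result.IsRetry()) return result;
        return AllocateSlow(size);
      }

     private:
      char* base_;
      char* top_;
      char* limit_;
    };

The design choice is that the fast path never mutates anything except top, so nothing about refill, allocation observers, or step accounting leaks into inlined code; all of that stays behind the AllocateRawSlow call boundary.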
diff --git a/src/heap/new-spaces.cc b/src/heap/new-spaces.cc
index c4fcbabeef..6d9bbccefa 100644
--- a/src/heap/new-spaces.cc
+++ b/src/heap/new-spaces.cc
@@ -563,6 +563,91 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
+AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ if (top() < top_on_previous_step_) {
+ // Generated code decreased the top() pointer to do folded allocations
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_));
+ top_on_previous_step_ = top();
+ }
+#ifdef V8_HOST_ARCH_32_BIT
+ return alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
+#else
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): Consider using aligned allocations once the
+ // allocation alignment inconsistency is fixed. For now we keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
+ return AllocateRawUnaligned(size_in_bytes, origin);
+#endif
+}
+
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
+ Address top = allocation_info_.top();
+ if (allocation_info_.limit() < top + size_in_bytes) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+ return AllocationResult::Retry();
+ }
+
+ top = allocation_info_.top();
+ }
+
+ HeapObject obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return obj;
+}
+
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ Address top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+ if (allocation_info_.limit() - top <
+ static_cast<uintptr_t>(aligned_size_in_bytes)) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, alignment)) {
+ return AllocationResult::Retry();
+ }
+
+ top = allocation_info_.top();
+ filler_size = Heap::GetFillToAlign(top, alignment);
+ aligned_size_in_bytes = size_in_bytes + filler_size;
+ }
+
+ HeapObject obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + aligned_size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ if (filler_size > 0) {
+ obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
+ }
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return obj;
+}
+
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
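The bodies moved into new-spaces.cc are the pre-existing slow paths, reused nearly verbatim; the only arithmetic worth calling out is the filler computation in the aligned path. A simplified sketch of that computation for a plain power-of-two alignment (V8's actual Heap::GetFillToAlign also understands tagged-offset modes such as kDoubleUnaligned, which this ignores):

    #include <cstdint>

    // Simplified stand-in for Heap::GetFillToAlign: the number of filler
    // bytes needed before an object placed at `addr` is aligned to
    // `alignment`, which is assumed to be a power of two.
    inline int GetFillToAlign(uintptr_t addr, uintptr_t alignment) {
      return static_cast<int>((alignment - (addr & (alignment - 1))) &
                              (alignment - 1));
    }

The aligned path then reserves size_in_bytes plus that filler, writes a filler object over the leading padding (PrecedeWithFiller above), and hands back the properly aligned address after it.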
diff --git a/src/heap/new-spaces.h b/src/heap/new-spaces.h
index 73613152fa..e96ccb35f2 100644
--- a/src/heap/new-spaces.h
+++ b/src/heap/new-spaces.h
@@ -391,13 +391,6 @@ class V8_EXPORT_PRIVATE NewSpace
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
- int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
@@ -482,6 +475,25 @@ class V8_EXPORT_PRIVATE NewSpace
SemiSpace from_space_;
VirtualMemory reservation_;
+ // Internal allocation methods.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateFastAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
+
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
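The header diff is the interesting part of the interface change: AllocateRawAligned and AllocateRawUnaligned move from the public section into the private internals, so AllocateRaw becomes the only way into new-space allocation from outside, and every entry point keeps V8_WARN_UNUSED_RESULT so a Retry result cannot be silently dropped. The shape in miniature, as a sketch rather than the real class:

    // [[nodiscard]] is the standard C++17 analogue of V8_WARN_UNUSED_RESULT.
    struct [[nodiscard]] Result {
      bool retry;
    };

    class Space {
     public:
      Result AllocateRaw(int size_in_bytes);             // sole public entry point
     private:
      Result AllocateFastUnaligned(int size_in_bytes);   // V8_INLINE fast path
      Result AllocateFastAligned(int size_in_bytes);     // V8_INLINE fast path
      Result AllocateRawSlow(int size_in_bytes);         // out-of-line fallback
    };

Note that only the two fast paths stay V8_INLINE; AllocateRawSlow and the two legacy bodies deliberately lose the annotation, matching the goal of keeping the slow machinery out of generated call sites.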
diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc
index 3bd6c4e48d..780af2702e 100644
--- a/test/cctest/heap/test-heap.cc
+++ b/test/cctest/heap/test-heap.cc
@@ -33,6 +33,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/macro-assembler-inl.h"
+#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/execution.h"
@@ -1712,8 +1713,7 @@ TEST(TestAlignmentCalculations) {
static HeapObject NewSpaceAllocateAligned(int size,
AllocationAlignment alignment) {
Heap* heap = CcTest::heap();
- AllocationResult allocation =
- heap->new_space()->AllocateRawAligned(size, alignment);
+ AllocationResult allocation = heap->new_space()->AllocateRaw(size, alignment);
HeapObject obj;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
@@ -3662,9 +3662,9 @@ TEST(Regress169928) {
// We need filler the size of AllocationMemento object, plus an extra
// fill pointer value.
HeapObject obj;
- AllocationResult allocation =
- CcTest::heap()->new_space()->AllocateRawUnaligned(
- AllocationMemento::kSize + kTaggedSize);
+ AllocationResult allocation = CcTest::heap()->new_space()->AllocateRaw(
+ AllocationMemento::kSize + kTaggedSize,
+ AllocationAlignment::kWordAligned);
CHECK(allocation.To(&obj));
Address addr_obj = obj.address();
CcTest::heap()->CreateFillerObjectAt(addr_obj,
diff --git a/test/cctest/heap/test-spaces.cc b/test/cctest/heap/test-spaces.cc
index b5796d3c2f..0bf400a71c 100644
--- a/test/cctest/heap/test-spaces.cc
+++ b/test/cctest/heap/test-spaces.cc
@@ -274,9 +274,10 @@ TEST(NewSpace) {
CHECK(new_space.MaximumCapacity());
while (new_space.Available() >= kMaxRegularHeapObjectSize) {
- CHECK(new_space.Contains(
- new_space.AllocateRawUnaligned(kMaxRegularHeapObjectSize)
- .ToObjectChecked()));
+ CHECK(new_space.Contains(new_space
+ .AllocateRaw(kMaxRegularHeapObjectSize,
+ AllocationAlignment::kWordAligned)
+ .ToObjectChecked()));
}
new_space.TearDown();