diff --git a/src/heap/base/asm/ppc/push_registers_asm.cc b/src/heap/base/asm/ppc/push_registers_asm.cc
index c0e6c0bf53..056d6f3015 100644
--- a/src/heap/base/asm/ppc/push_registers_asm.cc
+++ b/src/heap/base/asm/ppc/push_registers_asm.cc
@@ -29,7 +29,7 @@ asm(
 #endif
     // Push all callee-saved registers.
     // lr, TOC pointer, r16 to r31. 160 bytes.
-    // The parameter save area shall be allocated by the caller. 112 btes.
+    // The parameter save area shall be allocated by the caller. 112 bytes.
     // At anytime, SP (r1) needs to be multiple of 16 (i.e. 16-aligned).
     "  mflr 0                                          \n"
     "  std 0, 16(1)                                    \n"
diff --git a/src/heap/base/asm/x64/push_registers_asm.cc b/src/heap/base/asm/x64/push_registers_asm.cc
index 9780b877b8..1781a5816a 100644
--- a/src/heap/base/asm/x64/push_registers_asm.cc
+++ b/src/heap/base/asm/x64/push_registers_asm.cc
@@ -6,7 +6,7 @@
 // stack scanning.
 //
 // We cannot rely on clang generating the function and right symbol mangling
-// as `__attribite__((naked))` does not prevent clang from generating TSAN
+// as `__attribute__((naked))` does not prevent clang from generating TSAN
 // function entry stubs (`__tsan_func_entry`). Even with
 // `__attribute__((no_sanitize_thread)` annotation clang generates the entry
 // stub.
diff --git a/src/heap/cppgc/allocation.cc b/src/heap/cppgc/allocation.cc
index 1282c3a164..e6d7a6b73c 100644
--- a/src/heap/cppgc/allocation.cc
+++ b/src/heap/cppgc/allocation.cc
@@ -22,7 +22,7 @@ static_assert(api_constants::kLargeObjectSizeThreshold ==
               kLargeObjectSizeThreshold);
 
 #if !(defined(V8_TARGET_ARCH_32_BIT) && defined(V8_CC_GNU))
-// GCC on x86 has alignof(std::max_alignt) == 16 (quad word) which is not
+// GCC on x86 has alignof(std::max_align_t) == 16 (quad word) which is not
 // satisfied by Oilpan.
 static_assert(api_constants::kMaxSupportedAlignment >=
                   alignof(std::max_align_t),
diff --git a/src/heap/cppgc/compactor.cc b/src/heap/cppgc/compactor.cc
index 376178197b..ebd2cd5323 100644
--- a/src/heap/cppgc/compactor.cc
+++ b/src/heap/cppgc/compactor.cc
@@ -199,14 +199,14 @@ void MovableReferences::RelocateInteriorReferences(Address from, Address to,
     if (!interior_it->second) {
       // Update the interior reference value, so that when the object the slot
       // is pointing to is moved, it can re-use this value.
-      Address refernece = to + offset;
-      interior_it->second = refernece;
+      Address reference = to + offset;
+      interior_it->second = reference;
 
       // If the |slot|'s content is pointing into the region [from, from +
       // size) we are dealing with an interior pointer that does not point to
       // a valid HeapObjectHeader. Such references need to be fixed up
       // immediately.
-      Address& reference_contents = *reinterpret_cast<Address*>(refernece);
+      Address& reference_contents = *reinterpret_cast<Address*>(reference);
       if (reference_contents > from && reference_contents < (from + size)) {
         reference_contents = reference_contents - from + to;
       }
diff --git a/src/heap/cppgc/object-allocator.cc b/src/heap/cppgc/object-allocator.cc
index ab1527a2f0..d35b44b464 100644
--- a/src/heap/cppgc/object-allocator.cc
+++ b/src/heap/cppgc/object-allocator.cc
@@ -169,7 +169,7 @@ void ObjectAllocator::RefillLinearAllocationBuffer(NormalPageSpace& space,
   // allocation or we finish sweeping all pages of this heap.
   Sweeper& sweeper = raw_heap_.heap()->sweeper();
   // TODO(chromium:1056170): Investigate whether this should be a loop which
-  // would result in more agressive re-use of memory at the expense of
+  // would result in more aggressive re-use of memory at the expense of
   // potentially larger allocation time.
   if (sweeper.SweepForAllocationIfRunning(&space, size)) {
     // Sweeper found a block of at least `size` bytes. Allocation from the
diff --git a/src/heap/cppgc/object-start-bitmap.h b/src/heap/cppgc/object-start-bitmap.h
index 7efcbcebfd..dff8b6eae3 100644
--- a/src/heap/cppgc/object-start-bitmap.h
+++ b/src/heap/cppgc/object-start-bitmap.h
@@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
   inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
 
   const Address offset_;
-  // `fully_populated_` is used to denote that the bitmap is popluated with all
+  // `fully_populated_` is used to denote that the bitmap is populated with all
   // currently allocated objects on the page and is in a consistent state. It is
   // used to guard against using the bitmap for finding headers during
   // concurrent sweeping.
diff --git a/src/heap/cppgc/page-memory.h b/src/heap/cppgc/page-memory.h
index 5a56e7b1a4..6cac6fd1c3 100644
--- a/src/heap/cppgc/page-memory.h
+++ b/src/heap/cppgc/page-memory.h
@@ -194,7 +194,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
 
 // A backend that is used for allocating and freeing normal and large pages.
 //
-// Internally maintaints a set of PageMemoryRegions. The backend keeps its used
+// Internally maintains a set of PageMemoryRegions. The backend keeps its used
 // regions alive.
 class V8_EXPORT_PRIVATE PageBackend final {
  public:
diff --git a/src/heap/cppgc/sweeper.cc b/src/heap/cppgc/sweeper.cc
index 18b84cae9a..ea62a5620a 100644
--- a/src/heap/cppgc/sweeper.cc
+++ b/src/heap/cppgc/sweeper.cc
@@ -807,7 +807,7 @@ class Sweeper::SweeperImpl final {
                                              StatsCollector::kIncrementalSweep);
     StatsCollector::EnabledScope inner_scope(
         stats_collector_, StatsCollector::kSweepOnAllocation);
-    MutatorThreadSweepingScope sweeping_in_progresss(*this);
+    MutatorThreadSweepingScope sweeping_in_progress(*this);
 
     {
       // First, process unfinalized pages as finalizing a page is faster than
@@ -924,7 +924,7 @@ class Sweeper::SweeperImpl final {
                                   StatsCollector::ScopeId internal_scope_id) {
     if (!is_in_progress_) return true;
 
-    MutatorThreadSweepingScope sweeping_in_progresss(*this);
+    MutatorThreadSweepingScope sweeping_in_progress(*this);
 
     bool sweep_complete;
     {
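
Note on the compactor.cc hunk above: the renamed `reference` variable feeds the interior-pointer fixup directly below it. The following is a minimal, self-contained sketch of that arithmetic, not cppgc code; the function name `RelocateInteriorSlot` and the bare `Address` alias are stand-ins introduced only for illustration.

#include <cstddef>
#include <cstdint>

// Assumed alias for illustration; cppgc defines its own Address type.
using Address = uint8_t*;

// When an object moves from `from` to `to` during compaction, an interior
// slot that lived at `from + offset` now lives at `to + offset`. If the value
// stored in that slot also points into the moved region [from, from + size),
// it no longer references a valid HeapObjectHeader and is rebased by the same
// delta immediately.
void RelocateInteriorSlot(Address from, Address to, size_t size,
                          size_t offset) {
  Address reference = to + offset;  // New location of the slot itself.
  Address& reference_contents = *reinterpret_cast<Address*>(reference);
  if (reference_contents > from && reference_contents < from + size) {
    reference_contents = reference_contents - from + to;
  }
}

For example, with from = 0x1000, to = 0x2000, size = 0x100 and a slot at offset 0x10 whose contents were 0x1040, the slot itself moves to 0x2010 and its contents are rewritten to 0x2040.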