[base] Never return false from PageAllocator::{Free,Release}Pages

Instead of returning false and failing in the caller, fail inside the
PageAllocator directly. Failure to free pages should never happen, and
handling this case inside the PageAllocator gives us better options for
surfacing more detailed information in follow-up patches.
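
To illustrate the contract change, here is a minimal standalone sketch
(hypothetical FreePagesOld/FreePagesNew and a toy RegionAllocator, not the
real V8 classes): the old style forces every caller to handle a failure that
should be impossible, while the new style CHECK-fails at the point where the
inconsistency is detected.

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <map>

// Stand-in for V8's CHECK_EQ: always active, aborts on mismatch.
#define CHECK_EQ(expected, actual)                          \
  do {                                                      \
    if ((expected) != (actual)) {                           \
      std::fprintf(stderr, "Check failed: %s == %s\n",      \
                   #expected, #actual);                     \
      std::abort();                                         \
    }                                                       \
  } while (false)

// Toy region allocator: tracks region sizes by start address.
struct RegionAllocator {
  std::map<void*, size_t> regions;
  // Returns the size of the freed region, or 0 if the address is unknown.
  size_t FreeRegion(void* address) {
    auto it = regions.find(address);
    if (it == regions.end()) return 0;
    size_t size = it->second;
    regions.erase(it);
    return size;
  }
};

// Old contract: report failure to a caller that cannot do anything useful.
bool FreePagesOld(RegionAllocator& ra, void* address, size_t size) {
  if (ra.FreeRegion(address) != size) return false;
  return true;
}

// New contract: a size mismatch is a bug, so crash right here.
bool FreePagesNew(RegionAllocator& ra, void* address, size_t size) {
  CHECK_EQ(size, ra.FreeRegion(address));
  return true;  // Only reached on success.
}

int main() {
  RegionAllocator ra;
  static char page[4096];  // Placeholder for a real OS page.
  ra.regions[page] = sizeof(page);
  FreePagesNew(ra, page, sizeof(page));  // Succeeds; a mismatch would abort.
}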

R=mlippautz@chromium.org

Bug: v8:12656, chromium:1299735
Change-Id: I6d2aa3a5613c0f1102210fccbccc6ad0e522a6ed
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3484323
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79276}
Author: Clemens Backes
Date: 2022-02-23 18:06:09 +01:00
Committed by: V8 LUCI CQ
Parent: 8264058c28
Commit: 9091311fb0

3 changed files with 17 additions and 20 deletions

src/base/bounded-page-allocator.cc

@@ -118,8 +118,7 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
   MutexGuard guard(&mutex_);
 
   Address address = reinterpret_cast<Address>(raw_address);
-  size_t freed_size = region_allocator_.FreeRegion(address);
-  if (freed_size != size) return false;
+  CHECK_EQ(size, region_allocator_.FreeRegion(address));
   if (page_initialization_mode_ ==
       PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
     // When we are required to return zero-initialized pages, we decommit the
@@ -167,15 +166,15 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
   if (page_initialization_mode_ ==
       PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
     // See comment in FreePages().
-    return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
-                                          free_size);
+    CHECK(page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
+                                         free_size));
   } else {
     DCHECK_EQ(page_initialization_mode_,
               PageInitializationMode::kAllocatedPagesCanBeUninitialized);
-    return page_allocator_->SetPermissions(
-        reinterpret_cast<void*>(free_address), free_size,
-        PageAllocator::kNoAccess);
+    CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
+                                          free_size, PageAllocator::kNoAccess));
   }
+  return true;
 }
 
 bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
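
Note the asymmetry in the hunk above: the mode dispatch keeps a DCHECK_EQ,
which V8 compiles out of release builds, while the DecommitPages and
SetPermissions results are now guarded by CHECK, which stays active in every
build. A stand-in sketch of that difference, using assert/NDEBUG gating
instead of V8's actual macros:

#include <cassert>
#include <cstdio>
#include <cstdlib>

// Always-on check, like V8's CHECK: failure crashes in release builds too.
#define ALWAYS_CHECK(cond)                                \
  do {                                                    \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "Check failed: %s\n", #cond);  \
      std::abort();                                       \
    }                                                     \
  } while (false)

bool DecommitOk() { return true; }

int main() {
  // Like DCHECK: the whole expression disappears under -DNDEBUG, so it
  // must never carry side effects the program depends on.
  assert(DecommitOk());
  // Like CHECK: evaluated and enforced in every build configuration.
  ALWAYS_CHECK(DecommitOk());
}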

src/base/sanitizer/lsan-page-allocator.cc

@@ -50,25 +50,21 @@ bool LsanPageAllocator::CanAllocateSharedPages() {
 }
 
 bool LsanPageAllocator::FreePages(void* address, size_t size) {
-  bool result = page_allocator_->FreePages(address, size);
+  CHECK(page_allocator_->FreePages(address, size));
 #if defined(LEAK_SANITIZER)
-  if (result) {
-    __lsan_unregister_root_region(address, size);
-  }
+  __lsan_unregister_root_region(address, size);
 #endif
-  return result;
+  return true;
 }
 
 bool LsanPageAllocator::ReleasePages(void* address, size_t size,
                                      size_t new_size) {
-  bool result = page_allocator_->ReleasePages(address, size, new_size);
+  CHECK(page_allocator_->ReleasePages(address, size, new_size));
 #if defined(LEAK_SANITIZER)
-  if (result) {
-    __lsan_unregister_root_region(address, size);
-    __lsan_register_root_region(address, new_size);
-  }
+  __lsan_unregister_root_region(address, size);
+  __lsan_register_root_region(address, new_size);
 #endif
-  return result;
+  return true;
 }
 
 }  // namespace base
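
Since the wrapped FreePages/ReleasePages now CHECK-fail instead of returning
false, the if (result) guards around the LSan bookkeeping become dead and can
go. The __lsan_* calls themselves come from <sanitizer/lsan_interface.h>;
registering a root region tells LeakSanitizer to treat pointers stored there
as live. A standalone usage sketch, assuming the build defines LEAK_SANITIZER
(V8's build-time flag, not a compiler predefine) and links with
-fsanitize=leak:

#include <cstddef>
#include <cstdlib>
#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif

int main() {
  constexpr size_t kSize = 4096;
  void* region = std::malloc(kSize);
#if defined(LEAK_SANITIZER)
  // Objects reachable only through pointers in this region are not
  // reported as leaks while it is registered.
  __lsan_register_root_region(region, kSize);
#endif
  // ... use the region ...
#if defined(LEAK_SANITIZER)
  __lsan_unregister_root_region(region, kSize);
#endif
  std::free(region);
}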

src/base/virtual-address-space-page-allocator.cc

@@ -28,7 +28,8 @@ bool VirtualAddressSpacePageAllocator::FreePages(void* ptr, size_t size) {
     size = result->second;
     resized_allocations_.erase(result);
   }
-  return vas_->FreePages(address, size);
+  CHECK(vas_->FreePages(address, size));
+  return true;
 }
 
 bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
@@ -46,7 +47,8 @@ bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
   // Will fail if the allocation was resized previously, which is desired.
   Address address = reinterpret_cast<Address>(ptr);
   resized_allocations_.insert({address, size});
-  return vas_->DecommitPages(address + new_size, size - new_size);
+  CHECK(vas_->DecommitPages(address + new_size, size - new_size));
+  return true;
 }
 
 bool VirtualAddressSpacePageAllocator::SetPermissions(
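
The resized_allocations_ bookkeeping in the last two hunks is worth spelling
out: ReleasePages shrinks an allocation by decommitting its tail but records
the original size, so a later FreePages frees the full original region. A
simplified standalone sketch of that pattern (hypothetical names, addresses
as plain integers, no real memory management):

#include <cstddef>
#include <cstdio>
#include <map>

// Simplified bookkeeping: map from allocation start to its original size.
std::map<std::size_t, std::size_t> resized_allocations;

void ReleasePages(std::size_t address, std::size_t size, std::size_t new_size) {
  // Remember the original size before shrinking; inserting twice for the
  // same allocation fails, mirroring the "desired" failure noted in the
  // comment in the hunk above.
  resized_allocations.insert({address, size});
  std::printf("decommit tail [%zu, %zu)\n", address + new_size, address + size);
}

void FreePages(std::size_t address, std::size_t size) {
  // If this allocation was resized, free the original, larger region.
  if (auto it = resized_allocations.find(address); it != resized_allocations.end()) {
    size = it->second;
    resized_allocations.erase(it);
  }
  std::printf("free [%zu, %zu)\n", address, address + size);
}

int main() {
  ReleasePages(0x1000, 8192, 4096);  // Shrink an 8 KiB allocation to 4 KiB.
  FreePages(0x1000, 4096);           // Frees the full original 8 KiB.
}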