277fdd1de7
This CL renames a number of things related to the V8 sandbox. Mainly,
what used to be under V8_HEAP_SANDBOX is now under
V8_SANDBOXED_EXTERNAL_POINTERS, while the previous V8 VirtualMemoryCage
is now simply the V8 Sandbox:

V8_VIRTUAL_MEMORY_CAGE => V8_SANDBOX
V8_HEAP_SANDBOX => V8_SANDBOXED_EXTERNAL_POINTERS
V8_CAGED_POINTERS => V8_SANDBOXED_POINTERS
V8VirtualMemoryCage => Sandbox
CagedPointer => SandboxedPointer
fake cage => partially reserved sandbox
src/security => src/sandbox

This naming scheme should simplify things: the sandbox is now the large
region of virtual address space inside which V8 mainly operates and
which should be considered untrusted. Mechanisms like sandboxed pointers
are then used to attempt to prevent escapes from the sandbox (i.e.
corruption of memory outside of it). Furthermore, the new naming scheme
avoids confusion with the various other "cages" in V8, in particular
the VirtualMemoryCage class, by dropping that name entirely.

Future sandbox features are developed under their own V8_SANDBOX_X
flag and will, once final, be merged into V8_SANDBOX. Current future
features are sandboxed external pointers (using the external pointer
table), and sandboxed pointers (pointers guaranteed to point into the
sandbox, e.g. because they are encoded as offsets). This CL also
introduces a new build flag, v8_enable_sandbox_future, which enables
all future features.

Bug: v8:10391
Change-Id: I5174ea8f5ab40fb96a04af10853da735ad775c96
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3322981
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78384}
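To make the renaming concrete, here is a minimal sketch of how conditionally compiled code reads under the new scheme. Only the guard macros come from this CL; the structure and comments are illustrative, not code from the change:

#ifdef V8_SANDBOX  // previously V8_VIRTUAL_MEMORY_CAGE
// V8 mainly operates inside the sandbox, a large region of virtual
// address space that should be considered untrusted.
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS  // previously V8_HEAP_SANDBOX
// Pointers to memory outside the sandbox go through the external
// pointer table.
#endif
#ifdef V8_SANDBOXED_POINTERS  // previously V8_CAGED_POINTERS
// Sandboxed pointers are guaranteed to point into the sandbox, e.g.
// because they are encoded as offsets from its base.
#endif
#endif  // V8_SANDBOX

To try all in-development sandbox features at once, a build can set the new GN flag, e.g. v8_enable_sandbox_future = true in args.gn.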
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <functional>
#include <map>

#include "src/base/region-allocator.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/spaces-inl.h"
#include "src/utils/ostreams.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

// This is a v8::PageAllocator implementation that decorates the provided
// page allocator object with page tracking functionality.
class TrackingPageAllocator : public ::v8::PageAllocator {
 public:
  explicit TrackingPageAllocator(v8::PageAllocator* page_allocator)
      : page_allocator_(page_allocator),
        allocate_page_size_(page_allocator_->AllocatePageSize()),
        commit_page_size_(page_allocator_->CommitPageSize()),
        // Track the whole address space: size_t{0} - commit_page_size_ wraps
        // around to the largest commit-page-aligned size.
        region_allocator_(kNullAddress, size_t{0} - commit_page_size_,
                          commit_page_size_) {
    CHECK_NOT_NULL(page_allocator);
    CHECK(IsAligned(allocate_page_size_, commit_page_size_));
  }
  ~TrackingPageAllocator() override = default;

  size_t AllocatePageSize() override { return allocate_page_size_; }

  size_t CommitPageSize() override { return commit_page_size_; }

  void SetRandomMmapSeed(int64_t seed) override {
    return page_allocator_->SetRandomMmapSeed(seed);
  }

  void* GetRandomMmapAddr() override {
    return page_allocator_->GetRandomMmapAddr();
  }

  void* AllocatePages(void* address, size_t size, size_t alignment,
                      PageAllocator::Permission access) override {
    void* result =
        page_allocator_->AllocatePages(address, size, alignment, access);
    if (result) {
      // Mark pages as used.
      Address current_page = reinterpret_cast<Address>(result);
      CHECK(IsAligned(current_page, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      CHECK(region_allocator_.AllocateRegionAt(current_page, size));
      Address end = current_page + size;
      while (current_page < end) {
        page_permissions_.insert({current_page, access});
        current_page += commit_page_size_;
      }
    }
    return result;
  }

  bool FreePages(void* address, size_t size) override {
    bool result = page_allocator_->FreePages(address, size);
    if (result) {
      // Mark pages as free.
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      size_t freed_size = region_allocator_.FreeRegion(start);
      CHECK(IsAligned(freed_size, commit_page_size_));
      CHECK_EQ(RoundUp(freed_size, allocate_page_size_), size);
      auto start_iter = page_permissions_.find(start);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool ReleasePages(void* address, size_t size, size_t new_size) override {
    bool result = page_allocator_->ReleasePages(address, size, new_size);
    if (result) {
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, commit_page_size_));
      CHECK(IsAligned(new_size, commit_page_size_));
      CHECK_LT(new_size, size);
      CHECK_EQ(region_allocator_.TrimRegion(start, new_size), size - new_size);
      auto start_iter = page_permissions_.find(start + new_size);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool DecommitPages(void* address, size_t size) override {
    bool result = page_allocator_->DecommitPages(address, size);
    if (result) {
      // Mark pages as non-accessible.
      UpdatePagePermissions(reinterpret_cast<Address>(address), size,
                            kNoAccess);
    }
    return result;
  }

  bool SetPermissions(void* address, size_t size,
                      PageAllocator::Permission access) override {
    bool result = page_allocator_->SetPermissions(address, size, access);
    if (result) {
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access);
    }
    return result;
  }

  // Returns true if all the allocated pages were freed.
  bool IsEmpty() { return page_permissions_.empty(); }

  void CheckIsFree(Address address, size_t size) {
    CHECK(IsAligned(address, allocate_page_size_));
    CHECK(IsAligned(size, allocate_page_size_));
    EXPECT_TRUE(region_allocator_.IsFree(address, size));
  }

  void CheckPagePermissions(Address address, size_t size,
                            PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      EXPECT_EQ(access, value->second);
    });
  }

  void Print(const char* comment) const {
    i::StdoutStream os;
    os << "\n========================================="
       << "\nTrackingPageAllocator state: ";
    if (comment) os << comment;
    os << "\n-----------------------------------------\n";
    region_allocator_.Print(os);
    os << "-----------------------------------------"
       << "\nPage permissions:";
    if (page_permissions_.empty()) {
      os << " empty\n";
      return;
    }
    os << "\n" << std::hex << std::showbase;

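    // Coalesce runs of adjacent pages that share the same permissions into a
    // single printed region, so the dump stays readable for large mappings.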
    Address contiguous_region_start = static_cast<Address>(-1);
    Address contiguous_region_end = contiguous_region_start;
    PageAllocator::Permission contiguous_region_access =
        PageAllocator::kNoAccess;
    for (auto& pair : page_permissions_) {
      if (contiguous_region_end == pair.first &&
          pair.second == contiguous_region_access) {
        contiguous_region_end += commit_page_size_;
        continue;
      }
      if (contiguous_region_start != contiguous_region_end) {
        PrintRegion(os, contiguous_region_start, contiguous_region_end,
                    contiguous_region_access);
      }
      contiguous_region_start = pair.first;
      contiguous_region_end = pair.first + commit_page_size_;
      contiguous_region_access = pair.second;
    }
    if (contiguous_region_start != contiguous_region_end) {
      PrintRegion(os, contiguous_region_start, contiguous_region_end,
                  contiguous_region_access);
    }
  }

 private:
  using PagePermissionsMap = std::map<Address, PageAllocator::Permission>;
  using ForEachFn = std::function<void(PagePermissionsMap::value_type*)>;

  static void PrintRegion(std::ostream& os, Address start, Address end,
                          PageAllocator::Permission access) {
    os << " page: [" << start << ", " << end << "), access: ";
    switch (access) {
      case PageAllocator::kNoAccess:
      case PageAllocator::kNoAccessWillJitLater:
        os << "--";
        break;
      case PageAllocator::kRead:
        os << "R";
        break;
      case PageAllocator::kReadWrite:
        os << "RW";
        break;
      case PageAllocator::kReadWriteExecute:
        os << "RWX";
        break;
      case PageAllocator::kReadExecute:
        os << "RX";
        break;
    }
    os << "\n";
  }

  void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
    CHECK(IsAligned(address, commit_page_size_));
    CHECK(IsAligned(size, commit_page_size_));
    auto start_iter = page_permissions_.find(address);
    // Start page must exist in page_permissions_.
    CHECK_NE(start_iter, page_permissions_.end());
    auto end_iter = page_permissions_.find(address + size - commit_page_size_);
    // Ensure the last page of the range exists in page_permissions_.
    CHECK_NE(end_iter, page_permissions_.end());
    // Now make it point to the next element so that the following loop also
    // processes the last page.
    ++end_iter;
    for (auto iter = start_iter; iter != end_iter; ++iter) {
      PagePermissionsMap::value_type& pair = *iter;
      fn(&pair);
    }
  }

  void UpdatePagePermissions(Address address, size_t size,
                             PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      value->second = access;
    });
  }

  v8::PageAllocator* const page_allocator_;
  const size_t allocate_page_size_;
  const size_t commit_page_size_;
  // Region allocator tracks page allocation/deallocation requests.
  base::RegionAllocator region_allocator_;
  // This map keeps track of allocated pages' permissions.
  PagePermissionsMap page_permissions_;
};

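// Usage sketch (illustrative only; not part of the original file): the
// tracker wraps the real platform allocator, forwards every call to it, and
// mirrors the resulting page state, so a test can assert on permissions and
// on whether everything was freed:
//
//   TrackingPageAllocator tracker(GetPlatformPageAllocator());
//   size_t size = tracker.AllocatePageSize();
//   void* mem = tracker.AllocatePages(nullptr, size, size,
//                                     PageAllocator::kReadWrite);
//   tracker.CheckPagePermissions(reinterpret_cast<Address>(mem), size,
//                                PageAllocator::kReadWrite);
//   CHECK(tracker.FreePages(mem, size));
//   CHECK(tracker.IsEmpty());
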
// This test is currently incompatible with the sandbox. Enable it
// once the VirtualAddressSpace interface is stable.
#if !V8_OS_FUCHSIA && !V8_SANDBOX
class SequentialUnmapperTest : public TestWithIsolate {
 public:
  SequentialUnmapperTest() = default;
  ~SequentialUnmapperTest() override = default;
  SequentialUnmapperTest(const SequentialUnmapperTest&) = delete;
  SequentialUnmapperTest& operator=(const SequentialUnmapperTest&) = delete;

  static void SetUpTestCase() {
    CHECK_NULL(tracking_page_allocator_);
    old_page_allocator_ = GetPlatformPageAllocator();
    tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
    CHECK(tracking_page_allocator_->IsEmpty());
    CHECK_EQ(old_page_allocator_,
             SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
    old_flag_ = i::FLAG_concurrent_sweeping;
    i::FLAG_concurrent_sweeping = false;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Reinitialize the process-wide pointer cage so it can pick up the
    // TrackingPageAllocator.
    // The pointer cage must be destroyed before the sandbox.
    IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#ifdef V8_SANDBOX
    // Reinitialize the sandbox so it uses the TrackingPageAllocator.
    GetProcessWideSandbox()->TearDown();
    constexpr bool use_guard_regions = false;
    CHECK(GetProcessWideSandbox()->Initialize(
        tracking_page_allocator_, kSandboxMinimumSize, use_guard_regions));
#endif
    IsolateAllocator::InitializeOncePerProcess();
#endif
    TestWithIsolate::SetUpTestCase();
  }

  static void TearDownTestCase() {
    TestWithIsolate::TearDownTestCase();
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Free the process-wide cage reservation, otherwise the pages won't be
    // freed until process teardown.
    IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#endif
#ifdef V8_SANDBOX
    GetProcessWideSandbox()->TearDown();
#endif
    i::FLAG_concurrent_sweeping = old_flag_;
    CHECK(tracking_page_allocator_->IsEmpty());

    // Restore the original v8::PageAllocator and delete the tracking one.
    CHECK_EQ(tracking_page_allocator_,
             SetPlatformPageAllocatorForTesting(old_page_allocator_));
    delete tracking_page_allocator_;
    tracking_page_allocator_ = nullptr;
  }

  Heap* heap() { return isolate()->heap(); }
  MemoryAllocator* allocator() { return heap()->memory_allocator(); }
  MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }

  TrackingPageAllocator* tracking_page_allocator() {
    return tracking_page_allocator_;
  }

 private:
  static TrackingPageAllocator* tracking_page_allocator_;
  static v8::PageAllocator* old_page_allocator_;
  static bool old_flag_;
};

TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
    nullptr;
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;

// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
  if (FLAG_enable_third_party_heap) return;
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  unmapper()->FreeQueuedChunks();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
  unmapper()->TearDown();
#ifdef V8_COMPRESS_POINTERS
  // In this mode the Isolate uses a bounded page allocator that allocates
  // pages inside a prereserved region. Thus these pages are kept reserved
  // until the Isolate dies.
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
#else
  tracking_page_allocator()->CheckIsFree(page->address(), page_size);
#endif  // V8_COMPRESS_POINTERS
}

// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
  if (FLAG_enable_third_party_heap) return;
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);

  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  unmapper()->TearDown();
#ifdef V8_COMPRESS_POINTERS
  // In this mode the Isolate uses a bounded page allocator that allocates
  // pages inside a prereserved region. Thus these pages are kept reserved
  // until the Isolate dies.
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
#else
  tracking_page_allocator()->CheckIsFree(page->address(), page_size);
#endif  // V8_COMPRESS_POINTERS
}
#endif  // !V8_OS_FUCHSIA && !V8_SANDBOX

}  // namespace internal
}  // namespace v8