v8/test/unittests/heap/heap-unittest.cc
Ulan Degenbaev 0640cbf378 [heap] Rework ASLR for base::Platform::VirtualMemory
Currently every VirtualMemory allocation on 64-bit systems
uses a random 46-bit address hint for ASLR.

This leads to a wired page leak on macOS, discovered by Erik Chen (see
crbug.com/700928 and https://chromium-review.googlesource.com/c/557958/):
"The Darwin kernel [as of macOS 10.12.5] does not clean up page directory
entries [PDE] created from mmap or mach_vm_allocate, even after
the region is destroyed. Using a virtual address space that is too large
causes a leak of about 1 wired [can never be paged out] page per call to
mmap(). The page is only reclaimed when the process is killed."

This patch changes VirtualMemory to accept an explicit hint parameter.
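
A minimal sketch of the changed interface (the signatures below are an
assumption for illustration, not necessarily the literal V8 declarations):

  // base::VirtualMemory now receives the ASLR hint from its caller rather
  // than generating one internally (hypothetical sketch).
  VirtualMemory(size_t size, void* hint);
  VirtualMemory(size_t size, size_t alignment, void* hint);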

On macOS the hints are confined to a contiguous 4 GB region. Algorithm (a
self-contained sketch follows the list below):
- On startup, set heap.mmap_region_base_ to a random address.
- For each mmap use heap.mmap_region_base_ + (random_offset % (4*GB)).
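
A self-contained sketch of that scheme (the class, method, and constant
names are illustrative, not V8's; only the arithmetic follows the algorithm
above, using a power-of-two mask in place of the modulo):

  #include <cstdint>
  #include <random>

  // Pick a random region base once at startup, then confine every
  // subsequent hint to base + (random offset % 4 GB).
  class MmapHinter {
   public:
    MmapHinter() : rng_(std::random_device{}()) {
      // Startup: randomize the 4-GB-aligned region base within 46 bits.
      region_base_ = Random46Bit() & ~kRegionMask;
    }
    void* NextHint() {
      // Per-allocation: random offset confined to the 4 GB region.
      return reinterpret_cast<void*>(region_base_ +
                                     (Random46Bit() & kRegionMask));
    }

   private:
    static constexpr uintptr_t kRegionMask = 0xFFFFFFFFu;  // 4 GB - 1.
    uintptr_t Random46Bit() {
      return static_cast<uintptr_t>(rng_()) & ((uintptr_t{1} << 46) - 1);
    }
    std::mt19937_64 rng_;
    uintptr_t region_base_;
  };

Any two hints produced this way differ only in their low 32 bits, which is
exactly the property the ASLR unit test below checks with kRegionMask.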

BUG=chromium:700928

Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I2ae6a024e02fbe63f940105d7920b57c19abacc6
Reviewed-on: https://chromium-review.googlesource.com/558876
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46656}
2017-07-14 07:15:40 +00:00

// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <cmath>
#include <iostream>
#include <limits>
#include <set>

#include "src/objects.h"
#include "src/objects-inl.h"
#include "src/handles.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

typedef TestWithIsolate HeapTest;

double Round(double x) {
  // Round to three digits.
  return std::floor(x * 1000 + 0.5) / 1000;
}

void CheckEqualRounded(double expected, double actual) {
  expected = Round(expected);
  actual = Round(actual);
  EXPECT_DOUBLE_EQ(expected, actual);
}

TEST(Heap, HeapGrowingFactor) {
  CheckEqualRounded(Heap::kMaxHeapGrowingFactor,
                    Heap::HeapGrowingFactor(34, 1, 4.0));
  CheckEqualRounded(3.553, Heap::HeapGrowingFactor(45, 1, 4.0));
  CheckEqualRounded(2.830, Heap::HeapGrowingFactor(50, 1, 4.0));
  CheckEqualRounded(1.478, Heap::HeapGrowingFactor(100, 1, 4.0));
  CheckEqualRounded(1.193, Heap::HeapGrowingFactor(200, 1, 4.0));
  CheckEqualRounded(1.121, Heap::HeapGrowingFactor(300, 1, 4.0));
  CheckEqualRounded(Heap::HeapGrowingFactor(300, 1, 4.0),
                    Heap::HeapGrowingFactor(600, 2, 4.0));
  CheckEqualRounded(Heap::kMinHeapGrowingFactor,
                    Heap::HeapGrowingFactor(400, 1, 4.0));
}

TEST(Heap, MaxHeapGrowingFactor) {
  CheckEqualRounded(
      1.3, Heap::MaxHeapGrowingFactor(Heap::kMinOldGenerationSize * MB));
  CheckEqualRounded(
      1.600, Heap::MaxHeapGrowingFactor(Heap::kMaxOldGenerationSize / 2 * MB));
  CheckEqualRounded(
      1.999,
      Heap::MaxHeapGrowingFactor(
          (Heap::kMaxOldGenerationSize - Heap::kPointerMultiplier) * MB));
  CheckEqualRounded(4.0,
                    Heap::MaxHeapGrowingFactor(
                        static_cast<size_t>(Heap::kMaxOldGenerationSize) * MB));
}

TEST(Heap, SemiSpaceSize) {
  // Each entry pairs a physical memory size with the expected maximum
  // semi-space size computed for it.
  uint64_t configurations[][2] = {
      {0, 1 * i::Heap::kPointerMultiplier},
      {512 * i::MB, 1 * i::Heap::kPointerMultiplier},
      {1 * i::GB, 3 * i::Heap::kPointerMultiplier},
      {2 * static_cast<uint64_t>(i::GB), i::Heap::kMaxSemiSpaceSize},
      {4 * static_cast<uint64_t>(i::GB), i::Heap::kMaxSemiSpaceSize},
      {8 * static_cast<uint64_t>(i::GB), i::Heap::kMaxSemiSpaceSize}};

  for (auto configuration : configurations) {
    ASSERT_EQ(configuration[1],
              static_cast<uint64_t>(
                  i::Heap::ComputeMaxSemiSpaceSize(configuration[0])));
  }
}

TEST(Heap, OldGenerationSize) {
  // Each entry pairs a physical memory size with the expected maximum
  // old generation size computed for it.
  uint64_t configurations[][2] = {
      {0, i::Heap::kMinOldGenerationSize},
      {512, i::Heap::kMinOldGenerationSize},
      {1 * i::GB, 256 * i::Heap::kPointerMultiplier},
      {2 * static_cast<uint64_t>(i::GB), 512 * i::Heap::kPointerMultiplier},
      {4 * static_cast<uint64_t>(i::GB), i::Heap::kMaxOldGenerationSize},
      {8 * static_cast<uint64_t>(i::GB), i::Heap::kMaxOldGenerationSize}};

  for (auto configuration : configurations) {
    ASSERT_EQ(configuration[1],
              static_cast<uint64_t>(
                  i::Heap::ComputeMaxOldGenerationSize(configuration[0])));
  }
}

TEST_F(HeapTest, ASLR) {
#if V8_TARGET_ARCH_X64
#if V8_OS_MACOSX
  Heap* heap = i_isolate()->heap();
  std::set<void*> hints;
  // Sample 1000 mmap address hints from the heap.
  for (int i = 0; i < 1000; i++) {
    hints.insert(heap->GetRandomMmapAddr());
  }
  if (hints.size() == 1) {
    // The hints do not vary: ASLR must be off, so the platform hint must be
    // null as well.
    EXPECT_TRUE((*hints.begin()) == nullptr);
    EXPECT_TRUE(base::OS::GetRandomMmapAddr() == nullptr);
  } else {
    // It is unlikely that 1000 random samples collide down to fewer than
    // 500 distinct values.
    EXPECT_GT(hints.size(), 500u);
    // All hints must lie within a single 4 GB region: any two of them may
    // differ only in their low 32 bits.
    const uintptr_t kRegionMask = 0xFFFFFFFFu;
    void* first = *hints.begin();
    for (void* hint : hints) {
      uintptr_t diff = reinterpret_cast<uintptr_t>(first) ^
                       reinterpret_cast<uintptr_t>(hint);
      EXPECT_LE(diff, kRegionMask);
    }
  }
#endif  // V8_OS_MACOSX
#endif  // V8_TARGET_ARCH_X64
}

}  // namespace internal
}  // namespace v8