Reland "[zone] Use 32kb instead of 1MB as high zone page size"
This is a reland of a04862021e.
It turns out that this gives a ~2x speedup on highly parallel WebAssembly
compilation, so let's try landing it again in isolation.
Original change's description:
> [zone] Use 32kb instead of 1MB as high zone page size
>
> It seems that allocating smaller pages is actually quite a bit faster than
> allocating larger ones, probably because they can be cached by malloc. Let's
> see what the bots say.
>
> In a follow-up I'll check whether the segment-pool is actually beneficial or
> whether we should just remove it.
>
> This also drops SegmentSize::kLarge as a way to make compilation deterministic.
> It turns out that by now we need >8MB anyway, and the previous 1MB wasn't enough.
> At the same time the compiler was fixed to not rely on virtual addresses of
> zone objects anymore, and there's a bot checking whether the snapshot is
> deterministic.
>
> Change-Id: I38cbb0d209d68b3671fd38763b42714811f4223e
> Reviewed-on: https://chromium-review.googlesource.com/c/1346370
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Toon Verwaest <verwaest@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#57683}
Change-Id: I243ba741f0968879b4cfe9f366d81ddc53a9bf27
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1645326
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62007}
Parent: 3167b3b600
Commit: 2d44118519
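The hunks below remove the SegmentSize parameter and cap zone segments at 32 KB. As a rough illustration of the resulting growth policy (this is not the actual V8 code; NextSegmentSize and the doubling heuristic are assumptions made for this sketch), new segments are sized roughly like this:

```cpp
#include <algorithm>
#include <cstddef>

// Illustrative constants mirroring the values in the diff below.
constexpr size_t KB = 1024;
constexpr size_t kMinimumSegmentSize = 8 * KB;
constexpr size_t kMaximumSegmentSize = 32 * KB;  // was 1 * MB before this change

// Hypothetical helper: pick the size of the next zone segment, given the bytes
// requested by the allocation and the size of the current segment. The growth
// heuristic (doubling) is an assumption for illustration; the point is the
// clamping to [kMinimumSegmentSize, kMaximumSegmentSize], unless a single
// allocation needs more than the maximum.
size_t NextSegmentSize(size_t requested_bytes, size_t current_segment_size) {
  size_t new_size = std::max(requested_bytes, current_segment_size * 2);
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size >= kMaximumSegmentSize) {
    // Cap segment growth so segments stay small enough to be cached by
    // malloc, while still being large enough for the requested allocation.
    new_size = std::max(requested_bytes, kMaximumSegmentSize);
  }
  return new_size;
}
```

The key point is the upper clamp: segments never grow past 32 KB unless a single allocation needs more, which keeps them in a size class that malloc can serve from its caches.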
@@ -157,10 +157,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
   CanonicalHandleScope canonical(isolate);
-  SegmentSize segment_size = isolate->serializer_enabled()
-                                 ? SegmentSize::kLarge
-                                 : SegmentSize::kDefault;
-  Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
+  Zone zone(isolate->allocator(), ZONE_NAME);
   const int argc_with_recv =
       (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
   compiler::CodeAssemblerState state(
@@ -181,10 +178,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
   CanonicalHandleScope canonical(isolate);
-  SegmentSize segment_size = isolate->serializer_enabled()
-                                 ? SegmentSize::kLarge
-                                 : SegmentSize::kDefault;
-  Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
+  Zone zone(isolate->allocator(), ZONE_NAME);
   // The interface descriptor with given key must be initialized at this point
   // and this construction just queries the details from the descriptors table.
   CallInterfaceDescriptor descriptor(interface_descriptor);
@@ -27,8 +27,7 @@ constexpr size_t kASanRedzoneBytes = 0;
 
 }  // namespace
 
-Zone::Zone(AccountingAllocator* allocator, const char* name,
-           SegmentSize segment_size)
+Zone::Zone(AccountingAllocator* allocator, const char* name)
     : allocation_size_(0),
       segment_bytes_allocated_(0),
       position_(0),
@@ -36,8 +35,7 @@ Zone::Zone(AccountingAllocator* allocator, const char* name,
       allocator_(allocator),
       segment_head_(nullptr),
       name_(name),
-      sealed_(false),
-      segment_size_(segment_size) {
+      sealed_(false) {
   allocator_->ZoneCreation(this);
 }
 
@@ -137,12 +135,9 @@ Address Zone::NewExpand(size_t size) {
     V8::FatalProcessOutOfMemory(nullptr, "Zone");
     return kNullAddress;
   }
-  if (segment_size_ == SegmentSize::kLarge) {
-    new_size = kMaximumSegmentSize;
-  }
   if (new_size < kMinimumSegmentSize) {
     new_size = kMinimumSegmentSize;
-  } else if (new_size > kMaximumSegmentSize) {
+  } else if (new_size >= kMaximumSegmentSize) {
     // Limit the size of new segments to avoid growing the segment size
     // exponentially, thus putting pressure on contiguous virtual address space.
     // All the while making sure to allocate a segment large enough to hold the
@@ -37,12 +37,9 @@ namespace internal {
 // Note: The implementation is inherently not thread safe. Do not use
 // from multi-threaded code.
 
-enum class SegmentSize { kLarge, kDefault };
-
 class V8_EXPORT_PRIVATE Zone final {
  public:
-  Zone(AccountingAllocator* allocator, const char* name,
-       SegmentSize segment_size = SegmentSize::kDefault);
+  Zone(AccountingAllocator* allocator, const char* name);
   ~Zone();
 
   // Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -102,7 +99,7 @@ class V8_EXPORT_PRIVATE Zone final {
   static const size_t kMinimumSegmentSize = 8 * KB;
 
   // Never allocate segments larger than this size in bytes.
-  static const size_t kMaximumSegmentSize = 1 * MB;
+  static const size_t kMaximumSegmentSize = 32 * KB;
 
   // Report zone excess when allocation exceeds this limit.
   static const size_t kExcessLimit = 256 * MB;
@@ -136,7 +133,6 @@ class V8_EXPORT_PRIVATE Zone final {
   Segment* segment_head_;
   const char* name_;
   bool sealed_;
-  SegmentSize segment_size_;
 };
 
 // ZoneObject is an abstraction that helps define classes of objects
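With the SegmentSize parameter gone, every caller uses the same two-argument constructor shown in the hunks above. A sketch of a call site after this change (CompileSomething is a hypothetical function, and the V8-internal headers declaring Isolate, Zone, and ZONE_NAME are assumed to be included):

```cpp
// Hypothetical call site, mirroring the pattern in the diff above.
void CompileSomething(Isolate* isolate) {
  // No SegmentSize argument anymore; all zones grow with the same policy,
  // capped at kMaximumSegmentSize (now 32 KB).
  Zone zone(isolate->allocator(), ZONE_NAME);
  // ... allocate compiler data structures in the zone ...
}  // All zone-backed memory is released when `zone` goes out of scope.
```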