[array] Make array normalization independent of heap configuration.
The heuristic for deciding whether to normalize elements in a JSArray
should not depend on the current old generation size, for the sake of
predictability. This also wouldn't work once we start inlining this check
into optimized code, where we'd bake in the max old generation size value
at the time of optimization.

Bug: v8:6399
Change-Id: Ie30d8855953b8fa97b86b18d9eac6e5de87e5aa9
Reviewed-on: https://chromium-review.googlesource.com/609013
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Benedikt Meurer <bmeurer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47272}
parent a5f321cd9b
commit 4dfd750302
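To make the predictability concern concrete, here is a standalone sketch (not V8 code) that reproduces the arithmetic of the heuristic removed below. The example max old generation sizes are assumptions for illustration; kDoubleSize is 8 in V8.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Fixed bound that this change keeps.
  const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
  const uint64_t kDoubleSize = 8;  // sizeof(double) in V8
  // Assumed example max old generation sizes, in bytes.
  const uint64_t max_old_gen[] = {256ull << 20, 512ull << 20, 1024ull << 20,
                                  2048ull << 20};
  for (uint64_t bytes : max_old_gen) {
    // Heap-derived bound that this change removes: a quarter of the max old
    // generation, measured in double-sized element slots.
    uint32_t heap_based = static_cast<uint32_t>((bytes / kDoubleSize) / 4);
    uint32_t old_threshold = std::min(kMaxFastArrayLength, heap_based);
    std::printf("max old gen %4llu MB: old threshold %8u, new threshold %8u\n",
                static_cast<unsigned long long>(bytes >> 20), old_threshold,
                kMaxFastArrayLength);
  }
  return 0;
}

At a 1 GB max old generation the two bounds coincide at 32M elements; below that, the old heuristic normalized earlier, so the same store to length could dictionary-normalize the array on one heap configuration but not on another. The new code always compares against the fixed 32M bound.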
@@ -6058,13 +6058,7 @@ void JSArray::set_length(Smi* length) {
 
 
 bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
-  // This constant is somewhat arbitrary. Any large enough value would work.
-  const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
-  // If the new array won't fit in a some non-trivial fraction of the max old
-  // space size, then force it to go dictionary mode.
-  uint32_t heap_based_upper_bound =
-      static_cast<uint32_t>((heap->MaxOldGenerationSize() / kDoubleSize) / 4);
-  return new_length >= Min(kMaxFastArrayLength, heap_based_upper_bound);
+  return new_length > kMaxFastArrayLength;
 }
 
 
@@ -6868,6 +6868,9 @@ class JSArray: public JSObject {
   // Max. number of elements being copied in Array builtins.
   static const int kMaxCopyElements = 100;
 
+  // This constant is somewhat arbitrary. Any large enough value would work.
+  static const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
+
   static const int kInitialMaxFastElementArray =
       (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
        AllocationMemento::kSize) /