[refactoring] Return TNode in AllocateFixedArray.
R=petermarshall@chromium.org

Bug: v8:7570
Change-Id: I0418ea6d2eb114ddac4d7be1251f429596464b79
Reviewed-on: https://chromium-review.googlesource.com/1032438
Commit-Queue: Simon Zünd <szuend@google.com>
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52887}
This commit is contained in:
parent c224c67a1a
commit 9035ca190a
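The gist of the change: AllocateFixedArray now advertises its result as TNode<FixedArray> instead of an untyped Node*, so call sites can drop their CAST(...) wrappers and the single remaining cast lives inside the allocator. A minimal standalone sketch of that pattern follows; it is illustrative only, not part of the commit, and Node, TNode and FixedArray here are simplified stand-ins for the real CSA types.

// Minimal sketch of the typed-node pattern (simplified stand-ins, not V8 code).
#include <iostream>

struct Node { const char* kind; };   // untyped IR node, as in the old signature
struct FixedArray {};                // tag type used only as a template argument

template <typename T>
class TNode {                        // typed wrapper around an untyped Node*
 public:
  explicit TNode(Node* node) : node_(node) {}
  Node* raw() const { return node_; }

 private:
  Node* node_;
};

// Old shape: returns an untyped Node*, so every caller has to cast.
Node* AllocateFixedArrayUntyped() {
  static Node node{"FixedArray"};
  return &node;
}

// New shape: the allocator knows what it built, so it returns TNode<FixedArray>
// and the cast happens exactly once, inside the function.
TNode<FixedArray> AllocateFixedArrayTyped() {
  return TNode<FixedArray>(AllocateFixedArrayUntyped());
}

int main() {
  // Call sites consume the typed handle directly, no CAST(...) needed.
  TNode<FixedArray> table = AllocateFixedArrayTyped();
  std::cout << table.raw()->kind << "\n";  // prints "FixedArray"
  return 0;
}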
@@ -2031,8 +2031,8 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
   // See HashTable::NewInternal().
   TNode<IntPtrT> length = KeyIndexFromEntry(capacity);
-  TNode<Object> table = CAST(AllocateFixedArray(
-      HOLEY_ELEMENTS, length, INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+  TNode<FixedArray> table = AllocateFixedArray(
+      HOLEY_ELEMENTS, length, INTPTR_PARAMETERS, kAllowLargeObjectAllocation);

   Heap::RootListIndex map_root_index =
       static_cast<Heap::RootListIndex>(ObjectHashTableShape::GetMapRootIndex());
@@ -827,7 +827,7 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
     // This is the likely case where the new queue fits into new space,
     // and thus we don't need any write barriers for initializing it.
     TNode<FixedArray> new_queue =
-        CAST(AllocateFixedArray(PACKED_ELEMENTS, new_queue_length));
+        AllocateFixedArray(PACKED_ELEMENTS, new_queue_length);
     CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks,
                            SKIP_WRITE_BARRIER);
     StoreFixedArrayElement(new_queue, num_tasks, microtask,
@@ -841,9 +841,9 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
   BIND(&if_lospace);
   {
     // The fallback case where the new queue ends up in large object space.
-    TNode<FixedArray> new_queue = CAST(AllocateFixedArray(
-        PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS,
-        AllocationFlag::kAllowLargeObjectAllocation));
+    TNode<FixedArray> new_queue = AllocateFixedArray(
+        PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS,
+        AllocationFlag::kAllowLargeObjectAllocation);
     CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks);
     StoreFixedArrayElement(new_queue, num_tasks, microtask);
     FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
@@ -3407,16 +3407,14 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
   return result;
 }

-Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
-                                            Node* capacity_node,
-                                            ParameterMode mode,
-                                            AllocationFlags flags,
-                                            Node* fixed_array_map) {
+TNode<FixedArray> CodeStubAssembler::AllocateFixedArray(
+    ElementsKind kind, Node* capacity, ParameterMode mode,
+    AllocationFlags flags, SloppyTNode<Map> fixed_array_map) {
   Comment("AllocateFixedArray");
-  CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
-  CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
+  CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
+  CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity,
                                           IntPtrOrSmiConstant(0, mode), mode));
-  Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
+  TNode<IntPtrT> total_size = GetFixedArrayAllocationSize(capacity, kind, mode);

   if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
   // Allocate both array and elements object, and initialize the JSArray.
@@ -3441,8 +3439,8 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
     StoreMapNoWriteBarrier(array, map_index);
   }
   StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
-                                 ParameterToTagged(capacity_node, mode));
-  return array;
+                                 ParameterToTagged(capacity, mode));
+  return UncheckedCast<FixedArray>(array);
 }

 TNode<FixedArray> CodeStubAssembler::ExtractFixedArray(
@@ -1027,18 +1027,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
                          Node* capacity = nullptr,
                          Node* allocation_site = nullptr);

-  Node* AllocateFixedArray(ElementsKind kind, Node* capacity,
-                           ParameterMode mode = INTPTR_PARAMETERS,
-                           AllocationFlags flags = kNone,
-                           Node* fixed_array_map = nullptr);
+  TNode<FixedArray> AllocateFixedArray(
+      ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
+      AllocationFlags flags = kNone,
+      SloppyTNode<Map> fixed_array_map = nullptr);

-  Node* AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
-                           AllocationFlags flags = kNone) {
+  TNode<FixedArray> AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
+                                       AllocationFlags flags = kNone) {
     return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags);
   }

-  Node* AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
-                           TNode<Map> map, AllocationFlags flags = kNone) {
+  TNode<FixedArray> AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
+                                       TNode<Map> map,
+                                       AllocationFlags flags = kNone) {
     return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags, map);
   }