Refactor and improve inlined double-aligned allocations

This change is performance-neutral, but it generates smaller code and encapsulates double alignment in the macro-assembler rather than at each allocation site.
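As a sketch of the shape of the change, this is the pattern the diff below introduces at allocation sites (shown in its ia32 form; the ARM and x64 stubs are analogous): the site no longer places filler words by hand, it just ORs DOUBLE_ALIGNMENT into the allocation flags and lets AllocateInNewSpace handle alignment.

    AllocationFlags flags = TAG_OBJECT;
    if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
      flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
    }
    __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);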

Review URL: https://codereview.chromium.org/11684005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13284 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
danno@chromium.org 2012-12-28 11:09:16 +00:00
parent 0aee912480
commit 17326e8746
13 changed files with 160 additions and 103 deletions


@@ -361,12 +361,11 @@ static void GenerateFastCloneShallowArrayCommon(
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size,
-                        r0,
-                        r1,
-                        r2,
-                        fail,
-                        TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
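A side note on the static_cast in the added lines: applying | to two enumerators yields an int in C++, so the result has to be cast back to AllocationFlags before it can be passed to AllocateInNewSpace. A minimal standalone illustration, with hypothetical names:

    enum Flags { NONE = 0, TAG = 1 << 0, ALIGN = 1 << 3 };

    void Consume(Flags f) { /* ... */ }

    void Caller(bool want_alignment) {
      Flags f = TAG;
      if (want_alignment) {
        // TAG | ALIGN has type int; the cast restores the enum type.
        f = static_cast<Flags>(f | ALIGN);
      }
      Consume(f);
    }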


@@ -193,27 +193,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // Allocate new FixedDoubleArray.
   // Use lr as a temporary register.
   __ mov(lr, Operand(r5, LSL, 2));
-  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
-  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
+  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
   // r6: destination FixedDoubleArray, not tagged as heap object.
-  // Align the array conveniently for doubles.
-  // Store a filler value in the unused memory.
-  Label aligned, aligned_done;
-  __ tst(r6, Operand(kDoubleAlignmentMask));
-  __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
-  __ b(eq, &aligned);
-  // Store at the beginning of the allocated memory and update the base pointer.
-  __ str(ip, MemOperand(r6, kPointerSize, PostIndex));
-  __ b(&aligned_done);
-  __ bind(&aligned);
-  // Store the filler at the end of the allocated memory.
-  __ sub(lr, lr, Operand(kPointerSize));
-  __ str(ip, MemOperand(r6, lr));
-  __ bind(&aligned_done);
 
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
   __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
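One detail worth calling out in this hunk: the old code requested FixedDoubleArray::kHeaderSize + kPointerSize extra bytes so the filler word could be parked on whichever side of the payload was unused; the new code requests the exact size and lets DOUBLE_ALIGNMENT align the allocation top up front. A quick check of the size arithmetic, with hypothetical constants standing in for the real V8 values (assuming a 32-bit target where r5 holds the smi-tagged length, so r5 LSL 2 is length * 8):

    #include <cassert>
    #include <cstddef>

    // Hypothetical stand-ins for the V8 constants on 32-bit ARM.
    const size_t kPointerSize = 4;
    const size_t kDoubleSize = 8;
    const size_t kHeaderSize = 2 * kPointerSize;  // map + length fields

    int main() {
      const size_t length = 5;
      // Old scheme: one word of slack so the filler fits on either side.
      size_t old_request = length * kDoubleSize + kHeaderSize + kPointerSize;
      // New scheme: exact size; the allocator aligns top before allocating.
      size_t new_request = length * kDoubleSize + kHeaderSize;
      assert(old_request == 52 && new_request == 48);
      return 0;
    }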


@@ -1617,6 +1617,18 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
     ldr(ip, MemOperand(topaddr, limit - top));
   }
 
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top
+    // is always safe because the limit of the heap is always aligned.
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }
+
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top.
   if (obj_size_operand.is_single_instruction(this)) {

@@ -1702,6 +1714,18 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
     ldr(ip, MemOperand(topaddr, limit - top));
   }
 
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top
+    // is always safe because the limit of the heap is always aligned.
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }
+
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top. Object size may be in words so a shift is
   // required to get the number of bytes.
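The str with PostIndex writes the filler map and advances result by kDoubleSize / 2 (one pointer on 32-bit ARM) in a single instruction, which is what the ia32 version further down spells out as a mov plus an add. In C-like terms, and assuming kDoubleAlignmentMask == kDoubleSize - 1, the inserted block behaves roughly like this sketch:

    #include <cstdint>

    const uintptr_t kDoubleSize = 8;
    const uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;
    // Hypothetical stand-in for the one-word filler map the GC skips over.
    const uintptr_t kOnePointerFillerMap = 0xFEED;

    // If the current allocation top is only pointer-aligned, plug the odd
    // word with a filler and start the object one word later. Writing the
    // filler before the limit check is safe: the heap limit is itself
    // double-aligned, so a misaligned top always has a word of headroom.
    uintptr_t AlignForDouble(uintptr_t result) {
      if (result & kDoubleAlignmentMask) {
        *reinterpret_cast<uintptr_t*>(result) = kOnePointerFillerMap;
        result += kDoubleSize / 2;
      }
      return result;
    }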


@@ -54,20 +54,6 @@ inline Operand SmiUntagOperand(Register object) {
 const Register cp = { 8 };  // JavaScript context pointer
 const Register kRootRegister = { 10 };  // Roots array pointer.
 
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1,
-  // Specify that the requested size of the space to allocate is specified in
-  // words instead of bytes.
-  SIZE_IN_WORDS = 1 << 2
-};
-
 // Flags used for AllocateHeapNumber
 enum TaggingMode {
   // Tag the result.


@@ -257,6 +257,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ AllocateInNewSpace(FixedArray::kHeaderSize,
                             times_pointer_size,
                             edx,
+                            REGISTER_VALUE_IS_INT32,
                             edi,
                             ecx,
                             no_reg,

@@ -1102,8 +1103,9 @@ static void AllocateJSArray(MacroAssembler* masm,
   // requested elements.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
-                        times_half_pointer_size,  // array_size is a smi.
+                        times_pointer_size,
                         array_size,
+                        REGISTER_VALUE_IS_SMI,
                         result,
                         elements_array_end,
                         scratch,


@@ -340,7 +340,11 @@ static void GenerateFastCloneShallowArrayCommon(
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {

@@ -4080,8 +4084,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   // JSArray:  [Map][empty properties][Elements][Length-smi][index][input]
   // Elements: [Map][Length][..elements..]
   __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                        times_half_pointer_size,
-                        ebx,  // In: Number of elements (times 2, being a smi)
+                        times_pointer_size,
+                        ebx,  // In: Number of elements as a smi
+                        REGISTER_VALUE_IS_SMI,
                         eax,  // Out: Start of allocation (tagged).
                         ecx,  // Out: End of allocation.
                         edx,  // Scratch register


@@ -434,24 +434,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // Allocate new FixedDoubleArray.
   // edx: receiver
   // edi: length of source FixedArray (smi-tagged)
-  __ lea(esi, Operand(edi,
-                      times_4,
-                      FixedDoubleArray::kHeaderSize + kPointerSize));
-  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
-
-  Label aligned, aligned_done;
-  __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
-  __ j(zero, &aligned, Label::kNear);
-  __ mov(FieldOperand(eax, 0),
-         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-  __ add(eax, Immediate(kPointerSize));
-  __ jmp(&aligned_done);
-  __ bind(&aligned);
-  __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
-         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-  __ bind(&aligned_done);
+  AllocationFlags flags =
+      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+  __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
+                        edi, REGISTER_VALUE_IS_SMI,
+                        eax, ebx, no_reg, &gc_required, flags);
 
   // eax: destination FixedDoubleArray
   // edi: number of elements
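The deleted block carried an extra subtlety that the refactoring removes: the allocation was tagged first (TAG_OBJECT), so the alignment test had to run on a tagged pointer, hence the kDoubleAlignmentMask - kHeapObjectTag mask. The macro-assembler now aligns the raw allocation top before tagging. A small check that the old mask trick was equivalent, under the assumption kHeapObjectTag == 1 and 8-byte double alignment:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kHeapObjectTag = 1;
      const uintptr_t kDoubleAlignmentMask = 7;
      // For a tagged (odd) pointer, testing against mask - tag succeeds
      // exactly when the untagged address is double-aligned.
      for (uintptr_t untagged = 0; untagged < 64; untagged += 4) {
        uintptr_t tagged = untagged + kHeapObjectTag;
        bool old_test =
            (tagged & (kDoubleAlignmentMask - kHeapObjectTag)) == 0;
        assert(old_test == ((untagged & kDoubleAlignmentMask) == 0));
      }
      return 0;
    }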


@@ -1241,6 +1241,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.

@@ -1260,6 +1261,19 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
   Register top_reg = result_end.is_valid() ? result_end : result;
 
   // Calculate new top and bail out if new space is exhausted.

@@ -1278,26 +1292,31 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
   UpdateAllocationTopHelper(top_reg, scratch);
 
   // Tag result if requested.
+  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if ((flags & TAG_OBJECT) != 0) {
+    if (tag_result) {
       sub(result, Immediate(object_size - kHeapObjectTag));
     } else {
       sub(result, Immediate(object_size));
     }
-  } else if ((flags & TAG_OBJECT) != 0) {
-    add(result, Immediate(kHeapObjectTag));
+  } else if (tag_result) {
+    ASSERT(kHeapObjectTag == 1);
+    inc(result);
   }
 }
 
 
-void MacroAssembler::AllocateInNewSpace(int header_size,
-                                        ScaleFactor element_size,
-                                        Register element_count,
-                                        Register result,
-                                        Register result_end,
-                                        Register scratch,
-                                        Label* gc_required,
-                                        AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(
+    int header_size,
+    ScaleFactor element_size,
+    Register element_count,
+    RegisterValueType element_count_type,
+    Register result,
+    Register result_end,
+    Register scratch,
+    Label* gc_required,
+    AllocationFlags flags) {
+  ASSERT((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.

@@ -1316,21 +1335,44 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
       ExternalReference::new_space_allocation_limit_address(isolate());
 
   // We assume that element_count*element_size + header_size does not
   // overflow.
+  if (element_count_type == REGISTER_VALUE_IS_SMI) {
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
+    ASSERT(element_size >= times_2);
+    ASSERT(kSmiTagSize == 1);
+    element_size = static_cast<ScaleFactor>(element_size - 1);
+  } else {
+    ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+  }
   lea(result_end, Operand(element_count, element_size, header_size));
   add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
 
+  // Tag result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    lea(result, Operand(result, kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    inc(result);
   }
 
   // Update allocation top.

@@ -1344,6 +1386,8 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
+                   SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.

@@ -1433,6 +1477,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
                      times_1,
                      scratch1,
+                     REGISTER_VALUE_IS_INT32,
                      result,
                      scratch2,
                      scratch3,

@@ -1468,6 +1513,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
   AllocateInNewSpace(SeqOneByteString::kHeaderSize,
                      times_1,
                      scratch1,
+                     REGISTER_VALUE_IS_INT32,
                      result,
                      scratch2,
                      scratch3,
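Two details above feed directly into the "smaller code" claim of the commit message. First, tagging now uses inc(result) instead of add(result, Immediate(kHeapObjectTag)); inc r32 is a one-byte instruction on ia32 where the add takes three, and the substitution is guarded by ASSERT(kHeapObjectTag == 1). Second, the REGISTER_VALUE_IS_SMI path exploits the fact that ScaleFactor stores the shift amount (times_1 == 0, times_2 == 1, and so on), so halving the scale is just subtracting one from the enum; since a smi is the value shifted left by kSmiTagSize == 1, the lowered scale cancels the smi tag exactly. A minimal sketch of that equivalence, with hypothetical helpers mirroring the ia32 addressing math:

    #include <cassert>

    // Mirrors the ia32 ScaleFactor encoding: the enum value is the shift.
    enum ScaleFactor { times_1 = 0, times_2 = 1, times_4 = 2, times_8 = 3 };

    // Byte offset of an ia32 address of the form [count * scale + header].
    int Bytes(int count, ScaleFactor scale, int header) {
      return (count << scale) + header;
    }

    int main() {
      const int length = 7, header = 8;
      const int smi = length << 1;  // kSmiTagSize == 1
      // Lowering the scale one step compensates for the smi shift:
      ScaleFactor lowered = static_cast<ScaleFactor>(times_8 - 1);
      assert(Bytes(smi, lowered, header) == Bytes(length, times_8, header));
      return 0;
    }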


@@ -35,18 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1
-};
-
 // Convenience for platform-independent signatures. We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;

@@ -55,6 +43,12 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
 
+enum RegisterValueType {
+  REGISTER_VALUE_IS_SMI,
+  REGISTER_VALUE_IS_INT32
+};
+
 bool AreAliased(Register r1, Register r2, Register r3, Register r4);

@@ -576,6 +570,7 @@ class MacroAssembler: public Assembler {
   void AllocateInNewSpace(int header_size,
                           ScaleFactor element_size,
                           Register element_count,
+                          RegisterValueType element_count_type,
                           Register result,
                           Register result_end,
                           Register scratch,


@@ -36,6 +36,23 @@ enum InvokeFlag {
 };
 
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+  // No special flags.
+  NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated already tagged as a heap object.
+  TAG_OBJECT = 1 << 0,
+  // The content of the result register already contains the allocation top in
+  // new space.
+  RESULT_CONTAINS_TOP = 1 << 1,
+  // Specify that the requested size of the space to allocate is specified in
+  // words instead of bytes.
+  SIZE_IN_WORDS = 1 << 2,
+  // Align the allocation to a multiple of kDoubleSize.
+  DOUBLE_ALIGNMENT = 1 << 3
+};
+
 // Invalid depth in prototype chain.
 const int kInvalidProtoDepth = -1;


@@ -333,7 +333,11 @@ static void GenerateFastCloneShallowArrayCommon(
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, flags);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {


@@ -3758,6 +3758,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.

@@ -3777,6 +3778,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+    testq(result, Immediate(kDoubleAlignmentMask));
+    Check(zero, "Allocation is not double aligned");
+  }
+
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
       ExternalReference::new_space_allocation_limit_address(isolate());

@@ -3795,15 +3803,17 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
   // Update allocation top.
   UpdateAllocationTopHelper(top_reg, scratch);
 
+  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if ((flags & TAG_OBJECT) != 0) {
+    if (tag_result) {
       subq(result, Immediate(object_size - kHeapObjectTag));
     } else {
       subq(result, Immediate(object_size));
     }
-  } else if ((flags & TAG_OBJECT) != 0) {
+  } else if (tag_result) {
     // Tag the result if requested.
-    addq(result, Immediate(kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    incq(result);
   }
 }

@@ -3816,6 +3826,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.

@@ -3834,6 +3845,13 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+    testq(result, Immediate(kDoubleAlignmentMask));
+    Check(zero, "Allocation is not double aligned");
+  }
+
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
       ExternalReference::new_space_allocation_limit_address(isolate());

@@ -3852,7 +3870,8 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
   // Tag the result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    addq(result, Immediate(kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    incq(result);
   }
 }

@@ -3863,6 +3882,8 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
+                   SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.

@@ -35,18 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1
-};
-
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.