X64: Change some smi operations to work on untagged integers instead.

Use direct reading and writing of integers from Smi fields.
Change RecordWrite with 0 offset to take untagged index instead of
smi tagged index.
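
Background for this change: on x64, V8 keeps a smi's 32-bit payload in the upper half of a 64-bit word (kSmiShift is 32) and an all-zero lower half as the tag. On a little-endian machine the payload is therefore the four bytes at offset kSmiShift / kBitsPerByte, which is what lets the new Integer32ToSmiField, SmiToInteger64(Register, Operand) and SmiCompareInteger32 helpers below touch smi fields with a single 32-bit memory operand. A minimal C++ sketch of the layout, not part of the diff:

    #include <cstdint>
    #include <cstring>

    constexpr int kSmiShift = 32;     // x64: smi payload lives in the upper 32 bits
    constexpr int kBitsPerByte = 8;

    int64_t SmiFromInt32(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;  // low tag half is zero
    }

    // What Integer32ToSmiField does: overwrite only the payload half of a
    // field that already holds a smi, leaving the zero tag half untouched.
    void WriteSmiField(int64_t* field, int32_t value) {
      std::memcpy(reinterpret_cast<char*>(field) + kSmiShift / kBitsPerByte,
                  &value, sizeof(value));
    }

    // What SmiToInteger64 with an Operand source does: a sign-extending
    // 32-bit load (movsxlq) of the payload half.
    int64_t ReadSmiField(const int64_t* field) {
      int32_t payload;
      std::memcpy(&payload,
                  reinterpret_cast<const char*>(field) + kSmiShift / kBitsPerByte,
                  sizeof(payload));
      return payload;  // sign extension happens in the widening return
    }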

Review URL: http://codereview.chromium.org/2872005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4893 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
lrn@chromium.org 2010-06-17 15:48:43 +00:00
parent 93387f272e
commit 26e692af2f
6 changed files with 138 additions and 92 deletions

src/x64/assembler-x64.cc

@@ -1148,6 +1148,15 @@ void Assembler::incl(const Operand& dst) {
 }


+void Assembler::incl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_modrm(0, dst);
+}
+
+
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
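
Note on the new emitter: incl on a register is the standard INC r/m32 form, opcode 0xFF with /0 in the modrm reg field, plus a REX.B prefix only for r8d through r15d (that is what emit_optional_rex_32 decides). An illustrative byte-level sketch, not the assembler's actual code path:

    #include <cstdint>
    #include <vector>

    // reg_code is the hardware register number (rax/eax = 0 ... r15d = 15).
    std::vector<uint8_t> EncodeIncl(int reg_code) {
      std::vector<uint8_t> bytes;
      if (reg_code >= 8) bytes.push_back(0x41);           // REX.B for r8d-r15d
      bytes.push_back(0xFF);                              // INC r/m32 opcode
      bytes.push_back(0xC0 | (0 << 3) | (reg_code & 7));  // modrm: mod=11, /0
      return bytes;
    }
    // EncodeIncl(0) yields FF C0 (incl eax); EncodeIncl(8) yields 41 FF C0 (incl r8d).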

src/x64/assembler-x64.h

@@ -765,6 +765,7 @@ class Assembler : public Malloced {

   void incq(Register dst);
   void incq(const Operand& dst);
+  void incl(Register dst);
   void incl(const Operand& dst);

   void lea(Register dst, const Operand& src);

src/x64/codegen-x64.cc

@@ -4769,8 +4769,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &first_loop);

-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
@@ -4791,8 +4791,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &second_loop);

-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
@@ -4814,50 +4814,50 @@ void DeferredSearchCache::Generate() {
   // cache miss this optimization would hardly matter much.

   // Check if we could add new entry to cache.
-  __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiCompare(rbx, r9);
+  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(r9,
+                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+  __ cmpl(rbx, r9);
   __ j(greater, &add_new_entry);

   // Check if we could evict entry after finger.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiToInteger32(rdx, rdx);
-  __ SmiToInteger32(rbx, rbx);
-  __ addq(rdx, kEntrySizeImm);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ addl(rdx, kEntrySizeImm);
   Label forward;
-  __ cmpq(rbx, rdx);
+  __ cmpl(rbx, rdx);
   __ j(greater, &forward);
   // Need to wrap over the cache.
   __ movl(rdx, kEntriesIndexImm);
   __ bind(&forward);
-  __ Integer32ToSmi(r9, rdx);
+  __ movl(r9, rdx);
   __ jmp(&update_cache);

   __ bind(&add_new_entry);
-  // r9 holds cache size as smi.
-  __ SmiToInteger32(rdx, r9);
-  __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+  // r9 holds cache size as int32.
+  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);

   // Update the cache itself.
-  // rdx holds the index as int.
-  // r9 holds the index as smi.
+  // r9 holds the index as int32.
   __ bind(&update_cache);
   __ pop(rbx);  // restore the key
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
   // Store key.
-  __ movq(ArrayElement(rcx, rdx), rbx);
+  __ movq(ArrayElement(rcx, r9), rbx);
   __ RecordWrite(rcx, 0, rbx, r9);

   // Store value.
   __ pop(rcx);  // restore the cache.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
-  __ movq(r9, rdx);
-  __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ incl(rdx);
   // Backup rax, because the RecordWrite macro clobbers its arguments.
   __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
+  __ movq(ArrayElement(rcx, rdx), rax);
+  __ RecordWrite(rcx, 0, rbx, rdx);

   if (!dst_.is(rax)) {
     __ movq(dst_, rax);
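
Note: the finger and cache-size fields stay smi-tagged in memory, but the registers now hold untagged int32 values throughout, so each Integer32ToSmi-plus-movq pair collapses into one Integer32ToSmiField, and SmiAddConstant becomes a plain leal or incl. The eviction arithmetic, rendered as C++ for intuition (assuming JSFunctionResultCache::kEntrySize covers the two slots of a key/value pair):

    // Advance the finger by one entry, wrapping past the end of the cache.
    int NextFinger(int finger, int cache_size, int entries_index, int entry_size) {
      finger += entry_size;        // addl(rdx, kEntrySizeImm)
      if (finger >= cache_size) {  // cmpl(rbx, rdx); j(greater, &forward)
        finger = entries_index;    // movl(rdx, kEntriesIndexImm): wrap around
      }
      return finger;
    }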
@@ -8551,18 +8551,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {

   // rcx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
-  __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
-  __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+  __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
   __ j(not_equal, &runtime);

   // rcx: RegExp data (FixedArray)
   // Check that the number of captures fit in the static offsets vector buffer.
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
   // Check that the static offsets vector buffer is large enough.
-  __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+  __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
   __ j(above, &runtime);

   // rcx: RegExp data (FixedArray)
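
Note: the leal folds the old shift-and-add pair into one address computation. With rdx holding the untagged capture count n, Operand(rdx, rdx, times_1, 2) evaluates to rdx + rdx*1 + 2, i.e. (n + 1) * 2. As arithmetic:

    // leal(rdx, Operand(rdx, rdx, times_1, 2)) in C++ terms.
    int CaptureRegisters(int number_of_captures) {
      return number_of_captures + number_of_captures * 1 + 2;  // (n + 1) * 2
    }
    // CaptureRegisters(0) == 2 (just the whole match); CaptureRegisters(3) == 8.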
@@ -8572,17 +8572,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ JumpIfSmi(rax, &runtime);
   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   __ j(NegateCondition(is_string), &runtime);
-  // Get the length of the string to rbx.
-  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));

-  // rbx: Length of subject string as smi
-  // rcx: RegExp data (FixedArray)
-  // rdx: Number of capture registers
+  // rax: Subject string.
+  // rcx: RegExp data (FixedArray).
+  // rdx: Number of capture registers.
   // Check that the third argument is a positive smi less than the string
   // length. A negative value will be greater (unsigned comparison).
-  __ movq(rax, Operand(rsp, kPreviousIndexOffset));
-  __ JumpIfNotSmi(rax, &runtime);
-  __ SmiCompare(rax, rbx);
+  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(rbx, &runtime);
+  __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
   __ j(above_equal, &runtime);

   // rcx: RegExp data (FixedArray)
@@ -8600,8 +8598,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check that the last match info has space for the capture registers and the
   // additional information. Ensure no overflow in add.
   ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(rax, rax);
+  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   __ cmpl(rdx, rax);
   __ j(greater, &runtime);
@@ -8674,8 +8671,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // r12: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
-  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
-  __ SmiToInteger64(rbx, rbx);  // Previous index from smi.
+  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));

   // rax: subject string
   // rbx: previous index
@@ -8787,10 +8783,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ bind(&success);
   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rax,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rax, rax, times_1, 2));

   // rdx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -8833,7 +8829,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
                          rdx,
                          times_pointer_size,
                          RegExpImpl::kFirstCaptureOffset),
-                         rdi);
+          rdi);
   __ jmp(&next_capture);
   __ bind(&done);
@@ -8877,9 +8873,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,

   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
-  __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  // Divide smi tagged length by two.
-  __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+  __ SmiToInteger32(
+      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shrl(mask, Immediate(1));
   __ subq(mask, Immediate(1));  // Make mask.

   // Calculate the entry in the number string cache. The hash value in the
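
Note: the mask computation is unchanged in effect; the length is simply untagged first. The cache stores two array elements per entry, so a FixedArray length of 2k means k entries and, with k a power of two, a mask of k - 1. A worked example:

    // Mask derivation from the number-string-cache length.
    int CacheMask(int fixed_array_length) {
      int entries = fixed_array_length >> 1;  // shrl(mask, Immediate(1))
      return entries - 1;                     // subq(mask, Immediate(1))
    }
    // CacheMask(128) == 63, so hash & 63 selects one of 64 entries.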
@@ -8916,8 +8912,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   }

   __ bind(&is_smi);
-  __ movq(scratch, object);
-  __ SmiToInteger32(scratch, scratch);
+  __ SmiToInteger32(scratch, object);
   GenerateConvertHashCodeToIndex(masm, scratch, mask);

   Register index = scratch;
@@ -9344,29 +9339,30 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ j(equal, &adaptor_frame);

   // Get the length from the frame.
-  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
   __ jmp(&try_allocate);

   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
-  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ SmiToInteger32(rcx,
+                    Operand(rdx,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+  // Space on stack must already hold a smi.
+  __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
   // Do not clobber the length index for the indexing operation since
   // it is used to compute the size for allocation later.
-  SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
-  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);

   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ testq(rcx, rcx);
+  __ testl(rcx, rcx);
   __ j(zero, &add_arguments_object);
-  index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
-  __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+  __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+  __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));

   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
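
Note: with rcx untagged, the allocation size becomes pure int32 arithmetic: leal folds the scale by kPointerSize and the FixedArray header into one instruction, and addl adds the fixed arguments-object size. Roughly, with the constants left symbolic:

    // Size computed by the try_allocate path above, as plain C++.
    int AllocationSize(int argc, int fixed_array_header_size,
                       int arguments_object_size, int pointer_size) {
      int size = 0;
      if (argc != 0) {  // testl(rcx, rcx); j(zero, &add_arguments_object)
        size = argc * pointer_size + fixed_array_header_size;  // leal
      }
      return size + arguments_object_size;  // addl
    }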
@@ -9378,10 +9374,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(rdi, Operand(rdi, offset));

   // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ movq(kScratchRegister, FieldOperand(rdi, i));
-    __ movq(FieldOperand(rax, i), kScratchRegister);
-  }
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+  __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+  __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+  __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+  __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+  __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);

   // Setup the callee in-object property.
   ASSERT(Heap::arguments_callee_index == 0);
@@ -9395,7 +9394,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {

   // If there are no actual arguments, we're done.
   Label done;
-  __ testq(rcx, rcx);
+  __ SmiTest(rcx);
   __ j(zero, &done);

   // Get the parameters pointer from the stack and untag the length.
@@ -9417,7 +9416,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
   __ addq(rdi, Immediate(kPointerSize));
   __ subq(rdx, Immediate(kPointerSize));
-  __ decq(rcx);
+  __ decl(rcx);
   __ j(not_zero, &loop);

   // Return and remove the on-stack parameters.
@@ -10832,19 +10831,13 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ push(rax);

   // Push this stub's key.
-  __ movq(rax, Immediate(MinorKey()));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(MinorKey()));

   // Although the operation and the type info are encoded into the key,
   // the encoding is opaque, so push them too.
-  __ movq(rax, Immediate(op_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(op_));

-  __ movq(rax, Immediate(runtime_operands_type_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(runtime_operands_type_));

   __ push(rcx);

src/x64/ic-x64.cc

@@ -1013,6 +1013,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
           rax);
   __ movq(rdx, rax);
+  __ SmiToInteger32(rcx, rcx);
   __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
   __ ret(0);
 }
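
Note: the added SmiToInteger32 untags the key because RecordWriteNonSmi now expects an int32 element index when offset is 0. Both the store above and the barrier then address the element the same way; FieldOperand folds in the -1 for the heap-object tag:

    #include <cstdint>

    // object - kHeapObjectTag + FixedArray::kHeaderSize + index * kPointerSize
    uintptr_t ElementAddress(uintptr_t tagged_object, uint32_t index,
                             int header_size, int pointer_size) {
      const int kHeapObjectTag = 1;  // heap pointers carry tag bit 1
      return tagged_object - kHeapObjectTag + header_size +
             static_cast<uintptr_t>(index) * pointer_size;
    }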

src/x64/macro-assembler-x64.cc

@@ -96,8 +96,8 @@ void MacroAssembler::RecordWriteHelper(Register object,

   // Compute number of region covering addr. See Page::GetRegionNumberForAddress
   // method for more details.
-  and_(addr, Immediate(Page::kPageAlignmentMask));
   shrl(addr, Immediate(Page::kRegionSizeLog2));
+  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));

   // Set dirty mark for region.
   bts(Operand(object, Page::kDirtyFlagOffset), addr);
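
Note: reordering the shift and the mask is sound because kPageAlignmentMask is a contiguous low-bit mask, so (addr & mask) >> shift equals (addr >> shift) & (mask >> shift). A quick self-check:

    #include <cassert>
    #include <cstdint>

    // Holds for any addr when mask == 2^k - 1 and shift <= k.
    void CheckRegionNumberEquivalence(uint64_t addr, uint64_t mask, int shift) {
      assert(((addr & mask) >> shift) == ((addr >> shift) & (mask >> shift)));
    }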
@@ -106,25 +106,25 @@ void MacroAssembler::RecordWriteHelper(Register object,

 // For page containing |object| mark region covering [object+offset] dirty.
 // object is the object being stored into, value is the object being stored.
-// If offset is zero, then the smi_index register contains the array index into
-// the elements array represented as a smi. Otherwise it can be used as a
-// scratch register.
+// If offset is zero, then the index register contains the array index into
+// the elements array represented as a zero extended int32. Otherwise it can
+// be used as a scratch register.
 // All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
-                                 Register smi_index) {
+                                 Register index) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));

   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
   JumpIfSmi(value, &done);

-  RecordWriteNonSmi(object, offset, value, smi_index);
+  RecordWriteNonSmi(object, offset, value, index);
   bind(&done);

   // Clobber all input registers when running with the debug-code flag
@@ -135,7 +135,7 @@ void MacroAssembler::RecordWrite(Register object,
   if (FLAG_debug_code) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
@@ -143,7 +143,7 @@ void MacroAssembler::RecordWrite(Register object,
 void MacroAssembler::RecordWriteNonSmi(Register object,
                                        int offset,
                                        Register scratch,
-                                       Register smi_index) {
+                                       Register index) {
   Label done;

   if (FLAG_debug_code) {
@@ -151,6 +151,16 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
     JumpIfNotSmi(object, &okay);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
+
+    if (offset == 0) {
+      // index must be int32.
+      Register tmp = index.is(rax) ? rbx : rax;
+      push(tmp);
+      movl(tmp, index);
+      cmpq(tmp, index);
+      Check(equal, "Index register for RecordWrite must be untagged int32.");
+      pop(tmp);
+    }
   }

   // Test that the object address is not in the new space. We cannot
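
Note: the debug check works because movl zero-extends: tmp receives the low 32 bits of index with a cleared upper half, so the 64-bit compare succeeds only if index's upper half was already zero. A smi-tagged index, whose payload sits in the upper half, fails the check. In C++ terms:

    #include <cstdint>

    // movl(tmp, index); cmpq(tmp, index): equal iff index is a zero-extended int32.
    bool IsZeroExtendedInt32(uint64_t index) {
      uint64_t tmp = static_cast<uint32_t>(index);  // movl zero-extends
      return tmp == index;                          // cmpq
    }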
@@ -163,16 +173,15 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
   ASSERT(IsAligned(offset, kPointerSize) ||
          IsAligned(offset + kHeapObjectTag, kPointerSize));

-  Register dst = smi_index;
+  Register dst = index;
   if (offset != 0) {
     lea(dst, Operand(object, offset));
   } else {
     // array access: calculate the destination address in the same manner as
     // KeyedStoreIC::GenerateGeneric.
-    SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
     lea(dst, FieldOperand(object,
-                          index.reg,
-                          index.scale,
+                          index,
+                          times_pointer_size,
                           FixedArray::kHeaderSize));
   }
   RecordWriteHelper(object, dst, scratch);
@@ -184,7 +193,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
   if (FLAG_debug_code) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
@@ -485,6 +494,23 @@ void MacroAssembler::Integer32ToSmi(Register dst,
 }


+void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
+  if (FLAG_debug_code) {
+    testb(dst, Immediate(0x01));
+    Label ok;
+    j(zero, &ok);
+    if (allow_stub_calls()) {
+      Abort("Integer32ToSmiField writing to non-smi location");
+    } else {
+      int3();
+    }
+    bind(&ok);
+  }
+  ASSERT(kSmiShift % kBitsPerByte == 0);
+  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                 Register src,
                                                 int constant) {
@@ -520,6 +546,11 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
 }


+void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
+  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
 void MacroAssembler::SmiTest(Register src) {
   testq(src, src);
 }
@@ -556,6 +587,11 @@ void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
 }


+void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
+  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                            Register src,
                                                            int power) {

src/x64/macro-assembler-x64.h

@@ -203,6 +203,9 @@ class MacroAssembler: public Assembler {
   // NOTICE: Destroys the dst register even if unsuccessful!
   void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

+  // Stores an integer32 value into a memory field that already holds a smi.
+  void Integer32ToSmiField(const Operand& dst, Register src);
+
   // Adds constant to src and tags the result as a smi.
   // Result must be a valid smi.
   void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
@@ -214,6 +217,7 @@ class MacroAssembler: public Assembler {
   // Convert smi to 64-bit integer (sign extended if necessary).
   void SmiToInteger64(Register dst, Register src);
+  void SmiToInteger64(Register dst, const Operand& src);

   // Multiply a positive smi's integer value by a power of two.
   // Provides result as 64-bit integer value.
@@ -234,6 +238,8 @@ class MacroAssembler: public Assembler {
   void SmiCompare(Register dst, const Operand& src);
   void SmiCompare(const Operand& dst, Register src);
   void SmiCompare(const Operand& dst, Smi* src);
+  // Compare the int32 in src register to the value of the smi stored at dst.
+  void SmiCompareInteger32(const Operand& dst, Register src);

   // Sets sign and zero flags depending on value of smi in register.
   void SmiTest(Register src);