[cleanup] Delete MacroAssembler::CopyBytes, it is dead code

Review-Url: https://chromiumcodereview.appspot.com/2434753003
Cr-Commit-Position: refs/heads/master@{#40459}
jkummerow 2016-10-20 03:15:21 -07:00 committed by Commit bot
parent ff46fcb9e4
commit 818d61ab28
24 changed files with 14 additions and 945 deletions
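
All of the removed ports implemented the same contract, documented in the
header comments below: copy |length| bytes from src to dst, leave both
registers pointing just past the last byte copied, and clobber the scratch
registers, with length ending up zero. A minimal C++ sketch of that contract
(a hypothetical stand-in for illustration, not part of this patch):

#include <cstddef>
#include <cstdint>

// Sketch of the removed helper's documented behaviour: copies `length`
// bytes from `src` to `dst` byte by byte; on return both pointers sit just
// past the copied range and `length` is zero. The real implementations
// deleted below add word-sized and rep-movs fast paths per architecture.
void CopyBytesSketch(const uint8_t*& src, uint8_t*& dst, size_t& length) {
  while (length > 0) {
    *dst++ = *src++;
    --length;
  }
}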


@@ -3245,50 +3245,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
cmp(length, Operand(kPointerSize));
b(le, &byte_loop);
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
b(&align_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
tst(src, Operand(kPointerSize - 1));
Assert(eq, kExpectingAlignmentForCopyBytes);
}
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
sub(length, length, Operand(kPointerSize));
b(&word_loop);
// Copy the last bytes if any left.
bind(&byte_loop);
cmp(length, Operand::Zero());
b(eq, &done);
bind(&byte_loop_1);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
b(ne, &byte_loop_1);
bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {


@@ -841,14 +841,6 @@ class MacroAssembler: public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src,
Register dst,
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.


@@ -2169,62 +2169,6 @@ void MacroAssembler::ClampDoubleToUint8(Register output,
Fcvtnu(output, dbl_scratch);
}
void MacroAssembler::CopyBytes(Register dst,
Register src,
Register length,
Register scratch,
CopyHint hint) {
UseScratchRegisterScope temps(this);
Register tmp1 = temps.AcquireX();
Register tmp2 = temps.AcquireX();
DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
DCHECK(!AreAliased(src, dst, csp));
if (emit_debug_code()) {
// Check copy length.
Cmp(length, 0);
Assert(ge, kUnexpectedNegativeValue);
// Check src and dst buffers don't overlap.
Add(scratch, src, length); // Calculate end of src buffer.
Cmp(scratch, dst);
Add(scratch, dst, length); // Calculate end of dst buffer.
Ccmp(scratch, src, ZFlag, gt);
Assert(le, kCopyBuffersOverlap);
}
Label short_copy, short_loop, bulk_loop, done;
if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
Register bulk_length = scratch;
int pair_size = 2 * kXRegSize;
int pair_mask = pair_size - 1;
Bic(bulk_length, length, pair_mask);
Cbz(bulk_length, &short_copy);
Bind(&bulk_loop);
Sub(bulk_length, bulk_length, pair_size);
Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
Cbnz(bulk_length, &bulk_loop);
And(length, length, pair_mask);
}
Bind(&short_copy);
Cbz(length, &done);
Bind(&short_loop);
Sub(length, length, 1);
Ldrb(tmp1, MemOperand(src, 1, PostIndex));
Strb(tmp1, MemOperand(dst, 1, PostIndex));
Cbnz(length, &short_loop);
Bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {


@@ -1099,16 +1099,6 @@ class MacroAssembler : public Assembler {
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
// Copies a number of bytes from src to dst. All passed registers are
// clobbered. On exit src and dst will point to the place just after where the
// last byte was read or written and length will be zero. Hint may be used to
// determine which is the most efficient algorithm to use for copying.
void CopyBytes(Register dst,
Register src,
Register length,
Register scratch,
CopyHint hint = kCopyUnknown);
// ---- String Utilities ----


@@ -75,7 +75,6 @@ namespace internal {
V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes") \
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
V(kForInStatementWithNonLocalEachVariable, \
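
Removing kExpectingAlignmentForCopyBytes from this list is also what causes
the bytecode expectation updates later in the commit: BailoutReason
enumerators take their values from their position in the V(...) list, so
every entry after the deleted one shifts down by one, and the Smi passed to
Runtime::kAbort drops from 77 to 76. A simplified sketch of the mechanism
(demo names, not the actual bailout-reasons header):

// Illustration only: enumerators generated from an X-macro list are
// numbered by position, so deleting one entry renumbers all later ones.
#define DEMO_MESSAGES_LIST(V)                                    \
  V(kExpectedUndefinedOrCell, "Expected undefined or cell")       \
  V(kExternalStringExpectedButNotFound, "External string expected")

#define DEMO_CONSTANT(Name, message) Name,
enum DemoBailoutReason { DEMO_MESSAGES_LIST(DEMO_CONSTANT) kLastDemoReason };
#undef DEMO_CONSTANT
// Generated artifacts that embed the raw enum value (such as the bytecode
// expectation files) therefore have to be regenerated after the deletion.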


@@ -1881,74 +1881,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag(),
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
Register destination,
Register length,
Register scratch) {
Label short_loop, len4, len8, len12, done, short_string;
DCHECK(source.is(esi));
DCHECK(destination.is(edi));
DCHECK(length.is(ecx));
cmp(length, Immediate(4));
j(below, &short_string, Label::kNear);
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
cmp(length, Immediate(8));
j(below_equal, &len4, Label::kNear);
cmp(length, Immediate(12));
j(below_equal, &len8, Label::kNear);
cmp(length, Immediate(16));
j(below_equal, &len12, Label::kNear);
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(scratch, Immediate(0x3));
add(destination, scratch);
jmp(&done, Label::kNear);
bind(&len12);
mov(scratch, Operand(source, 8));
mov(Operand(destination, 8), scratch);
bind(&len8);
mov(scratch, Operand(source, 4));
mov(Operand(destination, 4), scratch);
bind(&len4);
mov(scratch, Operand(source, 0));
mov(Operand(destination, 0), scratch);
add(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
test(length, length);
j(zero, &done, Label::kNear);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
mov_b(Operand(destination, 0), scratch);
inc(source);
inc(destination);
dec(length);
j(not_zero, &short_loop);
bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {


@@ -676,12 +676,6 @@ class MacroAssembler: public Assembler {
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch, Label* gc_required);
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of length and scratch are destroyed.
void CopyBytes(Register source, Register destination, Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.


@@ -4573,75 +4573,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
Branch(&byte_loop, le, length, Operand(kPointerSize));
bind(&align_loop_1);
And(scratch, src, kPointerSize - 1);
Branch(&word_loop, eq, scratch, Operand(zero_reg));
lbu(scratch, MemOperand(src));
Addu(src, src, 1);
sb(scratch, MemOperand(dst));
Addu(dst, dst, 1);
Subu(length, length, Operand(1));
Branch(&align_loop_1, ne, length, Operand(zero_reg));
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
And(scratch, src, kPointerSize - 1);
Assert(eq, kExpectingAlignmentForCopyBytes,
scratch, Operand(zero_reg));
}
Branch(&byte_loop, lt, length, Operand(kPointerSize));
lw(scratch, MemOperand(src));
Addu(src, src, kPointerSize);
// TODO(kalmard) check if this can be optimized to use sw in most cases.
// Can't use unaligned access - copy byte by byte.
if (kArchEndian == kLittle) {
sb(scratch, MemOperand(dst, 0));
srl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 1));
srl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 2));
srl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 3));
} else {
sb(scratch, MemOperand(dst, 3));
srl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 2));
srl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 1));
srl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 0));
}
Addu(dst, dst, 4);
Subu(length, length, Operand(kPointerSize));
Branch(&word_loop);
// Copy the last bytes if any left.
bind(&byte_loop);
Branch(&done, eq, length, Operand(zero_reg));
bind(&byte_loop_1);
lbu(scratch, MemOperand(src));
Addu(src, src, 1);
sb(scratch, MemOperand(dst));
Addu(dst, dst, 1);
Subu(length, length, Operand(1));
Branch(&byte_loop_1, ne, length, Operand(zero_reg));
bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {


@@ -1132,14 +1132,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src,
Register dst,
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.


@@ -4706,90 +4706,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
Branch(&byte_loop, le, length, Operand(kPointerSize));
bind(&align_loop_1);
And(scratch, src, kPointerSize - 1);
Branch(&word_loop, eq, scratch, Operand(zero_reg));
lbu(scratch, MemOperand(src));
Daddu(src, src, 1);
sb(scratch, MemOperand(dst));
Daddu(dst, dst, 1);
Dsubu(length, length, Operand(1));
Branch(&align_loop_1, ne, length, Operand(zero_reg));
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
And(scratch, src, kPointerSize - 1);
Assert(eq, kExpectingAlignmentForCopyBytes,
scratch, Operand(zero_reg));
}
Branch(&byte_loop, lt, length, Operand(kPointerSize));
ld(scratch, MemOperand(src));
Daddu(src, src, kPointerSize);
// TODO(kalmard) check if this can be optimized to use sw in most cases.
// Can't use unaligned access - copy byte by byte.
if (kArchEndian == kLittle) {
sb(scratch, MemOperand(dst, 0));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 1));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 2));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 3));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 4));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 5));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 6));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 7));
} else {
sb(scratch, MemOperand(dst, 7));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 6));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 5));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 4));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 3));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 2));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 1));
dsrl(scratch, scratch, 8);
sb(scratch, MemOperand(dst, 0));
}
Daddu(dst, dst, 8);
Dsubu(length, length, Operand(kPointerSize));
Branch(&word_loop);
// Copy the last bytes if any left.
bind(&byte_loop);
Branch(&done, eq, length, Operand(zero_reg));
bind(&byte_loop_1);
lbu(scratch, MemOperand(src));
Daddu(src, src, 1);
sb(scratch, MemOperand(dst));
Daddu(dst, dst, 1);
Dsubu(length, length, Operand(1));
Branch(&byte_loop_1, ne, length, Operand(zero_reg));
bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {


@@ -1190,14 +1190,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src,
Register dst,
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.


@@ -3176,73 +3176,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
Register scratch) {
Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
DCHECK(!scratch.is(r0));
cmpi(length, Operand::Zero());
beq(&done);
// Check src alignment and length to see whether word_loop is possible
andi(scratch, src, Operand(kPointerSize - 1));
beq(&aligned, cr0);
subfic(scratch, scratch, Operand(kPointerSize * 2));
cmp(length, scratch);
blt(&byte_loop);
// Align src before copying in word size chunks.
subi(scratch, scratch, Operand(kPointerSize));
mtctr(scratch);
bind(&align_loop);
lbz(scratch, MemOperand(src));
addi(src, src, Operand(1));
subi(length, length, Operand(1));
stb(scratch, MemOperand(dst));
addi(dst, dst, Operand(1));
bdnz(&align_loop);
bind(&aligned);
// Copy bytes in word size chunks.
if (emit_debug_code()) {
andi(r0, src, Operand(kPointerSize - 1));
Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
}
ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
cmpi(scratch, Operand::Zero());
beq(&byte_loop);
mtctr(scratch);
bind(&word_loop);
LoadP(scratch, MemOperand(src));
addi(src, src, Operand(kPointerSize));
subi(length, length, Operand(kPointerSize));
StoreP(scratch, MemOperand(dst));
addi(dst, dst, Operand(kPointerSize));
bdnz(&word_loop);
// Copy the last bytes if any left.
cmpi(length, Operand::Zero());
beq(&done);
bind(&byte_loop);
mtctr(length);
bind(&byte_loop_1);
lbz(scratch, MemOperand(src));
addi(src, src, Operand(1));
stb(scratch, MemOperand(dst));
addi(dst, dst, Operand(1));
bdnz(&byte_loop_1);
bind(&done);
}
void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {


@@ -764,11 +764,6 @@ class MacroAssembler : public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src, Register dst, Register length, Register scratch);
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// of the loop, |current_address| points at the next uninitialized field.


@@ -2921,51 +2921,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
Register scratch) {
Label big_loop, left_bytes, done, fake_call;
DCHECK(!scratch.is(r0));
// big loop moves 256 bytes at a time
bind(&big_loop);
CmpP(length, Operand(static_cast<intptr_t>(0x100)));
blt(&left_bytes);
mvc(MemOperand(dst), MemOperand(src), 0x100);
AddP(src, Operand(static_cast<intptr_t>(0x100)));
AddP(dst, Operand(static_cast<intptr_t>(0x100)));
SubP(length, Operand(static_cast<intptr_t>(0x100)));
b(&big_loop);
bind(&left_bytes);
CmpP(length, Operand::Zero());
beq(&done);
// TODO(john.yan): More optimal version is to use MVC
// Sequence below has some undiagnosed issue.
/*
b(scratch, &fake_call); // use brasl to Save mvc addr to scratch
mvc(MemOperand(dst), MemOperand(src), 1);
bind(&fake_call);
SubP(length, Operand(static_cast<intptr_t>(-1)));
ex(length, MemOperand(scratch)); // execute mvc instr above
AddP(src, length);
AddP(dst, length);
AddP(src, Operand(static_cast<intptr_t>(0x1)));
AddP(dst, Operand(static_cast<intptr_t>(0x1)));
*/
mvc(MemOperand(dst), MemOperand(src), 1);
AddP(src, Operand(static_cast<intptr_t>(0x1)));
AddP(dst, Operand(static_cast<intptr_t>(0x1)));
SubP(length, Operand(static_cast<intptr_t>(0x1)));
b(&left_bytes);
bind(&done);
}
void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {


@@ -1032,11 +1032,6 @@ class MacroAssembler : public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src, Register dst, Register length, Register scratch);
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// of the loop, |current_address| points at the next uninitialized field.


@@ -5174,93 +5174,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Destination is incremented by length; source, length and scratch are
// clobbered.
// A simpler loop is faster on small copies, but slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag(),
// before calling this function.
void MacroAssembler::CopyBytes(Register destination,
Register source,
Register length,
int min_length,
Register scratch) {
DCHECK(min_length >= 0);
if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
Assert(greater_equal, kInvalidMinLength);
}
Label short_loop, len8, len16, len24, done, short_string;
const int kLongStringLimit = 4 * kPointerSize;
if (min_length <= kLongStringLimit) {
cmpl(length, Immediate(kPointerSize));
j(below, &short_string, Label::kNear);
}
DCHECK(source.is(rsi));
DCHECK(destination.is(rdi));
DCHECK(length.is(rcx));
if (min_length <= kLongStringLimit) {
cmpl(length, Immediate(2 * kPointerSize));
j(below_equal, &len8, Label::kNear);
cmpl(length, Immediate(3 * kPointerSize));
j(below_equal, &len16, Label::kNear);
cmpl(length, Immediate(4 * kPointerSize));
j(below_equal, &len24, Label::kNear);
}
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
repmovsp();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
movp(length, Operand(source, scratch, times_1, -kPointerSize));
movp(Operand(destination, scratch, times_1, -kPointerSize), length);
addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
bind(&len24);
movp(scratch, Operand(source, 2 * kPointerSize));
movp(Operand(destination, 2 * kPointerSize), scratch);
bind(&len16);
movp(scratch, Operand(source, kPointerSize));
movp(Operand(destination, kPointerSize), scratch);
bind(&len8);
movp(scratch, Operand(source, 0));
movp(Operand(destination, 0), scratch);
// Move remaining bytes of length.
movp(scratch, Operand(source, length, times_1, -kPointerSize));
movp(Operand(destination, length, times_1, -kPointerSize), scratch);
addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
if (min_length == 0) {
testl(length, length);
j(zero, &done, Label::kNear);
}
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
incp(source);
incp(destination);
decl(length);
j(not_zero, &short_loop, Label::kNear);
}
bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {


@@ -1528,18 +1528,6 @@ class MacroAssembler: public Assembler {
return code_object_;
}
// Copy length bytes from source to destination.
// Uses scratch register internally (if you have a low-eight register
// free, do use it, otherwise kScratchRegister will be used).
// The min_length is a minimum limit on the value that length will have.
// The algorithm has some special cases that might be omitted if the string
// is known to always be long.
void CopyBytes(Register destination,
Register source,
Register length,
int min_length = 0,
Register scratch = kScratchRegister);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.


@@ -1822,74 +1822,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag(),
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
Register destination,
Register length,
Register scratch) {
Label short_loop, len4, len8, len12, done, short_string;
DCHECK(source.is(esi));
DCHECK(destination.is(edi));
DCHECK(length.is(ecx));
cmp(length, Immediate(4));
j(below, &short_string, Label::kNear);
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
cmp(length, Immediate(8));
j(below_equal, &len4, Label::kNear);
cmp(length, Immediate(12));
j(below_equal, &len8, Label::kNear);
cmp(length, Immediate(16));
j(below_equal, &len12, Label::kNear);
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(scratch, Immediate(0x3));
add(destination, scratch);
jmp(&done, Label::kNear);
bind(&len12);
mov(scratch, Operand(source, 8));
mov(Operand(destination, 8), scratch);
bind(&len8);
mov(scratch, Operand(source, 4));
mov(Operand(destination, 4), scratch);
bind(&len4);
mov(scratch, Operand(source, 0));
mov(Operand(destination, 0), scratch);
add(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
test(length, length);
j(zero, &done, Label::kNear);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
mov_b(Operand(destination, 0), scratch);
inc(source);
inc(destination);
dec(length);
j(not_zero, &short_loop);
bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {


@@ -665,12 +665,6 @@ class MacroAssembler: public Assembler {
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch, Label* gc_required);
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of length and scratch are destroyed.
void CopyBytes(Register source, Register destination, Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.


@@ -22,7 +22,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(61),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -130,7 +130,7 @@ bytecodes: [
B(LdaSmi), U8(1),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrueConstant), U8(0),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -278,7 +278,7 @@ bytecodes: [
B(LdaSmi), U8(1),
B(TestEqualStrict), R(4), U8(0),
B(JumpIfTrueConstant), U8(3),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kAbort), R(5), U8(1),
B(LdaSmi), U8(-2),
@@ -344,7 +344,7 @@ bytecodes: [
B(LdaSmi), U8(1),
B(TestEqualStrict), R(4), U8(0),
B(JumpIfTrueConstant), U8(9),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
/* 27 S> */ B(LdrContextSlot), R(1), U8(7), U8(0), R(14),


@@ -22,7 +22,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(71),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -91,7 +91,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(71),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -162,7 +162,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(71),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -272,7 +272,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(71),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -374,7 +374,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(83),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -489,7 +489,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(83),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -596,7 +596,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(83),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -677,7 +677,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(83),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -771,7 +771,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(71),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -840,7 +840,7 @@ bytecodes: [
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(71),
B(LdaSmi), U8(77),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),


@@ -42,98 +42,6 @@ typedef void* (*F)(int x, int y, int p2, int p3, int p4);
#define __ masm->
static byte to_non_zero(int n) {
return static_cast<unsigned>(n) % 255 + 1;
}
static bool all_zeroes(const byte* beg, const byte* end) {
CHECK(beg);
CHECK(beg <= end);
while (beg < end) {
if (*beg++ != 0)
return false;
}
return true;
}
TEST(CopyBytes) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
const int data_size = 1 * KB;
size_t act_size;
// Allocate two blocks to copy data between.
byte* src_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(src_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
byte* dest_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(dest_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
// Storage for R0 and R1.
byte* r0_;
byte* r1_;
MacroAssembler assembler(isolate, NULL, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
// Code to be generated: The stuff in CopyBytes followed by a store of R0 and
// R1, respectively.
__ CopyBytes(r0, r1, r2, r3);
__ mov(r2, Operand(reinterpret_cast<int>(&r0_)));
__ mov(r3, Operand(reinterpret_cast<int>(&r1_)));
__ str(r0, MemOperand(r2));
__ str(r1, MemOperand(r3));
__ bx(lr);
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F f = FUNCTION_CAST<F>(code->entry());
// Initialise source data with non-zero bytes.
for (int i = 0; i < data_size; i++) {
src_buffer[i] = to_non_zero(i);
}
const int fuzz = 11;
for (int size = 0; size < 600; size++) {
for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
memset(dest_buffer, 0, data_size);
CHECK(dest + size < dest_buffer + data_size);
(void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int>(src),
reinterpret_cast<int>(dest), size, 0, 0);
// R0 and R1 should point at the first byte after the copied data.
CHECK_EQ(src + size, r0_);
CHECK_EQ(dest + size, r1_);
// Check that we haven't written outside the target area.
CHECK(all_zeroes(dest_buffer, dest));
CHECK(all_zeroes(dest + size, dest_buffer + data_size));
// Check the target area.
CHECK_EQ(0, memcmp(src, dest, size));
}
}
}
// Check that the source data hasn't been clobbered.
for (int i = 0; i < data_size; i++) {
CHECK(src_buffer[i] == to_non_zero(i));
}
}
typedef int (*F5)(void*, void*, void*, void*, void*);


@@ -44,22 +44,6 @@ typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
#define __ masm->
static byte to_non_zero(int n) {
return static_cast<unsigned>(n) % 255 + 1;
}
static bool all_zeroes(const byte* beg, const byte* end) {
CHECK(beg);
CHECK(beg <= end);
while (beg < end) {
if (*beg++ != 0)
return false;
}
return true;
}
TEST(BYTESWAP) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -126,81 +110,6 @@ TEST(BYTESWAP) {
CHECK_EQ(static_cast<int32_t>(0xDE2C0000), t.r5);
}
TEST(CopyBytes) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
const int data_size = 1 * KB;
size_t act_size;
// Allocate two blocks to copy data between.
byte* src_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(src_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
byte* dest_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(dest_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
// Storage for a0 and a1.
byte* a0_;
byte* a1_;
MacroAssembler assembler(isolate, NULL, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
// Code to be generated: The stuff in CopyBytes followed by a store of a0 and
// a1, respectively.
__ CopyBytes(a0, a1, a2, a3);
__ li(a2, Operand(reinterpret_cast<int>(&a0_)));
__ li(a3, Operand(reinterpret_cast<int>(&a1_)));
__ sw(a0, MemOperand(a2));
__ jr(ra);
__ sw(a1, MemOperand(a3));
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
::F f = FUNCTION_CAST< ::F>(code->entry());
// Initialise source data with non-zero bytes.
for (int i = 0; i < data_size; i++) {
src_buffer[i] = to_non_zero(i);
}
const int fuzz = 11;
for (int size = 0; size < 600; size++) {
for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
memset(dest_buffer, 0, data_size);
CHECK(dest + size < dest_buffer + data_size);
(void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int>(src),
reinterpret_cast<int>(dest), size, 0, 0);
// a0 and a1 should point at the first byte after the copied data.
CHECK_EQ(src + size, a0_);
CHECK_EQ(dest + size, a1_);
// Check that we haven't written outside the target area.
CHECK(all_zeroes(dest_buffer, dest));
CHECK(all_zeroes(dest + size, dest_buffer + data_size));
// Check the target area.
CHECK_EQ(0, memcmp(src, dest, size));
}
}
}
// Check that the source data hasn't been clobbered.
for (int i = 0; i < data_size; i++) {
CHECK(src_buffer[i] == to_non_zero(i));
}
}
static void TestNaN(const char *code) {
// NaN value is different on MIPS and x86 architectures, and TEST(NaNx)
// tests check the case where an x86 NaN value is serialized into the


@@ -45,22 +45,6 @@ typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
#define __ masm->
static byte to_non_zero(int n) {
return static_cast<unsigned>(n) % 255 + 1;
}
static bool all_zeroes(const byte* beg, const byte* end) {
CHECK(beg);
CHECK(beg <= end);
while (beg < end) {
if (*beg++ != 0)
return false;
}
return true;
}
TEST(BYTESWAP) {
DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
CcTest::InitializeVM();
@@ -145,81 +129,6 @@ TEST(BYTESWAP) {
CHECK_EQ(static_cast<int64_t>(0xC3151AC800000000), t.r7);
}
TEST(CopyBytes) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
const int data_size = 1 * KB;
size_t act_size;
// Allocate two blocks to copy data between.
byte* src_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(src_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
byte* dest_buffer =
static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(dest_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
// Storage for a0 and a1.
byte* a0_;
byte* a1_;
MacroAssembler assembler(isolate, NULL, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
// Code to be generated: The stuff in CopyBytes followed by a store of a0 and
// a1, respectively.
__ CopyBytes(a0, a1, a2, a3);
__ li(a2, Operand(reinterpret_cast<int64_t>(&a0_)));
__ li(a3, Operand(reinterpret_cast<int64_t>(&a1_)));
__ sd(a0, MemOperand(a2));
__ jr(ra);
__ sd(a1, MemOperand(a3));
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
::F f = FUNCTION_CAST< ::F>(code->entry());
// Initialise source data with non-zero bytes.
for (int i = 0; i < data_size; i++) {
src_buffer[i] = to_non_zero(i);
}
const int fuzz = 11;
for (int size = 0; size < 600; size++) {
for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
memset(dest_buffer, 0, data_size);
CHECK(dest + size < dest_buffer + data_size);
(void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(src),
reinterpret_cast<int64_t>(dest), size, 0, 0);
// a0 and a1 should point at the first byte after the copied data.
CHECK_EQ(src + size, a0_);
CHECK_EQ(dest + size, a1_);
// Check that we haven't written outside the target area.
CHECK(all_zeroes(dest_buffer, dest));
CHECK(all_zeroes(dest + size, dest_buffer + data_size));
// Check the target area.
CHECK_EQ(0, memcmp(src, dest, size));
}
}
}
// Check that the source data hasn't been clobbered.
for (int i = 0; i < data_size; i++) {
CHECK(src_buffer[i] == to_non_zero(i));
}
}
TEST(LoadConstants) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();