[nojit] Convert generated memcpy functions into builtins

On ia32, arm and mips we generate miscellaneous memcpy-related functions
at runtime:

arm: memcpy for uint8-uint8 and uint16-uint8 {dest-source} pairs.
ia32: memmove
mips: memcpy uint8-uint8

In jitless mode, runtime codegen is disallowed, so these must be
converted into builtins.

As far as I can tell, the mips64 files were dead code (#ifdef'd to
V8_HOST_ARCH_MIPS instead of MIPS64).

Note also the slightly changed implementation of ia32's MemMove's
jump tables.

Bug: v8:8675
Change-Id: I5dc2a50fcbad332ce9f78228425b987b0d9acdf3
Reviewed-on: https://chromium-review.googlesource.com/c/1407067
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58839}
Jakob Gruber 2019-01-14 16:00:02 +01:00 committed by Commit Bot
parent 10a408a6a7
commit 61cb1133f9
15 changed files with 1041 additions and 1729 deletions
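
In short, the runtime-generated stubs are replaced by embedded builtins, and the existing C function pointers are now resolved from the embedded blob during Isolate::Init (after the blob is set up) rather than pointing at freshly assembled code. A condensed sketch of the new wiring, mirroring the memcopy.cc hunk further down (ia32 case only; the arm and mips branches follow the same pattern):

    // Sketch: look up the MemMove builtin's entry point in the embedded
    // blob and install it as the C-callable memmove_function pointer.
    void init_memcopy_functions() {
    #if V8_TARGET_ARCH_IA32
      if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
        EmbeddedData d = EmbeddedData::FromBlob();
        memmove_function = reinterpret_cast<MemMoveFunction>(
            d.InstructionStartOfBuiltin(Builtins::kMemMove));
      }
    #endif
    }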


@@ -2685,7 +2685,6 @@ v8_source_set("v8_base") {
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
"src/ia32/codegen-ia32.cc",
"src/ia32/constants-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
@@ -2749,7 +2748,6 @@ v8_source_set("v8_base") {
"src/arm/assembler-arm-inl.h",
"src/arm/assembler-arm.cc",
"src/arm/assembler-arm.h",
"src/arm/codegen-arm.cc",
"src/arm/constants-arm.cc",
"src/arm/constants-arm.h",
"src/arm/cpu-arm.cc",
@@ -2835,7 +2833,6 @@ v8_source_set("v8_base") {
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
"src/mips/codegen-mips.cc",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
@@ -2863,7 +2860,6 @@ v8_source_set("v8_base") {
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
"src/mips64/codegen-mips64.cc",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",


@@ -1,148 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_ARM
#include <memory>
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
Register dest = r0;
Register src = r1;
Register chars = r2;
Register temp1 = r3;
Label less_4;
{
UseScratchRegisterScope temps(&masm);
Register temp2 = temps.Acquire();
Label loop;
__ bic(temp2, chars, Operand(0x3), SetCC);
__ b(&less_4, eq);
__ add(temp2, dest, temp2);
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex));
__ cmp(dest, temp2);
__ b(&loop, ne);
}
__ bind(&less_4);
__ mov(chars, Operand(chars, LSL, 31), SetCC);
// bit0 => Z (ne), bit1 => C (cs)
__ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
__ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
__ ldrb(temp1, MemOperand(src), ne);
__ strb(temp1, MemOperand(dest), ne);
__ Ret();
CodeDesc desc;
masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
Register dest = r0;
Register src = r1;
Register chars = r2;
{
UseScratchRegisterScope temps(&masm);
Register temp1 = r3;
Register temp2 = temps.Acquire();
Register temp3 = lr;
Register temp4 = r4;
Label loop;
Label not_two;
__ Push(lr, r4);
__ bic(temp2, chars, Operand(0x3));
__ add(temp2, dest, Operand(temp2, LSL, 1));
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ uxtb16(temp3, temp1);
__ uxtb16(temp4, temp1, 8);
__ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
__ str(temp1, MemOperand(dest));
__ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
__ str(temp1, MemOperand(dest, 4));
__ add(dest, dest, Operand(8));
__ cmp(dest, temp2);
__ b(&loop, ne);
__ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
__ b(&not_two, cc);
__ ldrh(temp1, MemOperand(src, 2, PostIndex));
__ uxtb(temp3, temp1, 8);
__ mov(temp3, Operand(temp3, LSL, 16));
__ uxtab(temp3, temp3, temp1);
__ str(temp3, MemOperand(dest, 4, PostIndex));
__ bind(&not_two);
__ ldrb(temp1, MemOperand(src), ne);
__ strh(temp1, MemOperand(dest), ne);
__ Pop(pc, r4);
}
CodeDesc desc;
masm.GetCode(nullptr, &desc);
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM


@@ -3141,6 +3141,84 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ ldr(pc, MemOperand(sp, 0)); // Return to calling code.
}
void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
Register dest = r0;
Register src = r1;
Register chars = r2;
Register temp1 = r3;
Label less_4;
{
UseScratchRegisterScope temps(masm);
Register temp2 = temps.Acquire();
Label loop;
__ bic(temp2, chars, Operand(0x3), SetCC);
__ b(&less_4, eq);
__ add(temp2, dest, temp2);
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex));
__ cmp(dest, temp2);
__ b(&loop, ne);
}
__ bind(&less_4);
__ mov(chars, Operand(chars, LSL, 31), SetCC);
// bit0 => Z (ne), bit1 => C (cs)
__ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
__ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
__ ldrb(temp1, MemOperand(src), ne);
__ strb(temp1, MemOperand(dest), ne);
__ Ret();
}
void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
Register dest = r0;
Register src = r1;
Register chars = r2;
{
UseScratchRegisterScope temps(masm);
Register temp1 = r3;
Register temp2 = temps.Acquire();
Register temp3 = lr;
Register temp4 = r4;
Label loop;
Label not_two;
__ Push(lr, r4);
__ bic(temp2, chars, Operand(0x3));
__ add(temp2, dest, Operand(temp2, LSL, 1));
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ uxtb16(temp3, temp1);
__ uxtb16(temp4, temp1, 8);
__ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
__ str(temp1, MemOperand(dest));
__ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
__ str(temp1, MemOperand(dest, 4));
__ add(dest, dest, Operand(8));
__ cmp(dest, temp2);
__ b(&loop, ne);
__ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
__ b(&not_two, cc);
__ ldrh(temp1, MemOperand(src, 2, PostIndex));
__ uxtb(temp3, temp1, 8);
__ mov(temp3, Operand(temp3, LSL, 16));
__ uxtab(temp3, temp3, temp1);
__ str(temp3, MemOperand(dest, 4, PostIndex));
__ bind(&not_two);
__ ldrb(temp1, MemOperand(src), ne);
__ strh(temp1, MemOperand(dest), ne);
__ Pop(pc, r4);
}
}
#undef __
} // namespace internal


@@ -1354,6 +1354,9 @@ namespace internal {
TFS(SetProperty, kReceiver, kKey, kValue) \
TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \
ASM(MathPowInternal, Dummy) \
ASM(MemCopyUint8Uint8, CCall) \
ASM(MemCopyUint16Uint8, CCall) \
ASM(MemMove, CCall) \
\
/* Trace */ \
CPP(IsTraceCategoryEnabled) \


@@ -758,6 +758,24 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true);
}
#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
#ifndef V8_TARGET_ARCH_ARM
void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif // V8_TARGET_ARCH_ARM
#ifndef V8_TARGET_ARCH_IA32
void Builtins::Generate_MemMove(MacroAssembler* masm) {
masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif // V8_TARGET_ARCH_IA32
// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
Node* object = Parameter(Descriptor::kObject);


@@ -3437,6 +3437,393 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ int3(); // Unused on this architecture.
}
namespace {
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
// Expects registers:
// esi - source, aligned if alignment == ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm, Label* move_last_15,
Direction direction, Alignment alignment) {
Register src = esi;
Register dst = edi;
Register count = ecx;
Register loop_count = edx;
Label loop, move_last_31, move_last_63;
__ cmp(loop_count, 0);
__ j(equal, &move_last_63);
__ bind(&loop);
// Main loop. Copy in 64 byte chunks.
if (direction == BACKWARD) __ sub(src, Immediate(0x40));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
if (direction == FORWARD) __ add(src, Immediate(0x40));
if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ movdqa(Operand(dst, 0x20), xmm2);
__ movdqa(Operand(dst, 0x30), xmm3);
if (direction == FORWARD) __ add(dst, Immediate(0x40));
__ dec(loop_count);
__ j(not_zero, &loop);
// At most 63 bytes left to copy.
__ bind(&move_last_63);
__ test(count, Immediate(0x20));
__ j(zero, &move_last_31);
if (direction == BACKWARD) __ sub(src, Immediate(0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
if (direction == FORWARD) __ add(src, Immediate(0x20));
if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
if (direction == FORWARD) __ add(dst, Immediate(0x20));
// At most 31 bytes left to copy.
__ bind(&move_last_31);
__ test(count, Immediate(0x10));
__ j(zero, move_last_15);
if (direction == BACKWARD) __ sub(src, Immediate(0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
if (direction == FORWARD) __ add(src, Immediate(0x10));
if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
if (direction == FORWARD) __ add(dst, Immediate(0x10));
}
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
__ pop(esi);
__ pop(edi);
__ ret(0);
}
} // namespace
void Builtins::Generate_MemMove(MacroAssembler* masm) {
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
// 32-bit C declaration function calls pass arguments on stack.
// Stack layout:
// esp[12]: Third argument, size.
// esp[8]: Second argument, source pointer.
// esp[4]: First argument, destination pointer.
// esp[0]: return address
const int kDestinationOffset = 1 * kPointerSize;
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
// When copying up to this many bytes, use special "small" handlers.
const size_t kSmallCopySize = 8;
// When copying up to this many bytes, use special "medium" handlers.
const size_t kMediumCopySize = 63;
// When non-overlapping region of src and dst is less than this,
// use a more careful implementation (slightly slower).
const size_t kMinMoveDistance = 16;
// Note that these values are dictated by the implementation below,
// do not just change them and hope things will work!
int stack_offset = 0; // Update if we change the stack height.
Label backward, backward_much_overlap;
Label forward_much_overlap, small_size, medium_size, pop_and_return;
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
Register loop_count = edx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
__ cmp(dst, src);
__ j(equal, &pop_and_return);
__ prefetch(Operand(src, 0), 1);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ cmp(count, kMediumCopySize);
__ j(below_equal, &medium_size);
__ cmp(dst, src);
__ j(above, &backward);
{
// |dst| is a lower address than |src|. Copy front-to-back.
Label unaligned_source, move_last_15, skip_last_move;
__ mov(eax, src);
__ sub(eax, dst);
__ cmp(eax, kMinMoveDistance);
__ j(below, &forward_much_overlap);
// Copy first 16 bytes.
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
// Determine distance to alignment: 16 - (dst & 0xF).
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(edx, Immediate(16));
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
MemMoveEmitMainLoop(masm, &move_last_15, FORWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ bind(&move_last_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(masm);
// Copy loop for unaligned source and aligned destination.
__ bind(&unaligned_source);
MemMoveEmitMainLoop(masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
__ jmp(&move_last_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, last_15_much_overlap;
__ bind(&loop_until_aligned);
__ mov_b(eax, Operand(src, 0));
__ inc(src);
__ mov_b(Operand(dst, 0), eax);
__ inc(dst);
__ dec(count);
__ bind(&forward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(masm, &last_15_much_overlap, FORWARD, MOVE_UNALIGNED);
__ bind(&last_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
{
// |dst| is a higher address than |src|. Copy backwards.
Label unaligned_source, move_first_15, skip_last_move;
__ bind(&backward);
// |dst| and |src| always point to the end of what's left to copy.
__ add(dst, count);
__ add(src, count);
__ mov(eax, dst);
__ sub(eax, src);
__ cmp(eax, kMinMoveDistance);
__ j(below, &backward_much_overlap);
// Copy last 16 bytes.
__ movdqu(xmm0, Operand(src, -0x10));
__ movdqu(Operand(dst, -0x10), xmm0);
// Find distance to alignment: dst & 0xF
__ mov(edx, dst);
__ and_(edx, 0xF);
__ sub(dst, edx);
__ sub(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
MemMoveEmitMainLoop(masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at beginning of string.
__ bind(&move_first_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ sub(src, count);
__ sub(dst, count);
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(masm);
// Copy loop for unaligned source and aligned destination.
__ bind(&unaligned_source);
MemMoveEmitMainLoop(masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
__ jmp(&move_first_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, first_15_much_overlap;
__ bind(&loop_until_aligned);
__ dec(src);
__ dec(dst);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
__ dec(count);
__ bind(&backward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(masm, &first_15_much_overlap, BACKWARD, MOVE_UNALIGNED);
__ bind(&first_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
// Small/medium handlers expect dst/src to point to the beginning.
__ sub(dst, count);
__ sub(src, count);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
{
// Special handlers for 9 <= copy_size < 64. No assumptions about
// alignment or move distance, so all reads must be unaligned and
// must happen before any writes.
Label f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
__ movsd(xmm0, Operand(src, 0));
__ movsd(xmm1, Operand(src, count, times_1, -8));
__ movsd(Operand(dst, 0), xmm0);
__ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(masm);
__ bind(&f17_32);
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(xmm1, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
MemMoveEmitPopAndReturn(masm);
__ bind(&f33_48);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
MemMoveEmitPopAndReturn(masm);
__ bind(&f49_63);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, 0x20));
__ movdqu(xmm3, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, 0x20), xmm2);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
MemMoveEmitPopAndReturn(masm);
__ bind(&medium_size); // Entry point into this block.
__ mov(eax, count);
__ dec(eax);
__ shr(eax, 4);
if (FLAG_debug_code) {
Label ok;
__ cmp(eax, 3);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
// Dispatch to handlers.
Label eax_is_2_or_3;
__ cmp(eax, 1);
__ j(greater, &eax_is_2_or_3);
__ j(less, &f9_16); // eax == 0.
__ jmp(&f17_32); // eax == 1.
__ bind(&eax_is_2_or_3);
__ cmp(eax, 3);
__ j(less, &f33_48); // eax == 2.
__ jmp(&f49_63); // eax == 3.
}
{
// Specialized copiers for copy_size <= 8 bytes.
Label f0, f1, f2, f3, f4, f5_8;
__ bind(&f0);
MemMoveEmitPopAndReturn(masm);
__ bind(&f1);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(masm);
__ bind(&f2);
__ mov_w(eax, Operand(src, 0));
__ mov_w(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(masm);
__ bind(&f3);
__ mov_w(eax, Operand(src, 0));
__ mov_b(edx, Operand(src, 2));
__ mov_w(Operand(dst, 0), eax);
__ mov_b(Operand(dst, 2), edx);
MemMoveEmitPopAndReturn(masm);
__ bind(&f4);
__ mov(eax, Operand(src, 0));
__ mov(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(masm);
__ bind(&f5_8);
__ mov(eax, Operand(src, 0));
__ mov(edx, Operand(src, count, times_1, -4));
__ mov(Operand(dst, 0), eax);
__ mov(Operand(dst, count, times_1, -4), edx);
MemMoveEmitPopAndReturn(masm);
__ bind(&small_size); // Entry point into this block.
if (FLAG_debug_code) {
Label ok;
__ cmp(count, 8);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
// Dispatch to handlers.
Label count_is_above_3, count_is_2_or_3;
__ cmp(count, 3);
__ j(greater, &count_is_above_3);
__ cmp(count, 1);
__ j(greater, &count_is_2_or_3);
__ j(less, &f0); // count == 0.
__ jmp(&f1); // count == 1.
__ bind(&count_is_2_or_3);
__ cmp(count, 3);
__ j(less, &f2); // count == 2.
__ jmp(&f3); // count == 3.
__ bind(&count_is_above_3);
__ cmp(count, 5);
__ j(less, &f4); // count == 4.
__ jmp(&f5_8); // count in [5, 8].
}
__ bind(&pop_and_return);
MemMoveEmitPopAndReturn(masm);
}
#undef __
} // namespace internal
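
A note on the jump-table change mentioned in the commit message: the removed CreateMemMoveFunction (see the deleted codegen-ia32.cc further down) dispatched the small and medium handlers through in-buffer tables of absolute label addresses followed by an indirect jump, for example:

    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    // ... remaining table entries ...
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);

The builtin version above replaces those tables with the short cmp/j branch ladders under "// Dispatch to handlers.", presumably so that no absolute code addresses end up baked into the builtin body; the commit message itself only calls it a "slightly changed implementation".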


@@ -3260,6 +3260,517 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ Jump(t9);
}
void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
{
Label lastb, unaligned, aligned, chkw, loop16w, chk1w, wordCopy_loop,
skip_pref, lastbloop, leave, ua_chk16w, ua_loop16w, ua_skip_pref,
ua_chkw, ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
// The size of each prefetch.
uint32_t pref_chunk = 32;
// The maximum size of a prefetch, it must not be less than pref_chunk.
// If the real size of a prefetch is greater than max_pref_size and
// the kPrefHintPrepareForStore hint is used, the code will not work
// correctly.
uint32_t max_pref_size = 128;
DCHECK(pref_chunk < max_pref_size);
// pref_limit is set based on the fact that we never use an offset
// greater than 5 on a store pref and that a single pref can
// never be larger than max_pref_size.
uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
int32_t pref_hint_load = kPrefHintLoadStreamed;
int32_t pref_hint_store = kPrefHintPrepareForStore;
uint32_t loadstore_chunk = 4;
// The initial prefetches may fetch bytes that are before the buffer being
// copied. Start copies with an offset of 4 to avoid this situation when
// using kPrefHintPrepareForStore.
DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
pref_chunk * 4 >= max_pref_size);
// If the size is less than 8, go to lastb. Regardless of size,
// copy dst pointer to v0 for the return value.
__ slti(t2, a2, 2 * loadstore_chunk);
__ bne(t2, zero_reg, &lastb);
__ mov(v0, a0); // In delay slot.
// If src and dst have different alignments, go to unaligned, if they
// have the same alignment (but are not actually aligned) do a partial
// load/store to make them aligned. If they are both already aligned
// we can start copying at aligned.
__ xor_(t8, a1, a0);
__ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
__ bne(t8, zero_reg, &unaligned);
__ subu(a3, zero_reg, a0); // In delay slot.
__ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
__ beq(a3, zero_reg, &aligned); // Already aligned.
__ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
if (kArchEndian == kLittle) {
__ lwr(t8, MemOperand(a1));
__ addu(a1, a1, a3);
__ swr(t8, MemOperand(a0));
__ addu(a0, a0, a3);
} else {
__ lwl(t8, MemOperand(a1));
__ addu(a1, a1, a3);
__ swl(t8, MemOperand(a0));
__ addu(a0, a0, a3);
}
// Now dst/src are both aligned to (word) aligned addresses. Set a2 to
// count how many bytes we have to copy after all the 64 byte chunks are
// copied and a3 to the dst pointer after all the 64 byte chunks have been
// copied. We will loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&aligned);
__ andi(t8, a2, 0x3F);
__ beq(a2, t8, &chkw); // Less than 64?
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3); // Now a3 is the final dst after loop.
// When in the loop we prefetch with kPrefHintPrepareForStore hint,
// in this case the a0+x should be past the "t0-32" address. This means:
// for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
// x=64 the last "safe" a0 address is "t0-96". In the current version we
// will use "pref hint, 128(a0)", so "t0-160" is the limit.
if (pref_hint_store == kPrefHintPrepareForStore) {
__ addu(t0, a0, a2); // t0 is the "past the end" address.
__ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
}
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
if (pref_hint_store != kPrefHintPrepareForStore) {
__ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&loop16w);
__ lw(t0, MemOperand(a1));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
__ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
}
__ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&skip_pref);
__ lw(t2, MemOperand(a1, 2, loadstore_chunk));
__ lw(t3, MemOperand(a1, 3, loadstore_chunk));
__ lw(t4, MemOperand(a1, 4, loadstore_chunk));
__ lw(t5, MemOperand(a1, 5, loadstore_chunk));
__ lw(t6, MemOperand(a1, 6, loadstore_chunk));
__ lw(t7, MemOperand(a1, 7, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
__ lw(t0, MemOperand(a1, 8, loadstore_chunk));
__ lw(t1, MemOperand(a1, 9, loadstore_chunk));
__ lw(t2, MemOperand(a1, 10, loadstore_chunk));
__ lw(t3, MemOperand(a1, 11, loadstore_chunk));
__ lw(t4, MemOperand(a1, 12, loadstore_chunk));
__ lw(t5, MemOperand(a1, 13, loadstore_chunk));
__ lw(t6, MemOperand(a1, 14, loadstore_chunk));
__ lw(t7, MemOperand(a1, 15, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
__ sw(t0, MemOperand(a0, 8, loadstore_chunk));
__ sw(t1, MemOperand(a0, 9, loadstore_chunk));
__ sw(t2, MemOperand(a0, 10, loadstore_chunk));
__ sw(t3, MemOperand(a0, 11, loadstore_chunk));
__ sw(t4, MemOperand(a0, 12, loadstore_chunk));
__ sw(t5, MemOperand(a0, 13, loadstore_chunk));
__ sw(t6, MemOperand(a0, 14, loadstore_chunk));
__ sw(t7, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
__ mov(a2, t8);
// Here we have src and dest word-aligned but less than 64-bytes to go.
// Check for a 32 bytes chunk and copy if there is one. Otherwise jump
// down to chk1w to handle the tail end of the copy.
__ bind(&chkw);
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ andi(t8, a2, 0x1F);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
__ lw(t0, MemOperand(a1));
__ lw(t1, MemOperand(a1, 1, loadstore_chunk));
__ lw(t2, MemOperand(a1, 2, loadstore_chunk));
__ lw(t3, MemOperand(a1, 3, loadstore_chunk));
__ lw(t4, MemOperand(a1, 4, loadstore_chunk));
__ lw(t5, MemOperand(a1, 5, loadstore_chunk));
__ lw(t6, MemOperand(a1, 6, loadstore_chunk));
__ lw(t7, MemOperand(a1, 7, loadstore_chunk));
__ addiu(a1, a1, 8 * loadstore_chunk);
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Here we have less than 32 bytes to copy. Set up for a loop to copy
// one word at a time. Set a2 to count how many bytes we have to copy
// after all the word chunks are copied and a3 to the dst pointer after
// all the word chunks have been copied. We will loop, incrementing a0
// and a1 until a0 equals a3.
__ bind(&chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &lastb);
__ subu(a3, t8, a2); // In delay slot.
__ addu(a3, a0, a3);
__ bind(&wordCopy_loop);
__ lw(t3, MemOperand(a1));
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &wordCopy_loop);
__ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
__ bind(&lastb);
__ Branch(&leave, le, a2, Operand(zero_reg));
__ addu(a3, a0, a2);
__ bind(&lastbloop);
__ lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &lastbloop);
__ sb(v1, MemOperand(a0, -1)); // In delay slot.
__ bind(&leave);
__ jr(ra);
__ nop();
// Unaligned case. Only the dst gets aligned so we need to do partial
// loads of the source followed by normal stores to the dst (once we
// have aligned the destination).
__ bind(&unaligned);
__ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
__ beq(a3, zero_reg, &ua_chk16w);
__ subu(a2, a2, a3); // In delay slot.
if (kArchEndian == kLittle) {
__ lwr(v1, MemOperand(a1));
__ lwl(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ addu(a1, a1, a3);
__ swr(v1, MemOperand(a0));
__ addu(a0, a0, a3);
} else {
__ lwl(v1, MemOperand(a1));
__ lwr(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ addu(a1, a1, a3);
__ swl(v1, MemOperand(a0));
__ addu(a0, a0, a3);
}
// Now the dst (but not the source) is aligned. Set a2 to count how many
// bytes we have to copy after all the 64 byte chunks are copied and a3 to
// the dst pointer after all the 64 byte chunks have been copied. We will
// loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&ua_chk16w);
__ andi(t8, a2, 0x3F);
__ beq(a2, t8, &ua_chkw);
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3);
if (pref_hint_store == kPrefHintPrepareForStore) {
__ addu(t0, a0, a2);
__ Subu(t9, t0, pref_limit);
}
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
if (pref_hint_store != kPrefHintPrepareForStore) {
__ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&ua_loop16w);
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
if (kArchEndian == kLittle) {
__ lwr(t0, MemOperand(a1));
__ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0);
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
}
__ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&ua_skip_pref);
__ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwl(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(t0, MemOperand(a1));
__ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0);
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
}
__ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&ua_skip_pref);
__ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwr(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
if (kArchEndian == kLittle) {
__ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
__ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
__ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
__ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
__ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
__ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
__ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
__ lwl(t0,
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t4,
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t5,
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t6,
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t7,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
__ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
__ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
__ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
__ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
__ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
__ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
__ lwr(t0,
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t4,
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t5,
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t6,
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t7,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
__ sw(t0, MemOperand(a0, 8, loadstore_chunk));
__ sw(t1, MemOperand(a0, 9, loadstore_chunk));
__ sw(t2, MemOperand(a0, 10, loadstore_chunk));
__ sw(t3, MemOperand(a0, 11, loadstore_chunk));
__ sw(t4, MemOperand(a0, 12, loadstore_chunk));
__ sw(t5, MemOperand(a0, 13, loadstore_chunk));
__ sw(t6, MemOperand(a0, 14, loadstore_chunk));
__ sw(t7, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &ua_loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
__ mov(a2, t8);
// Here less than 64-bytes. Check for
// a 32 byte chunk and copy if there is one. Otherwise jump down to
// ua_chk1w to handle the tail end of the copy.
__ bind(&ua_chkw);
__ Pref(pref_hint_load, MemOperand(a1));
__ andi(t8, a2, 0x1F);
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
if (kArchEndian == kLittle) {
__ lwr(t0, MemOperand(a1));
__ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
__ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
__ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwl(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(t0, MemOperand(a1));
__ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
__ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
__ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwr(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a1, a1, 8 * loadstore_chunk);
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Less than 32 bytes to copy. Set up for a loop to
// copy one word at a time.
__ bind(&ua_chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &ua_smallCopy);
__ subu(a3, t8, a2); // In delay slot.
__ addu(a3, a0, a3);
__ bind(&ua_wordCopy_loop);
if (kArchEndian == kLittle) {
__ lwr(v1, MemOperand(a1));
__ lwl(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(v1, MemOperand(a1));
__ lwr(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &ua_wordCopy_loop);
__ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
// Copy the last 8 bytes.
__ bind(&ua_smallCopy);
__ beq(a2, zero_reg, &leave);
__ addu(a3, a0, a2); // In delay slot.
__ bind(&ua_smallCopy_loop);
__ lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &ua_smallCopy_loop);
__ sb(v1, MemOperand(a0, -1)); // In delay slot.
__ jr(ra);
__ nop();
}
}
#undef __
} // namespace internal


@@ -1,432 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_IA32
#include "src/heap/factory-inl.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
// Helper functions for CreateMemMoveFunction.
#define __ ACCESS_MASM(masm)
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
// Expects registers:
// esi - source, aligned if alignment == ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
Label* move_last_15,
Direction direction,
Alignment alignment) {
Register src = esi;
Register dst = edi;
Register count = ecx;
Register loop_count = edx;
Label loop, move_last_31, move_last_63;
__ cmp(loop_count, 0);
__ j(equal, &move_last_63);
__ bind(&loop);
// Main loop. Copy in 64 byte chunks.
if (direction == BACKWARD) __ sub(src, Immediate(0x40));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
if (direction == FORWARD) __ add(src, Immediate(0x40));
if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ movdqa(Operand(dst, 0x20), xmm2);
__ movdqa(Operand(dst, 0x30), xmm3);
if (direction == FORWARD) __ add(dst, Immediate(0x40));
__ dec(loop_count);
__ j(not_zero, &loop);
// At most 63 bytes left to copy.
__ bind(&move_last_63);
__ test(count, Immediate(0x20));
__ j(zero, &move_last_31);
if (direction == BACKWARD) __ sub(src, Immediate(0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
if (direction == FORWARD) __ add(src, Immediate(0x20));
if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
if (direction == FORWARD) __ add(dst, Immediate(0x20));
// At most 31 bytes left to copy.
__ bind(&move_last_31);
__ test(count, Immediate(0x10));
__ j(zero, move_last_15);
if (direction == BACKWARD) __ sub(src, Immediate(0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
if (direction == FORWARD) __ add(src, Immediate(0x10));
if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
if (direction == FORWARD) __ add(dst, Immediate(0x10));
}
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
__ pop(esi);
__ pop(edi);
__ ret(0);
}
#undef __
#define __ masm.
class LabelConverter {
public:
explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
int32_t address(Label* l) const {
return reinterpret_cast<int32_t>(buffer_) + l->pos();
}
private:
byte* buffer_;
};
MemMoveFunction CreateMemMoveFunction() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
// 32-bit C declaration function calls pass arguments on stack.
// Stack layout:
// esp[12]: Third argument, size.
// esp[8]: Second argument, source pointer.
// esp[4]: First argument, destination pointer.
// esp[0]: return address
const int kDestinationOffset = 1 * kPointerSize;
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
// When copying up to this many bytes, use special "small" handlers.
const size_t kSmallCopySize = 8;
// When copying up to this many bytes, use special "medium" handlers.
const size_t kMediumCopySize = 63;
// When non-overlapping region of src and dst is less than this,
// use a more careful implementation (slightly slower).
const size_t kMinMoveDistance = 16;
// Note that these values are dictated by the implementation below,
// do not just change them and hope things will work!
int stack_offset = 0; // Update if we change the stack height.
Label backward, backward_much_overlap;
Label forward_much_overlap, small_size, medium_size, pop_and_return;
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
Register loop_count = edx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
__ cmp(dst, src);
__ j(equal, &pop_and_return);
__ prefetch(Operand(src, 0), 1);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ cmp(count, kMediumCopySize);
__ j(below_equal, &medium_size);
__ cmp(dst, src);
__ j(above, &backward);
{
// |dst| is a lower address than |src|. Copy front-to-back.
Label unaligned_source, move_last_15, skip_last_move;
__ mov(eax, src);
__ sub(eax, dst);
__ cmp(eax, kMinMoveDistance);
__ j(below, &forward_much_overlap);
// Copy first 16 bytes.
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
// Determine distance to alignment: 16 - (dst & 0xF).
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(edx, Immediate(16));
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ bind(&move_last_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(&masm);
// Copy loop for unaligned source and aligned destination.
__ bind(&unaligned_source);
MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
__ jmp(&move_last_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, last_15_much_overlap;
__ bind(&loop_until_aligned);
__ mov_b(eax, Operand(src, 0));
__ inc(src);
__ mov_b(Operand(dst, 0), eax);
__ inc(dst);
__ dec(count);
__ bind(&forward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
FORWARD, MOVE_UNALIGNED);
__ bind(&last_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
{
// |dst| is a higher address than |src|. Copy backwards.
Label unaligned_source, move_first_15, skip_last_move;
__ bind(&backward);
// |dst| and |src| always point to the end of what's left to copy.
__ add(dst, count);
__ add(src, count);
__ mov(eax, dst);
__ sub(eax, src);
__ cmp(eax, kMinMoveDistance);
__ j(below, &backward_much_overlap);
// Copy last 16 bytes.
__ movdqu(xmm0, Operand(src, -0x10));
__ movdqu(Operand(dst, -0x10), xmm0);
// Find distance to alignment: dst & 0xF
__ mov(edx, dst);
__ and_(edx, 0xF);
__ sub(dst, edx);
__ sub(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at beginning of string.
__ bind(&move_first_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ sub(src, count);
__ sub(dst, count);
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(&masm);
// Copy loop for unaligned source and aligned destination.
__ bind(&unaligned_source);
MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
__ jmp(&move_first_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, first_15_much_overlap;
__ bind(&loop_until_aligned);
__ dec(src);
__ dec(dst);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
__ dec(count);
__ bind(&backward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
BACKWARD, MOVE_UNALIGNED);
__ bind(&first_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
// Small/medium handlers expect dst/src to point to the beginning.
__ sub(dst, count);
__ sub(src, count);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
{
// Special handlers for 9 <= copy_size < 64. No assumptions about
// alignment or move distance, so all reads must be unaligned and
// must happen before any writes.
Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
__ movsd(xmm0, Operand(src, 0));
__ movsd(xmm1, Operand(src, count, times_1, -8));
__ movsd(Operand(dst, 0), xmm0);
__ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(xmm1, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f33_48);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f49_63);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, 0x20));
__ movdqu(xmm3, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, 0x20), xmm2);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
MemMoveEmitPopAndReturn(&masm);
__ bind(&medium_handlers);
__ dd(conv.address(&f9_16));
__ dd(conv.address(&f17_32));
__ dd(conv.address(&f33_48));
__ dd(conv.address(&f49_63));
__ bind(&medium_size); // Entry point into this block.
__ mov(eax, count);
__ dec(eax);
__ shr(eax, 4);
if (FLAG_debug_code) {
Label ok;
__ cmp(eax, 3);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
__ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
__ jmp(eax);
}
{
// Specialized copiers for copy_size <= 8 bytes.
Label small_handlers, f0, f1, f2, f3, f4, f5_8;
__ bind(&f0);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f1);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f2);
__ mov_w(eax, Operand(src, 0));
__ mov_w(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f3);
__ mov_w(eax, Operand(src, 0));
__ mov_b(edx, Operand(src, 2));
__ mov_w(Operand(dst, 0), eax);
__ mov_b(Operand(dst, 2), edx);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f4);
__ mov(eax, Operand(src, 0));
__ mov(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f5_8);
__ mov(eax, Operand(src, 0));
__ mov(edx, Operand(src, count, times_1, -4));
__ mov(Operand(dst, 0), eax);
__ mov(Operand(dst, count, times_1, -4), edx);
MemMoveEmitPopAndReturn(&masm);
__ bind(&small_handlers);
__ dd(conv.address(&f0));
__ dd(conv.address(&f1));
__ dd(conv.address(&f2));
__ dd(conv.address(&f3));
__ dd(conv.address(&f4));
__ dd(conv.address(&f5_8));
__ dd(conv.address(&f5_8));
__ dd(conv.address(&f5_8));
__ dd(conv.address(&f5_8));
__ bind(&small_size); // Entry point into this block.
if (FLAG_debug_code) {
Label ok;
__ cmp(count, 8);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
__ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
__ jmp(eax);
}
__ bind(&pop_and_return);
MemMoveEmitPopAndReturn(&masm);
CodeDesc desc;
masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_IA32


@@ -463,6 +463,9 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
// descriptor associated.
typedef VoidDescriptor DummyDescriptor;
// Dummy descriptor that marks builtins with C calling convention.
typedef VoidDescriptor CCallDescriptor;
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)


@@ -183,6 +183,22 @@ void FreeCurrentEmbeddedBlob() {
sticky_embedded_blob_size_ = 0;
}
// static
bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
// In some situations, we must be able to rely on the embedded blob being
// immortal immovable. This is the case if the blob is binary-embedded.
// See blob lifecycle controls above for descriptions of when the current
// embedded blob may change (e.g. in tests or mksnapshot). If the blob is
// binary-embedded, it is immortal immovable.
const uint8_t* blob =
current_embedded_blob_.load(std::memory_order::memory_order_relaxed);
if (blob == nullptr) return false;
#ifdef V8_MULTI_SNAPSHOTS
if (blob == TrustedEmbeddedBlob()) return true;
#endif
return blob == DefaultEmbeddedBlob();
}
void Isolate::SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size) {
CHECK_NOT_NULL(blob);
@@ -304,7 +320,6 @@ void Isolate::InitializeOncePerProcess() {
base::Relaxed_Store(&isolate_key_created_, 1);
#endif
per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
init_memcopy_functions();
}
Address Isolate::get_address_from_id(IsolateAddressId id) {
@@ -3283,6 +3298,10 @@ bool Isolate::Init(StartupDeserializer* des) {
CreateAndSetEmbeddedBlob();
}
// Initialize custom memcopy and memmove functions (must happen after
// embedded blob setup).
init_memcopy_functions();
if (FLAG_log_internal_timer_events) {
set_event_logger(Logger::DefaultEventLoggerSentinel);
}


@@ -1484,6 +1484,7 @@ class Isolate final : private HiddenFactory {
static const uint8_t* CurrentEmbeddedBlob();
static uint32_t CurrentEmbeddedBlobSize();
static bool CurrentEmbeddedBlobIsBinaryEmbedded();
// These always return the same result as static methods above, but don't
// access the global atomic variable (and thus *might be* slightly faster).


@@ -4,6 +4,8 @@
#include "src/memcopy.h"
#include "src/snapshot/embedded-data.h"
namespace v8 {
namespace internal {
@@ -15,9 +17,6 @@ static void MemMoveWrapper(void* dest, const void* src, size_t size) {
// Initialize to library version so we can call this at any time during startup.
static MemMoveFunction memmove_function = &MemMoveWrapper;
// Defined in codegen-ia32.cc.
MemMoveFunction CreateMemMoveFunction();
// Copy memory area to disjoint memory area.
V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size) {
if (size == 0) return;
@@ -25,7 +24,6 @@ V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size) {
// on all architectures we currently support.
(*memmove_function)(dest, src, size);
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
size_t chars) {
@@ -39,34 +37,33 @@ V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
&MemCopyUint8Wrapper;
MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
&MemCopyUint16Uint8Wrapper;
// Defined in codegen-arm.cc.
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
MemCopyUint16Uint8Function stub);
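// For orientation (assumed semantics, not part of this change): the
// uint16 <- uint8 flavour is a widening copy, each source byte zero-extended
// into a uint16_t element, roughly:
//   for (size_t i = 0; i < chars; i++) dest[i] = static_cast<uint16_t>(src[i]);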
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
&MemCopyUint8Wrapper;
// Defined in codegen-mips.cc.
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
#endif
static bool g_memcopy_functions_initialized = false;
void init_memcopy_functions() {
if (g_memcopy_functions_initialized) return;
g_memcopy_functions_initialized = true;
#if V8_TARGET_ARCH_IA32
MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != nullptr) {
memmove_function = generated_memmove;
if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
EmbeddedData d = EmbeddedData::FromBlob();
memmove_function = reinterpret_cast<MemMoveFunction>(
d.InstructionStartOfBuiltin(Builtins::kMemMove));
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
memcopy_uint16_uint8_function =
CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper);
if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
EmbeddedData d = EmbeddedData::FromBlob();
memcopy_uint8_function = reinterpret_cast<MemCopyUint8Function>(
d.InstructionStartOfBuiltin(Builtins::kMemCopyUint8Uint8));
memcopy_uint16_uint8_function =
reinterpret_cast<MemCopyUint16Uint8Function>(
d.InstructionStartOfBuiltin(Builtins::kMemCopyUint16Uint8));
}
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) {
EmbeddedData d = EmbeddedData::FromBlob();
memcopy_uint8_function = reinterpret_cast<MemCopyUint8Function>(
d.InstructionStartOfBuiltin(Builtins::kMemCopyUint8Uint8));
}
#endif
}
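All three branches above share one pattern: resolve a builtin's entry point from the embedded blob and cache it in a plain C function pointer. A condensed sketch of that pattern (EmbeddedData, InstructionStartOfBuiltin and the builtin ids come from the hunk above; the templated helper, the Builtins::Name parameter type and the CHECK are assumptions for illustration):

  template <typename F>
  F LookupBuiltinAsFunction(Builtins::Name builtin) {
    // Only valid when the blob is binary-embedded and thus immortal immovable.
    CHECK(Isolate::CurrentEmbeddedBlobIsBinaryEmbedded());
    EmbeddedData d = EmbeddedData::FromBlob();
    return reinterpret_cast<F>(d.InstructionStartOfBuiltin(builtin));
  }
  // e.g. memmove_function = LookupBuiltinAsFunction<MemMoveFunction>(Builtins::kMemMove);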


@ -18,9 +18,8 @@ namespace internal {
typedef uintptr_t Address;
// ----------------------------------------------------------------------------
// Generated memcpy/memmove
// Generated memcpy/memmove for ia32, arm, and mips.
// Initializes the codegen support that depends on CPU features.
void init_memcopy_functions();
#if defined(V8_TARGET_ARCH_IA32)
@ -148,12 +147,8 @@ inline void MemsetPointer(Address* dest, Address value, size_t counter) {
#if V8_HOST_ARCH_IA32
#define STOS "stosl"
#elif V8_HOST_ARCH_X64
#if V8_HOST_ARCH_32_BIT
#define STOS "addr32 stosl"
#else
#define STOS "stosq"
#endif
#endif
#if defined(MEMORY_SANITIZER)
// MemorySanitizer does not understand inline assembly.


@ -1,558 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_MIPS
#include <memory>
#include "src/macro-assembler.h"
#include "src/mips/simulator-mips.h"
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
// The size of each prefetch.
uint32_t pref_chunk = 32;
// The maximum size of a prefetch; it must not be less than pref_chunk.
// If the real size of a prefetch is greater than max_pref_size and
// the kPrefHintPrepareForStore hint is used, the code will not work
// correctly.
uint32_t max_pref_size = 128;
DCHECK(pref_chunk < max_pref_size);
// pref_limit is set based on the fact that we never use an offset
// greater than 5 on a store pref and that a single pref can
// never be larger than max_pref_size.
uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
int32_t pref_hint_load = kPrefHintLoadStreamed;
int32_t pref_hint_store = kPrefHintPrepareForStore;
uint32_t loadstore_chunk = 4;
// The initial prefetches may fetch bytes that are before the buffer being
// copied. Start copies with an offset of 4 to avoid this situation when
// using kPrefHintPrepareForStore.
DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
pref_chunk * 4 >= max_pref_size);
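// Worked example with the constants above (illustrative): pref_chunk = 32 and
// max_pref_size = 128 give pref_limit = (5 * 32) + 128 = 288 bytes, and the
// DCHECK holds since pref_chunk * 4 = 128 >= max_pref_size.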
// If the size is less than 8, go to lastb. Regardless of size,
// copy dst pointer to v0 for the return value.
__ slti(t2, a2, 2 * loadstore_chunk);
__ bne(t2, zero_reg, &lastb);
__ mov(v0, a0); // In delay slot.
// If src and dst have different alignments, go to unaligned, if they
// have the same alignment (but are not actually aligned) do a partial
// load/store to make them aligned. If they are both already aligned
// we can start copying at aligned.
__ xor_(t8, a1, a0);
__ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
__ bne(t8, zero_reg, &unaligned);
__ subu(a3, zero_reg, a0); // In delay slot.
__ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
__ beq(a3, zero_reg, &aligned); // Already aligned.
__ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
if (kArchEndian == kLittle) {
__ lwr(t8, MemOperand(a1));
__ addu(a1, a1, a3);
__ swr(t8, MemOperand(a0));
__ addu(a0, a0, a3);
} else {
__ lwl(t8, MemOperand(a1));
__ addu(a1, a1, a3);
__ swl(t8, MemOperand(a0));
__ addu(a0, a0, a3);
}
// Now dst/src are both aligned to (word) aligned addresses. Set a2 to
// count how many bytes we have to copy after all the 64 byte chunks are
// copied and a3 to the dst pointer after all the 64 byte chunks have been
// copied. We will loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&aligned);
__ andi(t8, a2, 0x3F);
__ beq(a2, t8, &chkw); // Less than 64?
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3); // Now a3 is the final dst after loop.
// When in the loop we prefetch with kPrefHintPrepareForStore hint,
// in this case the a0+x should be past the "t0-32" address. This means:
// for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
// x=64 the last "safe" a0 address is "t0-96". In the current version we
// will use "pref hint, 128(a0)", so "t0-160" is the limit.
if (pref_hint_store == kPrefHintPrepareForStore) {
__ addu(t0, a0, a2); // t0 is the "past the end" address.
__ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
}
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
if (pref_hint_store != kPrefHintPrepareForStore) {
__ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&loop16w);
__ lw(t0, MemOperand(a1));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
__ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
}
__ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&skip_pref);
__ lw(t2, MemOperand(a1, 2, loadstore_chunk));
__ lw(t3, MemOperand(a1, 3, loadstore_chunk));
__ lw(t4, MemOperand(a1, 4, loadstore_chunk));
__ lw(t5, MemOperand(a1, 5, loadstore_chunk));
__ lw(t6, MemOperand(a1, 6, loadstore_chunk));
__ lw(t7, MemOperand(a1, 7, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
__ lw(t0, MemOperand(a1, 8, loadstore_chunk));
__ lw(t1, MemOperand(a1, 9, loadstore_chunk));
__ lw(t2, MemOperand(a1, 10, loadstore_chunk));
__ lw(t3, MemOperand(a1, 11, loadstore_chunk));
__ lw(t4, MemOperand(a1, 12, loadstore_chunk));
__ lw(t5, MemOperand(a1, 13, loadstore_chunk));
__ lw(t6, MemOperand(a1, 14, loadstore_chunk));
__ lw(t7, MemOperand(a1, 15, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
__ sw(t0, MemOperand(a0, 8, loadstore_chunk));
__ sw(t1, MemOperand(a0, 9, loadstore_chunk));
__ sw(t2, MemOperand(a0, 10, loadstore_chunk));
__ sw(t3, MemOperand(a0, 11, loadstore_chunk));
__ sw(t4, MemOperand(a0, 12, loadstore_chunk));
__ sw(t5, MemOperand(a0, 13, loadstore_chunk));
__ sw(t6, MemOperand(a0, 14, loadstore_chunk));
__ sw(t7, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
__ mov(a2, t8);
// Here we have src and dest word-aligned but less than 64-bytes to go.
// Check for a 32 bytes chunk and copy if there is one. Otherwise jump
// down to chk1w to handle the tail end of the copy.
__ bind(&chkw);
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ andi(t8, a2, 0x1F);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
__ lw(t0, MemOperand(a1));
__ lw(t1, MemOperand(a1, 1, loadstore_chunk));
__ lw(t2, MemOperand(a1, 2, loadstore_chunk));
__ lw(t3, MemOperand(a1, 3, loadstore_chunk));
__ lw(t4, MemOperand(a1, 4, loadstore_chunk));
__ lw(t5, MemOperand(a1, 5, loadstore_chunk));
__ lw(t6, MemOperand(a1, 6, loadstore_chunk));
__ lw(t7, MemOperand(a1, 7, loadstore_chunk));
__ addiu(a1, a1, 8 * loadstore_chunk);
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Here we have less than 32 bytes to copy. Set up for a loop to copy
// one word at a time. Set a2 to count how many bytes we have to copy
// after all the word chunks are copied and a3 to the dst pointer after
// all the word chunks have been copied. We will loop, incrementing a0
// and a1 until a0 equals a3.
__ bind(&chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &lastb);
__ subu(a3, t8, a2); // In delay slot.
__ addu(a3, a0, a3);
__ bind(&wordCopy_loop);
__ lw(t3, MemOperand(a1));
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &wordCopy_loop);
__ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
__ bind(&lastb);
__ Branch(&leave, le, a2, Operand(zero_reg));
__ addu(a3, a0, a2);
__ bind(&lastbloop);
__ lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &lastbloop);
__ sb(v1, MemOperand(a0, -1)); // In delay slot.
__ bind(&leave);
__ jr(ra);
__ nop();
// Unaligned case. Only the dst gets aligned so we need to do partial
// loads of the source followed by normal stores to the dst (once we
// have aligned the destination).
__ bind(&unaligned);
__ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
__ beq(a3, zero_reg, &ua_chk16w);
__ subu(a2, a2, a3); // In delay slot.
if (kArchEndian == kLittle) {
__ lwr(v1, MemOperand(a1));
__ lwl(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ addu(a1, a1, a3);
__ swr(v1, MemOperand(a0));
__ addu(a0, a0, a3);
} else {
__ lwl(v1, MemOperand(a1));
__ lwr(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ addu(a1, a1, a3);
__ swl(v1, MemOperand(a0));
__ addu(a0, a0, a3);
}
// Now the dst (but not the source) is aligned. Set a2 to count how many
// bytes we have to copy after all the 64 byte chunks are copied and a3 to
// the dst pointer after all the 64 byte chunks have been copied. We will
// loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&ua_chk16w);
__ andi(t8, a2, 0x3F);
__ beq(a2, t8, &ua_chkw);
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3);
if (pref_hint_store == kPrefHintPrepareForStore) {
__ addu(t0, a0, a2);
__ Subu(t9, t0, pref_limit);
}
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
if (pref_hint_store != kPrefHintPrepareForStore) {
__ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&ua_loop16w);
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
if (kArchEndian == kLittle) {
__ lwr(t0, MemOperand(a1));
__ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0);
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
}
__ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&ua_skip_pref);
__ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwl(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(t0, MemOperand(a1));
__ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0);
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
}
__ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&ua_skip_pref);
__ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwr(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
if (kArchEndian == kLittle) {
__ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
__ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
__ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
__ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
__ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
__ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
__ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
__ lwl(t0,
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t4,
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t5,
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t6,
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t7,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
__ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
__ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
__ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
__ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
__ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
__ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
__ lwr(t0,
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t4,
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t5,
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t6,
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t7,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
__ sw(t0, MemOperand(a0, 8, loadstore_chunk));
__ sw(t1, MemOperand(a0, 9, loadstore_chunk));
__ sw(t2, MemOperand(a0, 10, loadstore_chunk));
__ sw(t3, MemOperand(a0, 11, loadstore_chunk));
__ sw(t4, MemOperand(a0, 12, loadstore_chunk));
__ sw(t5, MemOperand(a0, 13, loadstore_chunk));
__ sw(t6, MemOperand(a0, 14, loadstore_chunk));
__ sw(t7, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &ua_loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
__ mov(a2, t8);
// Here we have less than 64 bytes to go. Check for
// a 32 byte chunk and copy if there is one. Otherwise jump down to
// ua_chk1w to handle the tail end of the copy.
__ bind(&ua_chkw);
__ Pref(pref_hint_load, MemOperand(a1));
__ andi(t8, a2, 0x1F);
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
if (kArchEndian == kLittle) {
__ lwr(t0, MemOperand(a1));
__ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
__ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
__ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwl(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(t0, MemOperand(a1));
__ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
__ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
__ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
__ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
__ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
__ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
__ lwr(t0,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t4,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t5,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t6,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t7,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a1, a1, 8 * loadstore_chunk);
__ sw(t0, MemOperand(a0));
__ sw(t1, MemOperand(a0, 1, loadstore_chunk));
__ sw(t2, MemOperand(a0, 2, loadstore_chunk));
__ sw(t3, MemOperand(a0, 3, loadstore_chunk));
__ sw(t4, MemOperand(a0, 4, loadstore_chunk));
__ sw(t5, MemOperand(a0, 5, loadstore_chunk));
__ sw(t6, MemOperand(a0, 6, loadstore_chunk));
__ sw(t7, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Less than 32 bytes to copy. Set up for a loop to
// copy one word at a time.
__ bind(&ua_chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &ua_smallCopy);
__ subu(a3, t8, a2); // In delay slot.
__ addu(a3, a0, a3);
__ bind(&ua_wordCopy_loop);
if (kArchEndian == kLittle) {
__ lwr(v1, MemOperand(a1));
__ lwl(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(v1, MemOperand(a1));
__ lwr(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &ua_wordCopy_loop);
__ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
// Copy the remaining bytes (fewer than one word) one byte at a time.
__ bind(&ua_smallCopy);
__ beq(a2, zero_reg, &leave);
__ addu(a3, a0, a2); // In delay slot.
__ bind(&ua_smallCopy_loop);
__ lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &ua_smallCopy_loop);
__ sb(v1, MemOperand(a0, -1)); // In delay slot.
__ jr(ra);
__ nop();
}
CodeDesc desc;
masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS


@ -1,558 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_MIPS64
#include <memory>
#include "src/macro-assembler.h"
#include "src/mips64/simulator-mips64.h"
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
{
Label lastb, unaligned, aligned, chkw,
loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
// The size of each prefetch.
uint32_t pref_chunk = 32;
// The maximum size of a prefetch; it must not be less than pref_chunk.
// If the real size of a prefetch is greater than max_pref_size and
// the kPrefHintPrepareForStore hint is used, the code will not work
// correctly.
uint32_t max_pref_size = 128;
DCHECK(pref_chunk < max_pref_size);
// pref_limit is set based on the fact that we never use an offset
// greater than 5 on a store pref and that a single pref can
// never be larger than max_pref_size.
uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
int32_t pref_hint_load = kPrefHintLoadStreamed;
int32_t pref_hint_store = kPrefHintPrepareForStore;
uint32_t loadstore_chunk = 4;
// The initial prefetches may fetch bytes that are before the buffer being
// copied. Start copies with an offset of 4 to avoid this situation when
// using kPrefHintPrepareForStore.
DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
pref_chunk * 4 >= max_pref_size);
// If the size is less than 8, go to lastb. Regardless of size,
// copy dst pointer to v0 for the return value.
__ slti(a6, a2, 2 * loadstore_chunk);
__ bne(a6, zero_reg, &lastb);
__ mov(v0, a0); // In delay slot.
// If src and dst have different alignments, go to unaligned, if they
// have the same alignment (but are not actually aligned) do a partial
// load/store to make them aligned. If they are both already aligned
// we can start copying at aligned.
__ xor_(t8, a1, a0);
__ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
__ bne(t8, zero_reg, &unaligned);
__ subu(a3, zero_reg, a0); // In delay slot.
__ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
__ beq(a3, zero_reg, &aligned); // Already aligned.
__ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
if (kArchEndian == kLittle) {
__ lwr(t8, MemOperand(a1));
__ addu(a1, a1, a3);
__ swr(t8, MemOperand(a0));
__ addu(a0, a0, a3);
} else {
__ lwl(t8, MemOperand(a1));
__ addu(a1, a1, a3);
__ swl(t8, MemOperand(a0));
__ addu(a0, a0, a3);
}
// Now dst/src are both aligned to (word) aligned addresses. Set a2 to
// count how many bytes we have to copy after all the 64 byte chunks are
// copied and a3 to the dst pointer after all the 64 byte chunks have been
// copied. We will loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&aligned);
__ andi(t8, a2, 0x3F);
__ beq(a2, t8, &chkw); // Less than 64?
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3); // Now a3 is the final dst after loop.
// When in the loop we prefetch with kPrefHintPrepareForStore hint,
// in this case the a0+x should be past the "a4-32" address. This means:
// for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
// x=64 the last "safe" a0 address is "a4-96". In the current version we
// will use "pref hint, 128(a0)", so "a4-160" is the limit.
if (pref_hint_store == kPrefHintPrepareForStore) {
__ addu(a4, a0, a2); // a4 is the "past the end" address.
__ Subu(t9, a4, pref_limit); // t9 is the "last safe pref" address.
}
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
if (pref_hint_store != kPrefHintPrepareForStore) {
__ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&loop16w);
__ Lw(a4, MemOperand(a1));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
__ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
}
__ Lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&skip_pref);
__ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
__ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
__ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
__ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
__ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
__ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
__ Sw(a4, MemOperand(a0));
__ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
__ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
__ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
__ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
__ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
__ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
__ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
__ Lw(a4, MemOperand(a1, 8, loadstore_chunk));
__ Lw(a5, MemOperand(a1, 9, loadstore_chunk));
__ Lw(a6, MemOperand(a1, 10, loadstore_chunk));
__ Lw(a7, MemOperand(a1, 11, loadstore_chunk));
__ Lw(t0, MemOperand(a1, 12, loadstore_chunk));
__ Lw(t1, MemOperand(a1, 13, loadstore_chunk));
__ Lw(t2, MemOperand(a1, 14, loadstore_chunk));
__ Lw(t3, MemOperand(a1, 15, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
__ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
__ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
__ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
__ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
__ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
__ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
__ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
__ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
__ mov(a2, t8);
// Here we have src and dest word-aligned but less than 64-bytes to go.
// Check for a 32 bytes chunk and copy if there is one. Otherwise jump
// down to chk1w to handle the tail end of the copy.
__ bind(&chkw);
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ andi(t8, a2, 0x1F);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
__ Lw(a4, MemOperand(a1));
__ Lw(a5, MemOperand(a1, 1, loadstore_chunk));
__ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
__ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
__ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
__ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
__ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
__ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
__ addiu(a1, a1, 8 * loadstore_chunk);
__ Sw(a4, MemOperand(a0));
__ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
__ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
__ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
__ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
__ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
__ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
__ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Here we have less than 32 bytes to copy. Set up for a loop to copy
// one word at a time. Set a2 to count how many bytes we have to copy
// after all the word chunks are copied and a3 to the dst pointer after
// all the word chunks have been copied. We will loop, incrementing a0
// and a1 until a0 equals a3.
__ bind(&chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &lastb);
__ subu(a3, t8, a2); // In delay slot.
__ addu(a3, a0, a3);
__ bind(&wordCopy_loop);
__ Lw(a7, MemOperand(a1));
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &wordCopy_loop);
__ Sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
__ bind(&lastb);
__ Branch(&leave, le, a2, Operand(zero_reg));
__ addu(a3, a0, a2);
__ bind(&lastbloop);
__ Lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &lastbloop);
__ Sb(v1, MemOperand(a0, -1)); // In delay slot.
__ bind(&leave);
__ jr(ra);
__ nop();
// Unaligned case. Only the dst gets aligned so we need to do partial
// loads of the source followed by normal stores to the dst (once we
// have aligned the destination).
__ bind(&unaligned);
__ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
__ beq(a3, zero_reg, &ua_chk16w);
__ subu(a2, a2, a3); // In delay slot.
if (kArchEndian == kLittle) {
__ lwr(v1, MemOperand(a1));
__ lwl(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ addu(a1, a1, a3);
__ swr(v1, MemOperand(a0));
__ addu(a0, a0, a3);
} else {
__ lwl(v1, MemOperand(a1));
__ lwr(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ addu(a1, a1, a3);
__ swl(v1, MemOperand(a0));
__ addu(a0, a0, a3);
}
// Now the dst (but not the source) is aligned. Set a2 to count how many
// bytes we have to copy after all the 64 byte chunks are copied and a3 to
// the dst pointer after all the 64 byte chunks have been copied. We will
// loop, incrementing a0 and a1 until a0 equals a3.
__ bind(&ua_chk16w);
__ andi(t8, a2, 0x3F);
__ beq(a2, t8, &ua_chkw);
__ subu(a3, a2, t8); // In delay slot.
__ addu(a3, a0, a3);
if (pref_hint_store == kPrefHintPrepareForStore) {
__ addu(a4, a0, a2);
__ Subu(t9, a4, pref_limit);
}
__ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
if (pref_hint_store != kPrefHintPrepareForStore) {
__ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&ua_loop16w);
if (kArchEndian == kLittle) {
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
__ lwr(a4, MemOperand(a1));
__ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
__ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0);
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
}
__ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&ua_skip_pref);
__ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
__ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
__ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
__ lwl(a4,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a5,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a6,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a7,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t0,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
__ lwl(a4, MemOperand(a1));
__ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
__ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0);
__ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
}
__ lwl(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&ua_skip_pref);
__ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
__ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
__ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
__ lwr(a4,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a5,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a6,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a7,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t0,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
__ Sw(a4, MemOperand(a0));
__ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
__ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
__ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
__ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
__ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
__ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
__ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
if (kArchEndian == kLittle) {
__ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
__ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
__ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
__ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
__ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
__ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
__ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
__ lwl(a4,
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a5,
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a6,
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a7,
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t0,
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
__ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
__ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
__ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
__ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
__ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
__ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
__ lwr(a4,
MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a5,
MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a6,
MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a7,
MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t0,
MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
__ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
__ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
__ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
__ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
__ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
__ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
__ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
__ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &ua_loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
__ mov(a2, t8);
// Here we have less than 64 bytes to go. Check for
// a 32 byte chunk and copy if there is one. Otherwise jump down to
// ua_chk1w to handle the tail end of the copy.
__ bind(&ua_chkw);
__ Pref(pref_hint_load, MemOperand(a1));
__ andi(t8, a2, 0x1F);
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
if (kArchEndian == kLittle) {
__ lwr(a4, MemOperand(a1));
__ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
__ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
__ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
__ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
__ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
__ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
__ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
__ lwl(a4,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a5,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a6,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(a7,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t0,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t1,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t2,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwl(t3,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(a4, MemOperand(a1));
__ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
__ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
__ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
__ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
__ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
__ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
__ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
__ lwr(a4,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a5,
MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a6,
MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(a7,
MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t0,
MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t1,
MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t2,
MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
__ lwr(t3,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a1, a1, 8 * loadstore_chunk);
__ Sw(a4, MemOperand(a0));
__ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
__ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
__ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
__ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
__ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
__ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
__ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Less than 32 bytes to copy. Set up for a loop to
// copy one word at a time.
__ bind(&ua_chk1w);
__ andi(a2, t8, loadstore_chunk - 1);
__ beq(a2, t8, &ua_smallCopy);
__ subu(a3, t8, a2); // In delay slot.
__ addu(a3, a0, a3);
__ bind(&ua_wordCopy_loop);
if (kArchEndian == kLittle) {
__ lwr(v1, MemOperand(a1));
__ lwl(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
} else {
__ lwl(v1, MemOperand(a1));
__ lwr(v1,
MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &ua_wordCopy_loop);
__ Sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
// Copy the remaining bytes (fewer than one word) one byte at a time.
__ bind(&ua_smallCopy);
__ beq(a2, zero_reg, &leave);
__ addu(a3, a0, a2); // In delay slot.
__ bind(&ua_smallCopy_loop);
__ Lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &ua_smallCopy_loop);
__ Sb(v1, MemOperand(a0, -1)); // In delay slot.
__ jr(ra);
__ nop();
}
CodeDesc desc;
masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64