Move TF parts of MacroAssembler into new TurboAssembler.
This CL introduces TurboAssembler, a sub-class of Assembler and super-class of MacroAssembler. TurboAssembler contains all the functionality that is used by Turbofan and previously was part of MacroAssembler. TurboAssembler has access to the isolate but, in contrast to MacroAssembler, does not expect to be running on the main thread.

Bug: v8:6048
Change-Id: If5693f56a45fe057e5011168e830d01a3f2f772d
Reviewed-on: https://chromium-review.googlesource.com/559674
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Commit-Queue: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46477}
This commit is contained in:
parent b82f34e17d
commit dfdcaf4316
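For orientation, the class layering introduced by this CL is sketched below. This is a minimal hand-written sketch, not the actual V8 headers: Isolate is only forward-declared, CodeObjectRequired is stubbed, the constructor signatures follow the simplified ARM variant visible in the diff, and the member comments are illustrative.

// Sketch of the hierarchy this CL establishes (assumptions noted above).
class Isolate;                                // opaque for this sketch
enum class CodeObjectRequired { kNo, kYes };  // stubbed for this sketch

class Assembler {
  // Raw instruction emission; owns the code buffer.
};

class TurboAssembler : public Assembler {
 public:
  TurboAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);
  // Everything TurboFan's code generator calls (Jump, Call, Move,
  // CallStubDelayed, CallRuntimeDelayed, ...). Has access to the isolate
  // but must not assume it runs on the main thread.
};

class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);
  // The remaining helpers, which may rely on running on the main thread.
};

Call sites that only need the TurboFan subset, such as ProfileEntryHookStub::MaybeCallEntryHookDelayed in the hunks below, can then take a TurboAssembler* instead of a MacroAssembler*.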
@@ -2192,15 +2192,15 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   // Fall through when we need to inform the incremental marker.
 }
 
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
                                                      Zone* zone) {
-  if (masm->isolate()->function_entry_hook() != NULL) {
-    masm->MaybeCheckConstPool();
-    PredictableCodeSizeScope predictable(masm);
-    predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
-    __ push(lr);
-    __ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
-    __ pop(lr);
+  if (tasm->isolate()->function_entry_hook() != NULL) {
+    tasm->MaybeCheckConstPool();
+    PredictableCodeSizeScope predictable(tasm);
+    predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
+    tasm->push(lr);
+    tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+    tasm->pop(lr);
   }
 }
 
@@ -26,40 +26,28 @@ namespace internal {
 
 MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
                                CodeObjectRequired create_code_object)
-    : Assembler(isolate, buffer, size),
-      has_frame_(false),
-      isolate_(isolate),
+    : TurboAssembler(isolate, buffer, size, create_code_object),
       jit_cookie_(0) {
   if (FLAG_mask_constants_with_cookie) {
    jit_cookie_ = isolate->random_number_generator()->NextInt();
   }
-  if (create_code_object == CodeObjectRequired::kYes) {
-    code_object_ =
-        Handle<HeapObject>::New(isolate_->heap()->undefined_value(), isolate_);
-  }
 }
 
-void MacroAssembler::Jump(Register target, Condition cond) {
-  bx(target, cond);
-}
+void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
 
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   mov(pc, Operand(target, rmode), LeaveCC, cond);
 }
 
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(!RelocInfo::IsCodeTarget(rmode));
   Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
 }
 
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   // 'code' is always generated ARM code, never THUMB code
@@ -67,13 +55,11 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
 }
 
-int MacroAssembler::CallSize(Register target, Condition cond) {
+int TurboAssembler::CallSize(Register target, Condition cond) {
   return kInstrSize;
 }
 
-void MacroAssembler::Call(Register target, Condition cond) {
+void TurboAssembler::Call(Register target, Condition cond) {
   // Block constant pool for the call instruction sequence.
   BlockConstPoolScope block_const_pool(this);
   Label start;
@ -82,20 +68,19 @@ void MacroAssembler::Call(Register target, Condition cond) {
|
||||
DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
|
||||
}
|
||||
|
||||
|
||||
int MacroAssembler::CallSize(
|
||||
Address target, RelocInfo::Mode rmode, Condition cond) {
|
||||
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
|
||||
Condition cond) {
|
||||
Instr mov_instr = cond | MOV | LeaveCC;
|
||||
Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
|
||||
return kInstrSize +
|
||||
mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize;
|
||||
}
|
||||
|
||||
int MacroAssembler::CallStubSize() {
|
||||
int TurboAssembler::CallStubSize() {
|
||||
return CallSize(Handle<Code>(), RelocInfo::CODE_TARGET, al);
|
||||
}
|
||||
|
||||
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
|
||||
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
|
||||
TargetAddressStorageMode mode,
|
||||
bool check_constant_pool) {
|
||||
// Check if we have to emit the constant pool before we block it.
|
||||
@ -139,15 +124,13 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int MacroAssembler::CallSize(Handle<Code> code,
|
||||
RelocInfo::Mode rmode,
|
||||
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
|
||||
Condition cond) {
|
||||
AllowHandleDereference using_location;
|
||||
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
|
||||
}
|
||||
|
||||
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
|
||||
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
|
||||
Condition cond, TargetAddressStorageMode mode,
|
||||
bool check_constant_pool) {
|
||||
DCHECK(RelocInfo::IsCodeTarget(rmode));
|
||||
@ -194,22 +177,19 @@ int MacroAssembler::CallDeoptimizerSize() {
|
||||
return 3 * kInstrSize;
|
||||
}
|
||||
|
||||
void MacroAssembler::Ret(Condition cond) {
|
||||
bx(lr, cond);
|
||||
}
|
||||
void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
|
||||
|
||||
|
||||
void MacroAssembler::Drop(int count, Condition cond) {
|
||||
void TurboAssembler::Drop(int count, Condition cond) {
|
||||
if (count > 0) {
|
||||
add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Drop(Register count, Condition cond) {
|
||||
void TurboAssembler::Drop(Register count, Condition cond) {
|
||||
add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
|
||||
}
|
||||
|
||||
void MacroAssembler::Ret(int drop, Condition cond) {
|
||||
void TurboAssembler::Ret(int drop, Condition cond) {
|
||||
Drop(drop, cond);
|
||||
Ret(cond);
|
||||
}
|
||||
@ -230,19 +210,16 @@ void MacroAssembler::Swap(Register reg1,
|
||||
}
|
||||
}
|
||||
|
||||
void TurboAssembler::Call(Label* target) { bl(target); }
|
||||
|
||||
void MacroAssembler::Call(Label* target) {
|
||||
bl(target);
|
||||
}
|
||||
|
||||
void MacroAssembler::Push(Handle<HeapObject> handle) {
|
||||
void TurboAssembler::Push(Handle<HeapObject> handle) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch = temps.Acquire();
|
||||
mov(scratch, Operand(handle));
|
||||
push(scratch);
|
||||
}
|
||||
|
||||
void MacroAssembler::Push(Smi* smi) {
|
||||
void TurboAssembler::Push(Smi* smi) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch = temps.Acquire();
|
||||
mov(scratch, Operand(smi));
|
||||
@ -257,40 +234,39 @@ void MacroAssembler::PushObject(Handle<Object> handle) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
|
||||
void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
|
||||
|
||||
void MacroAssembler::Move(Register dst, Handle<HeapObject> value) {
|
||||
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
|
||||
mov(dst, Operand(value));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Move(Register dst, Register src, Condition cond) {
|
||||
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
|
||||
if (!dst.is(src)) {
|
||||
mov(dst, src, LeaveCC, cond);
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
|
||||
void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
|
||||
Condition cond) {
|
||||
if (!dst.is(src)) {
|
||||
vmov(dst, src, cond);
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
|
||||
void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
|
||||
Condition cond) {
|
||||
if (!dst.is(src)) {
|
||||
vmov(dst, src, cond);
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
|
||||
void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
|
||||
if (!dst.is(src)) {
|
||||
vmov(dst, src);
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
|
||||
void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
|
||||
if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
|
||||
|
||||
DCHECK(VfpRegisterIsAvailable(srcdst0));
|
||||
@ -307,7 +283,7 @@ void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
|
||||
void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
|
||||
if (!srcdst0.is(srcdst1)) {
|
||||
vswp(srcdst0, srcdst1);
|
||||
}
|
||||
@ -406,8 +382,7 @@ void MacroAssembler::Bfi(Register dst,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
|
||||
void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
|
||||
Condition cond) {
|
||||
DCHECK(lsb < 32);
|
||||
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
|
||||
@ -457,9 +432,7 @@ void MacroAssembler::Store(Register src,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LoadRoot(Register destination,
|
||||
Heap::RootListIndex index,
|
||||
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
|
||||
Condition cond) {
|
||||
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
|
||||
}
|
||||
@ -790,7 +763,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::PushCommonFrame(Register marker_reg) {
|
||||
void TurboAssembler::PushCommonFrame(Register marker_reg) {
|
||||
if (marker_reg.is_valid()) {
|
||||
if (marker_reg.code() > fp.code()) {
|
||||
stm(db_w, sp, fp.bit() | lr.bit());
|
||||
@ -819,7 +792,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::PushStandardFrame(Register function_reg) {
|
||||
void TurboAssembler::PushStandardFrame(Register function_reg) {
|
||||
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
|
||||
stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
|
||||
fp.bit() | lr.bit());
|
||||
@ -949,7 +922,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
|
||||
void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
|
||||
const DwVfpRegister src,
|
||||
const Condition cond) {
|
||||
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
|
||||
@ -958,38 +931,35 @@ void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
|
||||
vsub(dst, src, kDoubleRegZero, cond);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
|
||||
const SwVfpRegister src2,
|
||||
const Condition cond) {
|
||||
// Compare and move FPSCR flags to the normal condition flags.
|
||||
VFPCompareAndLoadFlags(src1, src2, pc, cond);
|
||||
}
|
||||
|
||||
void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
|
||||
const float src2,
|
||||
const Condition cond) {
|
||||
// Compare and move FPSCR flags to the normal condition flags.
|
||||
VFPCompareAndLoadFlags(src1, src2, pc, cond);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
|
||||
const DwVfpRegister src2,
|
||||
const Condition cond) {
|
||||
// Compare and move FPSCR flags to the normal condition flags.
|
||||
VFPCompareAndLoadFlags(src1, src2, pc, cond);
|
||||
}
|
||||
|
||||
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
|
||||
const double src2,
|
||||
const Condition cond) {
|
||||
// Compare and move FPSCR flags to the normal condition flags.
|
||||
VFPCompareAndLoadFlags(src1, src2, pc, cond);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
|
||||
const SwVfpRegister src2,
|
||||
const Register fpscr_flags,
|
||||
const Condition cond) {
|
||||
@ -998,7 +968,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
|
||||
vmrs(fpscr_flags, cond);
|
||||
}
|
||||
|
||||
void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
|
||||
const float src2,
|
||||
const Register fpscr_flags,
|
||||
const Condition cond) {
|
||||
@ -1007,8 +977,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
|
||||
vmrs(fpscr_flags, cond);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
|
||||
const DwVfpRegister src2,
|
||||
const Register fpscr_flags,
|
||||
const Condition cond) {
|
||||
@ -1017,7 +986,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
|
||||
vmrs(fpscr_flags, cond);
|
||||
}
|
||||
|
||||
void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
|
||||
void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
|
||||
const double src2,
|
||||
const Register fpscr_flags,
|
||||
const Condition cond) {
|
||||
@ -1039,8 +1008,7 @@ void MacroAssembler::Vmov(const DwVfpRegister dst, Double imm,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
|
||||
void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
|
||||
if (src.code() < 16) {
|
||||
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
|
||||
vmov(dst, loc.high());
|
||||
@ -1049,8 +1017,7 @@ void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
|
||||
void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
|
||||
if (dst.code() < 16) {
|
||||
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
|
||||
vmov(loc.high(), src);
|
||||
@ -1059,8 +1026,7 @@ void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
|
||||
void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
|
||||
if (src.code() < 16) {
|
||||
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
|
||||
vmov(dst, loc.low());
|
||||
@ -1069,8 +1035,7 @@ void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
|
||||
void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
|
||||
if (dst.code() < 16) {
|
||||
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
|
||||
vmov(loc.low(), src);
|
||||
@ -1079,7 +1044,7 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::VmovExtended(Register dst, int src_code) {
|
||||
void TurboAssembler::VmovExtended(Register dst, int src_code) {
|
||||
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
|
||||
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
|
||||
if (src_code & 0x1) {
|
||||
@ -1089,7 +1054,7 @@ void MacroAssembler::VmovExtended(Register dst, int src_code) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::VmovExtended(int dst_code, Register src) {
|
||||
void TurboAssembler::VmovExtended(int dst_code, Register src) {
|
||||
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
|
||||
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
|
||||
if (dst_code & 0x1) {
|
||||
@ -1099,7 +1064,7 @@ void MacroAssembler::VmovExtended(int dst_code, Register src) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::VmovExtended(int dst_code, int src_code) {
|
||||
void TurboAssembler::VmovExtended(int dst_code, int src_code) {
|
||||
if (src_code == dst_code) return;
|
||||
|
||||
if (src_code < SwVfpRegister::kMaxNumRegisters &&
|
||||
@ -1163,7 +1128,7 @@ void MacroAssembler::VmovExtended(int dst_code, int src_code) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) {
|
||||
void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
|
||||
if (dst_code < SwVfpRegister::kMaxNumRegisters) {
|
||||
vldr(SwVfpRegister::from_code(dst_code), src);
|
||||
} else {
|
||||
@ -1175,7 +1140,7 @@ void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) {
|
||||
void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
|
||||
if (src_code < SwVfpRegister::kMaxNumRegisters) {
|
||||
vstr(SwVfpRegister::from_code(src_code), dst);
|
||||
} else {
|
||||
@ -1186,7 +1151,7 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
|
||||
void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
|
||||
NeonDataType dt, int lane) {
|
||||
int size = NeonSz(dt); // 0, 1, 2
|
||||
int byte = lane << size;
|
||||
@ -1198,7 +1163,7 @@ void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
|
||||
vmov(dt, dst, double_source, double_lane);
|
||||
}
|
||||
|
||||
void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
|
||||
void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
|
||||
NeonDataType dt, int lane) {
|
||||
int size = NeonSz(dt); // 0, 1, 2
|
||||
int byte = lane << size;
|
||||
@ -1207,13 +1172,13 @@ void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
|
||||
vmov(dt, dst, src, double_lane);
|
||||
}
|
||||
|
||||
void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
|
||||
void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
|
||||
int lane) {
|
||||
int s_code = src.code() * 4 + lane;
|
||||
VmovExtended(dst.code(), s_code);
|
||||
}
|
||||
|
||||
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
|
||||
void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
|
||||
Register src_lane, NeonDataType dt, int lane) {
|
||||
Move(dst, src);
|
||||
int size = NeonSz(dt); // 0, 1, 2
|
||||
@ -1226,14 +1191,14 @@ void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
|
||||
vmov(dt, double_dst, double_lane, src_lane);
|
||||
}
|
||||
|
||||
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
|
||||
void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
|
||||
SwVfpRegister src_lane, int lane) {
|
||||
Move(dst, src);
|
||||
int s_code = dst.code() * 4 + lane;
|
||||
VmovExtended(s_code, src_lane.code());
|
||||
}
|
||||
|
||||
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
|
||||
void TurboAssembler::LslPair(Register dst_low, Register dst_high,
|
||||
Register src_low, Register src_high,
|
||||
Register scratch, Register shift) {
|
||||
DCHECK(!AreAliased(dst_high, src_low));
|
||||
@ -1256,7 +1221,7 @@ void MacroAssembler::LslPair(Register dst_low, Register dst_high,
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
|
||||
void TurboAssembler::LslPair(Register dst_low, Register dst_high,
|
||||
Register src_low, Register src_high,
|
||||
uint32_t shift) {
|
||||
DCHECK(!AreAliased(dst_high, src_low));
|
||||
@ -1279,7 +1244,7 @@ void MacroAssembler::LslPair(Register dst_low, Register dst_high,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
|
||||
void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
|
||||
Register src_low, Register src_high,
|
||||
Register scratch, Register shift) {
|
||||
DCHECK(!AreAliased(dst_low, src_high));
|
||||
@ -1303,7 +1268,7 @@ void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
|
||||
void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
|
||||
Register src_low, Register src_high,
|
||||
uint32_t shift) {
|
||||
DCHECK(!AreAliased(dst_low, src_high));
|
||||
@ -1326,7 +1291,7 @@ void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
|
||||
void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
|
||||
Register src_low, Register src_high,
|
||||
Register scratch, Register shift) {
|
||||
DCHECK(!AreAliased(dst_low, src_high));
|
||||
@ -1349,7 +1314,7 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
|
||||
void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
|
||||
Register src_low, Register src_high,
|
||||
uint32_t shift) {
|
||||
DCHECK(!AreAliased(dst_low, src_high));
|
||||
@ -1372,14 +1337,14 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::StubPrologue(StackFrame::Type type) {
|
||||
void TurboAssembler::StubPrologue(StackFrame::Type type) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register scratch = temps.Acquire();
|
||||
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
|
||||
PushCommonFrame(scratch);
|
||||
}
|
||||
|
||||
void MacroAssembler::Prologue(bool code_pre_aging) {
|
||||
void TurboAssembler::Prologue(bool code_pre_aging) {
|
||||
{ PredictableCodeSizeScope predictible_code_size_scope(
|
||||
this, kNoCodeAgeSequenceLength);
|
||||
// The following three instructions must remain together and unmodified
|
||||
@ -1403,8 +1368,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
|
||||
ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EnterFrame(StackFrame::Type type,
|
||||
void TurboAssembler::EnterFrame(StackFrame::Type type,
|
||||
bool load_constant_pool_pointer_reg) {
|
||||
// r0-r3: preserved
|
||||
UseScratchRegisterScope temps(this);
|
||||
@ -1417,8 +1381,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int MacroAssembler::LeaveFrame(StackFrame::Type type) {
|
||||
int TurboAssembler::LeaveFrame(StackFrame::Type type) {
|
||||
// r0: preserved
|
||||
// r1: preserved
|
||||
// r2: preserved
|
||||
@ -1498,7 +1461,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
|
||||
str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
|
||||
}
|
||||
|
||||
int MacroAssembler::ActivationFrameAlignment() {
|
||||
int TurboAssembler::ActivationFrameAlignment() {
|
||||
#if V8_HOST_ARCH_ARM
|
||||
// Running on the real platform. Use the alignment as mandated by the local
|
||||
// environment.
|
||||
@ -1561,8 +1524,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
|
||||
void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
|
||||
if (use_eabi_hardfloat()) {
|
||||
Move(dst, d0);
|
||||
} else {
|
||||
@ -1572,11 +1534,11 @@ void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
|
||||
|
||||
|
||||
// On ARM this is just a synonym to make the purpose clear.
|
||||
void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
|
||||
void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
|
||||
MovFromFloatResult(dst);
|
||||
}
|
||||
|
||||
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
|
||||
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
|
||||
Register caller_args_count_reg,
|
||||
Register scratch0, Register scratch1) {
|
||||
#if DEBUG
|
||||
@ -2241,7 +2203,7 @@ void MacroAssembler::CallStub(CodeStub* stub,
|
||||
false);
|
||||
}
|
||||
|
||||
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
|
||||
void TurboAssembler::CallStubDelayed(CodeStub* stub) {
|
||||
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
|
||||
|
||||
// Block constant pool for the call instruction sequence.
|
||||
@ -2276,9 +2238,8 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
|
||||
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
|
||||
}
|
||||
|
||||
|
||||
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
|
||||
return has_frame_ || !stub->SometimesSetsUpAFrame();
|
||||
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
|
||||
return has_frame() || !stub->SometimesSetsUpAFrame();
|
||||
}
|
||||
|
||||
void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
|
||||
@ -2362,7 +2323,7 @@ void MacroAssembler::TryInt32Floor(Register result,
|
||||
bind(&exception);
|
||||
}
|
||||
|
||||
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
|
||||
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
|
||||
DwVfpRegister double_input,
|
||||
Label* done) {
|
||||
LowDwVfpRegister double_scratch = kScratchDoubleReg;
|
||||
@ -2378,9 +2339,8 @@ void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
|
||||
b(lt, done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::TruncateDoubleToI(Register result,
|
||||
DwVfpRegister double_input) {
|
||||
void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
|
||||
DwVfpRegister double_input) {
|
||||
Label done;
|
||||
|
||||
TryInlineTruncateDoubleToI(result, double_input, &done);
|
||||
@ -2390,8 +2350,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
|
||||
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
|
||||
vstr(double_input, MemOperand(sp, 0));
|
||||
|
||||
DoubleToIStub stub(isolate(), sp, result, 0, true, true);
|
||||
CallStub(&stub);
|
||||
CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
|
||||
|
||||
add(sp, sp, Operand(kDoubleSize));
|
||||
pop(lr);
|
||||
@ -2399,7 +2358,6 @@ void MacroAssembler::TruncateDoubleToI(Register result,
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::TruncateHeapNumberToI(Register result,
|
||||
Register object) {
|
||||
Label done;
|
||||
@ -2460,7 +2418,7 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
|
||||
and_(dst, src, Operand((1 << num_least_bits) - 1));
|
||||
}
|
||||
|
||||
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
|
||||
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
|
||||
SaveFPRegsMode save_doubles) {
|
||||
const Runtime::Function* f = Runtime::FunctionForId(fid);
|
||||
// TODO(1236192): Most runtime routines don't need the number of
|
||||
@ -2561,15 +2519,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
|
||||
void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
|
||||
if (emit_debug_code())
|
||||
Check(cond, reason);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void MacroAssembler::Check(Condition cond, BailoutReason reason) {
|
||||
void TurboAssembler::Check(Condition cond, BailoutReason reason) {
|
||||
Label L;
|
||||
b(cond, &L);
|
||||
Abort(reason);
|
||||
@ -2577,8 +2532,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
|
||||
bind(&L);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Abort(BailoutReason reason) {
|
||||
void TurboAssembler::Abort(BailoutReason reason) {
|
||||
Label abort_start;
|
||||
bind(&abort_start);
|
||||
#ifdef DEBUG
|
||||
@ -2597,7 +2551,7 @@ void MacroAssembler::Abort(BailoutReason reason) {
|
||||
Move(r1, Smi::FromInt(static_cast<int>(reason)));
|
||||
|
||||
// Disable stub call restrictions to always allow calls to abort.
|
||||
if (!has_frame_) {
|
||||
if (!has_frame()) {
|
||||
// We don't actually want to generate a pile of code for this, so just
|
||||
// claim there is a stack frame, without generating one.
|
||||
FrameScope scope(this, StackFrame::NONE);
|
||||
@ -2656,7 +2610,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::InitializeRootRegister() {
|
||||
void TurboAssembler::InitializeRootRegister() {
|
||||
ExternalReference roots_array_start =
|
||||
ExternalReference::roots_array_start(isolate());
|
||||
mov(kRootRegister, Operand(roots_array_start));
|
||||
@ -2717,7 +2671,7 @@ void MacroAssembler::NonNegativeSmiTst(Register value) {
|
||||
tst(value, Operand(kSmiTagMask | kSmiSignMask));
|
||||
}
|
||||
|
||||
void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
|
||||
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
|
||||
tst(value, Operand(kSmiTagMask));
|
||||
b(eq, smi_label);
|
||||
}
|
||||
@ -2988,7 +2942,7 @@ void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void MacroAssembler::FloatMaxHelper(T result, T left, T right,
|
||||
void TurboAssembler::FloatMaxHelper(T result, T left, T right,
|
||||
Label* out_of_line) {
|
||||
// This trivial case is caught sooner, so that the out-of-line code can be
|
||||
// completely avoided.
|
||||
@ -3019,7 +2973,7 @@ void MacroAssembler::FloatMaxHelper(T result, T left, T right,
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
|
||||
void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
|
||||
DCHECK(!left.is(right));
|
||||
|
||||
// ARMv8: At least one of left and right is a NaN.
|
||||
@ -3032,7 +2986,7 @@ void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void MacroAssembler::FloatMinHelper(T result, T left, T right,
|
||||
void TurboAssembler::FloatMinHelper(T result, T left, T right,
|
||||
Label* out_of_line) {
|
||||
// This trivial case is caught sooner, so that the out-of-line code can be
|
||||
// completely avoided.
|
||||
@ -3078,7 +3032,7 @@ void MacroAssembler::FloatMinHelper(T result, T left, T right,
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
|
||||
void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
|
||||
DCHECK(!left.is(right));
|
||||
|
||||
// At least one of left and right is a NaN. Use vadd to propagate the NaN
|
||||
@ -3086,42 +3040,42 @@ void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
|
||||
vadd(result, left, right);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
|
||||
void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
|
||||
SwVfpRegister right, Label* out_of_line) {
|
||||
FloatMaxHelper(result, left, right, out_of_line);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
|
||||
void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
|
||||
SwVfpRegister right, Label* out_of_line) {
|
||||
FloatMinHelper(result, left, right, out_of_line);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
|
||||
void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
|
||||
DwVfpRegister right, Label* out_of_line) {
|
||||
FloatMaxHelper(result, left, right, out_of_line);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
|
||||
void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
|
||||
DwVfpRegister right, Label* out_of_line) {
|
||||
FloatMinHelper(result, left, right, out_of_line);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
|
||||
void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
|
||||
SwVfpRegister right) {
|
||||
FloatMaxOutOfLineHelper(result, left, right);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
|
||||
void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
|
||||
SwVfpRegister right) {
|
||||
FloatMinOutOfLineHelper(result, left, right);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
|
||||
void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
|
||||
DwVfpRegister right) {
|
||||
FloatMaxOutOfLineHelper(result, left, right);
|
||||
}
|
||||
|
||||
void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
|
||||
void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
|
||||
DwVfpRegister right) {
|
||||
FloatMinOutOfLineHelper(result, left, right);
|
||||
}
|
||||
@ -3143,8 +3097,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
|
||||
|
||||
static const int kRegisterPassedArguments = 4;
|
||||
|
||||
|
||||
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
|
||||
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
|
||||
int num_double_arguments) {
|
||||
int stack_passed_words = 0;
|
||||
if (use_eabi_hardfloat()) {
|
||||
@ -3213,7 +3166,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
|
||||
SmiUntag(index, index);
|
||||
}
|
||||
|
||||
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
|
||||
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
|
||||
int num_double_arguments) {
|
||||
int frame_alignment = ActivationFrameAlignment();
|
||||
int stack_passed_arguments = CalculateStackPassedWords(
|
||||
@ -3233,7 +3186,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
|
||||
void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
|
||||
DCHECK(src.is(d0));
|
||||
if (!use_eabi_hardfloat()) {
|
||||
vmov(r0, r1, src);
|
||||
@ -3242,12 +3195,11 @@ void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
|
||||
|
||||
|
||||
// On ARM this is just a synonym to make the purpose clear.
|
||||
void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
|
||||
void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
|
||||
MovToFloatParameter(src);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
|
||||
void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
|
||||
DwVfpRegister src2) {
|
||||
DCHECK(src1.is(d0));
|
||||
DCHECK(src2.is(d1));
|
||||
@ -3257,8 +3209,7 @@ void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(ExternalReference function,
|
||||
void TurboAssembler::CallCFunction(ExternalReference function,
|
||||
int num_reg_arguments,
|
||||
int num_double_arguments) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
@ -3267,27 +3218,21 @@ void MacroAssembler::CallCFunction(ExternalReference function,
|
||||
CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(Register function,
|
||||
int num_reg_arguments,
|
||||
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
|
||||
int num_double_arguments) {
|
||||
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(ExternalReference function,
|
||||
void TurboAssembler::CallCFunction(ExternalReference function,
|
||||
int num_arguments) {
|
||||
CallCFunction(function, num_arguments, 0);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(Register function,
|
||||
int num_arguments) {
|
||||
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
|
||||
CallCFunction(function, num_arguments, 0);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunctionHelper(Register function,
|
||||
void TurboAssembler::CallCFunctionHelper(Register function,
|
||||
int num_reg_arguments,
|
||||
int num_double_arguments) {
|
||||
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
|
||||
@ -3325,13 +3270,8 @@ void MacroAssembler::CallCFunctionHelper(Register function,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CheckPageFlag(
|
||||
Register object,
|
||||
Register scratch,
|
||||
int mask,
|
||||
Condition cc,
|
||||
Label* condition_met) {
|
||||
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
|
||||
Condition cc, Label* condition_met) {
|
||||
DCHECK(cc == eq || cc == ne);
|
||||
Bfc(scratch, object, 0, kPageSizeBits);
|
||||
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
|
||||
|
File diff suppressed because it is too large
@@ -2175,18 +2175,18 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
 static const unsigned int kProfileEntryHookCallSize =
     Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
 
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
                                                      Zone* zone) {
-  if (masm->isolate()->function_entry_hook() != NULL) {
-    Assembler::BlockConstPoolScope no_const_pools(masm);
-    DontEmitDebugCodeScope no_debug_code(masm);
+  if (tasm->isolate()->function_entry_hook() != NULL) {
+    Assembler::BlockConstPoolScope no_const_pools(tasm);
+    DontEmitDebugCodeScope no_debug_code(tasm);
     Label entry_hook_call_start;
-    __ Bind(&entry_hook_call_start);
-    __ Push(lr);
-    __ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
-    DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+    tasm->Bind(&entry_hook_call_start);
+    tasm->Push(lr);
+    tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+    DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
            kProfileEntryHookCallSize);
-    __ Pop(lr);
+    tasm->Pop(lr);
   }
 }
 
File diff suppressed because it is too large
@@ -27,37 +27,16 @@ namespace internal {
 MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
                                unsigned buffer_size,
                                CodeObjectRequired create_code_object)
-    : Assembler(isolate, buffer, buffer_size),
-#if DEBUG
-      allow_macro_instructions_(true),
-#endif
-      has_frame_(false),
-      isolate_(isolate),
-      use_real_aborts_(true),
-      sp_(jssp),
-      tmp_list_(DefaultTmpList()),
-      fptmp_list_(DefaultFPTmpList()) {
-  if (create_code_object == CodeObjectRequired::kYes) {
-    code_object_ =
-        Handle<HeapObject>::New(isolate_->heap()->undefined_value(), isolate_);
-  }
-}
+    : TurboAssembler(isolate, buffer, buffer_size, create_code_object) {}
 
-CPURegList MacroAssembler::DefaultTmpList() {
-  return CPURegList(ip0, ip1);
-}
+CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
 
-CPURegList MacroAssembler::DefaultFPTmpList() {
+CPURegList TurboAssembler::DefaultFPTmpList() {
   return CPURegList(fp_scratch1, fp_scratch2);
 }
 
-void MacroAssembler::LogicalMacro(const Register& rd,
-                                  const Register& rn,
-                                  const Operand& operand,
-                                  LogicalOp op) {
+void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
+                                  const Operand& operand, LogicalOp op) {
   UseScratchRegisterScope temps(this);
 
   if (operand.NeedsRelocation(this)) {
@ -164,9 +143,8 @@ void MacroAssembler::LogicalMacro(const Register& rd,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
|
||||
DCHECK(!rd.IsZero());
|
||||
|
||||
@ -243,11 +221,9 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Mov(const Register& rd,
|
||||
const Operand& operand,
|
||||
void TurboAssembler::Mov(const Register& rd, const Operand& operand,
|
||||
DiscardMoveMode discard_mode) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
DCHECK(allow_macro_instructions());
|
||||
DCHECK(!rd.IsZero());
|
||||
|
||||
// Provide a swap register for instructions that need to write into the
|
||||
@ -299,7 +275,7 @@ void MacroAssembler::Mov(const Register& rd,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
DCHECK(is_uint16(imm));
|
||||
int byte1 = (imm & 0xff);
|
||||
int byte2 = ((imm >> 8) & 0xff);
|
||||
@ -321,7 +297,7 @@ void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
DCHECK(is_uint32(imm));
|
||||
|
||||
uint8_t bytes[sizeof(imm)];
|
||||
@ -398,7 +374,7 @@ void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
// All bytes are either 0x00 or 0xff.
|
||||
{
|
||||
bool all0orff = true;
|
||||
@ -434,9 +410,9 @@ void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
|
||||
void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
|
||||
int shift_amount) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
DCHECK(allow_macro_instructions());
|
||||
if (shift_amount != 0 || shift != LSL) {
|
||||
movi(vd, imm, shift, shift_amount);
|
||||
} else if (vd.Is8B() || vd.Is16B()) {
|
||||
@ -455,7 +431,7 @@ void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
|
||||
void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
|
||||
// TODO(all): Move 128-bit values in a more efficient way.
|
||||
DCHECK(vd.Is128Bits());
|
||||
UseScratchRegisterScope temps(this);
|
||||
@ -465,8 +441,8 @@ void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
|
||||
Ins(vd.V2D(), 1, temp);
|
||||
}
|
||||
|
||||
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
Ldr(rd, operand.immediate());
|
||||
@ -488,8 +464,7 @@ void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
|
||||
unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
|
||||
DCHECK((reg_size % 8) == 0);
|
||||
int count = 0;
|
||||
for (unsigned i = 0; i < (reg_size / 16); i++) {
|
||||
@ -504,7 +479,7 @@ unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
|
||||
|
||||
// The movz instruction can generate immediates containing an arbitrary 16-bit
|
||||
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
|
||||
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
|
||||
bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
|
||||
DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
|
||||
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
|
||||
}
|
||||
@ -512,15 +487,13 @@ bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
|
||||
|
||||
// The movn instruction can generate immediates containing an arbitrary 16-bit
|
||||
// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
|
||||
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
|
||||
bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
|
||||
return IsImmMovz(~imm, reg_size);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::ConditionalCompareMacro(const Register& rn,
|
||||
void TurboAssembler::ConditionalCompareMacro(const Register& rn,
|
||||
const Operand& operand,
|
||||
StatusFlags nzcv,
|
||||
Condition cond,
|
||||
StatusFlags nzcv, Condition cond,
|
||||
ConditionalCompareOp op) {
|
||||
DCHECK((cond != al) && (cond != nv));
|
||||
if (operand.NeedsRelocation(this)) {
|
||||
@ -551,7 +524,7 @@ void MacroAssembler::Csel(const Register& rd,
|
||||
const Register& rn,
|
||||
const Operand& operand,
|
||||
Condition cond) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
DCHECK(allow_macro_instructions());
|
||||
DCHECK(!rd.IsZero());
|
||||
DCHECK((cond != al) && (cond != nv));
|
||||
if (operand.IsImmediate()) {
|
||||
@ -583,8 +556,7 @@ void MacroAssembler::Csel(const Register& rd,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
|
||||
bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst,
|
||||
int64_t imm) {
|
||||
unsigned n, imm_s, imm_r;
|
||||
int reg_size = dst.SizeInBits();
|
||||
@ -606,7 +578,7 @@ bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
|
||||
return false;
|
||||
}
|
||||
|
||||
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
|
||||
Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
|
||||
int64_t imm,
|
||||
PreShiftImmMode mode) {
|
||||
int reg_size = dst.SizeInBits();
|
||||
@ -649,11 +621,8 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
|
||||
return Operand(dst);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::AddSubMacro(const Register& rd,
|
||||
const Register& rn,
|
||||
const Operand& operand,
|
||||
FlagsUpdate S,
|
||||
void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
|
||||
const Operand& operand, FlagsUpdate S,
|
||||
AddSubOp op) {
|
||||
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
|
||||
!operand.NeedsRelocation(this) && (S == LeaveFlags)) {
|
||||
@ -698,11 +667,9 @@ void MacroAssembler::AddSubMacro(const Register& rd,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
|
||||
void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
|
||||
const Register& rn,
|
||||
const Operand& operand,
|
||||
FlagsUpdate S,
|
||||
const Operand& operand, FlagsUpdate S,
|
||||
AddSubWithCarryOp op) {
|
||||
DCHECK(rd.SizeInBits() == rn.SizeInBits());
|
||||
UseScratchRegisterScope temps(this);
|
||||
@ -749,10 +716,8 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
|
||||
const MemOperand& addr,
|
||||
LoadStoreOp op) {
|
||||
void TurboAssembler::LoadStoreMacro(const CPURegister& rt,
|
||||
const MemOperand& addr, LoadStoreOp op) {
|
||||
int64_t offset = addr.offset();
|
||||
unsigned size = CalcLSDataSize(op);
|
||||
|
||||
@ -781,7 +746,7 @@ void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
|
||||
void TurboAssembler::LoadStorePairMacro(const CPURegister& rt,
|
||||
const CPURegister& rt2,
|
||||
const MemOperand& addr,
|
||||
LoadStorePairOp op) {
|
||||
@ -859,9 +824,8 @@ void MacroAssembler::Store(const Register& rt,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
|
||||
Label *label, ImmBranchType b_type) {
|
||||
bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
|
||||
Label* label, ImmBranchType b_type) {
|
||||
bool need_longer_range = false;
|
||||
// There are two situations in which we care about the offset being out of
|
||||
// range:
|
||||
@ -885,9 +849,8 @@ bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
|
||||
return need_longer_range;
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
DCHECK(!rd.IsZero());
|
||||
|
||||
if (hint == kAdrNear) {
|
||||
@ -920,8 +883,7 @@ void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
|
||||
void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
|
||||
DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
|
||||
(bit == -1 || type >= kBranchTypeFirstUsingBit));
|
||||
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
|
||||
@ -940,9 +902,8 @@ void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::B(Label* label, Condition cond) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::B(Label* label, Condition cond) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
DCHECK((cond != al) && (cond != nv));
|
||||
|
||||
Label done;
|
||||
@ -958,9 +919,8 @@ void MacroAssembler::B(Label* label, Condition cond) {
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
|
||||
Label done;
|
||||
bool need_extra_instructions =
|
||||
@ -975,9 +935,8 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
|
||||
Label done;
|
||||
bool need_extra_instructions =
|
||||
@ -992,9 +951,8 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Cbnz(const Register& rt, Label* label) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Cbnz(const Register& rt, Label* label) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
|
||||
Label done;
|
||||
bool need_extra_instructions =
|
||||
@ -1009,9 +967,8 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) {
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Cbz(const Register& rt, Label* label) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Cbz(const Register& rt, Label* label) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
|
||||
Label done;
|
||||
bool need_extra_instructions =
|
||||
@ -1029,11 +986,9 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) {
|
||||
|
||||
// Pseudo-instructions.
|
||||
|
||||
|
||||
void MacroAssembler::Abs(const Register& rd, const Register& rm,
|
||||
Label* is_not_representable,
|
||||
Label* is_representable) {
|
||||
DCHECK(allow_macro_instructions_);
|
||||
void TurboAssembler::Abs(const Register& rd, const Register& rm,
|
||||
Label* is_not_representable, Label* is_representable) {
|
||||
DCHECK(allow_macro_instructions());
|
||||
DCHECK(AreSameSizeAndType(rd, rm));
|
||||
|
||||
Cmp(rm, 1);
|
||||
@ -1055,8 +1010,7 @@ void MacroAssembler::Abs(const Register& rd, const Register& rm,
|
||||
|
||||
// Abstracted stack operations.
|
||||
|
||||
|
||||
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
|
||||
void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
|
||||
const CPURegister& src2, const CPURegister& src3) {
|
||||
DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
|
||||
|
||||
@ -1067,8 +1021,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
|
||||
PushHelper(count, size, src0, src1, src2, src3);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
|
||||
void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
|
||||
const CPURegister& src2, const CPURegister& src3,
|
||||
const CPURegister& src4, const CPURegister& src5,
|
||||
const CPURegister& src6, const CPURegister& src7) {
|
||||
@ -1082,8 +1035,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
|
||||
PushHelper(count - 4, size, src4, src5, src6, src7);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
|
||||
void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
|
||||
const CPURegister& dst2, const CPURegister& dst3) {
|
||||
// It is not valid to pop into the same register more than once in one
|
||||
// instruction, not even into the zero register.
|
||||
@ -1098,8 +1050,7 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
|
||||
PopPostamble(count, size);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
|
||||
void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
|
||||
const CPURegister& dst2, const CPURegister& dst3,
|
||||
const CPURegister& dst4, const CPURegister& dst5,
|
||||
const CPURegister& dst6, const CPURegister& dst7) {
|
||||
@ -1117,7 +1068,7 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
|
||||
PopPostamble(count, size);
|
||||
}
|
||||
|
||||
void MacroAssembler::Push(const Register& src0, const VRegister& src1) {
|
||||
void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
|
||||
int size = src0.SizeInBytes() + src1.SizeInBytes();
|
||||
|
||||
PushPreamble(size);
|
||||
@ -1179,8 +1130,7 @@ void MacroAssembler::PushPopQueue::PopQueued() {
|
||||
queued_.clear();
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PushCPURegList(CPURegList registers) {
|
||||
void TurboAssembler::PushCPURegList(CPURegList registers) {
|
||||
int size = registers.RegisterSizeInBytes();
|
||||
|
||||
PushPreamble(registers.Count(), size);
|
||||
@ -1198,8 +1148,7 @@ void MacroAssembler::PushCPURegList(CPURegList registers) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PopCPURegList(CPURegList registers) {
|
||||
void TurboAssembler::PopCPURegList(CPURegList registers) {
|
||||
int size = registers.RegisterSizeInBytes();
|
||||
|
||||
// Pop up to four registers at a time because if the current stack pointer is
|
||||
@ -1301,9 +1250,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PushHelper(int count, int size,
|
||||
const CPURegister& src0,
|
||||
void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
|
||||
const CPURegister& src1,
|
||||
const CPURegister& src2,
|
||||
const CPURegister& src3) {
|
||||
@ -1341,11 +1288,8 @@ void MacroAssembler::PushHelper(int count, int size,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PopHelper(int count, int size,
|
||||
const CPURegister& dst0,
|
||||
const CPURegister& dst1,
|
||||
const CPURegister& dst2,
|
||||
void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
|
||||
const CPURegister& dst1, const CPURegister& dst2,
|
||||
const CPURegister& dst3) {
|
||||
// Ensure that we don't unintentially modify scratch or debug registers.
|
||||
InstructionAccurateScope scope(this);
|
||||
@ -1382,8 +1326,7 @@ void MacroAssembler::PopHelper(int count, int size,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PushPreamble(Operand total_size) {
|
||||
void TurboAssembler::PushPreamble(Operand total_size) {
|
||||
if (csp.Is(StackPointer())) {
|
||||
// If the current stack pointer is csp, then it must be aligned to 16 bytes
|
||||
// on entry and the total size of the specified registers must also be a
|
||||
@ -1402,8 +1345,7 @@ void MacroAssembler::PushPreamble(Operand total_size) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PopPostamble(Operand total_size) {
|
||||
void TurboAssembler::PopPostamble(Operand total_size) {
|
||||
if (csp.Is(StackPointer())) {
|
||||
// If the current stack pointer is csp, then it must be aligned to 16 bytes
|
||||
// on entry and the total size of the specified registers must also be a
|
||||
@ -1422,14 +1364,14 @@ void MacroAssembler::PopPostamble(Operand total_size) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::PushPreamble(int count, int size) {
|
||||
void TurboAssembler::PushPreamble(int count, int size) {
|
||||
PushPreamble(count * size);
|
||||
}
|
||||
void MacroAssembler::PopPostamble(int count, int size) {
|
||||
void TurboAssembler::PopPostamble(int count, int size) {
|
||||
PopPostamble(count * size);
|
||||
}
|
||||
|
||||
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
|
||||
void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
|
||||
if (offset.IsImmediate()) {
|
||||
DCHECK(offset.ImmediateValue() >= 0);
|
||||
} else if (emit_debug_code()) {
|
||||
@ -1452,9 +1394,7 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
|
||||
Ldr(dst, MemOperand(StackPointer(), offset));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PokePair(const CPURegister& src1,
|
||||
const CPURegister& src2,
|
||||
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
|
||||
int offset) {
|
||||
DCHECK(AreSameSizeAndType(src1, src2));
|
||||
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
|
||||
@ -1518,8 +1458,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
|
||||
ldp(d14, d15, tos);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::AssertStackConsistency() {
|
||||
void TurboAssembler::AssertStackConsistency() {
|
||||
// Avoid emitting code when !use_real_abort() since non-real aborts cause too
|
||||
// much code to be generated.
|
||||
if (emit_debug_code() && use_real_aborts()) {
|
||||
@ -1551,7 +1490,7 @@ void MacroAssembler::AssertStackConsistency() {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::AssertCspAligned() {
|
||||
void TurboAssembler::AssertCspAligned() {
|
||||
if (emit_debug_code() && use_real_aborts()) {
|
||||
// TODO(titzer): use a real assert for alignment check?
|
||||
UseScratchRegisterScope scope(this);
|
||||
@ -1560,7 +1499,7 @@ void MacroAssembler::AssertCspAligned() {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::AssertFPCRState(Register fpcr) {
|
||||
void TurboAssembler::AssertFPCRState(Register fpcr) {
|
||||
if (emit_debug_code()) {
|
||||
Label unexpected_mode, done;
|
||||
UseScratchRegisterScope temps(this);
|
||||
@ -1584,7 +1523,7 @@ void MacroAssembler::AssertFPCRState(Register fpcr) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::CanonicalizeNaN(const VRegister& dst,
|
||||
void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
|
||||
const VRegister& src) {
|
||||
AssertFPCRState();
|
||||
|
||||
@ -1594,8 +1533,7 @@ void MacroAssembler::CanonicalizeNaN(const VRegister& dst,
|
||||
Fsub(dst, src, fp_zero);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LoadRoot(CPURegister destination,
|
||||
void TurboAssembler::LoadRoot(CPURegister destination,
|
||||
Heap::RootListIndex index) {
|
||||
// TODO(jbramley): Most root values are constants, and can be synthesized
|
||||
// without a load. Refer to the ARM back end for details.
|
||||
@ -1617,7 +1555,6 @@ void MacroAssembler::LoadTrueFalseRoots(Register true_root,
|
||||
MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
|
||||
AllowDeferredHandleDereference heap_object_check;
|
||||
if (object->IsHeapObject()) {
|
||||
@ -1627,9 +1564,9 @@ void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Move(Register dst, Register src) { Mov(dst, src); }
|
||||
void MacroAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
|
||||
void MacroAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
|
||||
void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
|
||||
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
|
||||
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
|
||||
|
||||
void MacroAssembler::LoadInstanceDescriptors(Register map,
|
||||
Register descriptors) {
|
||||
@ -1778,8 +1715,7 @@ void MacroAssembler::InNewSpace(Register object,
|
||||
MemoryChunk::kIsInNewSpaceMask, cond, branch);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
|
||||
void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
|
||||
if (emit_debug_code()) {
|
||||
STATIC_ASSERT(kSmiTag == 0);
|
||||
Tst(object, kSmiTagMask);
|
||||
@ -1877,8 +1813,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::AssertPositiveOrZero(Register value) {
|
||||
void TurboAssembler::AssertPositiveOrZero(Register value) {
|
||||
if (emit_debug_code()) {
|
||||
Label done;
|
||||
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
|
||||
@ -1888,12 +1823,8 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
|
||||
}
|
||||
}

void MacroAssembler::CallStub(CodeStub* stub) {
void TurboAssembler::CallStubDelayed(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::CallStubDelayed(CodeStub* stub) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
@ -1908,11 +1839,16 @@ void MacroAssembler::CallStubDelayed(CodeStub* stub) {
#endif
}

void MacroAssembler::CallStub(CodeStub* stub) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
|
||||
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
|
||||
SaveFPRegsMode save_doubles) {
|
||||
const Runtime::Function* f = Runtime::FunctionForId(fid);
|
||||
// TODO(1236192): Most runtime routines don't need the number of
|
||||
@ -1973,7 +1909,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
|
||||
JumpToExternalReference(ExternalReference(fid, isolate()));
|
||||
}
|
||||
|
||||
int MacroAssembler::ActivationFrameAlignment() {
|
||||
int TurboAssembler::ActivationFrameAlignment() {
|
||||
#if V8_HOST_ARCH_ARM64
|
||||
// Running on the real platform. Use the alignment as mandated by the local
|
||||
// environment.
|
||||
@ -1989,14 +1925,12 @@ int MacroAssembler::ActivationFrameAlignment() {
|
||||
#endif // V8_HOST_ARCH_ARM64
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(ExternalReference function,
|
||||
void TurboAssembler::CallCFunction(ExternalReference function,
|
||||
int num_of_reg_args) {
|
||||
CallCFunction(function, num_of_reg_args, 0);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(ExternalReference function,
|
||||
void TurboAssembler::CallCFunction(ExternalReference function,
|
||||
int num_of_reg_args,
|
||||
int num_of_double_args) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
@ -2007,8 +1941,7 @@ void MacroAssembler::CallCFunction(ExternalReference function,
|
||||
|
||||
static const int kRegisterPassedArguments = 8;
|
||||
|
||||
void MacroAssembler::CallCFunction(Register function,
|
||||
int num_of_reg_args,
|
||||
void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
|
||||
int num_of_double_args) {
|
||||
DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
|
||||
DCHECK(has_frame());
|
||||
@ -2093,13 +2026,9 @@ void MacroAssembler::CallCFunction(Register function,
|
||||
}
|
||||
}
|
||||
|
||||
void TurboAssembler::Jump(Register target) { Br(target); }
|
||||
|
||||
void MacroAssembler::Jump(Register target) {
|
||||
Br(target);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
|
||||
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
|
||||
Condition cond) {
|
||||
if (cond == nv) return;
|
||||
UseScratchRegisterScope temps(this);
|
||||
@ -2111,23 +2040,20 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
|
||||
Bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
|
||||
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
|
||||
Condition cond) {
|
||||
DCHECK(!RelocInfo::IsCodeTarget(rmode));
|
||||
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
|
||||
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
|
||||
Condition cond) {
|
||||
DCHECK(RelocInfo::IsCodeTarget(rmode));
|
||||
AllowHandleDereference using_location;
|
||||
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Call(Register target) {
|
||||
void TurboAssembler::Call(Register target) {
|
||||
BlockPoolsScope scope(this);
|
||||
#ifdef DEBUG
|
||||
Label start_call;
|
||||
@ -2141,8 +2067,7 @@ void MacroAssembler::Call(Register target) {
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Call(Label* target) {
|
||||
void TurboAssembler::Call(Label* target) {
|
||||
BlockPoolsScope scope(this);
|
||||
#ifdef DEBUG
|
||||
Label start_call;
|
||||
@ -2156,10 +2081,9 @@ void MacroAssembler::Call(Label* target) {
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
// MacroAssembler::CallSize is sensitive to changes in this function, as it
|
||||
// TurboAssembler::CallSize is sensitive to changes in this function, as it
|
||||
// requires to know how many instructions are used to branch to the target.
|
||||
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
|
||||
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
|
||||
BlockPoolsScope scope(this);
|
||||
#ifdef DEBUG
|
||||
Label start_call;
|
||||
@ -2189,7 +2113,7 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
|
||||
#endif
|
||||
}
|
||||
|
||||
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
|
||||
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
|
||||
#ifdef DEBUG
|
||||
Label start_call;
|
||||
Bind(&start_call);
|
||||
@ -2204,20 +2128,17 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
int MacroAssembler::CallSize(Register target) {
|
||||
int TurboAssembler::CallSize(Register target) {
|
||||
USE(target);
|
||||
return kInstructionSize;
|
||||
}
|
||||
|
||||
|
||||
int MacroAssembler::CallSize(Label* target) {
|
||||
int TurboAssembler::CallSize(Label* target) {
|
||||
USE(target);
|
||||
return kInstructionSize;
|
||||
}
|
||||
|
||||
|
||||
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
|
||||
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
|
||||
USE(target);
|
||||
|
||||
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
|
||||
@ -2230,7 +2151,7 @@ int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
|
||||
}
|
||||
}
|
||||
|
||||
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
|
||||
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
|
||||
USE(code);
|
||||
|
||||
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
|
||||
@ -2419,7 +2340,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
|
||||
B(ne, not_unique_name);
|
||||
}
|
||||
|
||||
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
|
||||
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
|
||||
Register caller_args_count_reg,
|
||||
Register scratch0, Register scratch1) {
|
||||
#if DEBUG
|
||||
@ -2705,8 +2626,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
|
||||
InvokeFunction(x1, expected, actual, flag, call_wrapper);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::TryConvertDoubleToInt64(Register result,
|
||||
void TurboAssembler::TryConvertDoubleToInt64(Register result,
|
||||
DoubleRegister double_input,
|
||||
Label* done) {
|
||||
// Try to convert with an FPU convert instruction. It's trivial to compute
|
||||
@ -2730,9 +2650,8 @@ void MacroAssembler::TryConvertDoubleToInt64(Register result,
|
||||
B(vc, done);
|
||||
}

void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
                                              DoubleRegister double_input) {
  Label done;

  // Try to convert the double to an int64. If successful, the bottom 32 bits
@ -2753,13 +2672,11 @@ void MacroAssembler::TruncateDoubleToI(Register result,
  // If we fell through then inline version didn't succeed - call stub instead.
  Push(lr, double_input);

  DoubleToIStub stub(isolate(),
                     jssp,
                     result,
                     0,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber
  auto stub = new (zone) DoubleToIStub(nullptr, jssp, result, 0,
                                       true,   // is_truncating
                                       true);  // skip_fastpath
  // DoubleToIStub preserves any registers it needs to clobber.
  CallStubDelayed(stub);

  DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
  Pop(xzr, lr);  // xzr to drop the double input on the stack.
@ -2776,7 +2693,6 @@ void MacroAssembler::TruncateDoubleToI(Register result,
  Uxtw(result.W(), result.W());
}

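The hunk above swaps a stack-allocated DoubleToIStub plus CallStub for a zone-allocated stub plus CallStubDelayed. As a minimal, self-contained sketch of the "new (zone) ..." idiom it relies on — the Zone, Stub, and field names below are toy stand-ins for illustration, not V8's types:

#include <cstddef>
#include <cstdint>
#include <list>
#include <vector>

class Zone {  // toy bump-style arena; objects live as long as the zone
 public:
  void* Allocate(size_t size) {
    blocks_.emplace_back(size);
    return blocks_.back().data();
  }
 private:
  std::list<std::vector<uint8_t>> blocks_;
};

// Placement operator new: "new (zone) T(...)" constructs T in zone storage.
void* operator new(size_t size, Zone* zone) { return zone->Allocate(size); }
void operator delete(void*, Zone*) {}  // only used if a constructor throws

struct Stub {  // stand-in for a zone-allocated code stub
  Stub(int key, bool truncating) : key(key), truncating(truncating) {}
  int key;
  bool truncating;
};

int main() {
  Zone zone;
  Stub* stub = new (&zone) Stub(42, true);  // no explicit free: the zone owns it
  return (stub->key == 42 && stub->truncating) ? 0 : 1;
}
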
void MacroAssembler::TruncateHeapNumberToI(Register result,
|
||||
Register object) {
|
||||
Label done;
|
||||
@ -2803,7 +2719,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
|
||||
Bind(&done);
|
||||
}
|
||||
|
||||
void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
|
||||
void TurboAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
|
||||
Register temp = temps.AcquireX();
|
||||
@ -2814,7 +2730,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
|
||||
str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
|
||||
}
|
||||
|
||||
void MacroAssembler::Prologue(bool code_pre_aging) {
|
||||
void TurboAssembler::Prologue(bool code_pre_aging) {
|
||||
if (code_pre_aging) {
|
||||
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
|
||||
__ EmitCodeAgeSequence(stub);
|
||||
@ -2829,15 +2745,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
|
||||
Ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EnterFrame(StackFrame::Type type,
|
||||
bool load_constant_pool_pointer_reg) {
|
||||
// Out-of-line constant pool not implemented on arm64.
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EnterFrame(StackFrame::Type type) {
|
||||
void TurboAssembler::EnterFrame(StackFrame::Type type) {
|
||||
UseScratchRegisterScope temps(this);
|
||||
Register type_reg = temps.AcquireX();
|
||||
Register code_reg = temps.AcquireX();
|
||||
@ -2876,8 +2784,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
|
||||
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
|
||||
if (type == StackFrame::WASM_COMPILED) {
|
||||
DCHECK(csp.Is(StackPointer()));
|
||||
Mov(csp, fp);
|
||||
@ -3575,8 +3482,8 @@ void MacroAssembler::TestAndSplit(const Register& reg,
|
||||
}
|
||||
}
|
||||
|
||||
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
|
||||
return has_frame_ || !stub->SometimesSetsUpAFrame();
|
||||
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
|
||||
return has_frame() || !stub->SometimesSetsUpAFrame();
|
||||
}
|
||||
|
||||
void MacroAssembler::EmitSeqStringSetCharCheck(
|
||||
@ -3835,19 +3742,16 @@ void MacroAssembler::CheckPageFlag(const Register& object,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::CheckPageFlagSet(const Register& object,
|
||||
const Register& scratch,
|
||||
int mask,
|
||||
void TurboAssembler::CheckPageFlagSet(const Register& object,
|
||||
const Register& scratch, int mask,
|
||||
Label* if_any_set) {
|
||||
And(scratch, object, ~Page::kPageAlignmentMask);
|
||||
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
|
||||
TestAndBranchIfAnySet(scratch, mask, if_any_set);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CheckPageFlagClear(const Register& object,
|
||||
const Register& scratch,
|
||||
int mask,
|
||||
void TurboAssembler::CheckPageFlagClear(const Register& object,
|
||||
const Register& scratch, int mask,
|
||||
Label* if_all_clear) {
|
||||
And(scratch, object, ~Page::kPageAlignmentMask);
|
||||
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
|
||||
@ -4165,8 +4069,7 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
|
||||
Tbz(load_scratch, 0, value_is_white);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
|
||||
void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
|
||||
if (emit_debug_code()) {
|
||||
Check(cond, reason);
|
||||
}
|
||||
@ -4205,8 +4108,7 @@ void MacroAssembler::AssertIsString(const Register& object) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Check(Condition cond, BailoutReason reason) {
|
||||
void TurboAssembler::Check(Condition cond, BailoutReason reason) {
|
||||
Label ok;
|
||||
B(cond, &ok);
|
||||
Abort(reason);
|
||||
@ -4223,8 +4125,7 @@ void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
|
||||
Bind(&ok);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Abort(BailoutReason reason) {
|
||||
void TurboAssembler::Abort(BailoutReason reason) {
|
||||
#ifdef DEBUG
|
||||
RecordComment("Abort message: ");
|
||||
RecordComment(GetBailoutReason(reason));
|
||||
@ -4437,11 +4338,10 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
|
||||
CallPrintf(arg_count, pcs);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
|
||||
// A call to printf needs special handling for the simulator, since the system
|
||||
// printf function will use a different instruction set and the procedure-call
|
||||
// standard will not be compatible.
|
||||
void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
|
||||
// A call to printf needs special handling for the simulator, since the system
|
||||
// printf function will use a different instruction set and the procedure-call
|
||||
// standard will not be compatible.
|
||||
#ifdef USE_SIMULATOR
|
||||
{ InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
|
||||
hlt(kImmExceptionIsPrintf);
|
||||
@ -4546,8 +4446,7 @@ void MacroAssembler::Printf(const char * format,
|
||||
FPTmpList()->set_list(old_fp_tmp_list);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
|
||||
void TurboAssembler::EmitFrameSetupForCodeAgePatching() {
|
||||
// TODO(jbramley): Other architectures use the internal memcpy to copy the
|
||||
// sequence. If this is a performance bottleneck, we should consider caching
|
||||
// the sequence and copying it in the same way.
|
||||
@ -4557,9 +4456,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
|
||||
EmitFrameSetupForCodeAgePatching(this);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
|
||||
void TurboAssembler::EmitCodeAgeSequence(Code* stub) {
|
||||
InstructionAccurateScope scope(this,
|
||||
kNoCodeAgeSequenceLength / kInstructionSize);
|
||||
DCHECK(jssp.Is(StackPointer()));
|
||||
@ -4570,8 +4467,7 @@ void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
|
||||
#undef __
|
||||
#define __ assm->
|
||||
|
||||
|
||||
void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
|
||||
void TurboAssembler::EmitFrameSetupForCodeAgePatching(Assembler* assm) {
|
||||
Label start;
|
||||
__ bind(&start);
|
||||
|
||||
@ -4588,9 +4484,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
|
||||
__ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
|
||||
Code * stub) {
|
||||
void TurboAssembler::EmitCodeAgeSequence(Assembler* assm, Code* stub) {
|
||||
Label start;
|
||||
__ bind(&start);
|
||||
// When the stub is called, the sequence is replaced with the young sequence
File diff suppressed because it is too large
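
The collapsed file diff above is not reproduced here. As a rough, hedged sketch of the class layering that the renames in this commit imply — illustrative C++ only, not the actual V8 headers:

class Assembler {
  // raw instruction emission
};

class TurboAssembler : public Assembler {
  // The methods this commit renames from MacroAssembler:: to TurboAssembler::
  // (Push, Pop, Call, Jump, CallCFunction, Abort, EnterFrame, ...) sit here.
};

class MacroAssembler : public TurboAssembler {
  // Everything still spelled MacroAssembler:: (InvokeFunction, TailCallRuntime,
  // TruncateHeapNumberToI, ...) stays here and inherits the rest.
};
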
@ -142,7 +142,6 @@ class Node;

static const int kHasReturnedMinusZeroSentinel = 1;

// Stub is base classes of all stubs.
class CodeStub : public ZoneObject {
 public:
  enum Major {
@ -1427,7 +1426,7 @@ class ProfileEntryHookStub : public PlatformCodeStub {

  // Generates a call to the entry hook if it's enabled.
  static void MaybeCallEntryHook(MacroAssembler* masm);
  static void MaybeCallEntryHookDelayed(MacroAssembler* masm, Zone* zone);
  static void MaybeCallEntryHookDelayed(TurboAssembler* tasm, Zone* zone);

 private:
  static void EntryHookTrampoline(intptr_t function,
@ -110,7 +110,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#endif  // DEBUG
}

Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Handle<Code> CodeGenerator::MakeCodeEpilogue(TurboAssembler* tasm,
                                             EhFrameWriter* eh_frame_writer,
                                             CompilationInfo* info,
                                             Handle<Object> self_reference) {
@ -122,7 +122,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
  bool is_crankshafted =
      Code::ExtractKindFromFlags(flags) == Code::OPTIMIZED_FUNCTION ||
      info->IsStub();
  masm->GetCode(isolate, &desc);
  tasm->GetCode(isolate, &desc);
  if (eh_frame_writer) eh_frame_writer->GetEhFrame(&desc);

  Handle<Code> code = isolate->factory()->NewCode(

@ -78,7 +78,7 @@ class CodeGenerator {
  static void MakeCodePrologue(CompilationInfo* info, const char* kind);

  // Allocate and install the code.
  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
  static Handle<Code> MakeCodeEpilogue(TurboAssembler* tasm,
                                       EhFrameWriter* unwinding,
                                       CompilationInfo* info,
                                       Handle<Object> self_reference);
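
One reason the MakeCodeEpilogue signature change above is low-risk: a MacroAssembler* converts implicitly to the new TurboAssembler* parameter, so call sites that still hold a MacroAssembler compile unchanged. A toy sketch of that conversion (these structs are stand-ins, not the V8 classes):

struct TurboAssembler {
  void GetCode() {}  // placeholder for the slice of API the epilogue needs
};
struct MacroAssembler : TurboAssembler {};

void MakeCodeEpilogue(TurboAssembler* tasm) { tasm->GetCode(); }

int main() {
  MacroAssembler masm;
  MakeCodeEpilogue(&masm);  // derived-to-base pointer conversion
  return 0;
}
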
@ -19,8 +19,7 @@ namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

#define __ tasm()->

#define kScratchReg r9

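
With "__" now defined as "tasm()->", spellings such as "__ isolate()" in the hunks below expand to "tasm()->isolate()". A toy, compilable illustration of that purely textual expansion (FakeAssembler and the free function tasm() are stand-ins, not V8 code; the "__" macro name just mirrors the convention used in this file):

#include <cstdio>

struct FakeAssembler {
  int isolate() { return 7; }                        // placeholder value
  void Emit(int x) { std::printf("emit %d\n", x); }
};

FakeAssembler* tasm() {
  static FakeAssembler instance;
  return &instance;
}

#define __ tasm()->

int main() {
  __ Emit(__ isolate());  // preprocesses to: tasm()->Emit(tasm()->isolate());
  return 0;
}
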
@ -469,12 +468,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
|
||||
do { \
|
||||
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
|
||||
/* and generate a CallAddress instruction instead. */ \
|
||||
FrameScope scope(masm(), StackFrame::MANUAL); \
|
||||
FrameScope scope(tasm(), StackFrame::MANUAL); \
|
||||
__ PrepareCallCFunction(0, 2); \
|
||||
__ MovToFloatParameters(i.InputDoubleRegister(0), \
|
||||
i.InputDoubleRegister(1)); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
0, 2); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
|
||||
/* Move the result in the double result register. */ \
|
||||
__ MovFromFloatResult(i.OutputDoubleRegister()); \
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
|
||||
@ -484,11 +483,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
|
||||
do { \
|
||||
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
|
||||
/* and generate a CallAddress instruction instead. */ \
|
||||
FrameScope scope(masm(), StackFrame::MANUAL); \
|
||||
FrameScope scope(tasm(), StackFrame::MANUAL); \
|
||||
__ PrepareCallCFunction(0, 1); \
|
||||
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
0, 1); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
|
||||
/* Move the result in the double result register. */ \
|
||||
__ MovFromFloatResult(i.OutputDoubleRegister()); \
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
|
||||
@ -570,20 +569,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
|
||||
|
||||
namespace {
|
||||
|
||||
void FlushPendingPushRegisters(MacroAssembler* masm,
|
||||
void FlushPendingPushRegisters(TurboAssembler* tasm,
|
||||
FrameAccessState* frame_access_state,
|
||||
ZoneVector<Register>* pending_pushes) {
|
||||
switch (pending_pushes->size()) {
|
||||
case 0:
|
||||
break;
|
||||
case 1:
|
||||
masm->push((*pending_pushes)[0]);
|
||||
tasm->push((*pending_pushes)[0]);
|
||||
break;
|
||||
case 2:
|
||||
masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
|
||||
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
|
||||
break;
|
||||
case 3:
|
||||
masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
|
||||
tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
|
||||
(*pending_pushes)[2]);
|
||||
break;
|
||||
default:
|
||||
@ -594,18 +593,18 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
|
||||
pending_pushes->resize(0);
|
||||
}
|
||||
|
||||
void AddPendingPushRegister(MacroAssembler* masm,
|
||||
void AddPendingPushRegister(TurboAssembler* tasm,
|
||||
FrameAccessState* frame_access_state,
|
||||
ZoneVector<Register>* pending_pushes,
|
||||
Register reg) {
|
||||
pending_pushes->push_back(reg);
|
||||
if (pending_pushes->size() == 3 || reg.is(ip)) {
|
||||
FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
|
||||
FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
|
||||
}
|
||||
}
|
||||
|
||||
void AdjustStackPointerForTailCall(
|
||||
MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
|
||||
TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
|
||||
ZoneVector<Register>* pending_pushes = nullptr,
|
||||
bool allow_shrinkage = true) {
|
||||
int current_sp_offset = state->GetSPToFPSlotCount() +
|
||||
@ -613,15 +612,15 @@ void AdjustStackPointerForTailCall(
|
||||
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
|
||||
if (stack_slot_delta > 0) {
|
||||
if (pending_pushes != nullptr) {
|
||||
FlushPendingPushRegisters(masm, state, pending_pushes);
|
||||
FlushPendingPushRegisters(tasm, state, pending_pushes);
|
||||
}
|
||||
masm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
|
||||
tasm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
|
||||
state->IncreaseSPDelta(stack_slot_delta);
|
||||
} else if (allow_shrinkage && stack_slot_delta < 0) {
|
||||
if (pending_pushes != nullptr) {
|
||||
FlushPendingPushRegisters(masm, state, pending_pushes);
|
||||
FlushPendingPushRegisters(tasm, state, pending_pushes);
|
||||
}
|
||||
masm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
|
||||
tasm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
|
||||
state->IncreaseSPDelta(stack_slot_delta);
|
||||
}
|
||||
}
|
||||
@ -644,20 +643,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||
LocationOperand::cast(move->destination()));
|
||||
InstructionOperand source(move->source());
|
||||
AdjustStackPointerForTailCall(
|
||||
masm(), frame_access_state(),
|
||||
tasm(), frame_access_state(),
|
||||
destination_location.index() - pending_pushes.size(),
|
||||
&pending_pushes);
|
||||
if (source.IsStackSlot()) {
|
||||
LocationOperand source_location(LocationOperand::cast(source));
|
||||
__ ldr(ip, g.SlotToMemOperand(source_location.index()));
|
||||
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
|
||||
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
|
||||
ip);
|
||||
} else if (source.IsRegister()) {
|
||||
LocationOperand source_location(LocationOperand::cast(source));
|
||||
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
|
||||
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
|
||||
source_location.GetRegister());
|
||||
} else if (source.IsImmediate()) {
|
||||
AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
|
||||
AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
|
||||
ip);
|
||||
} else {
|
||||
// Pushes of non-scalar data types is not supported.
|
||||
@ -665,15 +664,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||
}
|
||||
move->Eliminate();
|
||||
}
|
||||
FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
|
||||
FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
|
||||
}
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot, nullptr, false);
|
||||
}
|
||||
|
||||
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
|
||||
int first_unused_stack_slot) {
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot);
|
||||
}
|
||||
|
||||
@ -690,7 +689,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// We must not share code targets for calls to builtins for wasm code, as
|
||||
// they might need to be patched individually.
|
||||
internal::Assembler::BlockCodeTargetSharingScope scope;
|
||||
if (info()->IsWasm()) scope.Open(masm());
|
||||
if (info()->IsWasm()) scope.Open(tasm());
|
||||
|
||||
EnsureSpaceForLazyDeopt();
|
||||
if (instr->InputAt(0)->IsImmediate()) {
|
||||
@ -711,7 +710,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// We must not share code targets for calls to builtins for wasm code, as
|
||||
// they might need to be patched individually.
|
||||
internal::Assembler::BlockCodeTargetSharingScope scope;
|
||||
if (info()->IsWasm()) scope.Open(masm());
|
||||
if (info()->IsWasm()) scope.Open(tasm());
|
||||
|
||||
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
|
||||
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
|
||||
@ -850,7 +849,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
break;
|
||||
case kArchTruncateDoubleToI:
|
||||
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
|
||||
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
break;
|
||||
case kArchStoreWithWriteBarrier: {
|
||||
@ -983,7 +983,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
i.InputRegister(2), i.OutputSBit());
|
||||
break;
|
||||
case kArmMls: {
|
||||
CpuFeatureScope scope(masm(), ARMv7);
|
||||
CpuFeatureScope scope(tasm(), ARMv7);
|
||||
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
|
||||
i.InputRegister(2));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
@ -1007,13 +1007,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
i.InputRegister(1), i.OutputSBit());
|
||||
break;
|
||||
case kArmSdiv: {
|
||||
CpuFeatureScope scope(masm(), SUDIV);
|
||||
CpuFeatureScope scope(tasm(), SUDIV);
|
||||
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
break;
|
||||
}
|
||||
case kArmUdiv: {
|
||||
CpuFeatureScope scope(masm(), SUDIV);
|
||||
CpuFeatureScope scope(tasm(), SUDIV);
|
||||
__ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
break;
|
||||
@ -1041,20 +1041,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
i.OutputSBit());
|
||||
break;
|
||||
case kArmBfc: {
|
||||
CpuFeatureScope scope(masm(), ARMv7);
|
||||
CpuFeatureScope scope(tasm(), ARMv7);
|
||||
__ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
break;
|
||||
}
|
||||
case kArmUbfx: {
|
||||
CpuFeatureScope scope(masm(), ARMv7);
|
||||
CpuFeatureScope scope(tasm(), ARMv7);
|
||||
__ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
|
||||
i.InputInt8(2));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
break;
|
||||
}
|
||||
case kArmSbfx: {
|
||||
CpuFeatureScope scope(masm(), ARMv7);
|
||||
CpuFeatureScope scope(tasm(), ARMv7);
|
||||
__ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
|
||||
i.InputInt8(2));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
@ -1097,7 +1097,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
break;
|
||||
case kArmRbit: {
|
||||
CpuFeatureScope scope(masm(), ARMv7);
|
||||
CpuFeatureScope scope(tasm(), ARMv7);
|
||||
__ rbit(i.OutputRegister(), i.InputRegister(0));
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
break;
|
||||
@ -1288,12 +1288,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
case kArmVmodF64: {
|
||||
// TODO(bmeurer): We should really get rid of this special instruction,
|
||||
// and generate a CallAddress instruction instead.
|
||||
FrameScope scope(masm(), StackFrame::MANUAL);
|
||||
FrameScope scope(tasm(), StackFrame::MANUAL);
|
||||
__ PrepareCallCFunction(0, 2);
|
||||
__ MovToFloatParameters(i.InputDoubleRegister(0),
|
||||
i.InputDoubleRegister(1));
|
||||
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
|
||||
0, 2);
|
||||
__ CallCFunction(
|
||||
ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2);
|
||||
// Move the result in the double result register.
|
||||
__ MovFromFloatResult(i.OutputDoubleRegister());
|
||||
DCHECK_EQ(LeaveCC, i.OutputSBit());
|
||||
@ -1309,47 +1309,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
||||
break;
|
||||
case kArmVrintmF32: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintmF64: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintpF32: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintpF64: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintzF32: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintzF64: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintaF64: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintnF32: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArmVrintnF64: {
|
||||
CpuFeatureScope scope(masm(), ARMv8);
|
||||
CpuFeatureScope scope(tasm(), ARMv8);
|
||||
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
||||
break;
|
||||
}
|
||||
@ -2639,14 +2639,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
|
||||
// We use the context register as the scratch register, because we do
|
||||
// not have a context here.
|
||||
__ PrepareCallCFunction(0, 0);
|
||||
__ CallCFunction(
|
||||
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
|
||||
0);
|
||||
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
|
||||
__ isolate()),
|
||||
0);
|
||||
__ LeaveFrame(StackFrame::WASM_COMPILED);
|
||||
__ Ret();
|
||||
} else {
|
||||
gen_->AssembleSourcePosition(instr_);
|
||||
__ Call(isolate()->builtins()->builtin_handle(trap_id),
|
||||
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
|
||||
RelocInfo::CODE_TARGET);
|
||||
ReferenceMap* reference_map =
|
||||
new (gen_->zone()) ReferenceMap(gen_->zone());
|
||||
@ -2719,7 +2719,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
|
||||
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
|
||||
: Deoptimizer::EAGER;
|
||||
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
|
||||
isolate(), deoptimization_id, bailout_type);
|
||||
__ isolate(), deoptimization_id, bailout_type);
|
||||
// TODO(turbofan): We should be able to generate better code by sharing the
|
||||
// actual final call site and just bl'ing to it here, similar to what we do
|
||||
// in the lithium backend.
|
||||
@ -2809,7 +2809,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
||||
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
|
||||
__ Move(kScratchReg,
|
||||
Operand(ExternalReference::address_of_real_stack_limit(
|
||||
isolate())));
|
||||
__ isolate())));
|
||||
__ ldr(kScratchReg, MemOperand(kScratchReg));
|
||||
__ add(kScratchReg, kScratchReg,
|
||||
Operand(shrink_slots * kPointerSize));
|
||||
@ -2906,7 +2906,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
|
||||
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
|
||||
|
||||
void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
InstructionOperand* destination) {
|
||||
@ -3202,10 +3202,10 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
|
||||
int space_needed = Deoptimizer::patch_size();
|
||||
// Ensure that we have enough space after the previous lazy-bailout
|
||||
// instruction for patching the code here.
|
||||
int current_pc = masm()->pc_offset();
|
||||
int current_pc = tasm()->pc_offset();
|
||||
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
|
||||
// Block literal pool emission for duration of padding.
|
||||
v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
|
||||
v8::internal::Assembler::BlockConstPoolScope block_const_pool(tasm());
|
||||
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
|
||||
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
|
||||
while (padding_size > 0) {
|
||||
|
@ -18,8 +18,7 @@ namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

#define __ tasm()->

// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
@ -241,13 +240,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
|
||||
MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
|
||||
DCHECK_NOT_NULL(op);
|
||||
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
|
||||
return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
|
||||
return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
|
||||
}
|
||||
|
||||
MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
|
||||
MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
|
||||
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
|
||||
if (offset.from_frame_pointer()) {
|
||||
int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
|
||||
@ -257,7 +256,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
|
||||
offset = FrameOffset::FromStackPointer(from_sp);
|
||||
}
|
||||
}
|
||||
return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
|
||||
return MemOperand(offset.from_stack_pointer() ? tasm->StackPointer() : fp,
|
||||
offset.offset());
|
||||
}
|
||||
};
|
||||
@ -566,18 +565,18 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
|
||||
__ cbnz(i.TempRegister32(1), &binop); \
|
||||
} while (0)
|
||||
|
||||
#define ASSEMBLE_IEEE754_BINOP(name) \
|
||||
do { \
|
||||
FrameScope scope(masm(), StackFrame::MANUAL); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
0, 2); \
|
||||
#define ASSEMBLE_IEEE754_BINOP(name) \
|
||||
do { \
|
||||
FrameScope scope(tasm(), StackFrame::MANUAL); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
|
||||
} while (0)
|
||||
|
||||
#define ASSEMBLE_IEEE754_UNOP(name) \
|
||||
do { \
|
||||
FrameScope scope(masm(), StackFrame::MANUAL); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
0, 1); \
|
||||
#define ASSEMBLE_IEEE754_UNOP(name) \
|
||||
do { \
|
||||
FrameScope scope(tasm(), StackFrame::MANUAL); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
|
||||
} while (0)
|
||||
|
||||
void CodeGenerator::AssembleDeconstructFrame() {
|
||||
@ -628,7 +627,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
|
||||
|
||||
namespace {
|
||||
|
||||
void AdjustStackPointerForTailCall(MacroAssembler* masm,
|
||||
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
|
||||
FrameAccessState* state,
|
||||
int new_slot_above_sp,
|
||||
bool allow_shrinkage = true) {
|
||||
@ -636,10 +635,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
|
||||
StandardFrameConstants::kFixedSlotCountAboveFp;
|
||||
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
|
||||
if (stack_slot_delta > 0) {
|
||||
masm->Claim(stack_slot_delta);
|
||||
tasm->Claim(stack_slot_delta);
|
||||
state->IncreaseSPDelta(stack_slot_delta);
|
||||
} else if (allow_shrinkage && stack_slot_delta < 0) {
|
||||
masm->Drop(-stack_slot_delta);
|
||||
tasm->Drop(-stack_slot_delta);
|
||||
state->IncreaseSPDelta(stack_slot_delta);
|
||||
}
|
||||
}
|
||||
@ -648,13 +647,13 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
|
||||
|
||||
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||
int first_unused_stack_slot) {
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot, false);
|
||||
}
|
||||
|
||||
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
|
||||
int first_unused_stack_slot) {
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot);
|
||||
}
|
||||
|
||||
@ -669,7 +668,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// We must not share code targets for calls to builtins for wasm code, as
|
||||
// they might need to be patched individually.
|
||||
internal::Assembler::BlockCodeTargetSharingScope scope;
|
||||
if (info()->IsWasm()) scope.Open(masm());
|
||||
if (info()->IsWasm()) scope.Open(tasm());
|
||||
|
||||
EnsureSpaceForLazyDeopt();
|
||||
if (instr->InputAt(0)->IsImmediate()) {
|
||||
@ -701,7 +700,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// We must not share code targets for calls to builtins for wasm code, as
|
||||
// they might need to be patched individually.
|
||||
internal::Assembler::BlockCodeTargetSharingScope scope;
|
||||
if (info()->IsWasm()) scope.Open(masm());
|
||||
if (info()->IsWasm()) scope.Open(tasm());
|
||||
|
||||
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
|
||||
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
|
||||
@ -734,7 +733,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
Register func = i.InputRegister(0);
|
||||
if (FLAG_debug_code) {
|
||||
// Check the function's context matches the context argument.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireX();
|
||||
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
|
||||
__ cmp(cp, temp);
|
||||
@ -762,7 +761,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
Register func = i.InputRegister(0);
|
||||
if (FLAG_debug_code) {
|
||||
// Check the function's context matches the context argument.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireX();
|
||||
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
|
||||
__ cmp(cp, temp);
|
||||
@ -835,7 +834,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
AssembleReturn(instr->InputAt(0));
|
||||
break;
|
||||
case kArchStackPointer:
|
||||
__ mov(i.OutputRegister(), masm()->StackPointer());
|
||||
__ mov(i.OutputRegister(), tasm()->StackPointer());
|
||||
break;
|
||||
case kArchFramePointer:
|
||||
__ mov(i.OutputRegister(), fp);
|
||||
@ -848,7 +847,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
break;
|
||||
case kArchTruncateDoubleToI:
|
||||
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
|
||||
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
|
||||
i.InputDoubleRegister(0));
|
||||
break;
|
||||
case kArchStoreWithWriteBarrier: {
|
||||
RecordWriteMode mode =
|
||||
@ -1083,14 +1083,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
|
||||
break;
|
||||
case kArm64Imod: {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireX();
|
||||
__ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
|
||||
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArm64Imod32: {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireW();
|
||||
__ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
|
||||
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
|
||||
@ -1098,14 +1098,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kArm64Umod: {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireX();
|
||||
__ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
|
||||
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
|
||||
break;
|
||||
}
|
||||
case kArm64Umod32: {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireW();
|
||||
__ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
|
||||
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
|
||||
@ -1240,7 +1240,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// Align the CSP and store the previous JSSP on the stack. We do not
|
||||
// need to modify the SP delta here, as we will continue to access the
|
||||
// frame via JSSP.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register tmp = scope.AcquireX();
|
||||
|
||||
// TODO(arm64): Storing JSSP on the stack is redundant when calling a C
|
||||
@ -1248,7 +1248,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// calling a code object that uses the CSP as the stack pointer). See
|
||||
// the code generation for kArchCallCodeObject vs. kArchCallCFunction
|
||||
// (the latter does not restore CSP/JSSP).
|
||||
// MacroAssembler::CallCFunction() (safely) drops this extra slot
|
||||
// TurboAssembler::CallCFunction() (safely) drops this extra slot
|
||||
// anyway.
|
||||
int sp_alignment = __ ActivationFrameAlignment();
|
||||
__ Sub(tmp, jssp, kPointerSize);
|
||||
@ -1407,13 +1407,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
case kArm64Float64Mod: {
|
||||
// TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
|
||||
FrameScope scope(masm(), StackFrame::MANUAL);
|
||||
FrameScope scope(tasm(), StackFrame::MANUAL);
|
||||
DCHECK(d0.is(i.InputDoubleRegister(0)));
|
||||
DCHECK(d1.is(i.InputDoubleRegister(1)));
|
||||
DCHECK(d0.is(i.OutputDoubleRegister()));
|
||||
// TODO(dcarney): make sure this saves all relevant registers.
|
||||
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
|
||||
0, 2);
|
||||
__ CallCFunction(
|
||||
ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2);
|
||||
break;
|
||||
}
|
||||
case kArm64Float32Max: {
|
||||
@ -1551,7 +1551,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
case kArm64Float64InsertLowWord32: {
|
||||
// TODO(arm64): This should use MOV (from general) when NEON is supported.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register tmp = scope.AcquireX();
|
||||
__ Fmov(tmp, i.InputFloat64Register(0));
|
||||
__ Bfi(tmp, i.InputRegister(1), 0, 32);
|
||||
@ -1560,7 +1560,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
case kArm64Float64InsertHighWord32: {
|
||||
// TODO(arm64): This should use MOV (from general) when NEON is supported.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register tmp = scope.AcquireX();
|
||||
__ Fmov(tmp.W(), i.InputFloat32Register(0));
|
||||
__ Bfi(tmp, i.InputRegister(1), 32, 32);
|
||||
@ -1919,7 +1919,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
VRegister dst = i.OutputSimd128Register(),
|
||||
src0 = i.InputSimd128Register(0),
|
||||
src1 = i.InputSimd128Register(1);
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireV(kFormat4S);
|
||||
if (dst.is(src1)) {
|
||||
__ Mov(temp, src1.V4S());
|
||||
@ -1965,7 +1965,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
VRegister dst = i.OutputSimd128Register(),
|
||||
src0 = i.InputSimd128Register(0),
|
||||
src1 = i.InputSimd128Register(1);
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireV(kFormat4S);
|
||||
if (dst.is(src1)) {
|
||||
__ Mov(temp, src1.V4S());
|
||||
@ -2014,7 +2014,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
VRegister dst = i.OutputSimd128Register(),
|
||||
src0 = i.InputSimd128Register(0),
|
||||
src1 = i.InputSimd128Register(1);
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireV(kFormat8H);
|
||||
if (dst.is(src1)) {
|
||||
__ Mov(temp, src1.V8H());
|
||||
@ -2050,7 +2050,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
VRegister dst = i.OutputSimd128Register(),
|
||||
src0 = i.InputSimd128Register(0),
|
||||
src1 = i.InputSimd128Register(1);
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireV(kFormat8H);
|
||||
if (dst.is(src1)) {
|
||||
__ Mov(temp, src1.V8H());
|
||||
@ -2087,7 +2087,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
src1 = i.InputSimd128Register(1).V4S();
|
||||
// Check for in-place shuffles.
|
||||
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireV(kFormat4S);
|
||||
if (dst.is(src0)) {
|
||||
__ Mov(temp, src0);
|
||||
@ -2150,7 +2150,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
(i.InputInt32(2) & mask) | ((i.InputInt32(3) & mask) << 32);
|
||||
int64_t imm2 =
|
||||
(i.InputInt32(4) & mask) | ((i.InputInt32(5) & mask) << 32);
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireV(kFormat16B);
|
||||
__ Movi(temp, imm2, imm1);
|
||||
|
||||
@ -2170,7 +2170,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
|
||||
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
|
||||
case Op: { \
|
||||
UseScratchRegisterScope scope(masm()); \
|
||||
UseScratchRegisterScope scope(tasm()); \
|
||||
VRegister temp = scope.AcquireV(format); \
|
||||
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
|
||||
__ Umov(i.OutputRegister32(), temp, 0); \
|
||||
@ -2284,9 +2284,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
@ -2294,7 +2294,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
|
||||
// Initialize the jssp because it is required for the runtime call.
|
||||
__ Mov(jssp, csp);
|
||||
gen_->AssembleSourcePosition(instr_);
|
||||
__ Call(isolate()->builtins()->builtin_handle(trap_id),
|
||||
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
|
||||
RelocInfo::CODE_TARGET);
|
||||
ReferenceMap* reference_map =
|
||||
new (gen_->zone()) ReferenceMap(gen_->zone());
|
||||
@ -2344,7 +2344,7 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
|
||||
|
||||
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
|
||||
Arm64OperandConverter i(this, instr);
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register input = i.InputRegister32(0);
|
||||
Register temp = scope.AcquireX();
|
||||
size_t const case_count = instr->InputCount() - 2;
|
||||
@ -2371,7 +2371,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
|
||||
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
|
||||
: Deoptimizer::EAGER;
|
||||
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
|
||||
isolate(), deoptimization_id, bailout_type);
|
||||
__ isolate(), deoptimization_id, bailout_type);
|
||||
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
|
||||
if (info()->is_source_positions_enabled()) {
|
||||
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
|
||||
@ -2425,7 +2425,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
||||
__ Prologue(this->info()->GeneratePreagedPrologue());
|
||||
} else {
|
||||
__ Push(lr, fp);
|
||||
__ Mov(fp, masm_.StackPointer());
|
||||
__ Mov(fp, __ StackPointer());
|
||||
}
|
||||
if (!info()->GeneratePreagedPrologue()) {
|
||||
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
|
||||
@ -2458,11 +2458,10 @@ void CodeGenerator::AssembleConstructFrame() {
|
||||
// exception unconditionally. Thereby we can avoid the integer overflow
|
||||
// check in the condition code.
|
||||
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register scratch = scope.AcquireX();
|
||||
__ Mov(
|
||||
scratch,
|
||||
Operand(ExternalReference::address_of_real_stack_limit(isolate())));
|
||||
__ Mov(scratch, Operand(ExternalReference::address_of_real_stack_limit(
|
||||
__ isolate())));
|
||||
__ Ldr(scratch, MemOperand(scratch));
|
||||
__ Add(scratch, scratch, Operand(shrink_slots * kPointerSize));
|
||||
__ Cmp(__ StackPointer(), scratch);
|
||||
@ -2506,7 +2505,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
||||
bool is_stub_frame =
|
||||
!descriptor->IsJSFunctionCall() && !descriptor->IsCFunctionCall();
|
||||
if (is_stub_frame) {
|
||||
UseScratchRegisterScope temps(masm());
|
||||
UseScratchRegisterScope temps(tasm());
|
||||
Register temp = temps.AcquireX();
|
||||
__ Mov(temp, StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
||||
__ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
|
||||
@ -2596,7 +2595,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}

void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }

void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@ -2609,23 +2608,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
if (destination->IsRegister()) {
|
||||
__ Mov(g.ToRegister(destination), src);
|
||||
} else {
|
||||
__ Str(src, g.ToMemOperand(destination, masm()));
|
||||
__ Str(src, g.ToMemOperand(destination, tasm()));
|
||||
}
|
||||
} else if (source->IsStackSlot()) {
|
||||
MemOperand src = g.ToMemOperand(source, masm());
|
||||
MemOperand src = g.ToMemOperand(source, tasm());
|
||||
DCHECK(destination->IsRegister() || destination->IsStackSlot());
|
||||
if (destination->IsRegister()) {
|
||||
__ Ldr(g.ToRegister(destination), src);
|
||||
} else {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireX();
|
||||
__ Ldr(temp, src);
|
||||
__ Str(temp, g.ToMemOperand(destination, masm()));
|
||||
__ Str(temp, g.ToMemOperand(destination, tasm()));
|
||||
}
|
||||
} else if (source->IsConstant()) {
|
||||
Constant src = g.ToConstant(ConstantOperand::cast(source));
|
||||
if (destination->IsRegister() || destination->IsStackSlot()) {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register dst = destination->IsRegister() ? g.ToRegister(destination)
|
||||
: scope.AcquireX();
|
||||
if (src.type() == Constant::kHeapObject) {
|
||||
@ -2634,13 +2633,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
if (IsMaterializableFromRoot(src_object, &index)) {
|
||||
__ LoadRoot(dst, index);
|
||||
} else {
|
||||
__ LoadObject(dst, src_object);
|
||||
__ Mov(dst, src_object);
|
||||
}
|
||||
} else {
|
||||
__ Mov(dst, g.ToImmediate(source));
|
||||
}
|
||||
if (destination->IsStackSlot()) {
|
||||
__ Str(dst, g.ToMemOperand(destination, masm()));
|
||||
__ Str(dst, g.ToMemOperand(destination, tasm()));
|
||||
}
|
||||
} else if (src.type() == Constant::kFloat32) {
|
||||
if (destination->IsFPRegister()) {
|
||||
@ -2649,12 +2648,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
} else {
|
||||
DCHECK(destination->IsFPStackSlot());
|
||||
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
|
||||
__ Str(wzr, g.ToMemOperand(destination, masm()));
|
||||
__ Str(wzr, g.ToMemOperand(destination, tasm()));
|
||||
} else {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireS();
|
||||
__ Fmov(temp, src.ToFloat32());
|
||||
__ Str(temp, g.ToMemOperand(destination, masm()));
|
||||
__ Str(temp, g.ToMemOperand(destination, tasm()));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -2665,12 +2664,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
} else {
|
||||
DCHECK(destination->IsFPStackSlot());
|
||||
if (bit_cast<int64_t>(src.ToFloat64()) == 0) {
|
||||
__ Str(xzr, g.ToMemOperand(destination, masm()));
|
||||
__ Str(xzr, g.ToMemOperand(destination, tasm()));
|
||||
} else {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireD();
|
||||
__ Fmov(temp, src.ToFloat64());
|
||||
__ Str(temp, g.ToMemOperand(destination, masm()));
|
||||
__ Str(temp, g.ToMemOperand(destination, tasm()));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2681,7 +2680,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
__ Fmov(dst, src);
|
||||
} else {
|
||||
DCHECK(destination->IsFPStackSlot());
|
||||
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
||||
if (destination->IsSimd128StackSlot()) {
|
||||
__ Str(src.Q(), dst);
|
||||
} else {
|
||||
@ -2690,7 +2689,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
}
|
||||
} else if (source->IsFPStackSlot()) {
|
||||
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
|
||||
MemOperand src = g.ToMemOperand(source, masm());
|
||||
MemOperand src = g.ToMemOperand(source, tasm());
|
||||
if (destination->IsFPRegister()) {
|
||||
VRegister dst = g.ToDoubleRegister(destination);
|
||||
if (destination->IsSimd128Register()) {
|
||||
@ -2699,9 +2698,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
__ Ldr(dst, src);
|
||||
}
|
||||
} else {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireD();
|
||||
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
||||
if (destination->IsSimd128StackSlot()) {
|
||||
__ Ldr(temp.Q(), src);
|
||||
__ Str(temp.Q(), dst);
|
||||
@ -2723,7 +2722,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
||||
// combinations are possible.
|
||||
if (source->IsRegister()) {
|
||||
// Register-register.
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
Register temp = scope.AcquireX();
|
||||
Register src = g.ToRegister(source);
|
||||
if (destination->IsRegister()) {
|
||||
@ -2733,17 +2732,17 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
||||
__ Mov(dst, temp);
|
||||
} else {
|
||||
DCHECK(destination->IsStackSlot());
|
||||
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
||||
__ Mov(temp, src);
|
||||
__ Ldr(src, dst);
|
||||
__ Str(temp, dst);
|
||||
}
|
||||
} else if (source->IsStackSlot() || source->IsFPStackSlot()) {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp_0 = scope.AcquireD();
|
||||
VRegister temp_1 = scope.AcquireD();
|
||||
MemOperand src = g.ToMemOperand(source, masm());
|
||||
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||
MemOperand src = g.ToMemOperand(source, tasm());
|
||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
||||
if (source->IsSimd128StackSlot()) {
|
||||
__ Ldr(temp_0.Q(), src);
|
||||
__ Ldr(temp_1.Q(), dst);
|
||||
@ -2756,7 +2755,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
||||
__ Str(temp_1, src);
|
||||
}
|
||||
} else if (source->IsFPRegister()) {
|
||||
UseScratchRegisterScope scope(masm());
|
||||
UseScratchRegisterScope scope(tasm());
|
||||
VRegister temp = scope.AcquireD();
|
||||
VRegister src = g.ToDoubleRegister(source);
|
||||
if (destination->IsFPRegister()) {
|
||||
@ -2766,7 +2765,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
||||
__ Fmov(dst, temp);
|
||||
} else {
|
||||
DCHECK(destination->IsFPStackSlot());
|
||||
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
||||
if (source->IsSimd128Register()) {
|
||||
__ Fmov(temp.Q(), src.Q());
|
||||
__ Ldr(src.Q(), dst);
|
||||
@ -2798,13 +2797,13 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
|
||||
int space_needed = Deoptimizer::patch_size();
|
||||
// Ensure that we have enough space after the previous lazy-bailout
|
||||
// instruction for patching the code here.
|
||||
intptr_t current_pc = masm()->pc_offset();
|
||||
intptr_t current_pc = tasm()->pc_offset();
|
||||
|
||||
if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
|
||||
intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
|
||||
DCHECK((padding_size % kInstructionSize) == 0);
|
||||
InstructionAccurateScope instruction_accurate(
|
||||
masm(), padding_size / kInstructionSize);
|
||||
tasm(), padding_size / kInstructionSize);
|
||||
|
||||
while (padding_size > 0) {
|
||||
__ nop();
|
||||
|
@ -202,15 +202,14 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
const Frame* frame() const { return frame_; }
Isolate* isolate() const { return masm()->isolate(); }
MacroAssembler* masm() const { return masm_; }
TurboAssembler* tasm() { return tasm_; }
OutOfLineCode* next() const { return next_; }

private:
Label entry_;
Label exit_;
const Frame* const frame_;
MacroAssembler* const masm_;
TurboAssembler* const tasm_;
OutOfLineCode* const next_;
};

@ -48,7 +48,7 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
tasm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
resolver_(this),
safepoints_(code->zone()),
handlers_(code->zone()),
@ -86,7 +86,7 @@ void CodeGenerator::AssembleCode() {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in AssemblePrologue).
FrameScope frame_scope(masm(), StackFrame::MANUAL);
FrameScope frame_scope(tasm(), StackFrame::MANUAL);

if (info->is_source_positions_enabled()) {
AssembleSourcePosition(start_source_position());
@ -94,10 +94,10 @@ void CodeGenerator::AssembleCode() {

// Place function entry hook if requested to do so.
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
ProfileEntryHookStub::MaybeCallEntryHookDelayed(masm(), zone());
ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
}
// Architecture-specific, linkage-specific prologue.
info->set_prologue_offset(masm()->pc_offset());
info->set_prologue_offset(tasm()->pc_offset());

// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
@ -121,12 +121,12 @@ void CodeGenerator::AssembleCode() {
|
||||
continue;
|
||||
}
|
||||
// Align loop headers on 16-byte boundaries.
|
||||
if (block->IsLoopHeader()) masm()->Align(16);
|
||||
if (block->IsLoopHeader()) tasm()->Align(16);
|
||||
// Ensure lazy deopt doesn't patch handler entry points.
|
||||
if (block->IsHandler()) EnsureSpaceForLazyDeopt();
|
||||
// Bind a label for a block.
|
||||
current_block_ = block->rpo_number();
|
||||
unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
|
||||
unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
|
||||
if (FLAG_code_comments) {
|
||||
// TODO(titzer): these code comments are a giant memory leak.
|
||||
Vector<char> buffer = Vector<char>::New(200);
|
||||
@ -152,12 +152,12 @@ void CodeGenerator::AssembleCode() {
|
||||
buffer = buffer.SubVector(next, buffer.length());
|
||||
}
|
||||
SNPrintF(buffer, " --");
|
||||
masm()->RecordComment(buffer_start);
|
||||
tasm()->RecordComment(buffer_start);
|
||||
}
|
||||
|
||||
frame_access_state()->MarkHasFrame(block->needs_frame());
|
||||
|
||||
masm()->bind(GetLabel(current_block_));
|
||||
tasm()->bind(GetLabel(current_block_));
|
||||
if (block->must_construct_frame()) {
|
||||
AssembleConstructFrame();
|
||||
// We need to setup the root register after we assemble the prologue, to
|
||||
@ -165,12 +165,12 @@ void CodeGenerator::AssembleCode() {
|
||||
// using the roots.
|
||||
// TODO(mtrofin): investigate how we can avoid doing this repeatedly.
|
||||
if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
|
||||
masm()->InitializeRootRegister();
|
||||
tasm()->InitializeRootRegister();
|
||||
}
|
||||
}
|
||||
|
||||
if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
|
||||
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
|
||||
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
|
||||
result_ = AssembleBlock(block);
|
||||
} else {
|
||||
result_ = AssembleBlock(block);
|
||||
@ -182,25 +182,25 @@ void CodeGenerator::AssembleCode() {
|
||||
|
||||
// Assemble all out-of-line code.
|
||||
if (ools_) {
|
||||
masm()->RecordComment("-- Out of line code --");
|
||||
tasm()->RecordComment("-- Out of line code --");
|
||||
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
|
||||
masm()->bind(ool->entry());
|
||||
tasm()->bind(ool->entry());
|
||||
ool->Generate();
|
||||
if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
|
||||
if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
|
||||
}
|
||||
}
|
||||
|
||||
// Assemble all eager deoptimization exits.
|
||||
for (DeoptimizationExit* exit : deoptimization_exits_) {
|
||||
masm()->bind(exit->label());
|
||||
tasm()->bind(exit->label());
|
||||
AssembleDeoptimizerCall(exit->deoptimization_id(), exit->pos());
|
||||
}
|
||||
|
||||
// Ensure there is space for lazy deoptimization in the code.
|
||||
if (info->ShouldEnsureSpaceForLazyDeopt()) {
|
||||
int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
|
||||
while (masm()->pc_offset() < target_offset) {
|
||||
masm()->nop();
|
||||
int target_offset = tasm()->pc_offset() + Deoptimizer::patch_size();
|
||||
while (tasm()->pc_offset() < target_offset) {
|
||||
tasm()->nop();
|
||||
}
|
||||
}
|
||||
|
||||
@ -208,9 +208,9 @@ void CodeGenerator::AssembleCode() {
|
||||
|
||||
// Emit the jump tables.
|
||||
if (jump_tables_) {
|
||||
masm()->Align(kPointerSize);
|
||||
tasm()->Align(kPointerSize);
|
||||
for (JumpTable* table = jump_tables_; table; table = table->next()) {
|
||||
masm()->bind(table->label());
|
||||
tasm()->bind(table->label());
|
||||
AssembleJumpTable(table->targets(), table->target_count());
|
||||
}
|
||||
}
|
||||
@ -218,9 +218,9 @@ void CodeGenerator::AssembleCode() {
|
||||
// The PerfJitLogger logs code up until here, excluding the safepoint
|
||||
// table. Resolve the unwinding info now so it is aware of the same code size
|
||||
// as reported by perf.
|
||||
unwinding_info_writer_.Finish(masm()->pc_offset());
|
||||
unwinding_info_writer_.Finish(tasm()->pc_offset());
|
||||
|
||||
safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
|
||||
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
|
||||
result_ = kSuccess;
|
||||
}
|
||||
|
||||
@ -228,7 +228,7 @@ Handle<Code> CodeGenerator::FinalizeCode() {
|
||||
if (result_ != kSuccess) return Handle<Code>();
|
||||
|
||||
Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
|
||||
masm(), unwinding_info_writer_.eh_frame_writer(), info(),
|
||||
tasm(), unwinding_info_writer_.eh_frame_writer(), info(),
|
||||
Handle<Object>());
|
||||
result->set_is_turbofanned(true);
|
||||
result->set_stack_slots(frame()->GetTotalFrameSlotCount());
|
||||
@ -274,7 +274,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
|
||||
Safepoint::Kind kind, int arguments,
|
||||
Safepoint::DeoptMode deopt_mode) {
|
||||
Safepoint safepoint =
|
||||
safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
|
||||
safepoints()->DefineSafepoint(tasm(), kind, arguments, deopt_mode);
|
||||
int stackSlotToSpillSlotDelta =
|
||||
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
|
||||
for (const InstructionOperand& operand : references->reference_operands()) {
|
||||
@ -464,7 +464,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
|
||||
branch.fallthru = true;
|
||||
// Assemble architecture-specific branch.
|
||||
AssembleArchBranch(instr, &branch);
|
||||
masm()->bind(&continue_label);
|
||||
tasm()->bind(&continue_label);
|
||||
break;
|
||||
}
|
||||
case kFlags_set: {
|
||||
@ -494,7 +494,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
|
||||
if (source_position == current_source_position_) return;
|
||||
current_source_position_ = source_position;
|
||||
if (!source_position.IsKnown()) return;
|
||||
source_position_table_builder_.AddPosition(masm()->pc_offset(),
|
||||
source_position_table_builder_.AddPosition(tasm()->pc_offset(),
|
||||
source_position, false);
|
||||
if (FLAG_code_comments) {
|
||||
CompilationInfo* info = this->info();
|
||||
@ -507,7 +507,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
|
||||
buffer << source_position.InliningStack(info);
|
||||
}
|
||||
buffer << " --";
|
||||
masm()->RecordComment(StrDup(buffer.str().c_str()));
|
||||
tasm()->RecordComment(StrDup(buffer.str().c_str()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -628,7 +628,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
|
||||
if (flags & CallDescriptor::kHasExceptionHandler) {
|
||||
InstructionOperandConverter i(this, instr);
|
||||
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
|
||||
handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
|
||||
handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
|
||||
}
|
||||
|
||||
if (needs_frame_state) {
|
||||
@ -638,7 +638,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
|
||||
size_t frame_state_offset = 1;
|
||||
FrameStateDescriptor* descriptor =
|
||||
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
|
||||
int pc_offset = masm()->pc_offset();
|
||||
int pc_offset = tasm()->pc_offset();
|
||||
int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
|
||||
descriptor->state_combine());
|
||||
// If the pre-call frame state differs from the post-call one, produce the
|
||||
@ -853,7 +853,6 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
|
||||
return deoptimization_id;
|
||||
}
|
||||
|
||||
|
||||
void CodeGenerator::AddTranslationForOperand(Translation* translation,
|
||||
Instruction* instr,
|
||||
InstructionOperand* op,
|
||||
@ -981,7 +980,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
|
||||
|
||||
|
||||
void CodeGenerator::MarkLazyDeoptSite() {
|
||||
last_lazy_deopt_pc_ = masm()->pc_offset();
|
||||
last_lazy_deopt_pc_ = tasm()->pc_offset();
|
||||
}
|
||||
|
||||
DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
|
||||
@ -995,7 +994,7 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
|
||||
}
|
||||
|
||||
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
|
||||
: frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
|
||||
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
|
||||
gen->ools_ = this;
|
||||
}
|
||||
|
||||
|
@ -87,7 +87,7 @@ class CodeGenerator final : public GapResolver::Assembler {
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
// FinalizeCode returns a null handle.
void AssembleCode();
void AssembleCode(); // Does not need to run on main thread.
Handle<Code> FinalizeCode();

InstructionSequence* code() const { return code_; }
@ -112,7 +112,7 @@ class CodeGenerator final : public GapResolver::Assembler {
Zone* zone() const { return code()->zone(); }

private:
MacroAssembler* masm() { return &masm_; }
TurboAssembler* tasm() { return &tasm_; }
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
CompilationInfo* info() const { return info_; }
@ -315,7 +315,7 @@ class CodeGenerator final : public GapResolver::Assembler {
RpoNumber current_block_;
SourcePosition start_source_position_;
SourcePosition current_source_position_;
MacroAssembler masm_;
TurboAssembler tasm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
ZoneVector<HandlerInfo> handlers_;

@ -18,8 +18,7 @@ namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

#define __ tasm()->

#define kScratchDoubleReg xmm0

@ -221,18 +220,22 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
XMMRegister input)
: OutOfLineCode(gen), result_(result), input_(input) {}
: OutOfLineCode(gen),
result_(result),
input_(input),
zone_(gen->zone()) {}

void Generate() final {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(MemOperand(esp, 0), input_);
__ SlowTruncateToI(result_, esp, 0);
__ SlowTruncateToIDelayed(zone_, result_, esp, 0);
__ add(esp, Immediate(kDoubleSize));
}

private:
Register const result_;
XMMRegister const input_;
Zone* zone_;
};

@ -726,35 +729,35 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define ASSEMBLE_IEEE754_BINOP(name) \
|
||||
do { \
|
||||
/* Pass two doubles as arguments on the stack. */ \
|
||||
__ PrepareCallCFunction(4, eax); \
|
||||
__ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
|
||||
__ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
4); \
|
||||
/* Return value is in st(0) on ia32. */ \
|
||||
/* Store it into the result register. */ \
|
||||
__ sub(esp, Immediate(kDoubleSize)); \
|
||||
__ fstp_d(Operand(esp, 0)); \
|
||||
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
|
||||
__ add(esp, Immediate(kDoubleSize)); \
|
||||
#define ASSEMBLE_IEEE754_BINOP(name) \
|
||||
do { \
|
||||
/* Pass two doubles as arguments on the stack. */ \
|
||||
__ PrepareCallCFunction(4, eax); \
|
||||
__ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
|
||||
__ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 4); \
|
||||
/* Return value is in st(0) on ia32. */ \
|
||||
/* Store it into the result register. */ \
|
||||
__ sub(esp, Immediate(kDoubleSize)); \
|
||||
__ fstp_d(Operand(esp, 0)); \
|
||||
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
|
||||
__ add(esp, Immediate(kDoubleSize)); \
|
||||
} while (false)
|
||||
|
||||
#define ASSEMBLE_IEEE754_UNOP(name) \
|
||||
do { \
|
||||
/* Pass one double as argument on the stack. */ \
|
||||
__ PrepareCallCFunction(2, eax); \
|
||||
__ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
2); \
|
||||
/* Return value is in st(0) on ia32. */ \
|
||||
/* Store it into the result register. */ \
|
||||
__ sub(esp, Immediate(kDoubleSize)); \
|
||||
__ fstp_d(Operand(esp, 0)); \
|
||||
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
|
||||
__ add(esp, Immediate(kDoubleSize)); \
|
||||
#define ASSEMBLE_IEEE754_UNOP(name) \
|
||||
do { \
|
||||
/* Pass one double as argument on the stack. */ \
|
||||
__ PrepareCallCFunction(2, eax); \
|
||||
__ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 2); \
|
||||
/* Return value is in st(0) on ia32. */ \
|
||||
/* Store it into the result register. */ \
|
||||
__ sub(esp, Immediate(kDoubleSize)); \
|
||||
__ fstp_d(Operand(esp, 0)); \
|
||||
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
|
||||
__ add(esp, Immediate(kDoubleSize)); \
|
||||
} while (false)
|
||||
|
||||
#define ASSEMBLE_BINOP(asm_instr) \
|
||||
@ -836,7 +839,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,

namespace {

void AdjustStackPointerForTailCall(MacroAssembler* masm,
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@ -844,10 +847,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
tasm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
tasm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@ -868,7 +871,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||
LocationOperand destination_location(
|
||||
LocationOperand::cast(move->destination()));
|
||||
InstructionOperand source(move->source());
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
destination_location.index());
|
||||
if (source.IsStackSlot()) {
|
||||
LocationOperand source_location(LocationOperand::cast(source));
|
||||
@ -886,13 +889,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||
move->Eliminate();
|
||||
}
|
||||
}
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot, false);
|
||||
}
|
||||
|
||||
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
|
||||
int first_unused_stack_slot) {
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot);
|
||||
}
|
||||
|
||||
@ -1372,7 +1375,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kSSEFloat32Round: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
RoundingMode const mode =
|
||||
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
|
||||
__ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
|
||||
@ -1550,7 +1553,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
|
||||
break;
|
||||
case kSSEFloat64Round: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
RoundingMode const mode =
|
||||
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
|
||||
__ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
|
||||
@ -1627,25 +1630,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
|
||||
break;
|
||||
case kAVXFloat32Add: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
}
|
||||
case kAVXFloat32Sub: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
}
|
||||
case kAVXFloat32Mul: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
}
|
||||
case kAVXFloat32Div: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
// Don't delete this mov. It may improve performance on some CPUs,
|
||||
@ -1654,25 +1657,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kAVXFloat64Add: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
}
|
||||
case kAVXFloat64Sub: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
}
|
||||
case kAVXFloat64Mul: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
}
|
||||
case kAVXFloat64Div: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
|
||||
i.InputOperand(1));
|
||||
// Don't delete this mov. It may improve performance on some CPUs,
|
||||
@ -1684,7 +1687,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ psrlq(kScratchDoubleReg, 33);
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
|
||||
break;
|
||||
}
|
||||
@ -1692,7 +1695,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ psllq(kScratchDoubleReg, 31);
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
|
||||
break;
|
||||
}
|
||||
@ -1700,7 +1703,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ psrlq(kScratchDoubleReg, 1);
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
|
||||
break;
|
||||
}
|
||||
@ -1708,7 +1711,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ psllq(kScratchDoubleReg, 63);
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
|
||||
break;
|
||||
}
|
||||
@ -1903,12 +1906,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kSSEI32x4ReplaceLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
|
||||
break;
|
||||
}
|
||||
case kAVXI32x4ReplaceLane: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpinsrd(i.OutputSimd128Register(), i.InputSimd128Register(0),
|
||||
i.InputOperand(2), i.InputInt8(1));
|
||||
break;
|
||||
@ -1918,7 +1921,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kAVXI32x4Add: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
@ -1928,7 +1931,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kAVXI32x4Sub: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
|
||||
i.InputOperand(1));
|
||||
break;
|
||||
@ -1951,7 +1954,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kAVXI16x8ReplaceLane: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpinsrw(i.OutputSimd128Register(), i.InputSimd128Register(0),
|
||||
i.InputOperand(2), i.InputInt8(1));
|
||||
break;
|
||||
@ -1970,12 +1973,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kSSEI8x16ReplaceLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
|
||||
break;
|
||||
}
|
||||
case kAVXI8x16ReplaceLane: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpinsrb(i.OutputSimd128Register(), i.InputSimd128Register(0),
|
||||
i.InputOperand(2), i.InputInt8(1));
|
||||
break;
|
||||
@ -2018,7 +2021,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
case kIA32StackCheck: {
|
||||
ExternalReference const stack_limit =
|
||||
ExternalReference::address_of_stack_limit(isolate());
|
||||
ExternalReference::address_of_stack_limit(__ isolate());
|
||||
__ cmp(esp, Operand::StaticVariable(stack_limit));
|
||||
break;
|
||||
}
|
||||
@ -2224,14 +2227,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
|
||||
// We cannot test calls to the runtime in cctest/test-run-wasm.
|
||||
// Therefore we emit a call to C here instead of a call to the runtime.
|
||||
__ PrepareCallCFunction(0, esi);
|
||||
__ CallCFunction(
|
||||
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
|
||||
0);
|
||||
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
|
||||
__ isolate()),
|
||||
0);
|
||||
__ LeaveFrame(StackFrame::WASM_COMPILED);
|
||||
__ Ret();
|
||||
} else {
|
||||
gen_->AssembleSourcePosition(instr_);
|
||||
__ Call(isolate()->builtins()->builtin_handle(trap_id),
|
||||
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
|
||||
RelocInfo::CODE_TARGET);
|
||||
ReferenceMap* reference_map =
|
||||
new (gen_->zone()) ReferenceMap(gen_->zone());
|
||||
@ -2334,7 +2337,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
|
||||
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
|
||||
: Deoptimizer::EAGER;
|
||||
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
|
||||
isolate(), deoptimization_id, bailout_type);
|
||||
__ isolate(), deoptimization_id, bailout_type);
|
||||
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
|
||||
if (info()->is_source_positions_enabled()) {
|
||||
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
|
||||
@ -2815,7 +2818,7 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
|
||||
int space_needed = Deoptimizer::patch_size();
|
||||
// Ensure that we have enough space after the previous lazy-bailout
|
||||
// instruction for patching the code here.
|
||||
int current_pc = masm()->pc_offset();
|
||||
int current_pc = tasm()->pc_offset();
|
||||
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
|
||||
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
|
||||
__ Nop(padding_size);
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -20,7 +20,7 @@ namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->
#define __ tasm()->

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
@ -205,14 +205,15 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
: OutOfLineCode(gen),
result_(result),
input_(input),
unwinding_info_writer_(unwinding_info_writer) {}
unwinding_info_writer_(unwinding_info_writer),
zone_(gen->zone()) {}

void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
__ SlowTruncateToI(result_, rsp, 0);
__ SlowTruncateToIDelayed(zone_, result_, rsp, 0);
__ addp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize);
@ -222,6 +223,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
Register const result_;
XMMRegister const input_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
};

@ -451,7 +453,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
|
||||
|
||||
#define ASSEMBLE_AVX_BINOP(asm_instr) \
|
||||
do { \
|
||||
CpuFeatureScope avx_scope(masm(), AVX); \
|
||||
CpuFeatureScope avx_scope(tasm(), AVX); \
|
||||
if (instr->InputAt(1)->IsFPRegister()) { \
|
||||
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
|
||||
i.InputDoubleRegister(1)); \
|
||||
@ -696,18 +698,18 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
#define ASSEMBLE_IEEE754_BINOP(name) \
|
||||
do { \
|
||||
__ PrepareCallCFunction(2); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
2); \
|
||||
#define ASSEMBLE_IEEE754_BINOP(name) \
|
||||
do { \
|
||||
__ PrepareCallCFunction(2); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 2); \
|
||||
} while (false)
|
||||
|
||||
#define ASSEMBLE_IEEE754_UNOP(name) \
|
||||
do { \
|
||||
__ PrepareCallCFunction(1); \
|
||||
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
|
||||
1); \
|
||||
#define ASSEMBLE_IEEE754_UNOP(name) \
|
||||
do { \
|
||||
__ PrepareCallCFunction(1); \
|
||||
__ CallCFunction( \
|
||||
ExternalReference::ieee754_##name##_function(__ isolate()), 1); \
|
||||
} while (false)
|
||||
|
||||
#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
|
||||
@ -794,7 +796,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||
LocationOperand destination_location(
|
||||
LocationOperand::cast(move->destination()));
|
||||
InstructionOperand source(move->source());
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
destination_location.index());
|
||||
if (source.IsStackSlot()) {
|
||||
LocationOperand source_location(LocationOperand::cast(source));
|
||||
@ -812,13 +814,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||
move->Eliminate();
|
||||
}
|
||||
}
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot, false);
|
||||
}
|
||||
|
||||
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
|
||||
int first_unused_stack_slot) {
|
||||
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
||||
first_unused_stack_slot);
|
||||
}
|
||||
|
||||
@ -1287,7 +1289,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
ASSEMBLE_SSE_UNOP(Cvtss2sd);
|
||||
break;
|
||||
case kSSEFloat32Round: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
RoundingMode const mode =
|
||||
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
|
||||
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
|
||||
@ -1344,7 +1346,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
// The following 2 instruction implicitly use rax.
|
||||
__ fnstsw_ax();
|
||||
if (CpuFeatures::IsSupported(SAHF)) {
|
||||
CpuFeatureScope sahf_scope(masm(), SAHF);
|
||||
CpuFeatureScope sahf_scope(tasm(), SAHF);
|
||||
__ sahf();
|
||||
} else {
|
||||
__ shrl(rax, Immediate(8));
|
||||
@ -1494,7 +1496,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
ASSEMBLE_SSE_UNOP(Sqrtsd);
|
||||
break;
|
||||
case kSSEFloat64Round: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
RoundingMode const mode =
|
||||
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
|
||||
__ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
|
||||
@ -1763,7 +1765,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
break;
|
||||
case kAVXFloat32Cmp: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
if (instr->InputAt(1)->IsFPRegister()) {
|
||||
__ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
|
||||
} else {
|
||||
@ -1787,7 +1789,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
__ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
|
||||
break;
|
||||
case kAVXFloat64Cmp: {
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
if (instr->InputAt(1)->IsFPRegister()) {
|
||||
__ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
|
||||
} else {
|
||||
@ -1812,7 +1814,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
case kAVXFloat32Abs: {
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
|
||||
if (instr->InputAt(0)->IsFPRegister()) {
|
||||
@ -1826,7 +1828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
case kAVXFloat32Neg: {
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
|
||||
if (instr->InputAt(0)->IsFPRegister()) {
|
||||
@ -1840,7 +1842,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
case kAVXFloat64Abs: {
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
|
||||
if (instr->InputAt(0)->IsFPRegister()) {
|
||||
@ -1854,7 +1856,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
case kAVXFloat64Neg: {
|
||||
// TODO(bmeurer): Use RIP relative 128-bit constants.
|
||||
CpuFeatureScope avx_scope(masm(), AVX);
|
||||
CpuFeatureScope avx_scope(tasm(), AVX);
|
||||
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
|
||||
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
|
||||
if (instr->InputAt(0)->IsFPRegister()) {
|
||||
@ -2007,7 +2009,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
}
|
||||
break;
|
||||
case kX64Movdqu: {
|
||||
CpuFeatureScope sse_scope(masm(), SSSE3);
|
||||
CpuFeatureScope sse_scope(tasm(), SSSE3);
|
||||
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
|
||||
__ pc_offset());
|
||||
if (instr->HasOutput()) {
|
||||
@ -2174,12 +2176,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I32x4ExtractLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
|
||||
break;
|
||||
}
|
||||
case kX64I32x4ReplaceLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
if (instr->InputAt(2)->IsRegister()) {
|
||||
__ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
|
||||
i.InputInt8(1));
|
||||
@ -2189,7 +2191,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I32x4Neg: {
|
||||
CpuFeatureScope sse_scope(masm(), SSSE3);
|
||||
CpuFeatureScope sse_scope(tasm(), SSSE3);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(0);
|
||||
if (dst.is(src)) {
|
||||
@ -2214,7 +2216,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I32x4AddHoriz: {
|
||||
CpuFeatureScope sse_scope(masm(), SSSE3);
|
||||
CpuFeatureScope sse_scope(tasm(), SSSE3);
|
||||
__ phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
@ -2223,17 +2225,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I32x4Mul: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I32x4MinS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I32x4MaxS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
@ -2252,7 +2254,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I32x4GeS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pminsd(dst, src);
|
||||
@ -2264,17 +2266,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I32x4MinU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I32x4MaxU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I32x4GtU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pmaxud(dst, src);
|
||||
@ -2284,7 +2286,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I32x4GeU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pminud(dst, src);
|
||||
@ -2305,14 +2307,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I16x8ExtractLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
Register dst = i.OutputRegister();
|
||||
__ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
|
||||
__ movsxwl(dst, dst);
|
||||
break;
|
||||
}
|
||||
case kX64I16x8ReplaceLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
if (instr->InputAt(2)->IsRegister()) {
|
||||
__ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
|
||||
i.InputInt8(1));
|
||||
@ -2322,7 +2324,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I16x8Neg: {
|
||||
CpuFeatureScope sse_scope(masm(), SSSE3);
|
||||
CpuFeatureScope sse_scope(tasm(), SSSE3);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(0);
|
||||
if (dst.is(src)) {
|
||||
@ -2351,7 +2353,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I16x8AddHoriz: {
|
||||
CpuFeatureScope sse_scope(masm(), SSSE3);
|
||||
CpuFeatureScope sse_scope(tasm(), SSSE3);
|
||||
__ phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
@ -2364,17 +2366,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I16x8Mul: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I16x8MinS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I16x8MaxS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
@ -2393,7 +2395,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I16x8GeS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pminsw(dst, src);
|
||||
@ -2413,17 +2415,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I16x8MinU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I16x8MaxU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I16x8GtU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pmaxuw(dst, src);
|
||||
@ -2433,7 +2435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I16x8GeU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pminuw(dst, src);
|
||||
@ -2441,7 +2443,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I8x16Splat: {
|
||||
CpuFeatureScope sse_scope(masm(), SSSE3);
|
||||
CpuFeatureScope sse_scope(tasm(), SSSE3);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
__ movd(dst, i.InputRegister(0));
|
||||
__ xorps(kScratchDoubleReg, kScratchDoubleReg);
|
||||
@ -2449,14 +2451,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I8x16ExtractLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
Register dst = i.OutputRegister();
|
||||
__ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
|
||||
__ movsxbl(dst, dst);
|
||||
break;
|
||||
}
|
||||
case kX64I8x16ReplaceLane: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
if (instr->InputAt(2)->IsRegister()) {
|
||||
__ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
|
||||
i.InputInt8(1));
|
||||
@ -2466,7 +2468,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I8x16Neg: {
|
||||
CpuFeatureScope sse_scope(masm(), SSSE3);
|
||||
CpuFeatureScope sse_scope(tasm(), SSSE3);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(0);
|
||||
if (dst.is(src)) {
|
||||
@ -2495,12 +2497,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I8x16MinS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I8x16MaxS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
@ -2519,7 +2521,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I8x16GeS: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pminsb(dst, src);
|
||||
@ -2535,17 +2537,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I8x16MinU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I8x16MaxU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
__ pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
|
||||
break;
|
||||
}
|
||||
case kX64I8x16GtU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pmaxub(dst, src);
|
||||
@ -2555,7 +2557,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
||||
break;
|
||||
}
|
||||
case kX64I8x16GeU: {
|
||||
CpuFeatureScope sse_scope(masm(), SSE4_1);
|
||||
CpuFeatureScope sse_scope(tasm(), SSE4_1);
|
||||
XMMRegister dst = i.OutputSimd128Register();
|
||||
XMMRegister src = i.InputSimd128Register(1);
|
||||
__ pminub(dst, src);
|
||||
@ -2826,14 +2828,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
0);
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
__ Call(isolate()->builtins()->builtin_handle(trap_id),
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@ -2924,7 +2926,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
__ isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);

@ -313,6 +313,8 @@ class Immediate BASE_EMBEDDED {
return RelocInfo::IsNone(rmode_) && i::is_uint16(immediate());
}

RelocInfo::Mode rmode() const { return rmode_; }

private:
inline explicit Immediate(Label* value);

@ -429,7 +431,6 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode_;

friend class Assembler;
friend class MacroAssembler;
};

@ -2139,10 +2139,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}

void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != NULL) {
masm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
if (tasm->isolate()->function_entry_hook() != NULL) {
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
}
}

@ -23,17 +23,11 @@ namespace internal {

MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, size),
has_frame_(false),
isolate_(isolate),
: TurboAssembler(isolate, buffer, size, create_code_object),
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate_->heap()->undefined_value(), isolate_);
}
}

@ -135,7 +129,6 @@ void MacroAssembler::CompareRoot(const Operand& with,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
|
||||
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
|
||||
PushObject(isolate()->heap()->root_handle(index));
|
||||
@ -279,12 +272,17 @@ void MacroAssembler::ClampUint8(Register reg) {
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg,
|
||||
Register input_reg, int offset) {
|
||||
CallStubDelayed(
|
||||
new (zone) DoubleToIStub(nullptr, input_reg, result_reg, offset, true));
|
||||
}
|
||||
|
||||
void MacroAssembler::SlowTruncateToI(Register result_reg,
|
||||
Register input_reg,
|
||||
int offset) {
|
||||
DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
|
||||
call(stub.GetCode(), RelocInfo::CODE_TARGET);
|
||||
CallStub(&stub);
|
||||
}
|
||||
|
||||
|
||||
@ -397,8 +395,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
|
||||
void TurboAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
|
||||
Label done;
|
||||
cmp(src, Immediate(0));
|
||||
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
|
||||
@ -664,13 +661,12 @@ void MacroAssembler::MaybeDropFrames() {
|
||||
RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
|
||||
void TurboAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
|
||||
xorps(dst, dst);
|
||||
cvtsi2sd(dst, src);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
|
||||
void TurboAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
|
||||
Label msb_set_src;
|
||||
Label jmp_return;
|
||||
test(src, src);
|
||||
@ -688,7 +684,7 @@ void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
|
||||
bind(&jmp_return);
|
||||
}
|
||||
|
||||
void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
|
||||
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
|
||||
if (shift >= 32) {
|
||||
mov(high, low);
|
||||
shl(high, shift - 32);
|
||||
@ -699,7 +695,7 @@ void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::ShlPair_cl(Register high, Register low) {
|
||||
void TurboAssembler::ShlPair_cl(Register high, Register low) {
|
||||
shld_cl(high, low);
|
||||
shl_cl(low);
|
||||
Label done;
|
||||
@ -710,7 +706,7 @@ void MacroAssembler::ShlPair_cl(Register high, Register low) {
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
|
||||
void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
|
||||
if (shift >= 32) {
|
||||
mov(low, high);
|
||||
shr(low, shift - 32);
|
||||
@ -721,7 +717,7 @@ void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::ShrPair_cl(Register high, Register low) {
|
||||
void TurboAssembler::ShrPair_cl(Register high, Register low) {
|
||||
shrd_cl(low, high);
|
||||
shr_cl(high);
|
||||
Label done;
|
||||
@ -732,7 +728,7 @@ void MacroAssembler::ShrPair_cl(Register high, Register low) {
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
|
||||
void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
|
||||
if (shift >= 32) {
|
||||
mov(low, high);
|
||||
sar(low, shift - 32);
|
||||
@ -743,7 +739,7 @@ void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::SarPair_cl(Register high, Register low) {
|
||||
void TurboAssembler::SarPair_cl(Register high, Register low) {
|
||||
shrd_cl(low, high);
|
||||
sar_cl(high);
|
||||
Label done;
|
||||
@ -924,19 +920,19 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}

void MacroAssembler::StubPrologue(StackFrame::Type type) {
void TurboAssembler::StubPrologue(StackFrame::Type type) {
push(ebp);  // Caller's frame pointer.
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
}

void MacroAssembler::Prologue(bool code_pre_aging) {
void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(this,
kNoCodeAgeSequenceLength);
if (code_pre_aging) {
// Pre-age the code.
// Pre-age the code.
call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
RelocInfo::CODE_AGE_SEQUENCE);
RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
} else {
push(ebp);  // Caller's frame pointer.
@ -952,15 +948,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
|
||||
mov(vector, FieldOperand(vector, Cell::kValueOffset));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EnterFrame(StackFrame::Type type,
|
||||
bool load_constant_pool_pointer_reg) {
|
||||
// Out-of-line constant pool not implemented on ia32.
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::EnterFrame(StackFrame::Type type) {
|
||||
void TurboAssembler::EnterFrame(StackFrame::Type type) {
|
||||
push(ebp);
|
||||
mov(ebp, esp);
|
||||
push(Immediate(StackFrame::TypeToMarker(type)));
|
||||
@ -973,8 +961,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
|
||||
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
|
||||
if (emit_debug_code()) {
|
||||
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
|
||||
Immediate(StackFrame::TypeToMarker(type)));
|
||||
@ -1533,7 +1520,7 @@ void MacroAssembler::CallStub(CodeStub* stub) {
|
||||
call(stub->GetCode(), RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
|
||||
void TurboAssembler::CallStubDelayed(CodeStub* stub) {
|
||||
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
|
||||
call(stub);
|
||||
}
|
||||
@ -1542,10 +1529,8 @@ void MacroAssembler::TailCallStub(CodeStub* stub) {
|
||||
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
|
||||
|
||||
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
|
||||
return has_frame_ || !stub->SometimesSetsUpAFrame();
|
||||
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
|
||||
return has_frame() || !stub->SometimesSetsUpAFrame();
|
||||
}
|
||||
|
||||
void MacroAssembler::CallRuntime(const Runtime::Function* f,
|
||||
@ -1609,7 +1594,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
|
||||
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
void MacroAssembler::PrepareForTailCall(
|
||||
void TurboAssembler::PrepareForTailCall(
|
||||
const ParameterCount& callee_args_count, Register caller_args_count_reg,
|
||||
Register scratch0, Register scratch1, ReturnAddressState ra_state,
|
||||
int number_of_temp_values_after_return_address) {
|
||||
@ -1742,8 +1727,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
|
||||
}
|
||||
|
||||
if (!definitely_matches) {
|
||||
Handle<Code> adaptor =
|
||||
isolate()->builtins()->ArgumentsAdaptorTrampoline();
|
||||
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
|
||||
if (flag == CALL_FUNCTION) {
|
||||
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
|
||||
call(adaptor, RelocInfo::CODE_TARGET);
|
||||
@ -1993,13 +1977,9 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
|
||||
JumpIfSmi(value, miss);
|
||||
}
|
||||
|
||||
void TurboAssembler::Ret() { ret(0); }
|
||||
|
||||
void MacroAssembler::Ret() {
|
||||
ret(0);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
|
||||
void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
|
||||
if (is_uint16(bytes_dropped)) {
|
||||
ret(bytes_dropped);
|
||||
} else {
|
||||
@ -2017,33 +1997,30 @@ void MacroAssembler::Drop(int stack_elements) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Move(Register dst, Register src) {
|
||||
void TurboAssembler::Move(Register dst, Register src) {
|
||||
if (!dst.is(src)) {
|
||||
mov(dst, src);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Move(Register dst, const Immediate& x) {
|
||||
void TurboAssembler::Move(Register dst, const Immediate& x) {
|
||||
if (!x.is_heap_object_request() && x.is_zero() &&
|
||||
RelocInfo::IsNone(x.rmode_)) {
|
||||
RelocInfo::IsNone(x.rmode())) {
|
||||
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
|
||||
} else {
|
||||
mov(dst, x);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
|
||||
void TurboAssembler::Move(const Operand& dst, const Immediate& x) {
|
||||
mov(dst, x);
|
||||
}
|
||||
|
||||
void MacroAssembler::Move(Register dst, Handle<HeapObject> object) {
|
||||
void TurboAssembler::Move(Register dst, Handle<HeapObject> object) {
|
||||
mov(dst, object);
|
||||
}
|
||||
|
||||
void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
|
||||
void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
|
||||
if (src == 0) {
|
||||
pxor(dst, dst);
|
||||
} else {
|
||||
@ -2067,8 +2044,7 @@ void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
|
||||
void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
|
||||
if (src == 0) {
|
||||
pxor(dst, dst);
|
||||
} else {
|
||||
@ -2105,7 +2081,7 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Pxor(XMMRegister dst, const Operand& src) {
|
||||
void TurboAssembler::Pxor(XMMRegister dst, const Operand& src) {
|
||||
if (CpuFeatures::IsSupported(AVX)) {
|
||||
CpuFeatureScope scope(this, AVX);
|
||||
vpxor(dst, dst, src);
|
||||
@ -2114,7 +2090,7 @@ void MacroAssembler::Pxor(XMMRegister dst, const Operand& src) {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Pshuflw(XMMRegister dst, const Operand& src,
|
||||
void TurboAssembler::Pshuflw(XMMRegister dst, const Operand& src,
|
||||
uint8_t shuffle) {
|
||||
if (CpuFeatures::IsSupported(AVX)) {
|
||||
CpuFeatureScope scope(this, AVX);
|
||||
@ -2124,7 +2100,7 @@ void MacroAssembler::Pshuflw(XMMRegister dst, const Operand& src,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Pshufd(XMMRegister dst, const Operand& src,
|
||||
void TurboAssembler::Pshufd(XMMRegister dst, const Operand& src,
|
||||
uint8_t shuffle) {
|
||||
if (CpuFeatures::IsSupported(AVX)) {
|
||||
CpuFeatureScope scope(this, AVX);
|
||||
@ -2134,7 +2110,7 @@ void MacroAssembler::Pshufd(XMMRegister dst, const Operand& src,
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::Pshufb(XMMRegister dst, const Operand& src) {
|
||||
void TurboAssembler::Pshufb(XMMRegister dst, const Operand& src) {
|
||||
if (CpuFeatures::IsSupported(AVX)) {
|
||||
CpuFeatureScope scope(this, AVX);
|
||||
vpshufb(dst, dst, src);
|
||||
@ -2148,7 +2124,7 @@ void MacroAssembler::Pshufb(XMMRegister dst, const Operand& src) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void MacroAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
|
||||
void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
|
||||
if (CpuFeatures::IsSupported(AVX)) {
|
||||
CpuFeatureScope scope(this, AVX);
|
||||
vpextrb(dst, src, imm8);
|
||||
@ -2162,7 +2138,7 @@ void MacroAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void MacroAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
|
||||
void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
|
||||
if (CpuFeatures::IsSupported(AVX)) {
|
||||
CpuFeatureScope scope(this, AVX);
|
||||
vpextrw(dst, src, imm8);
|
||||
@ -2176,7 +2152,7 @@ void MacroAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
|
||||
void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
|
||||
if (imm8 == 0) {
|
||||
Movd(dst, src);
|
||||
return;
|
||||
@ -2196,7 +2172,7 @@ void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
|
||||
movd(dst, xmm0);
|
||||
}
|
||||
|
||||
void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
|
||||
void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
|
||||
bool is_64_bits) {
|
||||
if (CpuFeatures::IsSupported(SSE4_1)) {
|
||||
CpuFeatureScope sse_scope(this, SSE4_1);
|
||||
@ -2224,8 +2200,7 @@ void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
|
||||
void TurboAssembler::Lzcnt(Register dst, const Operand& src) {
|
||||
if (CpuFeatures::IsSupported(LZCNT)) {
|
||||
CpuFeatureScope scope(this, LZCNT);
|
||||
lzcnt(dst, src);
|
||||
@ -2239,8 +2214,7 @@ void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
|
||||
xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
|
||||
void TurboAssembler::Tzcnt(Register dst, const Operand& src) {
|
||||
if (CpuFeatures::IsSupported(BMI1)) {
|
||||
CpuFeatureScope scope(this, BMI1);
|
||||
tzcnt(dst, src);
|
||||
@ -2253,8 +2227,7 @@ void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
|
||||
bind(&not_zero_src);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Popcnt(Register dst, const Operand& src) {
|
||||
void TurboAssembler::Popcnt(Register dst, const Operand& src) {
|
||||
if (CpuFeatures::IsSupported(POPCNT)) {
|
||||
CpuFeatureScope scope(this, POPCNT);
|
||||
popcnt(dst, src);
|
||||
@ -2326,14 +2299,11 @@ void MacroAssembler::DecrementCounter(Condition cc,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
|
||||
void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
|
||||
if (emit_debug_code()) Check(cc, reason);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void MacroAssembler::Check(Condition cc, BailoutReason reason) {
|
||||
void TurboAssembler::Check(Condition cc, BailoutReason reason) {
|
||||
Label L;
|
||||
j(cc, &L);
|
||||
Abort(reason);
|
||||
@ -2341,8 +2311,7 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason) {
|
||||
bind(&L);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CheckStackAlignment() {
|
||||
void TurboAssembler::CheckStackAlignment() {
|
||||
int frame_alignment = base::OS::ActivationFrameAlignment();
|
||||
int frame_alignment_mask = frame_alignment - 1;
|
||||
if (frame_alignment > kPointerSize) {
|
||||
@ -2356,8 +2325,7 @@ void MacroAssembler::CheckStackAlignment() {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::Abort(BailoutReason reason) {
|
||||
void TurboAssembler::Abort(BailoutReason reason) {
|
||||
#ifdef DEBUG
|
||||
const char* msg = GetBailoutReason(reason);
|
||||
if (msg != NULL) {
|
||||
@ -2371,13 +2339,10 @@ void MacroAssembler::Abort(BailoutReason reason) {
|
||||
}
|
||||
#endif
|
||||
|
||||
// Check if Abort() has already been initialized.
|
||||
DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
|
||||
|
||||
Move(edx, Smi::FromInt(static_cast<int>(reason)));
|
||||
|
||||
// Disable stub call restrictions to always allow calls to abort.
|
||||
if (!has_frame_) {
|
||||
if (!has_frame()) {
|
||||
// We don't actually want to generate a pile of code for this, so just
|
||||
// claim there is a stack frame, without generating one.
|
||||
FrameScope scope(this, StackFrame::NONE);
|
||||
@ -2506,8 +2471,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
|
||||
SmiUntag(index);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
|
||||
void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
|
||||
int frame_alignment = base::OS::ActivationFrameAlignment();
|
||||
if (frame_alignment != 0) {
|
||||
// Make stack end at alignment and make room for num_arguments words
|
||||
@ -2522,17 +2486,14 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(ExternalReference function,
|
||||
void TurboAssembler::CallCFunction(ExternalReference function,
|
||||
int num_arguments) {
|
||||
// Trashing eax is ok as it will be the return value.
|
||||
mov(eax, Immediate(function));
|
||||
CallCFunction(eax, num_arguments);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CallCFunction(Register function,
|
||||
int num_arguments) {
|
||||
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
|
||||
DCHECK_LE(num_arguments, kMaxCParameters);
|
||||
DCHECK(has_frame());
|
||||
// Check stack alignment.
|
||||
@ -2598,14 +2559,9 @@ CodePatcher::~CodePatcher() {
|
||||
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CheckPageFlag(
|
||||
Register object,
|
||||
Register scratch,
|
||||
int mask,
|
||||
Condition cc,
|
||||
Label* condition_met,
|
||||
Label::Distance condition_met_distance) {
|
||||
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
|
||||
Condition cc, Label* condition_met,
|
||||
Label::Distance condition_met_distance) {
|
||||
DCHECK(cc == zero || cc == not_zero);
|
||||
if (scratch.is(object)) {
|
||||
and_(scratch, Immediate(~Page::kPageAlignmentMask));
|
||||
|
@ -51,16 +51,235 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg8 = no_reg);
#endif

class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}

void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }

Isolate* isolate() const { return isolate_; }

Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}

void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
|
||||
Label* condition_met,
|
||||
Label::Distance condition_met_distance = Label::kFar);
|
||||
|
||||
// Activation support.
|
||||
void EnterFrame(StackFrame::Type type);
|
||||
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
|
||||
// Out-of-line constant pool not implemented on ia32.
|
||||
UNREACHABLE();
|
||||
}
|
||||
void LeaveFrame(StackFrame::Type type);
|
||||
|
||||
// Print a message to stdout and abort execution.
|
||||
void Abort(BailoutReason reason);
|
||||
|
||||
// Calls Abort(msg) if the condition cc is not satisfied.
|
||||
// Use --debug_code to enable.
|
||||
void Assert(Condition cc, BailoutReason reason);
|
||||
|
||||
// Like Assert(), but always enabled.
|
||||
void Check(Condition cc, BailoutReason reason);
|
||||
|
||||
// Check that the stack is aligned.
|
||||
void CheckStackAlignment();
|
||||
|
||||
// Nop, because ia32 does not have a root register.
|
||||
void InitializeRootRegister() {}
|
||||
|
||||
// Move a constant into a destination using the most efficient encoding.
|
||||
void Move(Register dst, const Immediate& x);
|
||||
|
||||
void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
|
||||
|
||||
// Move if the registers are not identical.
|
||||
void Move(Register target, Register source);
|
||||
|
||||
void Move(const Operand& dst, const Immediate& x);
|
||||
|
||||
// Move an immediate into an XMM register.
|
||||
void Move(XMMRegister dst, uint32_t src);
|
||||
void Move(XMMRegister dst, uint64_t src);
|
||||
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
|
||||
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
|
||||
|
||||
void Move(Register dst, Handle<HeapObject> handle);
|
||||
|
||||
void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
|
||||
void Call(Label* target) { call(target); }
|
||||
|
||||
inline bool AllowThisStubCall(CodeStub* stub);
|
||||
void CallStubDelayed(CodeStub* stub);
|
||||
|
||||
// Jump if the register contains a smi.
|
||||
inline void JumpIfSmi(Register value, Label* smi_label,
|
||||
Label::Distance distance = Label::kFar) {
|
||||
test(value, Immediate(kSmiTagMask));
|
||||
j(zero, smi_label, distance);
|
||||
}
|
||||
// Jump if the operand is a smi.
|
||||
inline void JumpIfSmi(Operand value, Label* smi_label,
|
||||
Label::Distance distance = Label::kFar) {
|
||||
test(value, Immediate(kSmiTagMask));
|
||||
j(zero, smi_label, distance);
|
||||
}
|
||||
|
||||
void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
|
||||
|
||||
// Removes current frame and its arguments from the stack preserving
|
||||
// the arguments and a return address pushed to the stack for the next call.
|
||||
// |ra_state| defines whether return address is already pushed to stack or
|
||||
// not. Both |callee_args_count| and |caller_args_count_reg| do not include
|
||||
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
|
||||
// is trashed. |number_of_temp_values_after_return_address| specifies
|
||||
// the number of words pushed to the stack after the return address. This is
|
||||
// to allow "allocation" of scratch registers that this function requires
|
||||
// by saving their values on the stack.
|
||||
void PrepareForTailCall(const ParameterCount& callee_args_count,
|
||||
Register caller_args_count_reg, Register scratch0,
|
||||
Register scratch1, ReturnAddressState ra_state,
|
||||
int number_of_temp_values_after_return_address);
|
||||
|
||||
// Before calling a C-function from generated code, align arguments on stack.
|
||||
// After aligning the frame, arguments must be stored in esp[0], esp[4],
|
||||
// etc., not pushed. The argument count assumes all arguments are word sized.
|
||||
// Some compilers/platforms require the stack to be aligned when calling
|
||||
// C++ code.
|
||||
// Needs a scratch register to do some arithmetic. This register will be
|
||||
// trashed.
|
||||
void PrepareCallCFunction(int num_arguments, Register scratch);
|
||||
|
||||
// Calls a C function and cleans up the space for arguments allocated
|
||||
// by PrepareCallCFunction. The called function is not allowed to trigger a
|
||||
// garbage collection, since that might move the code and invalidate the
|
||||
// return address (unless this is somehow accounted for by the called
|
||||
// function).
|
||||
void CallCFunction(ExternalReference function, int num_arguments);
|
||||
void CallCFunction(Register function, int num_arguments);
|
||||
|
||||
void ShlPair(Register high, Register low, uint8_t imm8);
|
||||
void ShlPair_cl(Register high, Register low);
|
||||
void ShrPair(Register high, Register low, uint8_t imm8);
|
||||
void ShrPair_cl(Register high, Register src);
|
||||
void SarPair(Register high, Register low, uint8_t imm8);
|
||||
void SarPair_cl(Register high, Register low);
|
||||
|
||||
// Generates function and stub prologue code.
|
||||
void StubPrologue(StackFrame::Type type);
|
||||
void Prologue(bool code_pre_aging);
|
||||
|
||||
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
|
||||
void Lzcnt(Register dst, const Operand& src);
|
||||
|
||||
void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
|
||||
void Tzcnt(Register dst, const Operand& src);
|
||||
|
||||
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
|
||||
void Popcnt(Register dst, const Operand& src);
|
||||
|
||||
void Ret();
|
||||
|
||||
// Return and drop arguments from stack, where the number of arguments
|
||||
// may be bigger than 2^16 - 1. Requires a scratch register.
|
||||
void Ret(int bytes_dropped, Register scratch);
|
||||
|
||||
void Pxor(XMMRegister dst, XMMRegister src) { Pxor(dst, Operand(src)); }
|
||||
void Pxor(XMMRegister dst, const Operand& src);
|
||||
|
||||
void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
|
||||
Pshuflw(dst, Operand(src), shuffle);
|
||||
}
|
||||
void Pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
|
||||
void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
|
||||
Pshufd(dst, Operand(src), shuffle);
|
||||
}
|
||||
void Pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
|
||||
|
||||
// SSE/SSE2 instructions with AVX version.
|
||||
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
|
||||
void macro_name(dst_type dst, src_type src) { \
|
||||
if (CpuFeatures::IsSupported(AVX)) { \
|
||||
CpuFeatureScope scope(this, AVX); \
|
||||
v##name(dst, src); \
|
||||
} else { \
|
||||
name(dst, src); \
|
||||
} \
|
||||
}
|
||||
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, const Operand&, XMMRegister)
|
||||
|
||||
#undef AVX_OP2_WITH_TYPE
|
||||
|
||||
// Non-SSE2 instructions.
|
||||
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
|
||||
void Pshufb(XMMRegister dst, const Operand& src);
|
||||
|
||||
void Pextrb(Register dst, XMMRegister src, int8_t imm8);
|
||||
void Pextrw(Register dst, XMMRegister src, int8_t imm8);
|
||||
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
|
||||
void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
|
||||
bool is_64_bits = false) {
|
||||
Pinsrd(dst, Operand(src), imm8, is_64_bits);
|
||||
}
|
||||
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
|
||||
bool is_64_bits = false);
|
||||
|
||||
void LoadUint32(XMMRegister dst, Register src) {
|
||||
LoadUint32(dst, Operand(src));
|
||||
}
|
||||
void LoadUint32(XMMRegister dst, const Operand& src);
|
||||
|
||||
// Expression support
|
||||
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
|
||||
// hinders register renaming and makes dependence chains longer. So we use
|
||||
// xorps to clear the dst register before cvtsi2sd to solve this issue.
|
||||
void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
|
||||
void Cvtsi2sd(XMMRegister dst, const Operand& src);
|
||||
|
||||
void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
|
||||
|
||||
void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
|
||||
Register input_reg,
|
||||
int offset = HeapNumber::kValueOffset -
|
||||
kHeapObjectTag);
|
||||
|
||||
void Push(Register src) { push(src); }
|
||||
void Push(const Operand& src) { push(src); }
|
||||
void Push(Immediate value) { push(value); }
|
||||
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
|
||||
void Push(Smi* smi) { Push(Immediate(smi)); }
|
||||
|
||||
private:
|
||||
bool has_frame_ = false;
|
||||
Isolate* const isolate_;
|
||||
// This handle will be patched with the code object on installation.
|
||||
Handle<HeapObject> code_object_;
|
||||
};
|
||||
|
||||
// MacroAssembler implements a collection of frequently used macros.
|
||||
class MacroAssembler: public Assembler {
|
||||
class MacroAssembler : public TurboAssembler {
|
||||
public:
|
||||
MacroAssembler(Isolate* isolate, void* buffer, int size,
|
||||
CodeObjectRequired create_code_object);
|
||||
|
||||
int jit_cookie() const { return jit_cookie_; }
|
||||
|
||||
Isolate* isolate() const { return isolate_; }
|
||||
|
||||
void Load(Register dst, const Operand& src, Representation r);
|
||||
void Store(Register src, const Operand& dst, Representation r);
|
||||
|
||||
@ -133,10 +352,6 @@ class MacroAssembler: public Assembler {
|
||||
SaveFPRegsMode save_fp,
|
||||
RememberedSetFinalAction and_then);
|
||||
|
||||
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
|
||||
Label* condition_met,
|
||||
Label::Distance condition_met_distance = Label::kFar);
|
||||
|
||||
void CheckPageFlagForMap(
|
||||
Handle<Map> map, int mask, Condition cc, Label* condition_met,
|
||||
Label::Distance condition_met_distance = Label::kFar);
|
||||
@ -235,10 +450,6 @@ class MacroAssembler: public Assembler {
|
||||
// Frame restart support
|
||||
void MaybeDropFrames();
|
||||
|
||||
// Generates function and stub prologue code.
|
||||
void StubPrologue(StackFrame::Type type);
|
||||
void Prologue(bool code_pre_aging);
|
||||
|
||||
// Enter specific kind of exit frame. Expects the number of
|
||||
// arguments in register eax and sets up the number of arguments in
|
||||
// register edi and the pointer to the first argument in register
|
||||
@ -278,9 +489,6 @@ class MacroAssembler: public Assembler {
|
||||
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
|
||||
void LoadFromSafepointRegisterSlot(Register dst, Register src);
|
||||
|
||||
// Nop, because ia32 does not have a root register.
|
||||
void InitializeRootRegister() {}
|
||||
|
||||
void CmpHeapObject(Register reg, Handle<HeapObject> object);
|
||||
void PushObject(Handle<Object> object);
|
||||
|
||||
@ -302,19 +510,6 @@ class MacroAssembler: public Assembler {
|
||||
// ---------------------------------------------------------------------------
|
||||
// JavaScript invokes
|
||||
|
||||
// Removes current frame and its arguments from the stack preserving
|
||||
// the arguments and a return address pushed to the stack for the next call.
|
||||
// |ra_state| defines whether return address is already pushed to stack or
|
||||
// not. Both |callee_args_count| and |caller_args_count_reg| do not include
|
||||
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
|
||||
// is trashed. |number_of_temp_values_after_return_address| specifies
|
||||
// the number of words pushed to the stack after the return address. This is
|
||||
// to allow "allocation" of scratch registers that this function requires
|
||||
// by saving their values on the stack.
|
||||
void PrepareForTailCall(const ParameterCount& callee_args_count,
|
||||
Register caller_args_count_reg, Register scratch0,
|
||||
Register scratch1, ReturnAddressState ra_state,
|
||||
int number_of_temp_values_after_return_address);
|
||||
|
||||
// Invoke the JavaScript function code by either calling or jumping.
|
||||
|
||||
@ -343,22 +538,6 @@ class MacroAssembler: public Assembler {
|
||||
const ParameterCount& actual, InvokeFlag flag,
|
||||
const CallWrapper& call_wrapper);
|
||||
|
||||
// Expression support
|
||||
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
|
||||
// hinders register renaming and makes dependence chains longer. So we use
|
||||
// xorps to clear the dst register before cvtsi2sd to solve this issue.
|
||||
void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
|
||||
void Cvtsi2sd(XMMRegister dst, const Operand& src);
|
||||
|
||||
void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
|
||||
|
||||
void ShlPair(Register high, Register low, uint8_t imm8);
|
||||
void ShlPair_cl(Register high, Register low);
|
||||
void ShrPair(Register high, Register low, uint8_t imm8);
|
||||
void ShrPair_cl(Register high, Register src);
|
||||
void SarPair(Register high, Register low, uint8_t imm8);
|
||||
void SarPair_cl(Register high, Register low);
|
||||
|
||||
// Support for constant splitting.
|
||||
bool IsUnsafeImmediate(const Immediate& x);
|
||||
void SafeMove(Register dst, const Immediate& x);
|
||||
@ -415,35 +594,15 @@ class MacroAssembler: public Assembler {
|
||||
STATIC_ASSERT(kSmiTagSize == 1);
|
||||
add(reg, reg);
|
||||
}
|
||||
void SmiUntag(Register reg) {
|
||||
sar(reg, kSmiTagSize);
|
||||
}
|
||||
|
||||
// Modifies the register even if it does not contain a Smi!
|
||||
void SmiUntag(Register reg, Label* is_smi) {
|
||||
void UntagSmi(Register reg, Label* is_smi) {
|
||||
STATIC_ASSERT(kSmiTagSize == 1);
|
||||
sar(reg, kSmiTagSize);
|
||||
STATIC_ASSERT(kSmiTag == 0);
|
||||
j(not_carry, is_smi);
|
||||
}
|
||||
|
||||
void LoadUint32(XMMRegister dst, Register src) {
|
||||
LoadUint32(dst, Operand(src));
|
||||
}
|
||||
void LoadUint32(XMMRegister dst, const Operand& src);
|
||||
|
||||
// Jump if the register contains a smi.
|
||||
inline void JumpIfSmi(Register value, Label* smi_label,
|
||||
Label::Distance distance = Label::kFar) {
|
||||
test(value, Immediate(kSmiTagMask));
|
||||
j(zero, smi_label, distance);
|
||||
}
|
||||
// Jump if the operand is a smi.
|
||||
inline void JumpIfSmi(Operand value, Label* smi_label,
|
||||
Label::Distance distance = Label::kFar) {
|
||||
test(value, Immediate(kSmiTagMask));
|
||||
j(zero, smi_label, distance);
|
||||
}
|
||||
// Jump if register contain a non-smi.
|
||||
inline void JumpIfNotSmi(Register value, Label* not_smi_label,
|
||||
Label::Distance distance = Label::kFar) {
|
||||
@ -600,7 +759,6 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
// Call a code stub. Generate the code if necessary.
|
||||
void CallStub(CodeStub* stub);
|
||||
void CallStubDelayed(CodeStub* stub);
|
||||
|
||||
// Tail call a code stub (jump). Generate the code if necessary.
|
||||
void TailCallStub(CodeStub* stub);
|
||||
@ -632,23 +790,6 @@ class MacroAssembler: public Assembler {
|
||||
// Convenience function: tail call a runtime routine (jump).
|
||||
void TailCallRuntime(Runtime::FunctionId fid);
|
||||
|
||||
// Before calling a C-function from generated code, align arguments on stack.
|
||||
// After aligning the frame, arguments must be stored in esp[0], esp[4],
|
||||
// etc., not pushed. The argument count assumes all arguments are word sized.
|
||||
// Some compilers/platforms require the stack to be aligned when calling
|
||||
// C++ code.
|
||||
// Needs a scratch register to do some arithmetic. This register will be
|
||||
// trashed.
|
||||
void PrepareCallCFunction(int num_arguments, Register scratch);
|
||||
|
||||
// Calls a C function and cleans up the space for arguments allocated
|
||||
// by PrepareCallCFunction. The called function is not allowed to trigger a
|
||||
// garbage collection, since that might move the code and invalidate the
|
||||
// return address (unless this is somehow accounted for by the called
|
||||
// function).
|
||||
void CallCFunction(ExternalReference function, int num_arguments);
|
||||
void CallCFunction(Register function, int num_arguments);
|
||||
|
||||
// Jump to a runtime routine.
|
||||
void JumpToExternalReference(const ExternalReference& ext,
|
||||
bool builtin_exit_frame = false);
|
||||
@ -656,12 +797,6 @@ class MacroAssembler: public Assembler {
|
||||
// ---------------------------------------------------------------------------
|
||||
// Utilities
|
||||
|
||||
void Ret();
|
||||
|
||||
// Return and drop arguments from stack, where the number of arguments
|
||||
// may be bigger than 2^16 - 1. Requires a scratch register.
|
||||
void Ret(int bytes_dropped, Register scratch);
|
||||
|
||||
// Emit code that loads |parameter_index|'th parameter from the stack to
|
||||
// the register according to the CallInterfaceDescriptor definition.
|
||||
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
|
||||
@ -684,95 +819,12 @@ class MacroAssembler: public Assembler {
|
||||
// from the stack, clobbering only the esp register.
|
||||
void Drop(int element_count);
|
||||
|
||||
void Call(Label* target) { call(target); }
|
||||
void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
|
||||
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
|
||||
void Push(Register src) { push(src); }
|
||||
void Push(const Operand& src) { push(src); }
|
||||
void Push(Immediate value) { push(value); }
|
||||
void Pop(Register dst) { pop(dst); }
|
||||
void Pop(const Operand& dst) { pop(dst); }
|
||||
void PushReturnAddressFrom(Register src) { push(src); }
|
||||
void PopReturnAddressTo(Register dst) { pop(dst); }
|
||||
|
||||
// SSE/SSE2 instructions with AVX version.
|
||||
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
|
||||
void macro_name(dst_type dst, src_type src) { \
|
||||
if (CpuFeatures::IsSupported(AVX)) { \
|
||||
CpuFeatureScope scope(this, AVX); \
|
||||
v##name(dst, src); \
|
||||
} else { \
|
||||
name(dst, src); \
|
||||
} \
|
||||
}
|
||||
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
|
||||
AVX_OP2_WITH_TYPE(Movd, movd, const Operand&, XMMRegister)
|
||||
|
||||
#undef AVX_OP2_WITH_TYPE
|
||||
|
||||
void Pxor(XMMRegister dst, XMMRegister src) { Pxor(dst, Operand(src)); }
|
||||
void Pxor(XMMRegister dst, const Operand& src);
|
||||
|
||||
void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
|
||||
Pshuflw(dst, Operand(src), shuffle);
|
||||
}
|
||||
void Pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
|
||||
void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
|
||||
Pshufd(dst, Operand(src), shuffle);
|
||||
}
|
||||
void Pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
|
||||
|
||||
// Non-SSE2 instructions.
|
||||
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
|
||||
void Pshufb(XMMRegister dst, const Operand& src);
|
||||
|
||||
void Pextrb(Register dst, XMMRegister src, int8_t imm8);
|
||||
void Pextrw(Register dst, XMMRegister src, int8_t imm8);
|
||||
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
|
||||
void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
|
||||
bool is_64_bits = false) {
|
||||
Pinsrd(dst, Operand(src), imm8, is_64_bits);
|
||||
}
|
||||
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
|
||||
bool is_64_bits = false);
|
||||
|
||||
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
|
||||
void Lzcnt(Register dst, const Operand& src);
|
||||
|
||||
void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
|
||||
void Tzcnt(Register dst, const Operand& src);
|
||||
|
||||
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
|
||||
void Popcnt(Register dst, const Operand& src);
|
||||
|
||||
// Move if the registers are not identical.
|
||||
void Move(Register target, Register source);
|
||||
|
||||
// Move a constant into a destination using the most efficient encoding.
|
||||
void Move(Register dst, const Immediate& x);
|
||||
void Move(const Operand& dst, const Immediate& x);
|
||||
|
||||
// Move an immediate into an XMM register.
|
||||
void Move(XMMRegister dst, uint32_t src);
|
||||
void Move(XMMRegister dst, uint64_t src);
|
||||
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
|
||||
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
|
||||
|
||||
void Move(Register dst, Handle<HeapObject> handle);
|
||||
void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
|
||||
|
||||
// Push a handle value.
|
||||
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
|
||||
void Push(Smi* smi) { Push(Immediate(smi)); }
|
||||
|
||||
Handle<HeapObject> CodeObject() {
|
||||
DCHECK(!code_object_.is_null());
|
||||
return code_object_;
|
||||
}
|
||||
|
||||
// Emit code for a truncating division by a constant. The dividend register is
|
||||
// unchanged, the result is in edx, and eax gets clobbered.
|
||||
void TruncatingDiv(Register dividend, int32_t divisor);
|
||||
@ -786,26 +838,6 @@ class MacroAssembler: public Assembler {
|
||||
void IncrementCounter(Condition cc, StatsCounter* counter, int value);
|
||||
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Debugging
|
||||
|
||||
// Calls Abort(msg) if the condition cc is not satisfied.
|
||||
// Use --debug_code to enable.
|
||||
void Assert(Condition cc, BailoutReason reason);
|
||||
|
||||
// Like Assert(), but always enabled.
|
||||
void Check(Condition cc, BailoutReason reason);
|
||||
|
||||
// Print a message to stdout and abort execution.
|
||||
void Abort(BailoutReason reason);
|
||||
|
||||
// Check that the stack is aligned.
|
||||
void CheckStackAlignment();
|
||||
|
||||
void set_has_frame(bool value) { has_frame_ = value; }
|
||||
bool has_frame() { return has_frame_; }
|
||||
inline bool AllowThisStubCall(CodeStub* stub);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// String utilities.
|
||||
|
||||
@ -834,11 +866,6 @@ class MacroAssembler: public Assembler {
|
||||
// Load the type feedback vector from a JavaScript frame.
|
||||
void EmitLoadFeedbackVector(Register vector);
|
||||
|
||||
// Activation support.
|
||||
void EnterFrame(StackFrame::Type type);
|
||||
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
|
||||
void LeaveFrame(StackFrame::Type type);
|
||||
|
||||
void EnterBuiltinFrame(Register context, Register target, Register argc);
|
||||
void LeaveBuiltinFrame(Register context, Register target, Register argc);
|
||||
|
||||
@ -857,10 +884,6 @@ class MacroAssembler: public Assembler {
|
||||
Label* no_memento_found);
|
||||
|
||||
private:
|
||||
bool has_frame_;
|
||||
Isolate* isolate_;
|
||||
// This handle will be patched with the code object on installation.
|
||||
Handle<HeapObject> code_object_;
|
||||
int jit_cookie_;
|
||||
|
||||
// Helper functions for generating invokes.
|
||||
|
@ -66,19 +66,19 @@ static constexpr int kMaxCParameters = 9;

class FrameScope {
public:
explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
: masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
masm->set_has_frame(true);
explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type)
: tasm_(tasm), type_(type), old_has_frame_(tasm->has_frame()) {
tasm->set_has_frame(true);
if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
masm->EnterFrame(type);
tasm->EnterFrame(type);
}
}

~FrameScope() {
if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
masm_->LeaveFrame(type_);
tasm_->LeaveFrame(type_);
}
masm_->set_has_frame(old_has_frame_);
tasm_->set_has_frame(old_has_frame_);
}

// Normally we generate the leave-frame code when this object goes
@ -88,11 +88,11 @@ class FrameScope {
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
tasm_->LeaveFrame(type_);
}

private:
MacroAssembler* masm_;
TurboAssembler* tasm_;
StackFrame::Type type_;
bool old_has_frame_;
};

@ -1737,6 +1737,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
friend class StandardFrame;
};

using TurboAssembler = MacroAssembler; // TODO(mips): Implement TurboAssembler.

// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher

@ -1885,6 +1885,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
friend class StandardFrame;
};

using TurboAssembler = MacroAssembler; // TODO(mips): Implement TurboAssembler.

// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher

@ -2101,10 +2101,10 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
}
}

void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != nullptr) {
masm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
}
}

File diff suppressed because it is too large
@ -85,9 +85,354 @@ struct SmiIndex {
ScaleFactor scale;
};

class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}

void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }

Isolate* isolate() const { return isolate_; }

Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}

#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
|
||||
void macro_name(XMMRegister dst, src_type src) { \
|
||||
if (CpuFeatures::IsSupported(AVX)) { \
|
||||
CpuFeatureScope scope(this, AVX); \
|
||||
v##name(dst, dst, src); \
|
||||
} else { \
|
||||
name(dst, src); \
|
||||
} \
|
||||
}
|
||||
#define AVX_OP2_X(macro_name, name) \
|
||||
AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
|
||||
#define AVX_OP2_O(macro_name, name) \
|
||||
AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
|
||||
#define AVX_OP2_XO(macro_name, name) \
|
||||
AVX_OP2_X(macro_name, name) \
|
||||
AVX_OP2_O(macro_name, name)
|
||||
|
||||
AVX_OP2_XO(Subsd, subsd)
|
||||
AVX_OP2_XO(Divss, divss)
|
||||
AVX_OP2_XO(Divsd, divsd)
|
||||
AVX_OP2_XO(Xorpd, xorpd)
|
||||
AVX_OP2_X(Pcmpeqd, pcmpeqd)
|
||||
AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
|
||||
AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
|
||||
|
||||
#undef AVX_OP2_O
|
||||
#undef AVX_OP2_X
|
||||
#undef AVX_OP2_XO
|
||||
#undef AVX_OP2_WITH_TYPE
|
||||
|
||||
void Xorps(XMMRegister dst, XMMRegister src);
|
||||
void Xorps(XMMRegister dst, const Operand& src);
|
||||
|
||||
void Movd(XMMRegister dst, Register src);
|
||||
void Movd(XMMRegister dst, const Operand& src);
|
||||
void Movd(Register dst, XMMRegister src);
|
||||
void Movq(XMMRegister dst, Register src);
|
||||
void Movq(Register dst, XMMRegister src);
|
||||
|
||||
void Movsd(XMMRegister dst, XMMRegister src);
|
||||
void Movsd(XMMRegister dst, const Operand& src);
|
||||
void Movsd(const Operand& dst, XMMRegister src);
|
||||
void Movss(XMMRegister dst, XMMRegister src);
|
||||
void Movss(XMMRegister dst, const Operand& src);
|
||||
void Movss(const Operand& dst, XMMRegister src);
|
||||
|
||||
void PushReturnAddressFrom(Register src) { pushq(src); }
|
||||
void PopReturnAddressTo(Register dst) { popq(dst); }
|
||||
|
||||
void Ret();
|
||||
|
||||
// Return and drop arguments from stack, where the number of arguments
|
||||
// may be bigger than 2^16 - 1. Requires a scratch register.
|
||||
void Ret(int bytes_dropped, Register scratch);
|
||||
|
||||
// Load a register with a long value as efficiently as possible.
|
||||
void Set(Register dst, int64_t x);
|
||||
void Set(const Operand& dst, intptr_t x);
|
||||
|
||||
// Operations on roots in the root-array.
|
||||
void LoadRoot(Register destination, Heap::RootListIndex index);
|
||||
void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
|
||||
LoadRoot(kScratchRegister, index);
|
||||
movp(destination, kScratchRegister);
|
||||
}
|
||||
|
||||
void Movups(XMMRegister dst, XMMRegister src);
|
||||
void Movups(XMMRegister dst, const Operand& src);
|
||||
void Movups(const Operand& dst, XMMRegister src);
|
||||
void Movapd(XMMRegister dst, XMMRegister src);
|
||||
void Movaps(XMMRegister dst, XMMRegister src);
|
||||
void Movmskpd(Register dst, XMMRegister src);
|
||||
void Movmskps(Register dst, XMMRegister src);
|
||||
|
||||
void Push(Register src);
|
||||
void Push(const Operand& src);
|
||||
void Push(Immediate value);
|
||||
void Push(Smi* smi);
|
||||
void Push(Handle<HeapObject> source);
|
||||
|
||||
// Before calling a C-function from generated code, align arguments on stack.
|
||||
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
|
||||
// etc., not pushed. The argument count assumes all arguments are word sized.
|
||||
// The number of slots reserved for arguments depends on platform. On Windows
|
||||
// stack slots are reserved for the arguments passed in registers. On other
|
||||
// platforms stack slots are only reserved for the arguments actually passed
|
||||
// on the stack.
|
||||
void PrepareCallCFunction(int num_arguments);
|
||||
|
||||
// Calls a C function and cleans up the space for arguments allocated
|
||||
// by PrepareCallCFunction. The called function is not allowed to trigger a
|
||||
// garbage collection, since that might move the code and invalidate the
|
||||
// return address (unless this is somehow accounted for by the called
|
||||
// function).
|
||||
void CallCFunction(ExternalReference function, int num_arguments);
|
||||
void CallCFunction(Register function, int num_arguments);
|
||||
|
||||
// Calculate the number of stack slots to reserve for arguments when calling a
|
||||
// C function.
|
||||
int ArgumentStackSlotsForCFunctionCall(int num_arguments);

void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);

void Cvtss2sd(XMMRegister dst, XMMRegister src);
void Cvtss2sd(XMMRegister dst, const Operand& src);
void Cvtsd2ss(XMMRegister dst, XMMRegister src);
void Cvtsd2ss(XMMRegister dst, const Operand& src);
void Cvttsd2si(Register dst, XMMRegister src);
void Cvttsd2si(Register dst, const Operand& src);
void Cvttsd2siq(Register dst, XMMRegister src);
void Cvttsd2siq(Register dst, const Operand& src);
void Cvttss2si(Register dst, XMMRegister src);
void Cvttss2si(Register dst, const Operand& src);
void Cvttss2siq(Register dst, XMMRegister src);
void Cvttss2siq(Register dst, const Operand& src);
void Cvtqsi2ss(XMMRegister dst, Register src);
void Cvtqsi2ss(XMMRegister dst, const Operand& src);
void Cvtqsi2sd(XMMRegister dst, Register src);
void Cvtqsi2sd(XMMRegister dst, const Operand& src);
void Cvtlsi2ss(XMMRegister dst, Register src);
void Cvtlsi2ss(XMMRegister dst, const Operand& src);
void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);

// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
// xorpd to clear the dst register before cvtsi2sd to solve this issue.
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, const Operand& src);
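
// Sketch of the intended expansion (assumption, based on the comment above):
//
//   Cvtlsi2sd(dst, src) is expected to emit:
//     xorpd(dst, dst);      // clear dst to break the false dependence
//     cvtlsi2sd(dst, src);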

void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);

void Sqrtsd(XMMRegister dst, XMMRegister src);
void Sqrtsd(XMMRegister dst, const Operand& src);

void Ucomiss(XMMRegister src1, XMMRegister src2);
void Ucomiss(XMMRegister src1, const Operand& src2);
void Ucomisd(XMMRegister src1, XMMRegister src2);
void Ucomisd(XMMRegister src1, const Operand& src2);

void Lzcntq(Register dst, Register src);
void Lzcntq(Register dst, const Operand& src);
void Lzcntl(Register dst, Register src);
void Lzcntl(Register dst, const Operand& src);
void Tzcntq(Register dst, Register src);
void Tzcntq(Register dst, const Operand& src);
void Tzcntl(Register dst, Register src);
void Tzcntl(Register dst, const Operand& src);
void Popcntl(Register dst, Register src);
void Popcntl(Register dst, const Operand& src);
void Popcntq(Register dst, Register src);
void Popcntq(Register dst, const Operand& src);
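
// These wrappers are assumed to emit the native LZCNT/TZCNT/POPCNT encodings
// when the corresponding CPU feature is available and an equivalent fallback
// sequence otherwise (the definitions live in the .cc file), e.g.
//
//   Lzcntl(rax, rbx);  // lzcntl when supported, otherwise a bsr-based fallback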

// Is the value a tagged smi.
Condition CheckSmi(Register src);
Condition CheckSmi(const Operand& src);

// Jump to label if the value is a tagged smi.
void JumpIfSmi(Register src, Label* on_smi,
Label::Distance near_jump = Label::kFar);
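
// Typical use (illustrative only):
//
//   Label smi_case;
//   JumpIfSmi(rax, &smi_case);
//   // ... heap-object path ...
//   bind(&smi_case);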

void Move(Register dst, Smi* source);

void Move(const Operand& dst, Smi* source) {
Register constant = GetSmiConstant(source);
movp(dst, constant);
}

void Move(Register dst, ExternalReference ext) {
movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}

void Move(XMMRegister dst, uint32_t src);
void Move(XMMRegister dst, uint64_t src);
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
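
// The float/double overloads materialize the raw bit pattern, so, for
// example (illustrative), Move(xmm0, 1.5) loads bit_cast<uint64_t>(1.5)
// into xmm0.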

// Move if the registers are not identical.
void Move(Register target, Register source);

void Move(Register dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
void Move(const Operand& dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);

// Loads a pointer into a register with a relocation mode.
void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
movp(dst, ptr, rmode);
}

// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
void SmiToInteger32(Register dst, const Operand& src);
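
// Note (assumption): with the default 31-bit smi configuration on x64 the
// payload lives in the upper 32 bits (kSmiShift == 32), so untagging amounts
// to a right shift by kSmiShift.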

// Loads the address of the external reference into the destination
// register.
void LoadAddress(Register destination, ExternalReference source);

void Call(const Operand& op);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }

// The size of the code generated for different call instructions.
int CallSize(ExternalReference ext);
int CallSize(Address destination) { return kCallSequenceLength; }
int CallSize(Handle<Code> code_object) {
// Code calls use 32-bit relative addressing.
return kShortCallInstructionLength;
}
int CallSize(Register target) {
// Opcode: REX_opt FF /2 m64
return (target.high_bit() != 0) ? 3 : 2;
}
int CallSize(const Operand& target) {
// Opcode: REX_opt FF /2 m64
return (target.requires_rex() ? 2 : 1) + target.operand_size();
}
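
// Worked example (illustrative): a call through rax encodes as FF D0
// (2 bytes), while a call through r8 needs a REX prefix, 41 FF D0 (3 bytes),
// which is what the high_bit() check above accounts for.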

// Returns the size of the code generated by LoadAddress.
// Used by CallSize(ExternalReference) to find the size of a call.
int LoadAddressSize(ExternalReference source);

// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);

void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);

// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);

// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, BailoutReason reason);

// Abort execution if a 64 bit register containing a 32 bit payload does not
// have zeros in the top 32 bits, enabled via --debug-code.
void AssertZeroExtended(Register reg);

// Like Assert(), but always enabled.
void Check(Condition cc, BailoutReason reason);

// Print a message to stdout and abort execution.
void Abort(BailoutReason msg);

// Check that the stack is aligned.
void CheckStackAlignment();

// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on x64.
UNREACHABLE();
}
void LeaveFrame(StackFrame::Type type);
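
// Typical pairing (illustrative only):
//
//   EnterFrame(StackFrame::INTERNAL);
//   // ... emit code that relies on the frame ...
//   LeaveFrame(StackFrame::INTERNAL);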

// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// |ra_state| defines whether return address is already pushed to stack or
// not. Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1, ReturnAddressState ra_state);

inline bool AllowThisStubCall(CodeStub* stub);

// Call a code stub. This expects {stub} to be zone-allocated, as it does not
// trigger generation of the stub's code object but instead files a
// HeapObjectRequest that will be fulfilled after code assembly.
void CallStubDelayed(CodeStub* stub);

void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
Register input_reg,
int offset = HeapNumber::kValueOffset -
kHeapObjectTag);

// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);

void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
addp(kRootRegister, Immediate(kRootRegisterBias));
}

void MoveNumber(Register dst, double value);
void MoveNonSmi(Register dst, double value);

protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
int smi_count = 0;
int heap_object_count = 0;

bool root_array_available_ = true;

int64_t RootRegisterDelta(ExternalReference other);

// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);

private:
bool has_frame_ = false;
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
Isolate* const isolate_;
};
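
// Illustrative consumer (sketch, not part of this change): a generator
// function that only needs the interface above can be written against
// TurboAssembler, e.g.
//
//   void GenerateReturnTrue(TurboAssembler* tasm) {
//     tasm->LoadRoot(rax, Heap::kTrueValueRootIndex);
//     tasm->Ret();
//   }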

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
@ -111,8 +456,6 @@ class MacroAssembler: public Assembler {
bool old_value_;
};

Isolate* isolate() const { return isolate_; }

// Operand pointing to an external reference.
// May emit code to set up the scratch register. The operand is
// only guaranteed to be correct as long as the scratch register
@ -129,21 +472,10 @@ class MacroAssembler: public Assembler {
// operation(operand, ..);
void Load(Register destination, ExternalReference source);
void Store(ExternalReference destination, Register source);
// Loads the address of the external reference into the destination
// register.
void LoadAddress(Register destination, ExternalReference source);
// Returns the size of the code generated by LoadAddress.
// Used by CallSize(ExternalReference) to find the size of a call.
int LoadAddressSize(ExternalReference source);
// Pushes the address of the external reference onto the stack.
void PushAddress(ExternalReference source);

// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
LoadRoot(kScratchRegister, index);
movp(destination, kScratchRegister);
}
void StoreRoot(Register source, Heap::RootListIndex index);
// Load a root value where the index (or part of it) is variable.
// The variable_offset register is added to the fixed_offset value
@ -151,8 +483,6 @@ class MacroAssembler: public Assembler {
void LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset);
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);

// Compare the object in a register to a value and jump if they are equal.
@ -212,13 +542,6 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);

void CheckPageFlag(Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);

// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@ -314,10 +637,6 @@ class MacroAssembler: public Assembler {
// Frame restart support.
void MaybeDropFrames();

// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);

// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@ -350,26 +669,9 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);

void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
addp(kRootRegister, Immediate(kRootRegisterBias));
}

// ---------------------------------------------------------------------------
// JavaScript invokes

// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// |ra_state| defines whether return address is already pushed to stack or
// not. Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1, ReturnAddressState ra_state);

// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@ -424,11 +726,6 @@ class MacroAssembler: public Assembler {
// Result must be a valid smi.
void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
void SmiToInteger32(Register dst, const Operand& src);

// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
void SmiToInteger64(Register dst, const Operand& src);
@ -458,10 +755,6 @@ class MacroAssembler: public Assembler {
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.

// Is the value a tagged smi.
Condition CheckSmi(Register src);
Condition CheckSmi(const Operand& src);

// Is the value a non-negative tagged smi.
Condition CheckNonNegativeSmi(Register src);

@ -503,11 +796,6 @@ class MacroAssembler: public Assembler {
void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
Label::Distance near_jump = Label::kFar);

// Jump to label if the value is a tagged smi.
void JumpIfSmi(Register src,
Label* on_smi,
Label::Distance near_jump = Label::kFar);

// Jump to label if the value is not a tagged smi.
void JumpIfNotSmi(Register src,
Label* on_not_smi,
@ -717,18 +1005,6 @@ class MacroAssembler: public Assembler {
// Sets flags as a normal add.
void AddSmiField(Register dst, const Operand& src);

// Basic Smi operations.
void Move(Register dst, Smi* source) {
LoadSmiConstant(dst, source);
}

void Move(const Operand& dst, Smi* source) {
Register constant = GetSmiConstant(source);
movp(dst, constant);
}

void Push(Smi* smi);

// Save away a raw integer with pointer size on the stack as two integers
// masquerading as smis so that the garbage collector skips visiting them.
void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
@ -777,56 +1053,12 @@ class MacroAssembler: public Assembler {
void Load(Register dst, const Operand& src, Representation r);
void Store(const Operand& dst, Register src, Representation r);

// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
void Set(const Operand& dst, intptr_t x);

void Cvtss2sd(XMMRegister dst, XMMRegister src);
void Cvtss2sd(XMMRegister dst, const Operand& src);
void Cvtsd2ss(XMMRegister dst, XMMRegister src);
void Cvtsd2ss(XMMRegister dst, const Operand& src);

// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
// xorpd to clear the dst register before cvtsi2sd to solve this issue.
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, const Operand& src);

void Cvtlsi2ss(XMMRegister dst, Register src);
void Cvtlsi2ss(XMMRegister dst, const Operand& src);
void Cvtqsi2ss(XMMRegister dst, Register src);
void Cvtqsi2ss(XMMRegister dst, const Operand& src);

void Cvtqsi2sd(XMMRegister dst, Register src);
void Cvtqsi2sd(XMMRegister dst, const Operand& src);

void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);

void Cvtsd2si(Register dst, XMMRegister src);

void Cvttss2si(Register dst, XMMRegister src);
void Cvttss2si(Register dst, const Operand& src);
void Cvttsd2si(Register dst, XMMRegister src);
void Cvttsd2si(Register dst, const Operand& src);
void Cvttss2siq(Register dst, XMMRegister src);
void Cvttss2siq(Register dst, const Operand& src);
void Cvttsd2siq(Register dst, XMMRegister src);
void Cvttsd2siq(Register dst, const Operand& src);

// Move if the registers are not identical.
void Move(Register target, Register source);

// Handle support
void Move(Register dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
void Move(const Operand& dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Cmp(Register dst, Smi* src);
void Cmp(const Operand& dst, Smi* src);
void Push(Handle<HeapObject> source);
void PushObject(Handle<Object> source);

// Move a Smi or HeapNumber.
@ -859,34 +1091,11 @@ class MacroAssembler: public Assembler {
void DropUnderReturnAddress(int stack_elements,
Register scratch = kScratchRegister);

void Call(Label* target) { call(target); }
void Push(Register src);
void Push(const Operand& src);
void PushQuad(const Operand& src);
void Push(Immediate value);
void PushImm32(int32_t imm32);
void Pop(Register dst);
void Pop(const Operand& dst);
void PopQuad(const Operand& dst);
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}

// Loads a pointer into a register with a relocation mode.
void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
movp(dst, ptr, rmode);
}

void Move(XMMRegister dst, uint32_t src);
void Move(XMMRegister dst, uint64_t src);
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }

#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
void macro_name(XMMRegister dst, src_type src) { \
@ -906,14 +1115,10 @@ class MacroAssembler: public Assembler {
AVX_OP2_O(macro_name, name)

AVX_OP2_XO(Addsd, addsd)
AVX_OP2_XO(Subsd, subsd)
AVX_OP2_XO(Mulsd, mulsd)
AVX_OP2_XO(Divss, divss)
AVX_OP2_XO(Divsd, divsd)
AVX_OP2_XO(Andps, andps)
AVX_OP2_XO(Andpd, andpd)
AVX_OP2_XO(Orpd, orpd)
AVX_OP2_XO(Xorpd, xorpd)
AVX_OP2_XO(Cmpeqps, cmpeqps)
AVX_OP2_XO(Cmpltps, cmpltps)
AVX_OP2_XO(Cmpleps, cmpleps)
@ -926,49 +1131,12 @@ class MacroAssembler: public Assembler {
AVX_OP2_XO(Cmpneqpd, cmpneqpd)
AVX_OP2_XO(Cmpnltpd, cmpnltpd)
AVX_OP2_XO(Cmpnlepd, cmpnlepd)
AVX_OP2_X(Pcmpeqd, pcmpeqd)
AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)

#undef AVX_OP2_O
#undef AVX_OP2_X
#undef AVX_OP2_XO
#undef AVX_OP2_WITH_TYPE
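
// Assumed expansion (sketch): each AVX_OP2_* entry defines a wrapper that
// dispatches on AVX support, e.g. Addsd(dst, src) emits vaddsd when AVX is
// enabled and addsd otherwise.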

void Movsd(XMMRegister dst, XMMRegister src);
void Movsd(XMMRegister dst, const Operand& src);
void Movsd(const Operand& dst, XMMRegister src);
void Movss(XMMRegister dst, XMMRegister src);
void Movss(XMMRegister dst, const Operand& src);
void Movss(const Operand& dst, XMMRegister src);

void Movd(XMMRegister dst, Register src);
void Movd(XMMRegister dst, const Operand& src);
void Movd(Register dst, XMMRegister src);
void Movq(XMMRegister dst, Register src);
void Movq(Register dst, XMMRegister src);

void Movaps(XMMRegister dst, XMMRegister src);
void Movups(XMMRegister dst, XMMRegister src);
void Movups(XMMRegister dst, const Operand& src);
void Movups(const Operand& dst, XMMRegister src);
void Movmskps(Register dst, XMMRegister src);
void Movapd(XMMRegister dst, XMMRegister src);
void Movmskpd(Register dst, XMMRegister src);

void Xorps(XMMRegister dst, XMMRegister src);
void Xorps(XMMRegister dst, const Operand& src);

void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Sqrtsd(XMMRegister dst, XMMRegister src);
void Sqrtsd(XMMRegister dst, const Operand& src);

void Ucomiss(XMMRegister src1, XMMRegister src2);
void Ucomiss(XMMRegister src1, const Operand& src2);
void Ucomisd(XMMRegister src1, XMMRegister src2);
void Ucomisd(XMMRegister src1, const Operand& src2);

// ---------------------------------------------------------------------------
// SIMD macros.
void Absps(XMMRegister dst);
@ -982,52 +1150,6 @@ class MacroAssembler: public Assembler {
void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
void Call(const Operand& op);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

// The size of the code generated for different call instructions.
int CallSize(Address destination) {
return kCallSequenceLength;
}
int CallSize(ExternalReference ext);
int CallSize(Handle<Code> code_object) {
// Code calls use 32-bit relative addressing.
return kShortCallInstructionLength;
}
int CallSize(Register target) {
// Opcode: REX_opt FF /2 m64
return (target.high_bit() != 0) ? 3 : 2;
}
int CallSize(const Operand& target) {
// Opcode: REX_opt FF /2 m64
return (target.requires_rex() ? 2 : 1) + target.operand_size();
}

// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);

void Lzcntq(Register dst, Register src);
void Lzcntq(Register dst, const Operand& src);

void Lzcntl(Register dst, Register src);
void Lzcntl(Register dst, const Operand& src);

void Tzcntq(Register dst, Register src);
void Tzcntq(Register dst, const Operand& src);

void Tzcntl(Register dst, Register src);
void Tzcntl(Register dst, const Operand& src);

void Popcntl(Register dst, Register src);
void Popcntl(Register dst, const Operand& src);

void Popcntq(Register dst, Register src);
void Popcntq(Register dst, const Operand& src);

// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
@ -1136,10 +1258,6 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);

// Abort execution if a 64 bit register containing a 32 bit payload does not
// have zeros in the top 32 bits, enabled via --debug-code.
void AssertZeroExtended(Register reg);

// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);

@ -1249,12 +1367,9 @@ class MacroAssembler: public Assembler {
// Runtime calls

// Call a code stub.
// The first version is deprecated.
// The code object is generated immediately, in contrast to
// TurboAssembler::CallStubDelayed.
void CallStub(CodeStub* stub);
// The second version, which expects {stub} to be zone-allocated, does not
// trigger generation of the stub's code object but instead files a
// HeapObjectRequest that will be fulfilled after code assembly.
void CallStubDelayed(CodeStub* stub);

// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
@ -1263,8 +1378,6 @@ class MacroAssembler: public Assembler {
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);

// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
@ -1296,41 +1409,9 @@ class MacroAssembler: public Assembler {
void JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame = false);

// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
// etc., not pushed. The argument count assumes all arguments are word sized.
// The number of slots reserved for arguments depends on platform. On Windows
// stack slots are reserved for the arguments passed in registers. On other
// platforms stack slots are only reserved for the arguments actually passed
// on the stack.
void PrepareCallCFunction(int num_arguments);

// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);

// Calculate the number of stack slots to reserve for arguments when calling a
// C function.
int ArgumentStackSlotsForCFunctionCall(int num_arguments);

// ---------------------------------------------------------------------------
// Utilities

void Ret();

// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);

Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}

// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.
@ -1353,23 +1434,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugging

// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, BailoutReason reason);

// Like Assert(), but always enabled.
void Check(Condition cc, BailoutReason reason);

// Print a message to stdout and abort execution.
void Abort(BailoutReason msg);

// Check that the stack is aligned.
void CheckStackAlignment();

void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);

static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@ -1377,11 +1441,6 @@ class MacroAssembler: public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);

// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);

void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);

@ -1404,25 +1463,9 @@ class MacroAssembler: public Assembler {
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
static const int kNumSafepointSavedRegisters = 12;
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

bool has_frame_;
Isolate* isolate_;
bool root_array_available_;
int jit_cookie_;

// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);

int64_t RootRegisterDelta(ExternalReference other);

// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);

// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;

// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,