PPC/s390: [compiler] Delay generation of code stubs.

Port 040fa06fb3
Port 659e8f7b5c

R=neis@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=v8:6048
LOG=N

Change-Id: Id3030a64d462344eb8612f8009b0c8e15a5edcb9
Reviewed-on: https://chromium-review.googlesource.com/581744
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#46843}
Jaideep Bajwa 2017-07-21 14:44:06 -04:00 committed by Commit Bot
parent 486e641c11
commit 46344edaac
14 changed files with 337 additions and 151 deletions
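
At a high level, the change replaces eager stub construction, which needs a live Isolate, with deferred requests that are resolved once GetCode runs with an Isolate. The standalone C++ sketch below illustrates that lifecycle; it is not code from this diff, and the helper ResolvePendingStubs is an invented name standing in for the real resolution step.

// Rough standalone sketch of the delayed-stub idea (illustration only, not
// V8 source): stubs are created with a null Isolate, call sites merely
// record them, and code generation is deferred until an Isolate is supplied.
#include <vector>

struct Isolate;  // opaque stand-in

class CodeStub {
 public:
  explicit CodeStub(Isolate* isolate) : isolate_(isolate) {}
  void set_isolate(Isolate* isolate) { isolate_ = isolate; }
  void GetCode() { /* would generate the stub's code using isolate_ */ }

 private:
  Isolate* isolate_;  // nullptr until the delayed request is resolved
};

class MacroAssembler {
 public:
  // Delayed scheme: remember the stub instead of generating it on the spot.
  void CallStubDelayed(CodeStub* stub) { pending_stubs_.push_back(stub); }

  // Resolution step (hypothetical helper): bind the Isolate and generate
  // every pending stub, after which the recorded call sites can be patched.
  void ResolvePendingStubs(Isolate* isolate) {
    for (CodeStub* stub : pending_stubs_) {
      stub->set_isolate(isolate);
      stub->GetCode();
    }
  }

 private:
  std::vector<CodeStub*> pending_stubs_;
};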

@ -62,11 +62,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
return Operand(isolate()->factory()->NewNumber(
constant.ToFloat64().value(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat64());
case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
return Operand(constant.ToInt64());
@ -174,7 +172,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {}
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@ -187,7 +186,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {}
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@ -206,8 +206,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ mflr(scratch1_);
__ Push(scratch1_);
}
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) {
__ addi(scratch1_, object_, Operand(offset_immediate_));
} else {
@ -216,9 +214,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
} else {
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
}
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
@ -236,6 +238,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
Zone* zone_;
};
@ -1533,8 +1536,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3);
break;
}
@ -2401,12 +2404,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif
break;
case Constant::kFloat32:
__ Move(dst,
isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
__ Move(dst, isolate()->factory()->NewNumber(src.ToFloat64().value(),
TENURED));
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat64()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));

@ -56,11 +56,9 @@ class S390OperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
return Operand(isolate()->factory()->NewNumber(
constant.ToFloat64().value(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat64());
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
return Operand(constant.ToInt64());
@ -208,7 +206,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {}
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@ -221,7 +220,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {}
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@ -239,15 +239,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
}
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) {
__ AddP(scratch1_, object_, Operand(offset_immediate_));
} else {
DCHECK_EQ(0, offset_immediate_);
__ AddP(scratch1_, object_, offset_);
}
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided.
__ Pop(r14);
@ -263,6 +263,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
Zone* zone_;
};
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
@ -1757,8 +1758,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3);
break;
}
@ -2772,12 +2773,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif // V8_TARGET_ARCH_S390X
break;
case Constant::kFloat32:
__ Move(dst,
isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
__ Move(dst, isolate()->factory()->NewNumber(src.ToFloat64().value(),
TENURED));
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat64()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));

@ -367,19 +367,19 @@ void RelocInfo::Visit(Heap* heap) {
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm_ = immediate;
value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(f.address());
value_.immediate = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(value);
value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR;
}

@ -40,6 +40,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
@ -204,15 +205,30 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
imm_ = reinterpret_cast<intptr_t>(handle.location());
value_.immediate = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// no relocation needed
imm_ = reinterpret_cast<intptr_t>(obj);
value_.immediate = reinterpret_cast<intptr_t>(obj);
rmode_ = kRelocInfo_NONEPTR;
}
}
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Operand Operand::EmbeddedCode(CodeStub* stub) {
Operand result(0, RelocInfo::CODE_TARGET);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(stub);
return result;
}
MemOperand::MemOperand(Register rn, int32_t offset) {
ra_ = rn;
@ -227,6 +243,26 @@ MemOperand::MemOperand(Register ra, Register rb) {
offset_ = 0;
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
}
Address pc = buffer_ + request.offset();
Address constant_pool = NULL;
set_target_address_at(nullptr, pc, constant_pool,
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
}
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@ -253,6 +289,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
int constant_pool_offset = EmitConstantPool();
EmitRelocations();
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
@ -740,12 +777,12 @@ void Assembler::b(int branch_offset, LKBit lk) {
void Assembler::xori(Register dst, Register src, const Operand& imm) {
d_form(XORI, src, dst, imm.imm_, false);
d_form(XORI, src, dst, imm.immediate(), false);
}
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
d_form(XORIS, rs, ra, imm.imm_, false);
d_form(XORIS, rs, ra, imm.immediate(), false);
}
@ -779,28 +816,28 @@ void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((32 > val.imm_) && (val.imm_ >= 0));
rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((32 > val.imm_) && (val.imm_ >= 0));
rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((32 > val.imm_) && (val.imm_ >= 0));
rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((32 > val.imm_) && (val.imm_ >= 0));
rlwinm(dst, src, 0, val.imm_, 31, rc);
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, 0, val.immediate(), 31, rc);
}
@ -820,7 +857,7 @@ void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
void Assembler::subi(Register dst, Register src, const Operand& imm) {
addi(dst, src, Operand(-(imm.imm_)));
addi(dst, src, Operand(-(imm.immediate())));
}
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
@ -855,7 +892,7 @@ void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
}
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
d_form(SUBFIC, dst, src, imm.imm_, true);
d_form(SUBFIC, dst, src, imm.immediate(), true);
}
@ -900,43 +937,43 @@ void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use li instead to show intent
d_form(ADDI, dst, src, imm.imm_, true);
d_form(ADDI, dst, src, imm.immediate(), true);
}
void Assembler::addis(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use lis instead to show intent
d_form(ADDIS, dst, src, imm.imm_, true);
d_form(ADDIS, dst, src, imm.immediate(), true);
}
void Assembler::addic(Register dst, Register src, const Operand& imm) {
d_form(ADDIC, dst, src, imm.imm_, true);
d_form(ADDIC, dst, src, imm.immediate(), true);
}
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
d_form(ANDIx, rs, ra, imm.imm_, false);
d_form(ANDIx, rs, ra, imm.immediate(), false);
}
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
d_form(ANDISx, rs, ra, imm.imm_, false);
d_form(ANDISx, rs, ra, imm.immediate(), false);
}
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
d_form(ORI, rs, ra, imm.imm_, false);
d_form(ORI, rs, ra, imm.immediate(), false);
}
void Assembler::oris(Register dst, Register src, const Operand& imm) {
d_form(ORIS, src, dst, imm.imm_, false);
d_form(ORIS, src, dst, imm.immediate(), false);
}
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
int L = 1;
#else
@ -950,7 +987,7 @@ void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
uintptr_t uimm16 = src2.imm_;
uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
int L = 1;
#else
@ -964,7 +1001,7 @@ void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
intptr_t imm16 = src2.immediate();
int L = 0;
int pos = pc_offset();
DCHECK(is_int16(imm16));
@ -982,7 +1019,7 @@ void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
uintptr_t uimm16 = src2.imm_;
uintptr_t uimm16 = src2.immediate();
int L = 0;
DCHECK(is_uint16(uimm16));
DCHECK(cr.code() >= 0 && cr.code() <= 7);
@ -999,12 +1036,12 @@ void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
d_form(ADDI, dst, r0, imm.imm_, true);
d_form(ADDI, dst, r0, imm.immediate(), true);
}
void Assembler::lis(Register dst, const Operand& imm) {
d_form(ADDIS, dst, r0, imm.imm_, true);
d_form(ADDIS, dst, r0, imm.immediate(), true);
}
@ -1145,28 +1182,28 @@ void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((64 > val.imm_) && (val.imm_ >= 0));
rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((64 > val.imm_) && (val.imm_ >= 0));
rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((64 > val.imm_) && (val.imm_ >= 0));
rldicr(dst, src, 0, 63 - val.imm_, rc);
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicr(dst, src, 0, 63 - val.immediate(), rc);
}
void Assembler::clrldi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((64 > val.imm_) && (val.imm_ >= 0));
rldicl(dst, src, 0, val.imm_, rc);
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicl(dst, src, 0, val.immediate(), rc);
}
@ -1255,7 +1292,6 @@ bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
// immediate sequence.
return false;
}
intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
@ -1301,14 +1337,21 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
intptr_t value = src.immediate();
intptr_t value;
if (src.IsHeapObjectRequest()) {
RequestHeapObject(src.heap_object_request());
value = 0;
} else {
value = src.immediate();
}
bool relocatable = src.must_output_reloc_info(this);
bool canOptimize;
canOptimize =
!(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
if (use_constant_pool_for_mov(dst, src, canOptimize)) {
if (!src.IsHeapObjectRequest() &&
use_constant_pool_for_mov(dst, src, canOptimize)) {
DCHECK(is_constant_pool_available());
if (relocatable) {
RecordRelocInfo(src.rmode_);

@ -310,21 +310,45 @@ class Operand BASE_EMBEDDED {
// rm
INLINE(explicit Operand(Register rm));
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
INLINE(bool is_reg() const);
bool must_output_reloc_info(const Assembler* assembler) const;
inline intptr_t immediate() const {
DCHECK(!rm_.is_valid());
return imm_;
DCHECK(IsImmediate());
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
bool IsImmediate() const { return !rm_.is_valid(); }
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return value_.heap_object_request;
}
Register rm() const { return rm_; }
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
private:
Register rm_;
intptr_t imm_; // valid if rm_ == no_reg
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
intptr_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
@ -1519,6 +1543,12 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
@ -1527,16 +1557,10 @@ class Assembler : public AssemblerBase {
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
std::forward_list<HeapObjectRequest> heap_object_requests_;
};
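
The comment above describes the request/patch flow; the following standalone sketch, assuming a flat word-sized code buffer, shows the same idea in miniature. It is illustrative only and not V8 source.

// Simplified sketch (not V8 source): assembly emits a dummy value and
// records a request; after assembly the real value is produced and the
// placeholder is overwritten.
#include <cstddef>
#include <cstdint>
#include <forward_list>
#include <functional>
#include <vector>

struct HeapObjectRequest {
  std::function<intptr_t()> materialize;  // runs once an Isolate exists
  size_t offset;                          // position of the dummy placeholder
};

class Assembler {
 public:
  // During assembly: emit a placeholder and queue the request.
  void RequestHeapObject(std::function<intptr_t()> materialize) {
    heap_object_requests_.push_front({std::move(materialize), buffer_.size()});
    buffer_.push_back(0);  // dummy immediate, patched below
  }

  // After assembly (GetCode time in the diff): resolve each request and
  // overwrite the dummy placeholder with the real address/handle.
  void AllocateAndInstallRequestedHeapObjects() {
    for (auto& request : heap_object_requests_) {
      buffer_[request.offset] = request.materialize();
    }
  }

 private:
  std::vector<intptr_t> buffer_;  // stands in for the code buffer
  std::forward_list<HeapObjectRequest> heap_object_requests_;
};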

@ -2315,7 +2315,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
UNIMPLEMENTED_PPC();
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_PPC64
@ -2323,10 +2322,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
#else
11 * Assembler::kInstrSize);
#endif
ProfileEntryHookStub stub(masm->isolate());
__ mflr(r0);
__ Push(r0, ip);
__ CallStub(&stub);
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ Pop(r0, ip);
__ mtlr(r0);
}

@ -126,7 +126,7 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
#endif
// This can likely be optimized to make use of bc() with 24bit relative
//
// RecordRelocInfo(x.rmode_, x.imm_);
// RecordRelocInfo(x.rmode_, x.immediate);
// bc( BA, .... offset, LKset);
//
@ -150,12 +150,13 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
Label start;
bind(&start);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallSize(code, rmode, cond);
Label start;
bind(&start);
#endif
AllowDeferredHandleDereference using_raw_address;
@ -1992,6 +1993,16 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
// Block constant pool for the call instruction sequence.
ConstantPoolUnavailableScope constant_pool_unavailable(this);
mov(ip, Operand::EmbeddedCode(stub));
mtctr(ip);
bctrl();
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
@ -2091,6 +2102,17 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
rlwinm(dst, src, 0, 32 - num_least_bits, 31);
}
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r3, Operand(f->nargs));
mov(r4, Operand(ExternalReference(f, isolate())));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
@ -3381,7 +3403,8 @@ void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
if (rb.is_reg()) {
and_(ra, rs, rb.rm(), rc);
} else {
if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
rc == SetRC) {
andi(ra, rs, rb);
} else {
// mov handles the relocation.
@ -3397,7 +3420,8 @@ void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
if (rb.is_reg()) {
orx(ra, rs, rb.rm(), rc);
} else {
if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
rc == LeaveRC) {
ori(ra, rs, rb);
} else {
// mov handles the relocation.
@ -3414,7 +3438,8 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
if (rb.is_reg()) {
xor_(ra, rs, rb.rm(), rc);
} else {
if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
rc == LeaveRC) {
xori(ra, rs, rb);
} else {
// mov handles the relocation.

@ -870,11 +870,14 @@ class MacroAssembler : public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub,
Condition cond = al);
void CallStubDelayed(CodeStub* stub);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {

@ -344,19 +344,19 @@ void RelocInfo::Visit(Heap* heap) {
// Operand constructors
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm_ = immediate;
value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(f.address());
value_.immediate = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(value);
value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR;
}
@ -377,7 +377,8 @@ int32_t Assembler::emit_code_target(Handle<Code> target,
RecordRelocInfo(rmode);
int current = code_targets_.length();
if (current > 0 && code_targets_.last().is_identical_to(target)) {
if (current > 0 && !target.is_null() &&
code_targets_.last().is_identical_to(target)) {
// Optimization if we keep jumping to the same code target.
current--;
} else {

@ -47,9 +47,9 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/s390/assembler-s390-inl.h"
#include "src/code-stubs.h"
#include "src/macro-assembler.h"
#include "src/s390/assembler-s390-inl.h"
namespace v8 {
namespace internal {
@ -313,15 +313,24 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
imm_ = reinterpret_cast<intptr_t>(handle.location());
value_.immediate = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// no relocation needed
imm_ = reinterpret_cast<intptr_t>(obj);
value_.immediate = reinterpret_cast<intptr_t>(obj);
rmode_ = kRelocInfo_NONEPTR;
}
}
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
MemOperand::MemOperand(Register rn, int32_t offset) {
baseRegister = rn;
indexRegister = r0;
@ -334,6 +343,29 @@ MemOperand::MemOperand(Register rx, Register rb, int32_t offset) {
offset_ = offset;
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
Address pc = buffer_ + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
set_target_address_at(nullptr, pc, static_cast<Address>(NULL),
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
int index = instr & 0xFFFFFFFF;
code_targets_[index] = request.code_stub()->GetCode();
break;
}
}
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@ -346,9 +378,11 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
relocations_.reserve(128);
}
void Assembler::GetCode(Isolate* isloate, CodeDesc* desc) {
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitRelocations();
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@ -649,9 +683,9 @@ void Assembler::nop(int type) {
void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
DCHECK(is_uint12(op));
DCHECK(is_uint16(i2.imm_) || is_int16(i2.imm_));
DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate()));
emit4bytes((op & 0xFF0) * B20 | r1.code() * B20 | (op & 0xF) * B16 |
(i2.imm_ & 0xFFFF));
(i2.immediate() & 0xFFFF));
}
// RI2 format: <insn> M1,I2
@ -665,9 +699,9 @@ void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
void Assembler::ri_form(Opcode op, Condition m1, const Operand& i2) {
DCHECK(is_uint12(op));
DCHECK(is_uint4(m1));
DCHECK(op == BRC ? is_int16(i2.imm_) : is_uint16(i2.imm_));
DCHECK(op == BRC ? is_int16(i2.immediate()) : is_uint16(i2.immediate()));
emit4bytes((op & 0xFF0) * B20 | m1 * B20 | (op & 0xF) * B16 |
(i2.imm_ & 0xFFFF));
(i2.immediate() & 0xFFFF));
}
// RIE-f format: <insn> R1,R2,I3,I4,I5
@ -679,15 +713,15 @@ void Assembler::rie_f_form(Opcode op, Register r1, Register r2,
const Operand& i3, const Operand& i4,
const Operand& i5) {
DCHECK(is_uint16(op));
DCHECK(is_uint8(i3.imm_));
DCHECK(is_uint8(i4.imm_));
DCHECK(is_uint8(i5.imm_));
DCHECK(is_uint8(i3.immediate()));
DCHECK(is_uint8(i4.immediate()));
DCHECK(is_uint8(i5.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
(static_cast<uint64_t>(r1.code())) * B36 |
(static_cast<uint64_t>(r2.code())) * B32 |
(static_cast<uint64_t>(i3.imm_)) * B24 |
(static_cast<uint64_t>(i4.imm_)) * B16 |
(static_cast<uint64_t>(i5.imm_)) * B8 |
(static_cast<uint64_t>(i3.immediate())) * B24 |
(static_cast<uint64_t>(i4.immediate())) * B16 |
(static_cast<uint64_t>(i5.immediate())) * B8 |
(static_cast<uint64_t>(op & 0x00FF));
emit6bytes(code);
}
@ -705,11 +739,11 @@ void Assembler::rie_f_form(Opcode op, Register r1, Register r2,
void Assembler::rie_form(Opcode op, Register r1, Register r3,
const Operand& i2) {
DCHECK(is_uint16(op));
DCHECK(is_int16(i2.imm_));
DCHECK(is_int16(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
(static_cast<uint64_t>(r1.code())) * B36 |
(static_cast<uint64_t>(r3.code())) * B32 |
(static_cast<uint64_t>(i2.imm_ & 0xFFFF)) * B16 |
(static_cast<uint64_t>(i2.immediate() & 0xFFFF)) * B16 |
(static_cast<uint64_t>(op & 0x00FF));
emit6bytes(code);
}
@ -766,8 +800,9 @@ void Assembler::rs_form(Opcode op, Register r1, Condition m3, Register b2,
void Assembler::rsi_form(Opcode op, Register r1, Register r3,
const Operand& i2) {
DCHECK(is_uint8(op));
DCHECK(is_uint16(i2.imm_));
emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 | (i2.imm_ & 0xFFFF));
DCHECK(is_uint16(i2.immediate()));
emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 |
(i2.immediate() & 0xFFFF));
}
// RSL format: <insn> R1,R3,D2(B2)
@ -919,13 +954,13 @@ void Assembler::ris_form(Opcode op, Register r1, Condition m3, Register b4,
Disp d4, const Operand& i2) {
DCHECK(is_uint12(d4));
DCHECK(is_uint16(op));
DCHECK(is_uint8(i2.imm_));
DCHECK(is_uint8(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
(static_cast<uint64_t>(r1.code())) * B36 |
(static_cast<uint64_t>(m3)) * B32 |
(static_cast<uint64_t>(b4.code())) * B28 |
(static_cast<uint64_t>(d4)) * B16 |
(static_cast<uint64_t>(i2.imm_)) << 8 |
(static_cast<uint64_t>(i2.immediate())) << 8 |
(static_cast<uint64_t>(op & 0x00FF));
emit6bytes(code);
}
@ -960,7 +995,7 @@ void Assembler::s_form(Opcode op, Register b1, Disp d2) {
}
void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
emit4bytes((op & 0x00FF) << 24 | i2.imm_ * B16 | b1.code() * B12 | d1);
emit4bytes((op & 0x00FF) << 24 | i2.immediate() * B16 | b1.code() * B12 | d1);
}
// SIY format: <insn> D1(B1),I2
@ -979,9 +1014,9 @@ void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
DCHECK(is_uint20(d1) || is_int20(d1));
DCHECK(is_uint16(op));
DCHECK(is_uint8(i2.imm_));
DCHECK(is_uint8(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
(static_cast<uint64_t>(i2.imm_)) * B32 |
(static_cast<uint64_t>(i2.immediate())) * B32 |
(static_cast<uint64_t>(b1.code())) * B28 |
(static_cast<uint64_t>(d1 & 0x0FFF)) * B16 |
(static_cast<uint64_t>(d1 & 0x0FF000)) >> 4 |
@ -1005,11 +1040,11 @@ void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
void Assembler::sil_form(Opcode op, Register b1, Disp d1, const Operand& i2) {
DCHECK(is_uint12(d1));
DCHECK(is_uint16(op));
DCHECK(is_uint16(i2.imm_));
DCHECK(is_uint16(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op)) * B32 |
(static_cast<uint64_t>(b1.code())) * B28 |
(static_cast<uint64_t>(d1)) * B16 |
(static_cast<uint64_t>(i2.imm_));
(static_cast<uint64_t>(i2.immediate()));
emit6bytes(code);
}
@ -1123,10 +1158,10 @@ void Assembler::ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
DCHECK(is_uint12(d1));
DCHECK(is_uint8(op));
DCHECK(is_uint4(l1));
DCHECK(is_uint4(i3.imm_));
DCHECK(is_uint4(i3.immediate()));
uint64_t code =
(static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
(static_cast<uint64_t>(i3.imm_)) * B32 |
(static_cast<uint64_t>(i3.immediate())) * B32 |
(static_cast<uint64_t>(b1.code())) * B28 |
(static_cast<uint64_t>(d1)) * B16 |
(static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
@ -1424,7 +1459,7 @@ void Assembler::risbg(Register dst, Register src, const Operand& startBit,
bool zeroBits) {
// High tag the top bit of I4/EndBit to zero out any unselected bits
if (zeroBits)
rie_f_form(RISBG, dst, src, startBit, Operand(endBit.imm_ | 0x80),
rie_f_form(RISBG, dst, src, startBit, Operand(endBit.immediate() | 0x80),
shiftAmt);
else
rie_f_form(RISBG, dst, src, startBit, endBit, shiftAmt);
@ -1436,7 +1471,7 @@ void Assembler::risbgn(Register dst, Register src, const Operand& startBit,
bool zeroBits) {
// High tag the top bit of I4/EndBit to zero out any unselected bits
if (zeroBits)
rie_f_form(RISBGN, dst, src, startBit, Operand(endBit.imm_ | 0x80),
rie_f_form(RISBGN, dst, src, startBit, Operand(endBit.immediate() | 0x80),
shiftAmt);
else
rie_f_form(RISBGN, dst, src, startBit, endBit, shiftAmt);
@ -1470,9 +1505,10 @@ void Assembler::ark(Register r1, Register r2, Register r3) {
// Add Storage-Imm (32)
void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.imm_));
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
siy_form(ASI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
siy_form(ASI, Operand(0xff & imm.immediate()), opnd.rb(),
0xfffff & opnd.offset());
}
// -----------------------
@ -1493,9 +1529,10 @@ void Assembler::agrk(Register r1, Register r2, Register r3) {
// Add Storage-Imm (64)
void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.imm_));
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
siy_form(AGSI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
siy_form(AGSI, Operand(0xff & imm.immediate()), opnd.rb(),
0xfffff & opnd.offset());
}
// -------------------------------
@ -1820,6 +1857,14 @@ void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
brasl(r14, Operand(target_index));
}
void Assembler::call(CodeStub* stub) {
EnsureSpace ensure_space(this);
RequestHeapObject(HeapObjectRequest(stub));
int32_t target_index =
emit_code_target(Handle<Code>(), RelocInfo::CODE_TARGET);
brasl(r14, Operand(target_index));
}
void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
Condition cond) {
EnsureSpace ensure_space(this);

@ -303,6 +303,8 @@ class Operand BASE_EMBEDDED {
// rm
INLINE(explicit Operand(Register rm));
static Operand EmbeddedNumber(double value); // Smi or HeapNumber
// Return true if this is a register operand.
INLINE(bool is_reg() const);
@ -310,18 +312,39 @@ class Operand BASE_EMBEDDED {
inline intptr_t immediate() const {
DCHECK(!rm_.is_valid());
return imm_;
DCHECK(!is_heap_object_request());
return value_.immediate;
}
HeapObjectRequest heap_object_request() const {
DCHECK(is_heap_object_request());
return value_.heap_object_request;
}
inline void setBits(int n) {
imm_ = (static_cast<uint32_t>(imm_) << (32 - n)) >> (32 - n);
value_.immediate =
(static_cast<uint32_t>(value_.immediate) << (32 - n)) >> (32 - n);
}
Register rm() const { return rm_; }
bool is_heap_object_request() const {
DCHECK_IMPLIES(is_heap_object_request_, !rm_.is_valid());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
private:
Register rm_;
intptr_t imm_; // valid if rm_ == no_reg
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
intptr_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
@ -839,6 +862,7 @@ class Assembler : public AssemblerBase {
}
void call(Handle<Code> target, RelocInfo::Mode rmode);
void call(CodeStub* stub);
void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
// S390 instruction generation
@ -1334,6 +1358,7 @@ class Assembler : public AssemblerBase {
public:
byte* buffer_pos() const { return buffer_; }
void RequestHeapObject(HeapObjectRequest request);
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@ -1458,8 +1483,6 @@ class Assembler : public AssemblerBase {
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
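
For reference, a cut-down standalone version of the Operand change shown in this header (and its PPC counterpart): the operand stores either a plain immediate or a pending heap-object request in a union, discriminated by is_heap_object_request_. This is a sketch under those assumptions, not the actual V8 class; the real EmbeddedNumber additionally tries to encode the value as a Smi first.

// Illustrative sketch only (not V8 source).
#include <cassert>
#include <cstdint>

struct HeapObjectRequest {
  explicit HeapObjectRequest(double number) : heap_number(number) {}
  double heap_number;
};

class Operand {
 public:
  explicit Operand(intptr_t immediate) { value_.immediate = immediate; }

  // Mirrors Operand::EmbeddedNumber: defer allocation of the heap number.
  static Operand EmbeddedNumber(double number) {
    Operand result(0);
    result.is_heap_object_request_ = true;
    result.value_.heap_object_request = HeapObjectRequest(number);
    return result;
  }

  bool is_heap_object_request() const { return is_heap_object_request_; }

  intptr_t immediate() const {
    assert(!is_heap_object_request_);
    return value_.immediate;
  }

  HeapObjectRequest heap_object_request() const {
    assert(is_heap_object_request_);
    return value_.heap_object_request;
  }

 private:
  union Value {
    Value() {}
    HeapObjectRequest heap_object_request;  // if is_heap_object_request_
    intptr_t immediate;                     // otherwise
  } value_;
  bool is_heap_object_request_ = false;
};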

@ -2250,7 +2250,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
UNIMPLEMENTED_S390();
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_S390X
@ -2260,10 +2259,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
#else
32);
#endif
ProfileEntryHookStub stub(masm->isolate());
__ CleanseP(r14);
__ Push(r14, ip);
__ CallStub(&stub); // BRASL
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ Pop(r14, ip);
}
}

@ -1810,6 +1810,11 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
call(stub);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
@ -1874,6 +1879,14 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
AndP(dst, src, Operand((1 << num_least_bits) - 1));
}
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
mov(r2, Operand(f->nargs));
mov(r3, Operand(ExternalReference(f, isolate())));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r2 has the return value after call.
@ -2869,20 +2882,30 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
}
void MacroAssembler::mov(Register dst, const Operand& src) {
#if V8_TARGET_ARCH_S390X
int64_t value;
#else
int value;
#endif
if (src.is_heap_object_request()) {
RequestHeapObject(src.heap_object_request());
value = 0;
} else {
value = src.immediate();
}
if (src.rmode_ != kRelocInfo_NONEPTR) {
// some form of relocation needed
RecordRelocInfo(src.rmode_, src.imm_);
RecordRelocInfo(src.rmode_, value);
}
#if V8_TARGET_ARCH_S390X
int64_t value = src.immediate();
int32_t hi_32 = static_cast<int64_t>(value) >> 32;
int32_t lo_32 = static_cast<int32_t>(value);
iihf(dst, Operand(hi_32));
iilf(dst, Operand(lo_32));
#else
int value = src.immediate();
iilf(dst, Operand(value));
#endif
}
@ -3525,22 +3548,22 @@ void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
void MacroAssembler::Sub32(Register dst, const Operand& imm) {
Add32(dst, Operand(-(imm.imm_)));
Add32(dst, Operand(-(imm.immediate())));
}
// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
void MacroAssembler::SubP(Register dst, const Operand& imm) {
AddP(dst, Operand(-(imm.imm_)));
AddP(dst, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
Add32(dst, src, Operand(-(imm.imm_)));
Add32(dst, src, Operand(-(imm.immediate())));
}
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
AddP(dst, src, Operand(-(imm.imm_)));
AddP(dst, src, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register dst - Register src)
@ -3766,7 +3789,7 @@ void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
// AND Pointer Size - dst = dst & imm
void MacroAssembler::AndP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.imm_;
intptr_t value = opnd.immediate();
if (value >> 32 != -1) {
// this may not work b/c condition code won't be set correctly
nihf(dst, Operand(value >> 32));
@ -3786,7 +3809,7 @@ void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
// AND Pointer Size - dst = src & imm
void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
// Try to exploit RISBG first
intptr_t value = opnd.imm_;
intptr_t value = opnd.immediate();
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
intptr_t shifted_value = value;
int trailing_zeros = 0;
@ -3888,7 +3911,7 @@ void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
// OR Pointer Size - dst = dst & imm
void MacroAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.imm_;
intptr_t value = opnd.immediate();
if (value >> 32 != 0) {
// this may not work b/c condition code won't be set correctly
oihf(dst, Operand(value >> 32));
@ -3976,7 +3999,7 @@ void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
// XOR Pointer Size - dst = dst & imm
void MacroAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
intptr_t value = opnd.imm_;
intptr_t value = opnd.immediate();
xihf(dst, Operand(value >> 32));
xilf(dst, Operand(value & 0xFFFFFFFF));
#else
@ -4098,7 +4121,7 @@ void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
cfi(dst, opnd);
} else {
// Need to generate relocation record here
RecordRelocInfo(opnd.rmode_, opnd.imm_);
RecordRelocInfo(opnd.rmode_, opnd.immediate());
cfi(dst, opnd);
}
}
@ -4396,7 +4419,7 @@ void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
mem.getIndexRegister().is(r0) && is_int16(opnd.immediate())) {
#if V8_TARGET_ARCH_S390X
mvghi(mem, opnd);
#else
@ -5006,7 +5029,7 @@ void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
// Clear right most # of bits
void MacroAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
int numBitsToClear = val.imm_ % (kPointerSize * 8);
int numBitsToClear = val.immediate() % (kPointerSize * 8);
// Try to use RISBG if possible
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {

@ -1159,11 +1159,14 @@ class MacroAssembler : public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub,
Condition cond = al);
void CallStubDelayed(CodeStub* stub);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {