[compiler] Delay generation of code stubs.

Bug: v8:6048
Change-Id: I055f8207d0a32b7fa0fb95961f2e0f29d1c02569
Reviewed-on: https://chromium-review.googlesource.com/548078
Commit-Queue: Georg Neis <neis@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46300}
Georg Neis authored on 2017-06-28 20:46:29 +02:00; committed by Commit Bot
parent 0216361dc2
commit 040fa06fb3
36 changed files with 555 additions and 185 deletions
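In outline: a call site that previously had to generate a stub's code object on the spot now emits a placeholder (a zero immediate or a null code handle) and records a HeapObjectRequest carrying the current pc offset. After assembly, Assembler::GetCode invokes AllocateAndInstallRequestedHeapObjects, which, now that an isolate is available, allocates each heap number or generates each stub and patches the corresponding placeholder. The following self-contained toy sketch illustrates that lifecycle; the names follow the patch, but the buffer and patch details are simplifications, not V8 code.

// Toy model of the request/patch lifecycle (illustrative only).
#include <cstdint>
#include <cstring>
#include <forward_list>
#include <vector>

struct HeapObjectRequest {
  enum Kind { kHeapNumber, kCodeStub };
  Kind kind;
  int offset = -1;  // placeholder position in the buffer, stamped on record
};

struct Assembler {
  std::vector<uint8_t> buffer;
  std::forward_list<HeapObjectRequest> requests;

  // Assembly phase: emit a dummy word and remember where it went.
  void EmitPlaceholder(HeapObjectRequest request) {
    request.offset = static_cast<int>(buffer.size());
    requests.push_front(request);
    uint64_t dummy = 0;  // stands in for the not-yet-existing handle
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&dummy);
    buffer.insert(buffer.end(), p, p + sizeof dummy);
  }

  // Post-assembly phase: materialize each object (stub->GetCode() in V8)
  // and overwrite its placeholder. Order is irrelevant: patches are disjoint.
  void PatchAll(uint64_t (*materialize)(const HeapObjectRequest&)) {
    for (const HeapObjectRequest& request : requests) {
      uint64_t address = materialize(request);
      std::memcpy(buffer.data() + request.offset, &address, sizeof address);
    }
  }
};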


@ -42,6 +42,7 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@ -421,8 +422,15 @@ Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_number_ = true;
result.value_.heap_number = value;
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Operand Operand::EmbeddedCode(CodeStub* stub) {
Operand result(0, RelocInfo::CODE_TARGET);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(stub);
return result;
}
@ -496,6 +504,25 @@ void NeonMemOperand::SetAlignment(int align) {
}
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
}
Address pc = buffer_ + request.offset();
Memory::Address_at(constant_pool_entry_address(pc, 0 /* unused */)) =
reinterpret_cast<Address>(object.location());
}
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@ -583,7 +610,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
DCHECK(pending_32_bit_constants_.empty());
DCHECK(pending_64_bit_constants_.empty());
AllocateRequestedHeapNumbers(isolate);
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
@ -1185,8 +1212,8 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
}
} else {
int32_t immediate;
if (x.IsHeapNumber()) {
RequestHeapNumber(x.heap_number());
if (x.IsHeapObjectRequest()) {
RequestHeapObject(x.heap_object_request());
immediate = 0;
} else {
immediate = x.immediate();
@ -5092,7 +5119,10 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
if (rmode == RelocInfo::CODE_TARGET && IsCodeTargetSharingAllowed()) {
// Share entries if allowed and possible.
// Null-values are placeholders and must be ignored.
if (rmode == RelocInfo::CODE_TARGET && IsCodeTargetSharingAllowed() &&
value != 0) {
// Sharing entries here relies on canonicalized handles - without them, we
// will miss the optimisation opportunity.
Address handle_address = reinterpret_cast<Address>(value);
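Why the added value != 0 check matters: every Operand::EmbeddedCode placeholder enters the constant pool carrying the immediate 0, so without the check two distinct delayed stubs would be "shared" into a single pool slot and the second patch would redirect both calls. A hypothetical sequence (stub_a and stub_b are illustrative names):

  // Both operands carry immediate 0 until patch time.
  mov(ip, Operand::EmbeddedCode(stub_a));  // pool entry A, value 0
  mov(ip, Operand::EmbeddedCode(stub_b));  // must get its own entry, not A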


@ -524,7 +524,8 @@ class Operand BASE_EMBEDDED {
// rm <shift_op> rs
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
static Operand EmbeddedNumber(double value); // Smi or HeapNumber
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
bool IsRegister() const {
@ -558,21 +559,23 @@ class Operand BASE_EMBEDDED {
inline int32_t immediate() const {
DCHECK(IsImmediate());
DCHECK(!IsHeapNumber());
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
bool IsImmediate() const {
return !rm_.is_valid();
}
double heap_number() const {
DCHECK(IsHeapNumber());
return value_.heap_number;
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return value_.heap_object_request;
}
bool IsHeapNumber() const {
DCHECK_IMPLIES(is_heap_number_, IsImmediate());
DCHECK_IMPLIES(is_heap_number_, rmode_ == RelocInfo::EMBEDDED_OBJECT);
return is_heap_number_;
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
Register rm() const { return rm_; }
@ -585,11 +588,12 @@ class Operand BASE_EMBEDDED {
Register rs_;
ShiftOp shift_op_;
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
union {
double heap_number; // if is_heap_number_
int32_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_number_ = false;
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
int32_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
@ -1583,14 +1587,6 @@ class Assembler : public AssemblerBase {
// the marker and branch over the data.
void RecordConstPool(int size);
// Patch the dummy heap number that we emitted during code assembly in the
// constant pool entry referenced by {pc}. Replace it with the actual heap
// object (handle).
static void set_heap_number(Handle<HeapObject> number, Address pc) {
Memory::Address_at(constant_pool_entry_address(pc, 0 /* unused */)) =
reinterpret_cast<Address>(number.location());
}
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. CheckConstantPool() should be
// called before any use of db/dd/dq/dp to ensure that constant pools
@ -1868,6 +1864,19 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
};
constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;


@ -2183,14 +2183,24 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != NULL) {
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ pop(lr);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
predictable.ExpectSize(masm->CallStubSize(&stub) +
2 * Assembler::kInstrSize);
predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);


@ -91,8 +91,8 @@ int MacroAssembler::CallSize(
mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize;
}
int MacroAssembler::CallStubSize(CodeStub* stub, Condition cond) {
return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
int MacroAssembler::CallStubSize() {
return CallSize(Handle<Code>(), RelocInfo::CODE_TARGET, al);
}
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
@ -147,8 +147,6 @@ int MacroAssembler::CallSize(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
AllowDeferredHandleDereference embedding_raw_address;
@ -2322,6 +2320,36 @@ void MacroAssembler::CallStub(CodeStub* stub,
false);
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallStubSize();
#endif
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
// blx ip
// @ return address
// Or for pre-V7 or values that may be back-patched
// to avoid ICache flushes:
// ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
mov(ip, Operand::EmbeddedCode(stub));
blx(ip, al);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
@ -2506,6 +2534,17 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
and_(dst, src, Operand((1 << num_least_bits) - 1));
}
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(f->nargs));
mov(r1, Operand(ExternalReference(f, isolate())));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,


@ -101,7 +101,7 @@ class MacroAssembler: public Assembler {
// checking the call size and emitting the actual call.
static int CallSize(Register target, Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallStubSize(CodeStub* stub, Condition cond = al);
int CallStubSize();
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
@ -1011,11 +1011,14 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub,
Condition cond = al);
void CallStubDelayed(CodeStub* stub);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);


@ -394,9 +394,21 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
bool Operand::IsHeapObjectRequest() const {
DCHECK_IMPLIES(heap_object_request_.has_value(), reg_.Is(NoReg));
DCHECK_IMPLIES(heap_object_request_.has_value(),
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT ||
immediate_.rmode() == RelocInfo::CODE_TARGET);
return heap_object_request_.has_value();
}
HeapObjectRequest Operand::heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return *heap_object_request_;
}
bool Operand::IsImmediate() const {
return reg_.Is(NoReg) && !is_heap_number();
return reg_.Is(NoReg) && !IsHeapObjectRequest();
}
@ -425,6 +437,13 @@ Operand Operand::ToExtendedRegister() const {
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) ||
(heap_object_request().kind() == HeapObjectRequest::kCodeStub &&
immediate_.rmode() == RelocInfo::CODE_TARGET));
return immediate_;
}
Immediate Operand::immediate() const {
DCHECK(IsImmediate());


@ -34,6 +34,7 @@
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/register-configuration.h"
namespace v8 {
@ -366,7 +367,8 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
if (CanBeShared(mode)) {
write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
} else if (mode == RelocInfo::CODE_TARGET &&
assm_->IsCodeTargetSharingAllowed()) {
assm_->IsCodeTargetSharingAllowed() && raw_data != 0) {
// A zero data value is a placeholder and must not be shared.
write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
} else {
entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
@ -609,9 +611,23 @@ void Assembler::Reset() {
no_const_pool_before_ = 0;
}
void Assembler::set_heap_number(Handle<HeapObject> number, Address pc) {
Memory::Address_at(target_pointer_address_at(pc)) =
reinterpret_cast<Address>(number.location());
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
}
Address pc = buffer_ + request.offset();
Memory::Address_at(target_pointer_address_at(pc)) =
reinterpret_cast<Address>(object.location());
}
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
@ -619,7 +635,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
AllocateRequestedHeapNumbers(isolate);
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
if (desc) {
@ -1709,20 +1725,28 @@ void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}
Operand Operand::EmbeddedNumber(double value) {
Operand Operand::EmbeddedNumber(double number) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) {
if (DoubleToSmiInteger(number, &smi)) {
return Operand(Immediate(Smi::FromInt(smi)));
}
Operand result(bit_cast<int64_t>(value), RelocInfo::EMBEDDED_OBJECT);
result.is_heap_number_ = true;
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.heap_object_request_.emplace(number);
DCHECK(result.IsHeapObjectRequest());
return result;
}
Operand Operand::EmbeddedCode(CodeStub* stub) {
Operand result(0, RelocInfo::CODE_TARGET);
result.heap_object_request_.emplace(stub);
DCHECK(result.IsHeapObjectRequest());
return result;
}
void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
if (operand.is_heap_number()) {
RequestHeapNumber(operand.heap_number());
ldr(rt, Immediate(0, RelocInfo::EMBEDDED_OBJECT));
if (operand.IsHeapObjectRequest()) {
RequestHeapObject(operand.heap_object_request());
ldr(rt, operand.immediate_for_heap_object_request());
} else {
ldr(rt, operand.immediate());
}


@ -13,6 +13,7 @@
#include "src/arm64/constants-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/base/optional.h"
#include "src/globals.h"
#include "src/utils.h"
@ -690,19 +691,12 @@ class Operand {
Extend extend,
unsigned shift_amount = 0);
static Operand EmbeddedNumber(double value); // Smi or HeapNumber.
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
bool is_heap_number() const {
DCHECK_IMPLIES(is_heap_number_, reg_.Is(NoReg));
DCHECK_IMPLIES(is_heap_number_,
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT);
return is_heap_number_;
}
double heap_number() const {
DCHECK(is_heap_number());
return bit_cast<double>(immediate_.value());
}
inline bool IsHeapObjectRequest() const;
inline HeapObjectRequest heap_object_request() const;
inline Immediate immediate_for_heap_object_request() const;
template<typename T>
inline explicit Operand(Handle<T> handle);
@ -739,12 +733,12 @@ class Operand {
inline static Operand UntagSmiAndScale(Register smi, int scale);
private:
base::Optional<HeapObjectRequest> heap_object_request_;
Immediate immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
bool is_heap_number_ = false;
};
@ -1086,11 +1080,6 @@ class Assembler : public AssemblerBase {
// the marker and branch over the data.
void RecordConstPool(int size);
// Patch the dummy heap number that we emitted during code assembly in the
// constant pool entry referenced by {pc}. Replace it with the actual heap
// object (handle).
static void set_heap_number(Handle<HeapObject> number, Address pc);
// Instruction set functions ------------------------------------------------
// Branch / Jump instructions.
@ -3609,6 +3598,19 @@ class Assembler : public AssemblerBase {
// the length of the label chain.
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
private:
friend class EnsureSpace;
friend class ConstPool;


@ -2175,6 +2175,20 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
static const unsigned int kProfileEntryHookCallSize =
Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != NULL) {
Assembler::BlockConstPoolScope no_const_pools(masm);
DontEmitDebugCodeScope no_debug_code(masm);
Label entry_hook_call_start;
__ Bind(&entry_hook_call_start);
__ Push(lr);
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
__ Pop(lr);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
@ -2187,7 +2201,6 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
__ Pop(lr);
}
}


@ -1902,11 +1902,36 @@ void MacroAssembler::CallStub(CodeStub* stub) {
Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
#endif
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, Operand::EmbeddedCode(stub));
Blr(temp);
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, kCallSizeWithRelocation);
#endif
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Mov(x0, f->nargs);
Mov(x1, ExternalReference(f, isolate()));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,


@ -1680,8 +1680,11 @@ class MacroAssembler : public Assembler {
inline void jmp(Label* L);
void CallStub(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
void TailCallStub(CodeStub* stub);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);


@ -191,19 +191,6 @@ void AssemblerBase::Print(Isolate* isolate) {
v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_, nullptr);
}
AssemblerBase::RequestedHeapNumber::RequestedHeapNumber(double value,
int offset)
: value(value), offset(offset) {
DCHECK(!IsSmiDouble(value));
}
void AssemblerBase::AllocateRequestedHeapNumbers(Isolate* isolate) {
for (auto& heap_number : heap_numbers_) {
Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
heap_number.value, IMMUTABLE, TENURED);
Assembler::set_heap_number(object, buffer_ + heap_number.offset);
}
}
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
@ -1921,6 +1908,17 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
return !empty ? emitted_label_.pos() : 0;
}
HeapObjectRequest::HeapObjectRequest(double heap_number, int offset)
: kind_(kHeapNumber), offset_(offset) {
value_.heap_number = heap_number;
DCHECK(!IsSmiDouble(value_.heap_number));
}
HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
: kind_(kCodeStub), offset_(offset) {
value_.code_stub = code_stub;
DCHECK_NOT_NULL(value_.code_stub);
}
// Platform specific but identical code for all the platforms.
@ -1956,5 +1954,10 @@ void Assembler::DataAlign(int m) {
}
}
void Assembler::RequestHeapObject(HeapObjectRequest request) {
request.set_offset(pc_offset());
heap_object_requests_.push_front(request);
}
} // namespace internal
} // namespace v8


@ -149,17 +149,6 @@ class AssemblerBase: public Malloced {
// The program counter, which points into the buffer above and moves forward.
byte* pc_;
// The following two functions help with avoiding allocations of heap numbers
// during the code assembly phase. {RequestHeapNumber} records the need for a
// future heap number allocation, together with the current pc offset. After
// code assembly, {AllocateRequestedHeapNumbers} will allocate these numbers
// and, with the help of {Assembler::set_heap_number}, place them where they
// are expected (determined by the recorded pc offset).
void RequestHeapNumber(double value) {
heap_numbers_.emplace_front(value, pc_offset());
}
void AllocateRequestedHeapNumbers(Isolate* isolate);
private:
IsolateData isolate_data_;
uint64_t enabled_cpu_features_;
@ -173,19 +162,8 @@ class AssemblerBase: public Malloced {
// Constant pool.
friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope;
// Delayed allocation of heap numbers.
struct RequestedHeapNumber {
RequestedHeapNumber(double value, int offset);
double value; // The number for which we later need to create a HeapObject.
int offset; // The {buffer_} offset where we emitted a dummy that needs to
// get replaced by the actual HeapObject via
// {Assembler::set_heap_number}.
};
std::forward_list<RequestedHeapNumber> heap_numbers_;
};
// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
public:
@ -1282,6 +1260,46 @@ class ConstantPoolBuilder BASE_EMBEDDED {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
class HeapObjectRequest {
public:
explicit HeapObjectRequest(double heap_number, int offset = -1);
explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
enum Kind { kHeapNumber, kCodeStub };
Kind kind() const { return kind_; }
double heap_number() const {
DCHECK_EQ(kind(), kHeapNumber);
return value_.heap_number;
}
CodeStub* code_stub() const {
DCHECK_EQ(kind(), kCodeStub);
return value_.code_stub;
}
// The code buffer offset at the time of the request.
int offset() const {
DCHECK_GE(offset_, 0);
return offset_;
}
void set_offset(int offset) {
DCHECK_LT(offset_, 0);
offset_ = offset;
DCHECK_GE(offset_, 0);
}
private:
Kind kind_;
union {
double heap_number;
CodeStub* code_stub;
} value_;
int offset_;
};
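The offset follows a set-once protocol: a request is constructed without one (the -1 default), RequestHeapObject stamps the assembler's current pc offset exactly once, and only then may offset() be read; the DCHECKs above enforce each step. Schematically:

  HeapObjectRequest request(stub);    // offset_ == -1; offset() would DCHECK
  request.set_offset(pc_offset());    // permitted once: DCHECK_LT(offset_, 0)
  int patch_site = request.offset();  // valid from here on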
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_


@ -142,7 +142,7 @@ class Node;
static const int kHasReturnedMinusZeroSentinel = 1;
// Stub is base classes of all stubs.
class CodeStub BASE_EMBEDDED {
class CodeStub : public ZoneObject {
public:
enum Major {
// TODO(mvstanton): eliminate the NoCache key by getting rid
@ -223,6 +223,11 @@ class CodeStub BASE_EMBEDDED {
}
Isolate* isolate() const { return isolate_; }
void set_isolate(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
DCHECK(isolate_ == nullptr || isolate_ == isolate);
isolate_ = isolate;
}
void DeleteStubFromCacheForTesting();
@ -1390,6 +1395,7 @@ class ProfileEntryHookStub : public PlatformCodeStub {
// Generates a call to the entry hook if it's enabled.
static void MaybeCallEntryHook(MacroAssembler* masm);
static void MaybeCallEntryHookDelayed(MacroAssembler* masm, Zone* zone);
private:
static void EntryHookTrampoline(intptr_t function,


@ -195,7 +195,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer) {}
unwinding_info_writer_(unwinding_info_writer),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
Register value, Register scratch0, Register scratch1,
@ -210,7 +211,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer) {}
unwinding_info_writer_(unwinding_info_writer),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@ -229,15 +231,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(lr);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
if (index_.is(no_reg)) {
__ add(scratch1_, object_, Operand(index_immediate_));
} else {
DCHECK_EQ(0, index_immediate_);
__ add(scratch1_, object_, Operand(index_));
}
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@ -254,6 +256,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RecordWriteMode const mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
};
template <typename T>
@ -940,8 +943,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
__ vmov(d0, d2);
break;
}
@ -2819,7 +2822,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kThrowWasmStackOverflow);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,


@ -319,7 +319,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
unwinding_info_writer_(unwinding_info_writer) {}
unwinding_info_writer_(unwinding_info_writer),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@ -339,10 +340,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
__ Add(scratch1_, object_, index_);
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@ -358,6 +359,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RecordWriteMode const mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
Zone* zone_;
};
@ -935,8 +937,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
@ -2479,7 +2481,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kThrowWasmStackOverflow);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,


@ -93,7 +93,7 @@ void CodeGenerator::AssembleCode() {
// Place function entry hook if requested to do so.
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm());
ProfileEntryHookStub::MaybeCallEntryHookDelayed(masm(), zone());
}
// Architecture-specific, linkage-specific prologue.
info->set_prologue_offset(masm()->pc_offset());


@ -105,11 +105,12 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
int arguments, Safepoint::DeoptMode deopt_mode);
Zone* zone() const { return code()->zone(); }
private:
MacroAssembler* masm() { return &masm_; }
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
Zone* zone() const { return code()->zone(); }
CompilationInfo* info() const { return info_; }
OsrHelper* osr_helper() { return &(*osr_helper_); }


@ -247,7 +247,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode) {}
mode_(mode),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@ -261,10 +262,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
__ lea(scratch1_, operand_);
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
}
private:
@ -274,6 +275,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
Zone* zone_;
};
} // namespace
@ -1134,8 +1136,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movaps(xmm1, xmm2);
__ movaps(xmm2, xmm0);
}
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
__ movaps(i.OutputDoubleRegister(), xmm3);
break;
}


@ -236,7 +236,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode) {}
mode_(mode),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@ -250,10 +251,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
__ leap(scratch1_, operand_);
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
}
private:
@ -263,6 +264,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
Zone* zone_;
};
class WasmOutOfLineTrap final : public OutOfLineCode {
@ -292,7 +294,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
// with AssembleArchTrap.
__ Push(Smi::FromInt(position_));
__ Move(rsi, Smi::kZero);
__ CallRuntime(Runtime::kThrowWasmError);
__ CallRuntimeDelayed(gen_->zone(), Runtime::kThrowWasmError);
ReferenceMap* reference_map =
new (gen_->code()->zone()) ReferenceMap(gen_->code()->zone());
@ -1061,8 +1063,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Pow: {
// TODO(bmeurer): Improve integration of the stub.
__ Movsd(xmm2, xmm0);
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Movsd(xmm0, xmm3);
break;
}


@ -388,8 +388,8 @@ void Assembler::emit(const Immediate& x) {
return;
}
if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
if (x.is_heap_number()) {
RequestHeapNumber(x.heap_number());
if (x.is_heap_object_request()) {
RequestHeapObject(x.heap_object_request());
emit(0);
} else {
emit(x.immediate());


@ -49,6 +49,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@ -60,8 +61,15 @@ Immediate Immediate::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Immediate(Smi::FromInt(smi));
Immediate result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_number_ = true;
result.value_.heap_number = value;
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Immediate Immediate::EmbeddedCode(CodeStub* stub) {
Immediate result(0, RelocInfo::CODE_TARGET);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(stub);
return result;
}
@ -308,6 +316,23 @@ Register Operand::reg() const {
return Register::from_code(buf_[0] & 0x07);
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
}
Address pc = buffer_ + request.offset();
Memory::Object_Handle_at(pc) = object;
}
}
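Each port patches a different site for the dummy handle, which is why AllocateAndInstallRequestedHeapObjects lives in each platform's assembler rather than in shared code. The patch targets, as they appear in these diffs:

  // arm:   Memory::Address_at(constant_pool_entry_address(pc, 0)) = handle
  // arm64: Memory::Address_at(target_pointer_address_at(pc))      = handle
  // ia32:  Memory::Object_Handle_at(pc)                           = handle
  // x64:   heap numbers as on ia32; code stubs instead update
  //        code_targets_[Memory::int32_at(pc)] (see below)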
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@ -335,7 +360,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// that we are still not overlapping instructions and relocation info).
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
AllocateRequestedHeapNumbers(isolate);
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
@ -1600,6 +1625,11 @@ void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
emit(code, rmode);
}
void Assembler::call(CodeStub* stub) {
EnsureSpace ensure_space(this);
EMIT(0xE8);
emit(Immediate::EmbeddedCode(stub));
}
void Assembler::jmp(Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);


@ -262,7 +262,6 @@ enum RoundingMode {
kRoundToZero = 0x3
};
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@ -275,24 +274,27 @@ class Immediate BASE_EMBEDDED {
inline explicit Immediate(Address addr);
inline explicit Immediate(Address x, RelocInfo::Mode rmode);
static Immediate EmbeddedNumber(double value); // Smi or HeapNumber.
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedCode(CodeStub* code);
static Immediate CodeRelativeOffset(Label* label) {
return Immediate(label);
}
bool is_heap_number() const {
DCHECK_IMPLIES(is_heap_number_, rmode_ == RelocInfo::EMBEDDED_OBJECT);
return is_heap_number_;
bool is_heap_object_request() const {
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
double heap_number() const {
DCHECK(is_heap_number());
return value_.heap_number;
HeapObjectRequest heap_object_request() const {
DCHECK(is_heap_object_request());
return value_.heap_object_request;
}
int immediate() const {
DCHECK(!is_heap_number());
DCHECK(!is_heap_object_request());
return value_.immediate;
}
@ -314,11 +316,12 @@ class Immediate BASE_EMBEDDED {
private:
inline explicit Immediate(Label* value);
union {
double heap_number;
union Value {
Value() {}
HeapObjectRequest heap_object_request;
int immediate;
} value_;
bool is_heap_number_ = false;
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Operand;
@ -510,7 +513,7 @@ class Assembler : public AssemblerBase {
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
virtual ~Assembler() { }
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@ -864,6 +867,7 @@ class Assembler : public AssemblerBase {
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code, RelocInfo::Mode rmode);
void call(CodeStub* stub);
// Jumps
// unconditional jump to L
@ -1717,12 +1721,6 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
// Patch the dummy heap number that we emitted at {pc} during code assembly
// with the actual heap object (handle).
static void set_heap_number(Handle<HeapObject> number, Address pc) {
Memory::Object_Handle_at(pc) = number;
}
protected:
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
@ -1813,6 +1811,19 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
};


@ -2139,6 +2139,12 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != NULL) {
masm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {


@ -1578,6 +1578,10 @@ void MacroAssembler::CallStub(CodeStub* stub) {
call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
@ -2067,7 +2071,8 @@ void MacroAssembler::Move(Register dst, Register src) {
void MacroAssembler::Move(Register dst, const Immediate& x) {
if (!x.is_heap_number() && x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
if (!x.is_heap_object_request() && x.is_zero() &&
RelocInfo::IsNone(x.rmode_)) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else {
mov(dst, x);


@ -618,6 +618,7 @@ class MacroAssembler: public Assembler {
// Call a code stub. Generate the code if necessary.
void CallStub(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);


@ -588,10 +588,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
static void set_heap_number(Handle<HeapObject> number, Address pc) {
UNIMPLEMENTED();
}
static void QuietNaN(HeapObject* nan);
// This sets the branch destination (which gets loaded at the call address).
@ -2220,6 +2216,19 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class CodePatcher;


@ -2370,6 +2370,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
UNIMPLEMENTED_MIPS();
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
__ pop(ra);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {


@ -593,10 +593,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
static void set_heap_number(Handle<HeapObject> number, Address pc) {
UNIMPLEMENTED();
}
static void JumpLabelToJumpRegister(Address pc);
static void QuietNaN(HeapObject* nan);
@ -2269,6 +2265,19 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class CodePatcher;


@ -2373,6 +2373,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
UNIMPLEMENTED_MIPS();
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
__ pop(ra);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {


@ -58,7 +58,8 @@ void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
rmode == RelocInfo::CODE_AGE_SEQUENCE);
RecordRelocInfo(rmode);
int current = code_targets_.length();
if (current > 0 && code_targets_.last().address() == target.address()) {
if (current > 0 && !target.is_null() &&
code_targets_.last().address() == target.address()) {
// Optimization if we keep jumping to the same code target.
emitl(current - 1);
} else {
@ -310,7 +311,6 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
Address Assembler::runtime_entry_at(Address pc) {
return Memory::int32_at(pc) + isolate_data().code_range_start_;
}


@ -18,6 +18,7 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@ -292,6 +293,25 @@ bool Operand::AddressUsesRegister(Register reg) const {
}
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Address pc = buffer_ + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapNumber> object = isolate->factory()->NewHeapNumber(
request.heap_number(), IMMUTABLE, TENURED);
Memory::Object_Handle_at(pc) = object;
break;
}
case HeapObjectRequest::kCodeStub: {
request.code_stub()->set_isolate(isolate);
code_targets_[Memory::int32_at(pc)] = request.code_stub()->GetCode();
break;
}
}
}
}
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@ -314,7 +334,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// that we are still not overlapping instructions and relocation info.
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
AllocateRequestedHeapNumbers(isolate);
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
@ -882,6 +902,14 @@ void Assembler::call(Address entry, RelocInfo::Mode rmode) {
emit_runtime_entry(entry, rmode);
}
void Assembler::call(CodeStub* stub) {
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
RequestHeapObject(HeapObjectRequest(stub));
emit_code_target(Handle<Code>(), RelocInfo::CODE_TARGET);
}
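On x64 the placeholder is a null Handle<Code> routed through emit_code_target; the is_null() guard added above keeps it from being deduplicated against an earlier target (and from dereferencing a null handle), and the eventual patch rewrites the code_targets_ table entry rather than the instruction stream. Schematically:

  // 1. call(stub): record the request, emit 0xE8, then an index into a
  //    fresh code_targets_ slot holding a null handle (sharing skipped).
  // 2. After assembly: code_targets_[Memory::int32_at(pc)] = stub->GetCode();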
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
@ -1541,7 +1569,7 @@ void Assembler::movp_heap_number(Register dst, double value) {
EnsureSpace ensure_space(this);
emit_rex(dst, kPointerSize);
emit(0xB8 | dst.low_bits());
RequestHeapNumber(value);
RequestHeapObject(HeapObjectRequest(value));
emitp(nullptr, RelocInfo::EMBEDDED_OBJECT);
}


@ -449,15 +449,14 @@ class Operand BASE_EMBEDDED {
// Shift instructions on operands/registers with kPointerSize, kInt32Size and
// kInt64Size.
#define SHIFT_INSTRUCTION_LIST(V) \
V(rol, 0x0) \
V(ror, 0x1) \
V(rcl, 0x2) \
V(rcr, 0x3) \
V(shl, 0x4) \
V(shr, 0x5) \
V(sar, 0x7) \
#define SHIFT_INSTRUCTION_LIST(V) \
V(rol, 0x0) \
V(ror, 0x1) \
V(rcl, 0x2) \
V(rcr, 0x3) \
V(shl, 0x4) \
V(shr, 0x5) \
V(sar, 0x7)
class Assembler : public AssemblerBase {
private:
@ -706,12 +705,6 @@ class Assembler : public AssemblerBase {
// move.
void movp_heap_number(Register dst, double value);
// Patch the dummy heap number that we emitted at {pc} during code assembly
// with the actual heap object (handle).
static void set_heap_number(Handle<HeapObject> number, Address pc) {
Memory::Object_Handle_at(pc) = number;
}
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value,
RelocInfo::Mode rmode = RelocInfo::NONE64);
@ -929,6 +922,7 @@ class Assembler : public AssemblerBase {
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
void call(Address entry, RelocInfo::Mode rmode);
void call(CodeStub* stub);
void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
@ -2510,6 +2504,19 @@ class Assembler : public AssemblerBase {
std::deque<int> internal_reference_positions_;
List< Handle<Code> > code_targets_;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
};


@ -2101,6 +2101,12 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
}
}
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
if (masm->isolate()->function_entry_hook() != nullptr) {
masm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
}
}
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// This stub can be called from essentially anywhere, so it needs to save


@ -597,6 +597,11 @@ void MacroAssembler::Abort(BailoutReason reason) {
int3();
}
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
call(stub);
}
void MacroAssembler::CallStub(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET);
@ -612,6 +617,18 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Set(rax, f->nargs);
LoadAddress(rbx, ExternalReference(f, isolate()));
CallStubDelayed(new (zone) CEntryStub(nullptr, f->result_size, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles) {


@ -1268,7 +1268,12 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
// The first version is deprecated.
void CallStub(CodeStub* stub);
// The second version, which expects {stub} to be zone-allocated, does not
// trigger generation of the stub's code object but instead files a
// HeapObjectRequest that will be fulfilled after code assembly.
void CallStubDelayed(CodeStub* stub);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
@ -1277,6 +1282,8 @@ class MacroAssembler: public Assembler {
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {