[arm64] Refactor constant pool implementation

This refactors the constant pool handling for arm64. The immediate goal is to
allow 32-bit compressed pointers in the pool. The intermediate goal is to unify
the implementation with the arm constant pool, which will be done in a
follow-up CL.

Bug: v8:8054
Change-Id: I74db4245e5e1025f2e4de4144090fa4ce25883ab
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1645316
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62209}

Parent: c5b9eda7ca
Commit: ac79b539ec
@ -1035,9 +1035,7 @@ inline void Assembler::CheckBuffer() {
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  if (pc_offset() >= next_constant_pool_check_) {
    CheckConstPool(false, true);
  }
  constpool_.MaybeCheck();
}

}  // namespace internal
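In this hunk the explicit comparison against next_constant_pool_check_ gives way to constpool_.MaybeCheck(), which keeps the same interval-based gating but owns it inside the new ConstantPool class. A minimal, standalone sketch of that gating pattern, with all names hypothetical:

// Hedged sketch (hypothetical names): the interval-based gating used by
// CheckBuffer()/MaybeCheck(). The expensive check only runs once pc_offset
// has advanced past next_check; blocking parks next_check at an unreachable
// value so the fast path always wins.
struct IntervalCheck {
  static constexpr int kCheckIntervalBytes = 128 * 4;  // 128 instructions

  int next_check = 0;

  template <typename Fn>
  void MaybeRun(int pc_offset, Fn&& expensive_check) {
    if (pc_offset < next_check) return;  // cheap test on every buffer check
    expensive_check();                   // e.g. decide whether to emit the pool
    next_check = pc_offset + kCheckIntervalBytes;
  }
};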
@ -293,167 +293,6 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
|
||||
return !RelocInfo::IsNone(rmode);
|
||||
}
|
||||
|
||||
bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
|
||||
int offset) {
|
||||
auto existing = entry_map.find(data);
|
||||
if (existing == entry_map.end()) {
|
||||
entry_map[data] = static_cast<int>(entries_.size());
|
||||
entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
|
||||
return true;
|
||||
}
|
||||
int index = existing->second;
|
||||
entries_[index].second.push_back(offset);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Constant Pool.
|
||||
bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
|
||||
DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
|
||||
mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
|
||||
mode != RelocInfo::DEOPT_INLINING_ID &&
|
||||
mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
|
||||
|
||||
bool write_reloc_info = true;
|
||||
|
||||
uint64_t raw_data = static_cast<uint64_t>(data);
|
||||
int offset = assm_->pc_offset();
|
||||
if (IsEmpty()) {
|
||||
first_use_ = offset;
|
||||
}
|
||||
|
||||
if (RelocInfo::IsShareableRelocMode(mode)) {
|
||||
write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
|
||||
} else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
|
||||
// A zero data value is a placeholder and must not be shared.
|
||||
write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
|
||||
} else {
|
||||
entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
|
||||
}
|
||||
|
||||
if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
|
||||
// Request constant pool emission after the next instruction.
|
||||
assm_->SetNextConstPoolCheckIn(1);
|
||||
}
|
||||
|
||||
return write_reloc_info;
|
||||
}
|
||||
|
||||
int ConstPool::DistanceToFirstUse() {
|
||||
DCHECK_GE(first_use_, 0);
|
||||
return assm_->pc_offset() - first_use_;
|
||||
}
|
||||
|
||||
int ConstPool::MaxPcOffset() {
|
||||
// There are no pending entries in the pool so we can never get out of
|
||||
// range.
|
||||
if (IsEmpty()) return kMaxInt;
|
||||
|
||||
// Entries are not necessarily emitted in the order they are added so in the
|
||||
// worst case the first constant pool use will be accessing the last entry.
|
||||
return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
|
||||
}
|
||||
|
||||
int ConstPool::WorstCaseSize() {
|
||||
if (IsEmpty()) return 0;
|
||||
|
||||
// Max size prologue:
|
||||
// b over
|
||||
// ldr xzr, #pool_size
|
||||
// blr xzr
|
||||
// nop
|
||||
// All entries are 64-bit for now.
|
||||
return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
|
||||
}
|
||||
|
||||
int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
|
||||
if (IsEmpty()) return 0;
|
||||
|
||||
// Prologue is:
|
||||
// b over ;; if require_jump
|
||||
// ldr xzr, #pool_size
|
||||
// blr xzr
|
||||
// nop ;; if not 64-bit aligned
|
||||
int prologue_size = require_jump ? kInstrSize : 0;
|
||||
prologue_size += 2 * kInstrSize;
|
||||
prologue_size +=
|
||||
IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
|
||||
|
||||
// All entries are 64-bit for now.
|
||||
return prologue_size + EntryCount() * kSystemPointerSize;
|
||||
}
|
||||
|
||||
void ConstPool::Emit(bool require_jump) {
|
||||
DCHECK(!assm_->is_const_pool_blocked());
|
||||
// Prevent recursive pool emission and protect from veneer pools.
|
||||
Assembler::BlockPoolsScope block_pools(assm_);
|
||||
|
||||
int size = SizeIfEmittedAtCurrentPc(require_jump);
|
||||
Label size_check;
|
||||
assm_->bind(&size_check);
|
||||
|
||||
assm_->RecordConstPool(size);
|
||||
// Emit the constant pool. It is preceded by an optional branch if
|
||||
// require_jump and a header which will:
|
||||
// 1) Encode the size of the constant pool, for use by the disassembler.
|
||||
// 2) Terminate the program, to try to prevent execution from accidentally
|
||||
// flowing into the constant pool.
|
||||
// 3) align the pool entries to 64-bit.
|
||||
// The header is therefore made of up to three arm64 instructions:
|
||||
// ldr xzr, #<size of the constant pool in 32-bit words>
|
||||
// blr xzr
|
||||
// nop
|
||||
//
|
||||
// If executed, the header will likely segfault and lr will point to the
|
||||
// instruction following the offending blr.
|
||||
// TODO(all): Make the alignment part less fragile. Currently code is
|
||||
// allocated as a byte array so there are no guarantees the alignment will
|
||||
// be preserved on compaction. Currently it works as allocation seems to be
|
||||
// 64-bit aligned.
|
||||
|
||||
// Emit branch if required
|
||||
Label after_pool;
|
||||
if (require_jump) {
|
||||
assm_->b(&after_pool);
|
||||
}
|
||||
|
||||
// Emit the header.
|
||||
assm_->RecordComment("[ Constant Pool");
|
||||
EmitMarker();
|
||||
EmitGuard();
|
||||
assm_->Align(8);
|
||||
|
||||
// Emit constant pool entries.
|
||||
// TODO(all): currently each relocated constant is 64 bits, consider adding
|
||||
// support for 32-bit entries.
|
||||
EmitEntries();
|
||||
assm_->RecordComment("]");
|
||||
|
||||
if (after_pool.is_linked()) {
|
||||
assm_->bind(&after_pool);
|
||||
}
|
||||
|
||||
DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
|
||||
static_cast<unsigned>(size));
|
||||
}
|
||||
|
||||
void ConstPool::Clear() {
|
||||
shared_entries_.clear();
|
||||
handle_to_index_map_.clear();
|
||||
entries_.clear();
|
||||
first_use_ = -1;
|
||||
}
|
||||
|
||||
void ConstPool::EmitMarker() {
|
||||
// A constant pool size is expressed in number of 32-bit words.
|
||||
// Currently all entries are 64-bit.
|
||||
// + 1 is for the crash guard.
|
||||
// + 0/1 for alignment.
|
||||
int word_count =
|
||||
EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
|
||||
assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
|
||||
Assembler::Rt(xzr));
|
||||
}
|
||||
|
||||
MemOperand::PairResult MemOperand::AreConsistentForPair(
|
||||
const MemOperand& operandA, const MemOperand& operandB,
|
||||
int access_size_log2) {
|
||||
@ -479,47 +318,18 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
|
||||
return kNotPair;
|
||||
}
|
||||
|
||||
void ConstPool::EmitGuard() {
|
||||
#ifdef DEBUG
|
||||
Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
|
||||
DCHECK(instr->preceding()->IsLdrLiteralX() &&
|
||||
instr->preceding()->Rt() == xzr.code());
|
||||
#endif
|
||||
assm_->EmitPoolGuard();
|
||||
}
|
||||
|
||||
void ConstPool::EmitEntries() {
|
||||
DCHECK(IsAligned(assm_->pc_offset(), 8));
|
||||
|
||||
// Emit entries.
|
||||
for (const auto& entry : entries_) {
|
||||
for (const auto& pc : entry.second) {
|
||||
Instruction* instr = assm_->InstructionAt(pc);
|
||||
|
||||
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
|
||||
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
|
||||
instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
|
||||
}
|
||||
|
||||
assm_->dc64(entry.first);
|
||||
}
|
||||
Clear();
|
||||
}
|
||||
|
||||
// Assembler
|
||||
Assembler::Assembler(const AssemblerOptions& options,
|
||||
std::unique_ptr<AssemblerBuffer> buffer)
|
||||
: AssemblerBase(options, std::move(buffer)),
|
||||
constpool_(this),
|
||||
unresolved_branches_() {
|
||||
const_pool_blocked_nesting_ = 0;
|
||||
unresolved_branches_(),
|
||||
constpool_(this) {
|
||||
veneer_pool_blocked_nesting_ = 0;
|
||||
Reset();
|
||||
}
|
||||
|
||||
Assembler::~Assembler() {
|
||||
DCHECK(constpool_.IsEmpty());
|
||||
DCHECK_EQ(const_pool_blocked_nesting_, 0);
|
||||
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
|
||||
}
|
||||
|
||||
@ -528,7 +338,6 @@ void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }
|
||||
void Assembler::Reset() {
|
||||
#ifdef DEBUG
|
||||
DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size()));
|
||||
DCHECK_EQ(const_pool_blocked_nesting_, 0);
|
||||
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
|
||||
DCHECK(unresolved_branches_.empty());
|
||||
memset(buffer_start_, 0, pc_ - buffer_start_);
|
||||
@ -536,7 +345,6 @@ void Assembler::Reset() {
|
||||
pc_ = buffer_start_;
|
||||
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
|
||||
constpool_.Clear();
|
||||
next_constant_pool_check_ = 0;
|
||||
next_veneer_pool_check_ = kMaxInt;
|
||||
}
|
||||
|
||||
@ -568,7 +376,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
|
||||
SafepointTableBuilder* safepoint_table_builder,
|
||||
int handler_table_offset) {
|
||||
// Emit constant pool if necessary.
|
||||
CheckConstPool(true, false);
|
||||
ForceConstantPoolEmissionWithoutJump();
|
||||
DCHECK(constpool_.IsEmpty());
|
||||
|
||||
int code_comments_size = WriteCodeComments();
|
||||
@ -866,27 +674,6 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::StartBlockConstPool() {
|
||||
if (const_pool_blocked_nesting_++ == 0) {
|
||||
// Prevent constant pool checks happening by setting the next check to
|
||||
// the biggest possible offset.
|
||||
next_constant_pool_check_ = kMaxInt;
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::EndBlockConstPool() {
|
||||
if (--const_pool_blocked_nesting_ == 0) {
|
||||
// Check the constant pool hasn't been blocked for too long.
|
||||
DCHECK(pc_offset() < constpool_.MaxPcOffset());
|
||||
// Trigger a check after the pool gets unblocked.
|
||||
next_constant_pool_check_ = 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool Assembler::is_const_pool_blocked() const {
|
||||
return (const_pool_blocked_nesting_ > 0);
|
||||
}
|
||||
|
||||
bool Assembler::IsConstantPoolAt(Instruction* instr) {
|
||||
// The constant pool marker is made of two instructions. These instructions
|
||||
// will never be emitted by the JIT, so checking for the first one is enough:
|
||||
@ -4490,7 +4277,17 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
|
||||
Handle<HeapObject> handle(reinterpret_cast<Address*>(data));
|
||||
data = AddEmbeddedObject(handle);
|
||||
}
|
||||
if (!constpool_.RecordEntry(data, rmode)) return;
|
||||
if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) {
|
||||
if (constpool_.RecordEntry(static_cast<uint32_t>(data), rmode) ==
|
||||
RelocInfoStatus::kMustOmitForDuplicate) {
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (constpool_.RecordEntry(static_cast<uint64_t>(data), rmode) ==
|
||||
RelocInfoStatus::kMustOmitForDuplicate) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
// For modes that cannot use the constant pool, a different sequence of
|
||||
// instructions will be emitted by this function's caller.
|
||||
@ -4499,7 +4296,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
|
||||
|
||||
// Callers should ensure that constant pool emission is blocked until the
|
||||
// instruction the reloc info is associated with has been emitted.
|
||||
DCHECK(is_const_pool_blocked());
|
||||
DCHECK(constpool_.IsBlocked());
|
||||
|
||||
// We do not try to reuse pool constants.
|
||||
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
|
||||
@ -4528,63 +4325,95 @@ void Assembler::near_call(HeapObjectRequest request) {
|
||||
bl(index);
|
||||
}
|
||||
|
||||
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
|
||||
// Some short sequences of instructions must not be broken up by constant pool
// emission; such sequences are protected by a BlockConstPoolScope.
|
||||
if (is_const_pool_blocked()) {
|
||||
// Something is wrong if emission is forced and blocked at the same time.
|
||||
DCHECK(!force_emit);
|
||||
return;
|
||||
}
|
||||
// Constant Pool
|
||||
|
||||
// There is nothing to do if there are no pending constant pool entries.
|
||||
if (constpool_.IsEmpty()) {
|
||||
// Calculate the offset of the next check.
|
||||
SetNextConstPoolCheckIn(kCheckConstPoolInterval);
|
||||
return;
|
||||
}
|
||||
|
||||
// We emit a constant pool when:
|
||||
// * requested to do so by parameter force_emit (e.g. after each function).
|
||||
// * the distance to the first instruction accessing the constant pool is
|
||||
// kApproxMaxDistToConstPool or more.
|
||||
// * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
|
||||
int dist = constpool_.DistanceToFirstUse();
|
||||
int count = constpool_.EntryCount();
|
||||
if (!force_emit && (dist < kApproxMaxDistToConstPool) &&
|
||||
(count < kApproxMaxPoolEntryCount)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Emit veneers for branches that would go out of range during emission of the
|
||||
// constant pool.
|
||||
int worst_case_size = constpool_.WorstCaseSize();
|
||||
CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size);
|
||||
|
||||
// Check that the code buffer is large enough before emitting the constant
|
||||
// pool (this includes the gap to the relocation information).
|
||||
int needed_space = worst_case_size + kGap + 1 * kInstrSize;
|
||||
while (buffer_space() <= needed_space) {
|
||||
GrowBuffer();
|
||||
}
|
||||
|
||||
Label size_check;
|
||||
bind(&size_check);
|
||||
constpool_.Emit(require_jump);
|
||||
DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
|
||||
static_cast<unsigned>(worst_case_size));
|
||||
|
||||
// Since a constant pool was just emitted, move the check offset forward by
|
||||
// the standard interval.
|
||||
SetNextConstPoolCheckIn(kCheckConstPoolInterval);
|
||||
void ConstantPool::EmitPrologue(Alignment require_alignment) {
|
||||
// Recorded constant pool size is expressed in number of 32-bits words,
|
||||
// and includes prologue and alignment, but not the jump around the pool
|
||||
// and the size of the marker itself.
|
||||
const int marker_size = 1;
|
||||
int word_count =
|
||||
ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
|
||||
assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
|
||||
Assembler::Rt(xzr));
|
||||
assm_->EmitPoolGuard();
|
||||
}
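EmitPrologue records the pool size in 32-bit words, counting everything after the marker instruction (the guard, any alignment padding and the entries) but not the optional jump around the pool. A hedged worked example of that arithmetic, with an invented pool shape:

// Hedged example (invented pool shape): 3 x 64-bit and 2 x 32-bit entries,
// emitted at a pc that needs alignment padding.
//   ComputeSize(Jump::kOmitted, Alignment::kRequired)
//     = marker (4) + guard (4) + padding (4) + 3 * 8 + 2 * 4 = 44 bytes
//   word_count = 44 / kInt32Size - marker_size = 11 - 1 = 10
constexpr int kExamplePoolBytes = 4 + 4 + 4 + 3 * 8 + 2 * 4;
constexpr int kExampleWordCount = kExamplePoolBytes / 4 - 1;
static_assert(kExampleWordCount == 10, "marker payload in 32-bit words");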
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
|
||||
int ConstantPool::PrologueSize(Jump require_jump) const {
|
||||
// Prologue is:
|
||||
// b over ;; if require_jump
|
||||
// ldr xzr, #pool_size
|
||||
// blr xzr
|
||||
int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
|
||||
prologue_size += 2 * kInstrSize;
|
||||
return prologue_size;
|
||||
}
|
||||
|
||||
void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
|
||||
Instruction* entry_offset,
|
||||
const ConstantPoolKey& key) {
|
||||
Instruction* instr = assm_->InstructionAt(load_offset);
|
||||
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
|
||||
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
|
||||
instr->SetImmPCOffsetTarget(assm_->options(), entry_offset);
|
||||
}
|
||||
|
||||
void ConstantPool::Check(Emission force_emit, Jump require_jump,
|
||||
size_t margin) {
|
||||
// Some short sequences of instructions must not be broken up by constant pool
// emission; such sequences are protected by a ConstantPool::BlockScope.
|
||||
if (IsBlocked()) {
|
||||
// Something is wrong if emission is forced and blocked at the same time.
|
||||
DCHECK_EQ(force_emit, Emission::kIfNeeded);
|
||||
return;
|
||||
}
|
||||
|
||||
// We emit a constant pool only if :
|
||||
// * it is not empty
|
||||
// * emission is forced by parameter force_emit (e.g. at function end).
|
||||
// * emission is mandatory or opportune according to {ShouldEmitNow}.
|
||||
if (!IsEmpty() && (force_emit == Emission::kForced ||
|
||||
ShouldEmitNow(require_jump, margin))) {
|
||||
// Emit veneers for branches that would go out of range during emission of
|
||||
// the constant pool.
|
||||
int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
|
||||
assm_->CheckVeneerPool(false, require_jump == Jump::kRequired,
|
||||
assm_->kVeneerDistanceMargin + worst_case_size +
|
||||
static_cast<int>(margin));
|
||||
|
||||
// Check that the code buffer is large enough before emitting the constant
|
||||
// pool (this includes the gap to the relocation information).
|
||||
int needed_space = worst_case_size + assm_->kGap;
|
||||
while (assm_->buffer_space() <= needed_space) {
|
||||
assm_->GrowBuffer();
|
||||
}
|
||||
|
||||
EmitAndClear(require_jump);
|
||||
}
|
||||
// Since a constant pool is (now) empty, move the check offset forward by
|
||||
// the standard interval.
|
||||
SetNextCheckIn(ConstantPool::kCheckInterval);
|
||||
}
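Check() is reached either from the periodic MaybeCheck() or explicitly before regions in which emission will be blocked; {margin} pretends that many additional bytes have already been emitted. A hedged usage sketch; the helper and the sequence length are invented, while the assembler methods are the ones introduced by this CL:

// Hedged usage sketch: flush the pool up front if a pools-blocked sequence of
// kSequenceBytes would otherwise push a pending entry out of range.
void EmitLongSequence(Assembler* assm) {
  constexpr size_t kSequenceBytes = 64 * kInstrSize;    // invented length
  assm->EmitConstPoolWithJumpIfNeeded(kSequenceBytes);  // margin = sequence
  Assembler::BlockPoolsScope block_pools(assm);         // no pool inside
  // ... emit the 64-instruction sequence ...
}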
|
||||
|
||||
// Pool entries are accessed with a pc-relative load, therefore the distance
// cannot be more than 1 * MB. Since constant pool emission checks are
// interval-based, and we want to keep entries close to the code, we try to
// emit every 64 KB.
|
||||
const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
|
||||
const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
|
||||
const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
|
||||
const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
|
||||
const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
|
||||
|
||||
const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
|
||||
const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
|
||||
const size_t ConstantPool::kApproxMaxEntryCount = 512;
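The 1 MB hard limits above follow from the reach of the PC-relative LDR (literal) form, whose immediate is a signed 19-bit word offset. A short derivation, assuming the standard A64 encoding:

#include <cstddef>

// Hedged derivation: a signed 19-bit word offset gives 2^18 words of forward
// reach, i.e. 2^18 * 4 bytes = 1 MB, matching kMaxLoadLiteralRange and the
// kMaxDistToPool32/64 limits above.
constexpr int kLiteralImmBits = 19;  // signed, counted in 4-byte words
constexpr size_t kForwardReachBytes =
    (static_cast<size_t>(1) << (kLiteralImmBits - 1)) * 4;
static_assert(kForwardReachBytes == static_cast<size_t>(1) << 20, "1 MB");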
|
||||
|
||||
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) {
|
||||
// Account for the branch around the veneers and the guard.
|
||||
int protection_offset = 2 * kInstrSize;
|
||||
return pc_offset() >
|
||||
return static_cast<size_t>(pc_offset()) >
|
||||
max_reachable_pc - margin - protection_offset -
|
||||
static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
|
||||
unresolved_branches_.size() * kMaxVeneerCodeSize;
|
||||
}
|
||||
|
||||
void Assembler::RecordVeneerPool(int location_offset, int size) {
|
||||
@ -4594,8 +4423,9 @@ void Assembler::RecordVeneerPool(int location_offset, int size) {
|
||||
reloc_info_writer.Write(&rinfo);
|
||||
}
|
||||
|
||||
void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
|
||||
BlockPoolsScope scope(this);
|
||||
void Assembler::EmitVeneers(bool force_emit, bool need_protection,
|
||||
size_t margin) {
|
||||
BlockPoolsScope scope(this, ConstantPool::PoolEmissionCheck::kSkip);
|
||||
RecordComment("[ Veneers");
|
||||
|
||||
// The exact size of the veneer pool must be recorded (see the comment at the
|
||||
@ -4665,7 +4495,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
|
||||
}
|
||||
|
||||
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
|
||||
int margin) {
|
||||
size_t margin) {
|
||||
// There is nothing to do if there are no pending veneer pool entries.
|
||||
if (unresolved_branches_.empty()) {
|
||||
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
|
||||
|
@ -169,60 +169,6 @@ class MemOperand {
|
||||
unsigned shift_amount_;
|
||||
};
|
||||
|
||||
class ConstPool {
|
||||
public:
|
||||
explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
|
||||
// Returns true when we need to write RelocInfo and false when we do not.
|
||||
bool RecordEntry(intptr_t data, RelocInfo::Mode mode);
|
||||
int EntryCount() const { return static_cast<int>(entries_.size()); }
|
||||
bool IsEmpty() const { return entries_.empty(); }
|
||||
// Distance in bytes between the current pc and the first instruction
|
||||
// using the pool. If there are no pending entries return kMaxInt.
|
||||
int DistanceToFirstUse();
|
||||
// Offset after which instructions using the pool will be out of range.
|
||||
int MaxPcOffset();
|
||||
// Maximum size the constant pool can be with current entries. It always
|
||||
// includes alignment padding and branch over.
|
||||
int WorstCaseSize();
|
||||
// Size in bytes of the literal pool *if* it is emitted at the current
|
||||
// pc. The size will include the branch over the pool if it was requested.
|
||||
int SizeIfEmittedAtCurrentPc(bool require_jump);
|
||||
// Emit the literal pool at the current pc with a branch over the pool if
|
||||
// requested.
|
||||
void Emit(bool require_jump);
|
||||
// Discard any pending pool entries.
|
||||
void Clear();
|
||||
|
||||
private:
|
||||
void EmitMarker();
|
||||
void EmitGuard();
|
||||
void EmitEntries();
|
||||
|
||||
using SharedEntryMap = std::map<uint64_t, int>;
|
||||
// Adds a shared entry to entries_, using 'entry_map' to determine whether we
|
||||
// already track this entry. Returns true if this is the first time we add
|
||||
// this entry, false otherwise.
|
||||
bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset);
|
||||
|
||||
Assembler* assm_;
|
||||
// Keep track of the first instruction requiring a constant pool entry
|
||||
// since the previous constant pool was emitted.
|
||||
int first_use_;
|
||||
|
||||
// Map of data to index in entries_ for shared entries.
|
||||
SharedEntryMap shared_entries_;
|
||||
|
||||
// Map of address of handle to index in entries_. We need to keep track of
|
||||
// code targets separately from other shared entries, as they can be
|
||||
// relocated.
|
||||
SharedEntryMap handle_to_index_map_;
|
||||
|
||||
// Values, pc offset(s) of entries. Use a vector to preserve the order of
|
||||
// insertion, as the serializer expects code target RelocInfo to point to
|
||||
// constant pool addresses in an ascending order.
|
||||
std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
|
||||
};
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Assembler.
|
||||
|
||||
@ -369,16 +315,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
return SizeOfCodeGeneratedSince(label) / kInstrSize;
|
||||
}
|
||||
|
||||
// Prevent constant pool emission until EndBlockConstPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
void StartBlockConstPool();

// Resume constant pool emission. Needs to be called as many times as
// StartBlockConstPool to have an effect.
void EndBlockConstPool();
|
||||
|
||||
bool is_const_pool_blocked() const;
|
||||
static bool IsConstantPoolAt(Instruction* instr);
|
||||
static int ConstantPoolSizeAt(Instruction* instr);
|
||||
// See Assembler::CheckConstPool for more info.
|
||||
@ -397,16 +333,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
return veneer_pool_blocked_nesting_ > 0;
|
||||
}
|
||||
|
||||
// Block/resume emission of constant pools and veneer pools.
|
||||
void StartBlockPools() {
|
||||
StartBlockConstPool();
|
||||
StartBlockVeneerPool();
|
||||
}
|
||||
void EndBlockPools() {
|
||||
EndBlockConstPool();
|
||||
EndBlockVeneerPool();
|
||||
}
|
||||
|
||||
// Record a deoptimization reason that can be used by a log or cpu profiler.
|
||||
// Use --trace-deopt to enable.
|
||||
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
|
||||
@ -2118,8 +2044,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
|
||||
// Code generation helpers --------------------------------------------------
|
||||
|
||||
bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
|
||||
|
||||
Instruction* pc() const { return Instruction::Cast(pc_); }
|
||||
|
||||
Instruction* InstructionAt(ptrdiff_t offset) const {
|
||||
@ -2403,31 +2327,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
// FP register type.
|
||||
inline static Instr FPType(VRegister fd);
|
||||
|
||||
// Class for scoping postponing the constant pool generation.
|
||||
class BlockConstPoolScope {
|
||||
public:
|
||||
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
|
||||
assem_->StartBlockConstPool();
|
||||
}
|
||||
~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
|
||||
|
||||
private:
|
||||
Assembler* assem_;
|
||||
|
||||
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
|
||||
};
|
||||
|
||||
// Unused on this architecture.
|
||||
void MaybeEmitOutOfLineConstantPool() {}
|
||||
|
||||
// Check if it is time to emit a constant pool.
|
||||
void CheckConstPool(bool force_emit, bool require_jump);
|
||||
void ForceConstantPoolEmissionWithoutJump() {
|
||||
constpool_.Check(Emission::kForced, Jump::kOmitted);
|
||||
}
|
||||
void ForceConstantPoolEmissionWithJump() {
|
||||
constpool_.Check(Emission::kForced, Jump::kRequired);
|
||||
}
|
||||
// Check if the const pool needs to be emitted while pretending that {margin}
|
||||
// more bytes of instructions have already been emitted.
|
||||
void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
|
||||
constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
|
||||
}
|
||||
|
||||
// Returns true if we should emit a veneer as soon as possible for a branch
|
||||
// which can at most reach to specified pc.
|
||||
bool ShouldEmitVeneer(int max_reachable_pc,
|
||||
int margin = kVeneerDistanceMargin);
|
||||
bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
|
||||
size_t margin = kVeneerDistanceMargin);
|
||||
bool ShouldEmitVeneers(size_t margin = kVeneerDistanceMargin) {
|
||||
return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
|
||||
}
|
||||
|
||||
@ -2441,23 +2360,31 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
// If need_protection is true, the veneers are protected by a branch jumping
|
||||
// over the code.
|
||||
void EmitVeneers(bool force_emit, bool need_protection,
|
||||
int margin = kVeneerDistanceMargin);
|
||||
size_t margin = kVeneerDistanceMargin);
|
||||
void EmitVeneersGuard() { EmitPoolGuard(); }
|
||||
// Checks whether veneers need to be emitted at this point.
|
||||
// If force_emit is set, a veneer is generated for *all* unresolved branches.
|
||||
void CheckVeneerPool(bool force_emit, bool require_jump,
|
||||
int margin = kVeneerDistanceMargin);
|
||||
size_t margin = kVeneerDistanceMargin);
|
||||
|
||||
using BlockConstPoolScope = ConstantPool::BlockScope;
|
||||
|
||||
class BlockPoolsScope {
|
||||
public:
|
||||
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
|
||||
assem_->StartBlockPools();
|
||||
explicit BlockPoolsScope(Assembler* assem)
|
||||
: assem_(assem), block_const_pool_(assem) {
|
||||
assem_->StartBlockVeneerPool();
|
||||
}
|
||||
~BlockPoolsScope() { assem_->EndBlockPools(); }
|
||||
|
||||
BlockPoolsScope(Assembler* assem, ConstantPool::PoolEmissionCheck check)
|
||||
: assem_(assem), block_const_pool_(assem, check) {
|
||||
assem_->StartBlockVeneerPool();
|
||||
}
|
||||
~BlockPoolsScope() { assem_->EndBlockVeneerPool(); }
|
||||
|
||||
private:
|
||||
Assembler* assem_;
|
||||
|
||||
BlockConstPoolScope block_const_pool_;
|
||||
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
|
||||
};
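BlockPoolsScope now blocks the constant pool through its BlockConstPoolScope member (which may flush the pool up front) and blocks the veneer pool explicitly. A hedged usage sketch in the spirit of the jump-table change later in this CL; the helper and its arguments are invented:

// Hedged usage sketch: keep a multi-instruction block (e.g. a jump table)
// contiguous by blocking both pools for its duration.
void EmitJumpTable(Assembler* assm, Label* targets, int count) {
  Assembler::BlockPoolsScope block_pools(assm);  // RAII: unblocks on scope exit
  for (int i = 0; i < count; ++i) {
    assm->b(&targets[i]);  // one branch per case, no pool emitted in between
  }
}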
|
||||
|
||||
@ -2620,11 +2547,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
// Verify that a label's link chain is intact.
|
||||
void CheckLabelLinkChain(Label const* label);
|
||||
|
||||
// Set how far from current pc the next constant pool check will be.
|
||||
void SetNextConstPoolCheckIn(int instructions) {
|
||||
next_constant_pool_check_ = pc_offset() + instructions * kInstrSize;
|
||||
}
|
||||
|
||||
// Emit the instruction at pc_.
|
||||
void Emit(Instr instruction) {
|
||||
STATIC_ASSERT(sizeof(*pc_) == 1);
|
||||
@ -2652,39 +2574,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
void CheckBufferSpace();
|
||||
void CheckBuffer();
|
||||
|
||||
// Pc offset of the next constant pool check.
|
||||
int next_constant_pool_check_;
|
||||
|
||||
// Constant pool generation
|
||||
// Pools are emitted in the instruction stream. They are emitted when:
|
||||
// * the distance to the first use is above a pre-defined distance or
|
||||
// * the numbers of entries in the pool is above a pre-defined size or
|
||||
// * code generation is finished
|
||||
// If a pool needs to be emitted before code generation is finished a branch
|
||||
// over the emitted pool will be inserted.
|
||||
|
||||
// Constants in the pool may be addresses of functions that gets relocated;
|
||||
// if so, a relocation info entry is associated to the constant pool entry.
|
||||
|
||||
// Repeated checking whether the constant pool should be emitted is rather
|
||||
// expensive. By default we only check again once a number of instructions
|
||||
// has been generated. That also means that the sizing of the buffers is not
|
||||
// an exact science, and that we rely on some slop to not overrun buffers.
|
||||
static constexpr int kCheckConstPoolInterval = 128;
|
||||
|
||||
// Distance to first use after which a pool will be emitted. Pool entries
|
||||
// are accessed with pc relative load therefore this cannot be more than
|
||||
// 1 * MB. Since constant pool emission checks are interval based this value
|
||||
// is an approximation.
|
||||
static constexpr int kApproxMaxDistToConstPool = 64 * KB;
|
||||
|
||||
// Number of pool entries after which a pool will be emitted. Since constant
|
||||
// pool emission checks are interval based this value is an approximation.
|
||||
static constexpr int kApproxMaxPoolEntryCount = 512;
|
||||
|
||||
// Emission of the constant pool may be blocked in some code sequences.
|
||||
int const_pool_blocked_nesting_; // Block emission if this is not zero.
|
||||
|
||||
// Emission of the veneer pools may be blocked in some code sequences.
|
||||
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
|
||||
|
||||
@ -2698,16 +2587,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
// are already bound.
|
||||
std::deque<int> internal_reference_positions_;
|
||||
|
||||
// Relocation info records are also used during code generation as temporary
|
||||
// containers for constants and code target addresses until they are emitted
|
||||
// to the constant pool. These pending relocation info records are temporarily
|
||||
// stored in a separate buffer until a constant pool is emitted.
|
||||
// If every instruction in a long sequence is accessing the pool, we need one
|
||||
// pending relocation entry per instruction.
|
||||
|
||||
// The pending constant pool.
|
||||
ConstPool constpool_;
|
||||
|
||||
protected:
|
||||
// Code generation
|
||||
// The relocation writer's position is at least kGap bytes below the end of
|
||||
@ -2720,17 +2599,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
public:
|
||||
#ifdef DEBUG
|
||||
// Functions used for testing.
|
||||
int GetConstantPoolEntriesSizeForTesting() const {
|
||||
size_t GetConstantPoolEntriesSizeForTesting() const {
|
||||
// Do not include branch over the pool.
|
||||
return constpool_.EntryCount() * kSystemPointerSize;
|
||||
return constpool_.Entry32Count() * kInt32Size +
|
||||
constpool_.Entry64Count() * kInt64Size;
|
||||
}
|
||||
|
||||
static constexpr int GetCheckConstPoolIntervalForTesting() {
|
||||
return kCheckConstPoolInterval;
|
||||
static size_t GetCheckConstPoolIntervalForTesting() {
|
||||
return ConstantPool::kCheckInterval;
|
||||
}
|
||||
|
||||
static constexpr int GetApproxMaxDistToConstPoolForTesting() {
|
||||
return kApproxMaxDistToConstPool;
|
||||
static size_t GetApproxMaxDistToConstPoolForTesting() {
|
||||
return ConstantPool::kApproxDistToPool64;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -2772,7 +2652,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
DCHECK(!unresolved_branches_.empty());
|
||||
return unresolved_branches_.begin()->first;
|
||||
}
|
||||
// This is similar to next_constant_pool_check_ and helps reduce the overhead
|
||||
// This PC-offset of the next veneer pool check helps reduce the overhead
|
||||
// of checking for veneer pools.
|
||||
// It is maintained to the closest unresolved branch limit minus the maximum
|
||||
// veneer margin (or kMaxInt if there are no unresolved branches).
|
||||
@ -2797,8 +2677,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
|
||||
|
||||
int WriteCodeComments();
|
||||
|
||||
// The pending constant pool.
|
||||
ConstantPool constpool_;
|
||||
|
||||
friend class EnsureSpace;
|
||||
friend class ConstPool;
|
||||
friend class ConstantPool;
|
||||
};
|
||||
|
||||
class PatchingAssembler : public Assembler {
|
||||
@ -2815,19 +2698,12 @@ class PatchingAssembler : public Assembler {
|
||||
PatchingAssembler(const AssemblerOptions& options, byte* start,
|
||||
unsigned count)
|
||||
: Assembler(options,
|
||||
ExternalAssemblerBuffer(start, count * kInstrSize + kGap)) {
|
||||
// Block constant pool emission.
|
||||
StartBlockPools();
|
||||
}
|
||||
ExternalAssemblerBuffer(start, count * kInstrSize + kGap)),
|
||||
block_constant_pool_emission_scope(this) {}
|
||||
|
||||
~PatchingAssembler() {
|
||||
// Const pool should still be blocked.
|
||||
DCHECK(is_const_pool_blocked());
|
||||
EndBlockPools();
|
||||
// Verify we have generated the number of instruction we expected.
|
||||
DCHECK_EQ(pc_offset() + kGap, buffer_->size());
|
||||
// Verify no relocation information has been emitted.
|
||||
DCHECK(IsConstPoolEmpty());
|
||||
}
|
||||
|
||||
// See definition of PatchAdrFar() for details.
|
||||
@ -2835,6 +2711,9 @@ class PatchingAssembler : public Assembler {
|
||||
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
|
||||
void PatchAdrFar(int64_t target_offset);
|
||||
void PatchSubSp(uint32_t immediate);
|
||||
|
||||
private:
|
||||
BlockPoolsScope block_constant_pool_emission_scope;
|
||||
};
|
||||
|
||||
class EnsureSpace {
|
||||
|
@ -32,8 +32,8 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
|
||||
|
||||
constexpr uint8_t kInstrSize = 4;
|
||||
constexpr uint8_t kInstrSizeLog2 = 2;
|
||||
constexpr size_t kLoadLiteralScaleLog2 = 2;
|
||||
constexpr size_t kMaxLoadLiteralRange = 1 * MB;
|
||||
constexpr uint8_t kLoadLiteralScaleLog2 = 2;
|
||||
constexpr int kMaxLoadLiteralRange = 1 * MB;
|
||||
|
||||
const int kNumberOfRegisters = 32;
|
||||
const int kNumberOfVRegisters = 32;
|
||||
|
@ -1923,17 +1923,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
|
||||
class InstructionAccurateScope {
|
||||
public:
|
||||
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
|
||||
: tasm_(tasm)
|
||||
: tasm_(tasm),
|
||||
block_pool_(tasm, count * kInstrSize)
|
||||
#ifdef DEBUG
|
||||
,
|
||||
size_(count * kInstrSize)
|
||||
#endif
|
||||
{
|
||||
// Before blocking the const pool, see if it needs to be emitted.
|
||||
tasm_->CheckConstPool(false, true);
|
||||
tasm_->CheckVeneerPool(false, true);
|
||||
|
||||
tasm_->StartBlockPools();
|
||||
tasm_->CheckVeneerPool(false, true, count * kInstrSize);
|
||||
tasm_->StartBlockVeneerPool();
|
||||
#ifdef DEBUG
|
||||
if (count != 0) {
|
||||
tasm_->bind(&start_);
|
||||
@ -1944,7 +1942,7 @@ class InstructionAccurateScope {
|
||||
}
|
||||
|
||||
~InstructionAccurateScope() {
|
||||
tasm_->EndBlockPools();
|
||||
tasm_->EndBlockVeneerPool();
|
||||
#ifdef DEBUG
|
||||
if (start_.is_bound()) {
|
||||
DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
|
||||
@ -1955,6 +1953,7 @@ class InstructionAccurateScope {
|
||||
|
||||
private:
|
||||
TurboAssembler* tasm_;
|
||||
TurboAssembler::BlockConstPoolScope block_pool_;
|
||||
#ifdef DEBUG
|
||||
size_t size_;
|
||||
Label start_;
|
||||
|
@ -3,6 +3,7 @@
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/codegen/constant-pool.h"
|
||||
#include "src/codegen/assembler-arch.h"
|
||||
#include "src/codegen/assembler-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
@ -210,5 +211,253 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
|
||||
|
||||
#endif // defined(V8_TARGET_ARCH_PPC)
|
||||
|
||||
#if defined(V8_TARGET_ARCH_ARM64)
|
||||
|
||||
// Constant Pool.
|
||||
|
||||
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
|
||||
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
|
||||
|
||||
RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
|
||||
RelocInfo::Mode rmode) {
|
||||
ConstantPoolKey key(data, rmode);
|
||||
CHECK(key.is_value32());
|
||||
return RecordKey(std::move(key), assm_->pc_offset());
|
||||
}
|
||||
|
||||
RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
|
||||
RelocInfo::Mode rmode) {
|
||||
ConstantPoolKey key(data, rmode);
|
||||
CHECK(!key.is_value32());
|
||||
return RecordKey(std::move(key), assm_->pc_offset());
|
||||
}
|
||||
|
||||
RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
|
||||
RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
|
||||
if (write_reloc_info == RelocInfoStatus::kMustRecord) {
|
||||
if (key.is_value32()) {
|
||||
if (entry32_count_ == 0) first_use_32_ = offset;
|
||||
++entry32_count_;
|
||||
} else {
|
||||
if (entry64_count_ == 0) first_use_64_ = offset;
|
||||
++entry64_count_;
|
||||
}
|
||||
}
|
||||
entries_.insert(std::make_pair(key, offset));
|
||||
|
||||
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
|
||||
// Request constant pool emission after the next instruction.
|
||||
SetNextCheckIn(1);
|
||||
}
|
||||
|
||||
return write_reloc_info;
|
||||
}
|
||||
|
||||
RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
|
||||
const ConstantPoolKey& key) {
|
||||
if (key.AllowsDeduplication()) {
|
||||
auto existing = entries_.find(key);
|
||||
if (existing != entries_.end()) {
|
||||
return RelocInfoStatus::kMustOmitForDuplicate;
|
||||
}
|
||||
}
|
||||
return RelocInfoStatus::kMustRecord;
|
||||
}
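RecordKey() and GetRelocInfoStatusFor() implement the deduplication contract: the first occurrence of a shareable key must record reloc info, later occurrences are omitted while their loads are still remembered for back-patching. A hedged sketch of that contract with an invented constant, assuming RelocInfo::NONE counts as a shareable mode:

// Hedged sketch of the dedup contract (invented value): recording the same
// shareable 64-bit constant twice records reloc info only once, but both
// loads stay in the multimap so both immediates get patched in EmitEntries().
void RecordTwice(ConstantPool* pool) {
  uint64_t imm = 0xDEADBEEFCAFEULL;
  CHECK(pool->RecordEntry(imm, RelocInfo::NONE) ==
        RelocInfoStatus::kMustRecord);
  CHECK(pool->RecordEntry(imm, RelocInfo::NONE) ==
        RelocInfoStatus::kMustOmitForDuplicate);
}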
|
||||
|
||||
void ConstantPool::EmitAndClear(Jump require_jump) {
|
||||
DCHECK(!IsBlocked());
|
||||
// Prevent recursive pool emission.
|
||||
Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
|
||||
Alignment require_alignment =
|
||||
IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
|
||||
int size = ComputeSize(require_jump, require_alignment);
|
||||
Label size_check;
|
||||
assm_->bind(&size_check);
|
||||
assm_->RecordConstPool(size);
|
||||
|
||||
// Emit the constant pool. It is preceded by an optional branch if
|
||||
// {require_jump} and a header which will:
|
||||
// 1) Encode the size of the constant pool, for use by the disassembler.
|
||||
// 2) Terminate the program, to try to prevent execution from accidentally
|
||||
// flowing into the constant pool.
|
||||
// 3) Align the 64-bit pool entries to a 64-bit boundary.
|
||||
// TODO(all): Make the alignment part less fragile. Currently code is
|
||||
// allocated as a byte array so there are no guarantees the alignment will
|
||||
// be preserved on compaction. Currently it works as allocation seems to be
|
||||
// 64-bit aligned.
|
||||
|
||||
Label after_pool;
|
||||
if (require_jump == Jump::kRequired) assm_->b(&after_pool);
|
||||
|
||||
assm_->RecordComment("[ Constant Pool");
|
||||
EmitPrologue(require_alignment);
|
||||
if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
|
||||
EmitEntries();
|
||||
assm_->RecordComment("]");
|
||||
|
||||
if (after_pool.is_linked()) assm_->bind(&after_pool);
|
||||
|
||||
DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
|
||||
Clear();
|
||||
}
|
||||
|
||||
void ConstantPool::Clear() {
|
||||
entries_.clear();
|
||||
first_use_32_ = -1;
|
||||
first_use_64_ = -1;
|
||||
entry32_count_ = 0;
|
||||
entry64_count_ = 0;
|
||||
next_check_ = 0;
|
||||
}
|
||||
|
||||
void ConstantPool::StartBlock() {
|
||||
if (blocked_nesting_ == 0) {
|
||||
// Prevent constant pool checks from happening by setting the next check to
|
||||
// the biggest possible offset.
|
||||
next_check_ = kMaxInt;
|
||||
}
|
||||
++blocked_nesting_;
|
||||
}
|
||||
|
||||
void ConstantPool::EndBlock() {
|
||||
--blocked_nesting_;
|
||||
if (blocked_nesting_ == 0) {
|
||||
DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
|
||||
// Make sure a check happens quickly after getting unblocked.
|
||||
next_check_ = 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
|
||||
|
||||
void ConstantPool::SetNextCheckIn(size_t instructions) {
|
||||
next_check_ =
|
||||
assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
|
||||
}
|
||||
|
||||
void ConstantPool::EmitEntries() {
|
||||
for (auto iter = entries_.begin(); iter != entries_.end();) {
|
||||
DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
|
||||
auto range = entries_.equal_range(iter->first);
|
||||
bool shared = iter->first.AllowsDeduplication();
|
||||
for (auto it = range.first; it != range.second; ++it) {
|
||||
SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
|
||||
if (!shared) Emit(it->first);
|
||||
}
|
||||
if (shared) Emit(iter->first);
|
||||
iter = range.second;
|
||||
}
|
||||
}
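EmitEntries() relies on std::multimap grouping equal keys: equal_range() collects every load of a key so each literal offset can be back-patched, and shared keys get a single pool slot. A standalone sketch of that traversal pattern, detached from the assembler types:

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Hedged, standalone sketch of the equal_range pattern used above: patch every
// pc offset that loaded a key, then emit the constant once per distinct key.
void WalkPool(const std::multimap<uint64_t, int>& entries,
              std::vector<std::pair<uint64_t, int>>* patches,
              std::vector<uint64_t>* emitted) {
  for (auto iter = entries.begin(); iter != entries.end();) {
    auto range = entries.equal_range(iter->first);
    for (auto it = range.first; it != range.second; ++it) {
      patches->emplace_back(it->first, it->second);  // back-patch this load
    }
    emitted->push_back(iter->first);  // one pool slot for the shared key
    iter = range.second;
  }
}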
|
||||
|
||||
void ConstantPool::Emit(const ConstantPoolKey& key) {
|
||||
if (key.is_value32()) {
|
||||
assm_->dd(key.value32());
|
||||
} else {
|
||||
assm_->dq(key.value64());
|
||||
}
|
||||
}
|
||||
|
||||
bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
|
||||
if (IsEmpty()) return false;
|
||||
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
|
||||
return true;
|
||||
}
|
||||
// We compute {dist32/64}, i.e. the distance from the first instruction
|
||||
// accessing a 32bit/64bit entry in the constant pool to any of the
|
||||
// 32bit/64bit constant pool entries, respectively. This is required because
|
||||
// we do not guarantee that entries are emitted in order of reference, i.e. it
|
||||
// is possible that the entry with the earliest reference is emitted last.
|
||||
// The constant pool should be emitted if either of the following is true:
|
||||
// (A) {dist32/64} will be out of range at the next check in.
|
||||
// (B) Emission can be done behind an unconditional branch and {dist32/64}
|
||||
// exceeds {kOpportunityDist*}.
|
||||
// (C) {dist32/64} exceeds the desired approximate distance to the pool.
|
||||
int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
|
||||
size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
|
||||
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
|
||||
if (Entry64Count() != 0) {
|
||||
// The 64-bit constants are always emitted before the 32-bit constants, so
|
||||
// we subtract the size of the 32-bit constants from {size}.
|
||||
size_t dist64 = pool_end_64 - first_use_64_;
|
||||
bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
|
||||
bool opportune_emission_without_jump =
|
||||
require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
|
||||
bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
|
||||
if (next_check_too_late || opportune_emission_without_jump ||
|
||||
approximate_distance_exceeded) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if (Entry32Count() != 0) {
|
||||
size_t dist32 = pool_end_32 - first_use_32_;
|
||||
bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
|
||||
bool opportune_emission_without_jump =
|
||||
require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
|
||||
bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
|
||||
if (next_check_too_late || opportune_emission_without_jump ||
|
||||
approximate_distance_exceeded) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
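A hedged worked example of the 64-bit branch of this decision, with invented numbers and the constants defined earlier in this CL (kCheckInterval = 128 * kInstrSize = 512 bytes, kApproxDistToPool64 = 64 KB, kMaxDistToPool64 = 1 MB):

// Hedged worked example (invented numbers): first 64-bit use at pc offset
// 1000, current pc offset 67000, margin 0, no 32-bit entries, worst-case pool
// size 200 bytes.
//   pool_end_64 = 67000 + 0 + 200 = 67200
//   dist64      = 67200 - 1000    = 66200
//   dist64 + 2 * 512 = 67224 < 1 MB, so the hard limit is not yet at risk, but
//   dist64 >= 64 KB (65536), so approximate_distance_exceeded is true and the
//   pool is emitted now regardless of whether a jump is required.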
int ConstantPool::ComputeSize(Jump require_jump,
|
||||
Alignment require_alignment) const {
|
||||
int size_up_to_marker = PrologueSize(require_jump);
|
||||
int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
|
||||
size_t size_after_marker =
|
||||
Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
|
||||
return size_up_to_marker + static_cast<int>(size_after_marker);
|
||||
}
|
||||
|
||||
Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
|
||||
int pc_offset) const {
|
||||
int size_up_to_marker = PrologueSize(require_jump);
|
||||
if (Entry64Count() != 0 &&
|
||||
!IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
|
||||
return Alignment::kRequired;
|
||||
}
|
||||
return Alignment::kOmitted;
|
||||
}
|
||||
|
||||
bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
|
||||
// Check that all entries are in range if the pool is emitted at {pc_offset}.
|
||||
// This ignores kPcLoadDelta (conservatively, since all offsets are positive),
|
||||
// and over-estimates the last entry's address with the pool's end.
|
||||
Alignment require_alignment =
|
||||
IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
|
||||
size_t pool_end_32 =
|
||||
pc_offset + ComputeSize(Jump::kRequired, require_alignment);
|
||||
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
|
||||
bool entries_in_range_32 =
|
||||
Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
|
||||
bool entries_in_range_64 =
|
||||
Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
|
||||
return entries_in_range_32 && entries_in_range_64;
|
||||
}
|
||||
|
||||
ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
|
||||
: pool_(&assm->constpool_) {
|
||||
pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
|
||||
pool_->StartBlock();
|
||||
}
|
||||
|
||||
ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
|
||||
: pool_(&assm->constpool_) {
|
||||
DCHECK_EQ(check, PoolEmissionCheck::kSkip);
|
||||
pool_->StartBlock();
|
||||
}
|
||||
|
||||
ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
|
||||
|
||||
void ConstantPool::MaybeCheck() {
|
||||
if (assm_->pc_offset() >= next_check_) {
|
||||
Check(Emission::kIfNeeded, Jump::kRequired);
|
||||
}
|
||||
}
|
||||
|
||||
#endif // defined(V8_TARGET_ARCH_ARM64)
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
@ -15,6 +15,8 @@
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
class Instruction;
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Constant pool support
|
||||
|
||||
@ -161,6 +163,187 @@ class ConstantPoolBuilder {
|
||||
|
||||
#endif // defined(V8_TARGET_ARCH_PPC)
|
||||
|
||||
#if defined(V8_TARGET_ARCH_ARM64)
|
||||
|
||||
class ConstantPoolKey {
|
||||
public:
|
||||
explicit ConstantPoolKey(uint64_t value,
|
||||
RelocInfo::Mode rmode = RelocInfo::NONE)
|
||||
: is_value32_(false), value64_(value), rmode_(rmode) {}
|
||||
|
||||
explicit ConstantPoolKey(uint32_t value,
|
||||
RelocInfo::Mode rmode = RelocInfo::NONE)
|
||||
: is_value32_(true), value32_(value), rmode_(rmode) {}
|
||||
|
||||
uint64_t value64() const {
|
||||
CHECK(!is_value32_);
|
||||
return value64_;
|
||||
}
|
||||
uint32_t value32() const {
|
||||
CHECK(is_value32_);
|
||||
return value32_;
|
||||
}
|
||||
|
||||
bool is_value32() const { return is_value32_; }
|
||||
RelocInfo::Mode rmode() const { return rmode_; }
|
||||
|
||||
bool AllowsDeduplication() const {
|
||||
DCHECK(rmode_ != RelocInfo::CONST_POOL &&
|
||||
rmode_ != RelocInfo::VENEER_POOL &&
|
||||
rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
|
||||
rmode_ != RelocInfo::DEOPT_INLINING_ID &&
|
||||
rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID);
|
||||
// CODE_TARGETs can be shared because they aren't patched anymore,
|
||||
// and we make sure we emit only one reloc info for them (thus delta
// patching will apply the delta only once). At the moment, we do not dedup
|
||||
// code targets if they are wrapped in a heap object request (value == 0).
|
||||
bool is_sharable_code_target =
|
||||
rmode_ == RelocInfo::CODE_TARGET &&
|
||||
(is_value32() ? (value32() != 0) : (value64() != 0));
|
||||
return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target;
|
||||
}
|
||||
|
||||
private:
|
||||
bool is_value32_;
|
||||
union {
|
||||
uint64_t value64_;
|
||||
uint32_t value32_;
|
||||
};
|
||||
RelocInfo::Mode rmode_;
|
||||
};
|
||||
|
||||
// Order for pool entries. 64-bit entries go first.
|
||||
inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
|
||||
if (a.is_value32() < b.is_value32()) return true;
|
||||
if (a.is_value32() > b.is_value32()) return false;
|
||||
if (a.rmode() < b.rmode()) return true;
|
||||
if (a.rmode() > b.rmode()) return false;
|
||||
if (a.is_value32()) return a.value32() < b.value32();
|
||||
return a.value64() < b.value64();
|
||||
}
|
||||
|
||||
inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
|
||||
if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
|
||||
return false;
|
||||
}
|
||||
if (a.is_value32()) return a.value32() == b.value32();
|
||||
return a.value64() == b.value64();
|
||||
}
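Because is_value32() is the primary sort key and false orders before true, every 64-bit key sorts ahead of every 32-bit key, which is what lets EmitEntries() lay out the 64-bit slots first. A standalone illustration of that ordering property:

#include <cstdint>
#include <map>
#include <utility>

// Hedged, standalone illustration: with a bool as the primary key, false
// (64-bit) sorts before true (32-bit), so 64-bit entries come first when the
// multimap is walked in order.
int main() {
  std::multimap<std::pair<bool, uint64_t>, int> entries;  // {is_value32, value} -> pc
  entries.insert({{true, 0x42}, 100});                    // 32-bit entry
  entries.insert({{false, 0x123456789ABCDEF0ULL}, 104});  // 64-bit entry
  return entries.begin()->first.first ? 1 : 0;            // 0: 64-bit first
}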
// Constant pool generation
|
||||
enum class Jump { kOmitted, kRequired };
|
||||
enum class Emission { kIfNeeded, kForced };
|
||||
enum class Alignment { kOmitted, kRequired };
|
||||
enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
|
||||
|
||||
// Pools are emitted in the instruction stream, preferably after unconditional
|
||||
// jumps or after returns from functions (in dead code locations).
|
||||
// If a long code sequence does not contain unconditional jumps, it is
|
||||
// necessary to emit the constant pool before the pool gets too far from the
|
||||
// location it is accessed from. In this case, we emit a jump over the emitted
|
||||
// constant pool.
|
||||
// Constants in the pool may be addresses of functions that get relocated;
// if so, a relocation info entry is associated with the constant pool entry.
|
||||
class ConstantPool {
|
||||
public:
|
||||
explicit ConstantPool(Assembler* assm);
|
||||
~ConstantPool();
|
||||
|
||||
// Returns true when we need to write RelocInfo and false when we do not.
|
||||
RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
|
||||
RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
|
||||
|
||||
size_t Entry32Count() const { return entry32_count_; }
|
||||
size_t Entry64Count() const { return entry64_count_; }
|
||||
bool IsEmpty() const { return entries_.empty(); }
|
||||
// Check whether all entries are still in range if the pool is emitted at {pc_offset}.
|
||||
bool IsInImmRangeIfEmittedAt(int pc_offset);
|
||||
// Size in bytes of the constant pool. Depending on parameters, the size will
|
||||
// include the branch over the pool and alignment padding.
|
||||
int ComputeSize(Jump require_jump, Alignment require_alignment) const;
|
||||
|
||||
// Emit the pool at the current pc with a branch over the pool if requested.
|
||||
void EmitAndClear(Jump require);
|
||||
bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
|
||||
V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
|
||||
size_t margin = 0);
|
||||
|
||||
V8_EXPORT_PRIVATE void MaybeCheck();
|
||||
void Clear();
|
||||
|
||||
// Constant pool emission can be blocked temporarily.
|
||||
bool IsBlocked() const;
|
||||
|
||||
// Repeated checking whether the constant pool should be emitted is expensive;
|
||||
// only check once a number of instructions have been generated.
|
||||
void SetNextCheckIn(size_t instructions);
|
||||
|
||||
// Class for scoped postponement of constant pool generation.
|
||||
enum class PoolEmissionCheck { kSkip };
|
||||
class V8_EXPORT_PRIVATE BlockScope {
|
||||
public:
|
||||
// BlockScope immediately emits the pool if necessary to ensure that
// during the block scope at least {margin} bytes can be emitted without
// pool emission becoming necessary.
|
||||
explicit BlockScope(Assembler* pool, size_t margin = 0);
|
||||
BlockScope(Assembler* pool, PoolEmissionCheck);
|
||||
~BlockScope();
|
||||
|
||||
private:
|
||||
ConstantPool* pool_;
|
||||
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
|
||||
};
|
||||
|
||||
// Hard limit to the const pool which must not be exceeded.
|
||||
static const size_t kMaxDistToPool32;
|
||||
static const size_t kMaxDistToPool64;
|
||||
// Approximate distance where the pool should be emitted.
|
||||
static const size_t kApproxDistToPool32;
|
||||
V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
|
||||
// Approximate distance where the pool may be emitted if
|
||||
// no jump is required (due to a recent unconditional jump).
|
||||
static const size_t kOpportunityDistToPool32;
|
||||
static const size_t kOpportunityDistToPool64;
|
||||
// PC distance between constant pool checks.
|
||||
V8_EXPORT_PRIVATE static const size_t kCheckInterval;
|
||||
// Number of entries in the pool which trigger a check.
|
||||
static const size_t kApproxMaxEntryCount;
|
||||
|
||||
private:
|
||||
void StartBlock();
|
||||
void EndBlock();
|
||||
|
||||
void EmitEntries();
|
||||
void EmitPrologue(Alignment require_alignment);
|
||||
int PrologueSize(Jump require_jump) const;
|
||||
RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
|
||||
RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
|
||||
void Emit(const ConstantPoolKey& key);
|
||||
void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
|
||||
const ConstantPoolKey& key);
|
||||
Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
|
||||
int pc_offset) const;
|
||||
|
||||
Assembler* assm_;
|
||||
// Keep track of the first instruction requiring a constant pool entry
|
||||
// since the previous constant pool was emitted.
|
||||
int first_use_32_ = -1;
|
||||
int first_use_64_ = -1;
|
||||
// We sort not according to insertion order, but since we do not insert
|
||||
// addresses (for heap objects we insert an index which is created in
|
||||
// increasing order), the order is deterministic. We map each entry to the
|
||||
// pc offset of the load. We use a multimap because we need to record the
|
||||
// pc offset of each load of the same constant so that the immediate of the
|
||||
// loads can be back-patched when the pool is emitted.
|
||||
std::multimap<ConstantPoolKey, int> entries_;
|
||||
size_t entry32_count_ = 0;
|
||||
size_t entry64_count_ = 0;
|
||||
int next_check_ = 0;
|
||||
int blocked_nesting_ = 0;
|
||||
};
|
||||
|
||||
#endif // defined(V8_TARGET_ARCH_ARM64)
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
||||
|
@ -2400,12 +2400,13 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
|
||||
__ Adr(temp, &table);
|
||||
__ Add(temp, temp, Operand(input, UXTW, 2));
|
||||
__ Br(temp);
|
||||
__ StartBlockPools();
|
||||
__ Bind(&table);
|
||||
for (size_t index = 0; index < case_count; ++index) {
|
||||
__ B(GetLabel(i.InputRpo(index + 2)));
|
||||
{
|
||||
TurboAssembler::BlockPoolsScope block_pools(tasm());
|
||||
__ Bind(&table);
|
||||
for (size_t index = 0; index < case_count; ++index) {
|
||||
__ B(GetLabel(i.InputRpo(index + 2)));
|
||||
}
|
||||
}
|
||||
__ EndBlockPools();
|
||||
}
|
||||
|
||||
void CodeGenerator::FinishFrame(Frame* frame) {
|
||||
@ -2655,7 +2656,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
|
||||
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
|
||||
|
||||
void CodeGenerator::AssembleMove(InstructionOperand* source,
|
||||
InstructionOperand* destination) {
|
||||
|
@ -172,7 +172,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
  patching_assembler.PatchSubSp(bytes);
}

void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }

void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
@ -103,7 +103,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,

void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
  JumpToInstructionStream(builtin_target);
  CheckConstPool(true, false);  // force emit of const pool
  ForceConstantPoolEmissionWithoutJump();
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
@ -6486,76 +6486,90 @@ TEST(ldr_literal) {
#ifdef DEBUG
// These tests rely on functions available in debug mode.
enum LiteralPoolEmitOutcome { EmitExpected, NoEmitExpected };
enum LiteralPoolEmissionAlignment { EmitAtUnaligned, EmitAtAligned };

static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
                                  size_t prepadding = 0) {
static void LdrLiteralRangeHelper(
    size_t range, LiteralPoolEmitOutcome outcome,
    LiteralPoolEmissionAlignment unaligned_emission) {
  SETUP_SIZE(static_cast<int>(range + 1024));

  size_t code_size = 0;
  const size_t pool_entries = 2;
  const size_t kEntrySize = 8;
  const size_t first_pool_entries = 2;
  const size_t first_pool_size_bytes = first_pool_entries * kInt64Size;

  START();
  // Force a pool dump so the pool starts off empty.
  __ CheckConstPool(true, true);
  __ ForceConstantPoolEmissionWithJump();
  CHECK_CONSTANT_POOL_SIZE(0);

  // Emit prepadding to influence alignment of the pool; we don't count this
  // into code size.
  for (size_t i = 0; i < prepadding; ++i) __ Nop();
  // Emit prepadding to influence alignment of the pool.
  bool currently_aligned = IsAligned(__ pc_offset(), kInt64Size);
  if ((unaligned_emission == EmitAtUnaligned && currently_aligned) ||
      (unaligned_emission == EmitAtAligned && !currently_aligned)) {
    __ Nop();
  }

  int initial_pc_offset = __ pc_offset();
  __ Ldr(x0, isolate->factory()->undefined_value());
  __ Ldr(x1, isolate->factory()->the_hole_value());
  code_size += 2 * kInstrSize;
  CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);

  // Check that the requested range (allowing space for a branch over the pool)
  // can be handled by this test.
  CHECK_LE(code_size, range);
  CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);

  size_t expected_pool_size = 0;

#if defined(_M_ARM64) && !defined(__clang__)
  auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
#else
  auto PoolSizeAt = [](int pc_offset) {
  auto PoolSizeAt = [unaligned_emission](int pc_offset) {
#endif
    // To determine padding, consider the size of the prologue of the pool,
    // and the jump around the pool, which we always need.
    size_t prologue_size = 2 * kInstrSize + kInstrSize;
    size_t pc = pc_offset + prologue_size;
    const size_t padding = IsAligned(pc, 8) ? 0 : 4;
    return prologue_size + pool_entries * kEntrySize + padding;
    const size_t padding = IsAligned(pc, kInt64Size) ? 0 : kInt32Size;
    CHECK_EQ(padding == 0, unaligned_emission == EmitAtAligned);
    return prologue_size + first_pool_size_bytes + padding;
  };

  int pc_offset_before_emission = -1;
  // Emit NOPs up to 'range'.
  while (code_size < range) {
  bool pool_was_emitted = false;
  while (__ pc_offset() - initial_pc_offset < static_cast<intptr_t>(range)) {
    pc_offset_before_emission = __ pc_offset() + kInstrSize;
    __ Nop();
    code_size += kInstrSize;
    if (__ GetConstantPoolEntriesSizeForTesting() == 0) {
      pool_was_emitted = true;
      break;
    }
  }
  CHECK_EQ(code_size, range);

  if (outcome == EmitExpected) {
    CHECK_CONSTANT_POOL_SIZE(0);
    if (!pool_was_emitted) {
      FATAL(
          "Pool was not emitted up to pc_offset %d which corresponds to a "
          "distance to the first constant of %d bytes",
          __ pc_offset(), __ pc_offset() - initial_pc_offset);
    }
    // Check that the size of the emitted constant pool is as expected.
    expected_pool_size = PoolSizeAt(pc_offset_before_emission);
    CHECK_EQ(pc_offset_before_emission + expected_pool_size, __ pc_offset());
  } else {
    CHECK_EQ(outcome, NoEmitExpected);
    CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
    if (pool_was_emitted) {
      FATAL("Pool was unexpectedly emitted at pc_offset %d ",
            pc_offset_before_emission);
    }
    CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
    CHECK_EQ(pc_offset_before_emission, __ pc_offset());
  }

  // Force a pool flush to check that a second pool functions correctly.
  __ CheckConstPool(true, true);
  __ ForceConstantPoolEmissionWithJump();
  CHECK_CONSTANT_POOL_SIZE(0);

  // These loads should be after the pool (and will require a new one).
  const int second_pool_entries = 2;
  __ Ldr(x4, isolate->factory()->true_value());
  __ Ldr(x5, isolate->factory()->false_value());
  CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
  CHECK_CONSTANT_POOL_SIZE(second_pool_entries * kInt64Size);

  END();

  if (outcome == EmitExpected) {
@ -6566,9 +6580,12 @@ static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
    Instruction* marker =
        reinterpret_cast<Instruction*>(pool_start + kInstrSize);
    CHECK(marker->IsLdrLiteralX());
    const size_t padding =
        IsAligned(pc_offset_before_emission + kInstrSize, kEntrySize) ? 0 : 1;
    CHECK_EQ(pool_entries * 2 + 1 + padding, marker->ImmLLiteral());
    size_t pool_data_start_offset = pc_offset_before_emission + kInstrSize;
    size_t padding =
        IsAligned(pool_data_start_offset, kInt64Size) ? 0 : kInt32Size;
    size_t marker_size = kInstrSize;
    CHECK_EQ((first_pool_size_bytes + marker_size + padding) / kInt32Size,
             marker->ImmLLiteral());
  }

  RUN();
@ -6582,28 +6599,34 @@ static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,

TEST(ldr_literal_range_max_dist_emission_1) {
  INIT_V8();
  LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
                        EmitExpected);
  LdrLiteralRangeHelper(
      MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
          MacroAssembler::GetCheckConstPoolIntervalForTesting(),
      EmitExpected, EmitAtAligned);
}

TEST(ldr_literal_range_max_dist_emission_2) {
  INIT_V8();
  LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
                        EmitExpected, 1);
  LdrLiteralRangeHelper(
      MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
          MacroAssembler::GetCheckConstPoolIntervalForTesting(),
      EmitExpected, EmitAtUnaligned);
}

TEST(ldr_literal_range_max_dist_no_emission_1) {
  INIT_V8();
  LdrLiteralRangeHelper(
      MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
      NoEmitExpected);
      MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
          MacroAssembler::GetCheckConstPoolIntervalForTesting(),
      NoEmitExpected, EmitAtUnaligned);
}

TEST(ldr_literal_range_max_dist_no_emission_2) {
  INIT_V8();
  LdrLiteralRangeHelper(
      MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
      NoEmitExpected, 1);
      MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
          MacroAssembler::GetCheckConstPoolIntervalForTesting(),
      NoEmitExpected, EmitAtAligned);
}

#endif
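As a cross-check of the marker test in the hunk above: assuming the usual arm64 sizes (kInstrSize == kInt32Size == 4, kInt64Size == 8) and the two 64-bit entries the test emits, the expected ImmLLiteral works out to 5 words when the pool data lands 8-byte aligned and 6 words when a 4-byte padding slot is needed. A stand-alone sketch of that arithmetic under those assumed sizes:

// Quick stand-alone check of the ImmLLiteral arithmetic the test expects.
// The marker encodes, in 32-bit words, everything from the marker itself to
// the end of the pool data; sizes below are the assumed arm64 values.
#include <cstddef>
#include <iostream>

int main() {
  const size_t kInt32Size = 4, kInt64Size = 8, kInstrSize = 4;
  const size_t first_pool_entries = 2;
  const size_t first_pool_size_bytes = first_pool_entries * kInt64Size;  // 16
  const size_t marker_size = kInstrSize;                                 // 4

  for (size_t padding : {size_t{0}, kInt32Size}) {  // aligned vs. unaligned
    size_t imm = (first_pool_size_bytes + marker_size + padding) / kInt32Size;
    std::cout << "padding " << padding << " -> ImmLLiteral " << imm << "\n";
    // Prints 5 for the aligned case and 6 for the unaligned case.
  }
}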