Revert "[arm64] Refactor constant pool implementation"

This reverts commit ac79b539ec.

Reason for revert: https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20arm64%20-%20sim/18611

Original change's description:
> [arm64] Refactor constant pool implementation
> 
> This refactors the constant pool handling for arm64. The immediate goal
> is to allow 32bit compressed pointers in the pool. The mediate goal is
> to unify the implementation with the arm constant pool, which will be
> done in a follow-up CL.
> 
> Bug: v8:8054
> Change-Id: I74db4245e5e1025f2e4de4144090fa4ce25883ab
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1645316
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#62209}

TBR=mstarzinger@chromium.org,sigurds@chromium.org,jgruber@chromium.org,georgia.kouveli@arm.com

Change-Id: Iff03e81a2e70d125ef2c06b6ff3aff8d0e3688ef
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:8054
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1662293
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62211}
Sigurd Schneider 2019-06-17 11:49:55 +00:00 committed by Commit Bot
parent f066d764cc
commit 81fc0c462e
11 changed files with 489 additions and 651 deletions


@@ -1035,7 +1035,9 @@ inline void Assembler::CheckBuffer() {
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(false, true);
}
constpool_.MaybeCheck();
if (pc_offset() >= next_constant_pool_check_) {
CheckConstPool(false, true);
}
}
} // namespace internal
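
For context, the hunk above restores the interval-based check pattern: the assembler re-examines the constant pool only once pc_offset() reaches a precomputed threshold, so the per-instruction cost is a single comparison. A minimal sketch of that pattern, using simplified, hypothetical names rather than the actual V8 classes:

// Illustrative sketch only (hypothetical names, not part of this commit).
class PoolChecker {
 public:
  explicit PoolChecker(int interval_bytes) : interval_(interval_bytes) {}

  // Called from the per-instruction buffer check; in the common case this is
  // just one compare, so checking stays cheap.
  void MaybeCheck(int pc_offset) {
    if (pc_offset < next_check_) return;  // nothing to do yet
    EmitPoolIfNeeded(pc_offset);
    next_check_ = pc_offset + interval_;  // re-arm the check
  }

 private:
  void EmitPoolIfNeeded(int /*pc_offset*/) { /* emit pending entries, clear state */ }

  const int interval_;
  int next_check_ = 0;  // zero forces a check at the first opportunity
};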


@@ -293,6 +293,167 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return !RelocInfo::IsNone(rmode);
}
bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
int offset) {
auto existing = entry_map.find(data);
if (existing == entry_map.end()) {
entry_map[data] = static_cast<int>(entries_.size());
entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
return true;
}
int index = existing->second;
entries_[index].second.push_back(offset);
return false;
}
// Constant Pool.
bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
mode != RelocInfo::DEOPT_INLINING_ID &&
mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
bool write_reloc_info = true;
uint64_t raw_data = static_cast<uint64_t>(data);
int offset = assm_->pc_offset();
if (IsEmpty()) {
first_use_ = offset;
}
if (RelocInfo::IsShareableRelocMode(mode)) {
write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
} else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
// A zero data value is a placeholder and must not be shared.
write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
} else {
entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
}
if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
// Request constant pool emission after the next instruction.
assm_->SetNextConstPoolCheckIn(1);
}
return write_reloc_info;
}
int ConstPool::DistanceToFirstUse() {
DCHECK_GE(first_use_, 0);
return assm_->pc_offset() - first_use_;
}
int ConstPool::MaxPcOffset() {
// There are no pending entries in the pool so we can never get out of
// range.
if (IsEmpty()) return kMaxInt;
// Entries are not necessarily emitted in the order they are added so in the
// worst case the first constant pool use will be accessing the last entry.
return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
}
int ConstPool::WorstCaseSize() {
if (IsEmpty()) return 0;
// Max size prologue:
// b over
// ldr xzr, #pool_size
// blr xzr
// nop
// All entries are 64-bit for now.
return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
}
int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
if (IsEmpty()) return 0;
// Prologue is:
// b over ;; if require_jump
// ldr xzr, #pool_size
// blr xzr
// nop ;; if not 64-bit aligned
int prologue_size = require_jump ? kInstrSize : 0;
prologue_size += 2 * kInstrSize;
prologue_size +=
IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
// All entries are 64-bit for now.
return prologue_size + EntryCount() * kSystemPointerSize;
}
void ConstPool::Emit(bool require_jump) {
DCHECK(!assm_->is_const_pool_blocked());
// Prevent recursive pool emission and protect from veneer pools.
Assembler::BlockPoolsScope block_pools(assm_);
int size = SizeIfEmittedAtCurrentPc(require_jump);
Label size_check;
assm_->bind(&size_check);
assm_->RecordConstPool(size);
// Emit the constant pool. It is preceded by an optional branch if
// require_jump and a header which will:
// 1) Encode the size of the constant pool, for use by the disassembler.
// 2) Terminate the program, to try to prevent execution from accidentally
// flowing into the constant pool.
// 3) Align the pool entries to 64 bits.
// The header is therefore made of up to three arm64 instructions:
// ldr xzr, #<size of the constant pool in 32-bit words>
// blr xzr
// nop
//
// If executed, the header will likely segfault and lr will point to the
// instruction following the offending blr.
// TODO(all): Make the alignment part less fragile. Currently code is
// allocated as a byte array so there are no guarantees the alignment will
// be preserved on compaction. Currently it works as allocation seems to be
// 64-bit aligned.
// Emit branch if required
Label after_pool;
if (require_jump) {
assm_->b(&after_pool);
}
// Emit the header.
assm_->RecordComment("[ Constant Pool");
EmitMarker();
EmitGuard();
assm_->Align(8);
// Emit constant pool entries.
// TODO(all): currently each relocated constant is 64 bits, consider adding
// support for 32-bit entries.
EmitEntries();
assm_->RecordComment("]");
if (after_pool.is_linked()) {
assm_->bind(&after_pool);
}
DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
static_cast<unsigned>(size));
}
void ConstPool::Clear() {
shared_entries_.clear();
handle_to_index_map_.clear();
entries_.clear();
first_use_ = -1;
}
void ConstPool::EmitMarker() {
// A constant pool size is expressed in number of 32-bit words.
// Currently all entries are 64-bit.
// + 1 is for the crash guard.
// + 0/1 for alignment.
int word_count =
EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
Assembler::Rt(xzr));
}
MemOperand::PairResult MemOperand::AreConsistentForPair(
const MemOperand& operandA, const MemOperand& operandB,
int access_size_log2) {
@@ -318,18 +479,47 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
return kNotPair;
}
void ConstPool::EmitGuard() {
#ifdef DEBUG
Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
DCHECK(instr->preceding()->IsLdrLiteralX() &&
instr->preceding()->Rt() == xzr.code());
#endif
assm_->EmitPoolGuard();
}
void ConstPool::EmitEntries() {
DCHECK(IsAligned(assm_->pc_offset(), 8));
// Emit entries.
for (const auto& entry : entries_) {
for (const auto& pc : entry.second) {
Instruction* instr = assm_->InstructionAt(pc);
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
}
assm_->dc64(entry.first);
}
Clear();
}
// Assembler
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
unresolved_branches_(),
constpool_(this) {
constpool_(this),
unresolved_branches_() {
const_pool_blocked_nesting_ = 0;
veneer_pool_blocked_nesting_ = 0;
Reset();
}
Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
}
@@ -338,6 +528,7 @@ void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }
void Assembler::Reset() {
#ifdef DEBUG
DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size()));
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
memset(buffer_start_, 0, pc_ - buffer_start_);
@@ -345,6 +536,7 @@ void Assembler::Reset() {
pc_ = buffer_start_;
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
constpool_.Clear();
next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
}
@@ -376,7 +568,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// Emit constant pool if necessary.
ForceConstantPoolEmissionWithoutJump();
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
int code_comments_size = WriteCodeComments();
@@ -674,6 +866,27 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
}
}
void Assembler::StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
// Prevent constant pool checks from happening by setting the next check to
// the biggest possible offset.
next_constant_pool_check_ = kMaxInt;
}
}
void Assembler::EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
DCHECK(pc_offset() < constpool_.MaxPcOffset());
// Trigger a check after the pool gets unblocked.
next_constant_pool_check_ = 0;
}
}
bool Assembler::is_const_pool_blocked() const {
return (const_pool_blocked_nesting_ > 0);
}
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
@@ -4277,17 +4490,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
Handle<HeapObject> handle(reinterpret_cast<Address*>(data));
data = AddEmbeddedObject(handle);
}
if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) {
if (constpool_.RecordEntry(static_cast<uint32_t>(data), rmode) ==
RelocInfoStatus::kMustOmitForDuplicate) {
return;
}
} else {
if (constpool_.RecordEntry(static_cast<uint64_t>(data), rmode) ==
RelocInfoStatus::kMustOmitForDuplicate) {
return;
}
}
if (!constpool_.RecordEntry(data, rmode)) return;
}
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
@@ -4296,7 +4499,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
// Callers should ensure that constant pool emission is blocked until the
// instruction the reloc info is associated with has been emitted.
DCHECK(constpool_.IsBlocked());
DCHECK(is_const_pool_blocked());
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
@@ -4325,95 +4528,63 @@ void Assembler::near_call(HeapObjectRequest request) {
bl(index);
}
// Constant Pool
void ConstantPool::EmitPrologue(Alignment require_alignment) {
// Recorded constant pool size is expressed in number of 32-bit words,
// and includes prologue and alignment, but not the jump around the pool
// and the size of the marker itself.
const int marker_size = 1;
int word_count =
ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
Assembler::Rt(xzr));
assm_->EmitPoolGuard();
}
int ConstantPool::PrologueSize(Jump require_jump) const {
// Prologue is:
// b over ;; if require_jump
// ldr xzr, #pool_size
// blr xzr
int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
prologue_size += 2 * kInstrSize;
return prologue_size;
}
void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
Instruction* entry_offset,
const ConstantPoolKey& key) {
Instruction* instr = assm_->InstructionAt(load_offset);
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->options(), entry_offset);
}
void ConstantPool::Check(Emission force_emit, Jump require_jump,
size_t margin) {
// Some short sequences of instructions must not be broken up by constant pool
// emission; such sequences are protected by a ConstPool::BlockScope.
if (IsBlocked()) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Some short sequences of instructions must not be broken up by constant pool
// emission; such sequences are protected by a BlockConstPoolScope.
if (is_const_pool_blocked()) {
// Something is wrong if emission is forced and blocked at the same time.
DCHECK_EQ(force_emit, Emission::kIfNeeded);
DCHECK(!force_emit);
return;
}
// We emit a constant pool only if:
// * it is not empty
// * emission is forced by parameter force_emit (e.g. at function end).
// * emission is mandatory or opportune according to {ShouldEmitNow}.
if (!IsEmpty() && (force_emit == Emission::kForced ||
ShouldEmitNow(require_jump, margin))) {
// Emit veneers for branches that would go out of range during emission of
// the constant pool.
int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
assm_->CheckVeneerPool(false, require_jump == Jump::kRequired,
assm_->kVeneerDistanceMargin + worst_case_size +
static_cast<int>(margin));
// Check that the code buffer is large enough before emitting the constant
// pool (this includes the gap to the relocation information).
int needed_space = worst_case_size + assm_->kGap;
while (assm_->buffer_space() <= needed_space) {
assm_->GrowBuffer();
}
EmitAndClear(require_jump);
// There is nothing to do if there are no pending constant pool entries.
if (constpool_.IsEmpty()) {
// Calculate the offset of the next check.
SetNextConstPoolCheckIn(kCheckConstPoolInterval);
return;
}
// Since a constant pool is (now) empty, move the check offset forward by
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance to the first instruction accessing the constant pool is
// kApproxMaxDistToConstPool or more.
// * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
int dist = constpool_.DistanceToFirstUse();
int count = constpool_.EntryCount();
if (!force_emit && (dist < kApproxMaxDistToConstPool) &&
(count < kApproxMaxPoolEntryCount)) {
return;
}
// Emit veneers for branches that would go out of range during emission of the
// constant pool.
int worst_case_size = constpool_.WorstCaseSize();
CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size);
// Check that the code buffer is large enough before emitting the constant
// pool (this includes the gap to the relocation information).
int needed_space = worst_case_size + kGap + 1 * kInstrSize;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
Label size_check;
bind(&size_check);
constpool_.Emit(require_jump);
DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
static_cast<unsigned>(worst_case_size));
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
SetNextCheckIn(ConstantPool::kCheckInterval);
SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}
// Pool entries are accessed with a pc-relative load, therefore this cannot be
// more than 1 * MB. Since constant pool emission checks are interval based, and we
// want to keep entries close to the code, we try to emit every 64KB.
const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
const size_t ConstantPool::kApproxMaxEntryCount = 512;
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) {
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstrSize;
return static_cast<size_t>(pc_offset()) >
return pc_offset() >
max_reachable_pc - margin - protection_offset -
unresolved_branches_.size() * kMaxVeneerCodeSize;
static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
void Assembler::RecordVeneerPool(int location_offset, int size) {
@@ -4423,9 +4594,8 @@ void Assembler::RecordVeneerPool(int location_offset, int size) {
reloc_info_writer.Write(&rinfo);
}
void Assembler::EmitVeneers(bool force_emit, bool need_protection,
size_t margin) {
BlockPoolsScope scope(this, ConstantPool::PoolEmissionCheck::kSkip);
void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
BlockPoolsScope scope(this);
RecordComment("[ Veneers");
// The exact size of the veneer pool must be recorded (see the comment at the
@@ -4495,7 +4665,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection,
}
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
size_t margin) {
int margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
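
The ConstPool::RecordEntry / AddSharedEntry logic restored earlier in this file deduplicates shareable constants: the first time a value is seen it gets a pool slot and relocation info is written; later uses only record another load offset against the same slot. A simplified sketch of that bookkeeping, with hypothetical names rather than the V8 types:

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Illustrative sketch only (hypothetical types, not the V8 classes).
class SharedConstants {
 public:
  // Returns true if this is the first occurrence of 'data', i.e. the caller
  // should also write relocation info; returns false for a duplicate.
  bool Record(uint64_t data, int pc_offset) {
    auto it = index_of_.find(data);
    if (it == index_of_.end()) {
      index_of_[data] = static_cast<int>(entries_.size());
      entries_.push_back({data, {pc_offset}});
      return true;
    }
    entries_[it->second].second.push_back(pc_offset);
    return false;
  }

 private:
  // value -> index into entries_; entries_ keeps insertion order plus every
  // pc offset whose load must be back-patched when the pool is emitted.
  std::map<uint64_t, int> index_of_;
  std::vector<std::pair<uint64_t, std::vector<int>>> entries_;
};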


@@ -169,6 +169,60 @@ class MemOperand {
unsigned shift_amount_;
};
class ConstPool {
public:
explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
// Returns true when we need to write RelocInfo and false when we do not.
bool RecordEntry(intptr_t data, RelocInfo::Mode mode);
int EntryCount() const { return static_cast<int>(entries_.size()); }
bool IsEmpty() const { return entries_.empty(); }
// Distance in bytes between the current pc and the first instruction
// using the pool. If there are no pending entries return kMaxInt.
int DistanceToFirstUse();
// Offset after which instructions using the pool will be out of range.
int MaxPcOffset();
// Maximum size the constant pool can be with current entries. It always
// includes alignment padding and branch over.
int WorstCaseSize();
// Size in bytes of the literal pool *if* it is emitted at the current
// pc. The size will include the branch over the pool if it was requested.
int SizeIfEmittedAtCurrentPc(bool require_jump);
// Emit the literal pool at the current pc with a branch over the pool if
// requested.
void Emit(bool require_jump);
// Discard any pending pool entries.
void Clear();
private:
void EmitMarker();
void EmitGuard();
void EmitEntries();
using SharedEntryMap = std::map<uint64_t, int>;
// Adds a shared entry to entries_, using 'entry_map' to determine whether we
// already track this entry. Returns true if this is the first time we add
// this entry, false otherwise.
bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset);
Assembler* assm_;
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_use_;
// Map of data to index in entries_ for shared entries.
SharedEntryMap shared_entries_;
// Map of address of handle to index in entries_. We need to keep track of
// code targets separately from other shared entries, as they can be
// relocated.
SharedEntryMap handle_to_index_map_;
// Values, pc offset(s) of entries. Use a vector to preserve the order of
// insertion, as the serializer expects code target RelocInfo to point to
// constant pool addresses in an ascending order.
std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
};
// -----------------------------------------------------------------------------
// Assembler.
@@ -315,6 +369,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Prevent constant pool emission until EndBlockConstPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
void StartBlockConstPool();
// Resume constant pool emission. Needs to be called as many times as
// StartBlockConstPool to have an effect.
void EndBlockConstPool();
bool is_const_pool_blocked() const;
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
@@ -333,6 +397,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return veneer_pool_blocked_nesting_ > 0;
}
// Block/resume emission of constant pools and veneer pools.
void StartBlockPools() {
StartBlockConstPool();
StartBlockVeneerPool();
}
void EndBlockPools() {
EndBlockConstPool();
EndBlockVeneerPool();
}
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -2044,6 +2118,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Code generation helpers --------------------------------------------------
bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(ptrdiff_t offset) const {
@@ -2327,26 +2403,31 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// FP register type.
inline static Instr FPType(VRegister fd);
// Scope class for postponing the constant pool generation.
class BlockConstPoolScope {
public:
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockConstPool();
}
~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
private:
Assembler* assem_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
void ForceConstantPoolEmissionWithoutJump() {
constpool_.Check(Emission::kForced, Jump::kOmitted);
}
void ForceConstantPoolEmissionWithJump() {
constpool_.Check(Emission::kForced, Jump::kRequired);
}
// Check if the const pool needs to be emitted while pretending that {margin}
// more bytes of instructions have already been emitted.
void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
}
// Check if it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.
bool ShouldEmitVeneer(int max_reachable_pc,
size_t margin = kVeneerDistanceMargin);
bool ShouldEmitVeneers(size_t margin = kVeneerDistanceMargin) {
int margin = kVeneerDistanceMargin);
bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
}
@@ -2360,31 +2441,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// If need_protection is true, the veneers are protected by a branch jumping
// over the code.
void EmitVeneers(bool force_emit, bool need_protection,
size_t margin = kVeneerDistanceMargin);
int margin = kVeneerDistanceMargin);
void EmitVeneersGuard() { EmitPoolGuard(); }
// Checks whether veneers need to be emitted at this point.
// If force_emit is set, a veneer is generated for *all* unresolved branches.
void CheckVeneerPool(bool force_emit, bool require_jump,
size_t margin = kVeneerDistanceMargin);
using BlockConstPoolScope = ConstantPool::BlockScope;
int margin = kVeneerDistanceMargin);
class BlockPoolsScope {
public:
explicit BlockPoolsScope(Assembler* assem)
: assem_(assem), block_const_pool_(assem) {
assem_->StartBlockVeneerPool();
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockPools();
}
BlockPoolsScope(Assembler* assem, ConstantPool::PoolEmissionCheck check)
: assem_(assem), block_const_pool_(assem, check) {
assem_->StartBlockVeneerPool();
}
~BlockPoolsScope() { assem_->EndBlockVeneerPool(); }
~BlockPoolsScope() { assem_->EndBlockPools(); }
private:
Assembler* assem_;
BlockConstPoolScope block_const_pool_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
@@ -2547,6 +2620,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Verify that a label's link chain is intact.
void CheckLabelLinkChain(Label const* label);
// Set how far from current pc the next constant pool check will be.
void SetNextConstPoolCheckIn(int instructions) {
next_constant_pool_check_ = pc_offset() + instructions * kInstrSize;
}
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
@@ -2574,6 +2652,39 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void CheckBufferSpace();
void CheckBuffer();
// Pc offset of the next constant pool check.
int next_constant_pool_check_;
// Constant pool generation
// Pools are emitted in the instruction stream. They are emitted when:
// * the distance to the first use is above a pre-defined distance or
// * the number of entries in the pool is above a pre-defined size or
// * code generation is finished
// If a pool needs to be emitted before code generation is finished a branch
// over the emitted pool will be inserted.
// Constants in the pool may be addresses of functions that get relocated;
// if so, a relocation info entry is associated to the constant pool entry.
// Repeated checking whether the constant pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
static constexpr int kCheckConstPoolInterval = 128;
// Distance to first use after which a pool will be emitted. Pool entries
// are accessed with a pc-relative load, therefore this cannot be more than
// 1 * MB. Since constant pool emission checks are interval based this value
// is an approximation.
static constexpr int kApproxMaxDistToConstPool = 64 * KB;
// Number of pool entries after which a pool will be emitted. Since constant
// pool emission checks are interval based this value is an approximation.
static constexpr int kApproxMaxPoolEntryCount = 512;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -2587,6 +2698,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// are already bound.
std::deque<int> internal_reference_positions_;
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
// stored in a separate buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
// The pending constant pool.
ConstPool constpool_;
protected:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
@@ -2599,18 +2720,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
#ifdef DEBUG
// Functions used for testing.
size_t GetConstantPoolEntriesSizeForTesting() const {
int GetConstantPoolEntriesSizeForTesting() const {
// Do not include branch over the pool.
return constpool_.Entry32Count() * kInt32Size +
constpool_.Entry64Count() * kInt64Size;
return constpool_.EntryCount() * kSystemPointerSize;
}
static size_t GetCheckConstPoolIntervalForTesting() {
return ConstantPool::kCheckInterval;
static constexpr int GetCheckConstPoolIntervalForTesting() {
return kCheckConstPoolInterval;
}
static size_t GetApproxMaxDistToConstPoolForTesting() {
return ConstantPool::kApproxDistToPool64;
static constexpr int GetApproxMaxDistToConstPoolForTesting() {
return kApproxMaxDistToConstPool;
}
#endif
@@ -2652,7 +2772,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
// This PC-offset of the next veneer pool check helps reduce the overhead
// This is similar to next_constant_pool_check_ and helps reduce the overhead
// of checking for veneer pools.
// It is maintained to the closest unresolved branch limit minus the maximum
// veneer margin (or kMaxInt if there are no unresolved branches).
@@ -2677,11 +2797,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
int WriteCodeComments();
// The pending constant pool.
ConstantPool constpool_;
friend class EnsureSpace;
friend class ConstantPool;
friend class ConstPool;
};
class PatchingAssembler : public Assembler {
@@ -2698,12 +2815,19 @@ class PatchingAssembler : public Assembler {
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
: Assembler(options,
ExternalAssemblerBuffer(start, count * kInstrSize + kGap)),
block_constant_pool_emission_scope(this) {}
ExternalAssemblerBuffer(start, count * kInstrSize + kGap)) {
// Block constant pool emission.
StartBlockPools();
}
~PatchingAssembler() {
// Const pool should still be blocked.
DCHECK(is_const_pool_blocked());
EndBlockPools();
// Verify we have generated the number of instruction we expected.
DCHECK_EQ(pc_offset() + kGap, buffer_->size());
// Verify no relocation information has been emitted.
DCHECK(IsConstPoolEmpty());
}
// See definition of PatchAdrFar() for details.
@@ -2711,9 +2835,6 @@ class PatchingAssembler : public Assembler {
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
void PatchSubSp(uint32_t immediate);
private:
BlockPoolsScope block_constant_pool_emission_scope;
};
class EnsureSpace {
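
The StartBlockPools/EndBlockPools pair and BlockPoolsScope restored in the header above follow a standard nesting-counter RAII pattern: blocking increments a counter and pushes the next check out of reach, unblocking decrements it and forces a prompt re-check. A compact sketch of that pattern, again with simplified, hypothetical names:

#include <climits>

// Illustrative sketch only (hypothetical names, not part of this commit).
class PoolBlocker {
 public:
  void StartBlock() {
    // Nested blocks are fine; only the outermost transition matters.
    if (nesting_++ == 0) next_check_ = INT_MAX;  // disable checks while blocked
  }
  void EndBlock() {
    if (--nesting_ == 0) next_check_ = 0;  // force a check right after unblocking
  }
  bool IsBlocked() const { return nesting_ > 0; }

 private:
  int nesting_ = 0;
  int next_check_ = 0;
};

// RAII guard so a block can never be left unbalanced on early returns.
class BlockScope {
 public:
  explicit BlockScope(PoolBlocker* blocker) : blocker_(blocker) {
    blocker_->StartBlock();
  }
  ~BlockScope() { blocker_->EndBlock(); }
  BlockScope(const BlockScope&) = delete;
  BlockScope& operator=(const BlockScope&) = delete;

 private:
  PoolBlocker* blocker_;
};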


@@ -32,8 +32,8 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kLoadLiteralScaleLog2 = 2;
constexpr int kMaxLoadLiteralRange = 1 * MB;
constexpr size_t kLoadLiteralScaleLog2 = 2;
constexpr size_t kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;


@@ -1923,15 +1923,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
class InstructionAccurateScope {
public:
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
: tasm_(tasm),
block_pool_(tasm, count * kInstrSize)
: tasm_(tasm)
#ifdef DEBUG
,
size_(count * kInstrSize)
#endif
{
tasm_->CheckVeneerPool(false, true, count * kInstrSize);
tasm_->StartBlockVeneerPool();
// Before blocking the const pool, see if it needs to be emitted.
tasm_->CheckConstPool(false, true);
tasm_->CheckVeneerPool(false, true);
tasm_->StartBlockPools();
#ifdef DEBUG
if (count != 0) {
tasm_->bind(&start_);
@@ -1942,7 +1944,7 @@ }
}
~InstructionAccurateScope() {
tasm_->EndBlockVeneerPool();
tasm_->EndBlockPools();
#ifdef DEBUG
if (start_.is_bound()) {
DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
@@ -1953,7 +1955,6 @@ private:
private:
TurboAssembler* tasm_;
TurboAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
size_t size_;
Label start_;


@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/codegen/constant-pool.h"
#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler-inl.h"
namespace v8 {
@@ -211,253 +210,5 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
#endif // defined(V8_TARGET_ARCH_PPC)
#if defined(V8_TARGET_ARCH_ARM64)
// Constant Pool.
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
RelocInfo::Mode rmode) {
ConstantPoolKey key(data, rmode);
CHECK(key.is_value32());
return RecordKey(std::move(key), assm_->pc_offset());
}
RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
RelocInfo::Mode rmode) {
ConstantPoolKey key(data, rmode);
CHECK(!key.is_value32());
return RecordKey(std::move(key), assm_->pc_offset());
}
RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
if (write_reloc_info == RelocInfoStatus::kMustRecord) {
if (key.is_value32()) {
if (entry32_count_ == 0) first_use_32_ = offset;
++entry32_count_;
} else {
if (entry64_count_ == 0) first_use_64_ = offset;
++entry64_count_;
}
}
entries_.insert(std::make_pair(key, offset));
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
// Request constant pool emission after the next instruction.
SetNextCheckIn(1);
}
return write_reloc_info;
}
RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
const ConstantPoolKey& key) {
if (key.AllowsDeduplication()) {
auto existing = entries_.find(key);
if (existing != entries_.end()) {
return RelocInfoStatus::kMustOmitForDuplicate;
}
}
return RelocInfoStatus::kMustRecord;
}
void ConstantPool::EmitAndClear(Jump require_jump) {
DCHECK(!IsBlocked());
// Prevent recursive pool emission.
Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
Alignment require_alignment =
IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
int size = ComputeSize(require_jump, require_alignment);
Label size_check;
assm_->bind(&size_check);
assm_->RecordConstPool(size);
// Emit the constant pool. It is preceded by an optional branch if
// {require_jump} and a header which will:
// 1) Encode the size of the constant pool, for use by the disassembler.
// 2) Terminate the program, to try to prevent execution from accidentally
// flowing into the constant pool.
// 3) Align the 64-bit pool entries to 64 bits.
// TODO(all): Make the alignment part less fragile. Currently code is
// allocated as a byte array so there are no guarantees the alignment will
// be preserved on compaction. Currently it works as allocation seems to be
// 64-bit aligned.
Label after_pool;
if (require_jump == Jump::kRequired) assm_->b(&after_pool);
assm_->RecordComment("[ Constant Pool");
EmitPrologue(require_alignment);
if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
EmitEntries();
assm_->RecordComment("]");
if (after_pool.is_linked()) assm_->bind(&after_pool);
DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
Clear();
}
void ConstantPool::Clear() {
entries_.clear();
first_use_32_ = -1;
first_use_64_ = -1;
entry32_count_ = 0;
entry64_count_ = 0;
next_check_ = 0;
}
void ConstantPool::StartBlock() {
if (blocked_nesting_ == 0) {
// Prevent constant pool checks from happening by setting the next check to
// the biggest possible offset.
next_check_ = kMaxInt;
}
++blocked_nesting_;
}
void ConstantPool::EndBlock() {
--blocked_nesting_;
if (blocked_nesting_ == 0) {
DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
// Make sure a check happens quickly after getting unblocked.
next_check_ = 0;
}
}
bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
void ConstantPool::SetNextCheckIn(size_t instructions) {
next_check_ =
assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}
void ConstantPool::EmitEntries() {
for (auto iter = entries_.begin(); iter != entries_.end();) {
DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
auto range = entries_.equal_range(iter->first);
bool shared = iter->first.AllowsDeduplication();
for (auto it = range.first; it != range.second; ++it) {
SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
if (!shared) Emit(it->first);
}
if (shared) Emit(iter->first);
iter = range.second;
}
}
void ConstantPool::Emit(const ConstantPoolKey& key) {
if (key.is_value32()) {
assm_->dd(key.value32());
} else {
assm_->dq(key.value64());
}
}
bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
if (IsEmpty()) return false;
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
return true;
}
// We compute {dist32/64}, i.e. the distance from the first instruction
// accessing a 32bit/64bit entry in the constant pool to any of the
// 32bit/64bit constant pool entries, respectively. This is required because
// we do not guarantee that entries are emitted in order of reference, i.e. it
// is possible that the entry with the earliest reference is emitted last.
// The constant pool should be emitted if either of the following is true:
// (A) {dist32/64} will be out of range at the next check in.
// (B) Emission can be done behind an unconditional branch and {dist32/64}
// exceeds {kOpportunityDist*}.
// (C) {dist32/64} exceeds the desired approximate distance to the pool.
int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
if (Entry64Count() != 0) {
// The 64-bit constants are always emitted before the 32-bit constants, so
// we subtract the size of the 32-bit constants from {size}.
size_t dist64 = pool_end_64 - first_use_64_;
bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
bool opportune_emission_without_jump =
require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
if (next_check_too_late || opportune_emission_without_jump ||
approximate_distance_exceeded) {
return true;
}
}
if (Entry32Count() != 0) {
size_t dist32 = pool_end_32 - first_use_32_;
bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
bool opportune_emission_without_jump =
require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
if (next_check_too_late || opportune_emission_without_jump ||
approximate_distance_exceeded) {
return true;
}
}
return false;
}
int ConstantPool::ComputeSize(Jump require_jump,
Alignment require_alignment) const {
int size_up_to_marker = PrologueSize(require_jump);
int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
size_t size_after_marker =
Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
return size_up_to_marker + static_cast<int>(size_after_marker);
}
Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
int pc_offset) const {
int size_up_to_marker = PrologueSize(require_jump);
if (Entry64Count() != 0 &&
!IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
return Alignment::kRequired;
}
return Alignment::kOmitted;
}
bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
// Check that all entries are in range if the pool is emitted at {pc_offset}.
// This ignores kPcLoadDelta (conservatively, since all offsets are positive),
// and over-estimates the last entry's address with the pool's end.
Alignment require_alignment =
IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
size_t pool_end_32 =
pc_offset + ComputeSize(Jump::kRequired, require_alignment);
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
bool entries_in_range_32 =
Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
bool entries_in_range_64 =
Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
return entries_in_range_32 && entries_in_range_64;
}
ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
: pool_(&assm->constpool_) {
pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
pool_->StartBlock();
}
ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
: pool_(&assm->constpool_) {
DCHECK_EQ(check, PoolEmissionCheck::kSkip);
pool_->StartBlock();
}
ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
void ConstantPool::MaybeCheck() {
if (assm_->pc_offset() >= next_check_) {
Check(Emission::kIfNeeded, Jump::kRequired);
}
}
#endif // defined(V8_TARGET_ARCH_ARM64)
} // namespace internal
} // namespace v8
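
The ShouldEmitNow logic in the section above boils down to comparing the distance from the first pool reference to the (worst-case) end of the pool against a few thresholds. A simplified sketch of that decision, with made-up threshold names standing in for the kMaxDistToPool*/kApproxDistToPool*/kOpportunityDistToPool* constants:

#include <cstddef>

// Illustrative sketch only (hypothetical thresholds, not the V8 constants).
struct EmissionPolicy {
  size_t check_interval;     // how often the periodic check runs, in bytes
  size_t hard_limit;         // pc-relative load range that must never be exceeded
  size_t approx_limit;       // distance at which we emit even if a jump is needed
  size_t opportunity_limit;  // distance at which we emit when no jump is needed

  bool ShouldEmitNow(size_t dist_to_pool_end, bool jump_can_be_omitted) const {
    // (A) Waiting for the next periodic check could already be too late.
    if (dist_to_pool_end + 2 * check_interval >= hard_limit) return true;
    // (B) We are at a spot where no branch over the pool is required.
    if (jump_can_be_omitted && dist_to_pool_end >= opportunity_limit) return true;
    // (C) We drifted past the desired approximate distance to the pool.
    return dist_to_pool_end >= approx_limit;
  }
};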


@@ -15,8 +15,6 @@
namespace v8 {
namespace internal {
class Instruction;
// -----------------------------------------------------------------------------
// Constant pool support
@@ -163,187 +161,6 @@ class ConstantPoolBuilder {
#endif // defined(V8_TARGET_ARCH_PPC)
#if defined(V8_TARGET_ARCH_ARM64)
class ConstantPoolKey {
public:
explicit ConstantPoolKey(uint64_t value,
RelocInfo::Mode rmode = RelocInfo::NONE)
: is_value32_(false), value64_(value), rmode_(rmode) {}
explicit ConstantPoolKey(uint32_t value,
RelocInfo::Mode rmode = RelocInfo::NONE)
: is_value32_(true), value32_(value), rmode_(rmode) {}
uint64_t value64() const {
CHECK(!is_value32_);
return value64_;
}
uint32_t value32() const {
CHECK(is_value32_);
return value32_;
}
bool is_value32() const { return is_value32_; }
RelocInfo::Mode rmode() const { return rmode_; }
bool AllowsDeduplication() const {
DCHECK(rmode_ != RelocInfo::CONST_POOL &&
rmode_ != RelocInfo::VENEER_POOL &&
rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
rmode_ != RelocInfo::DEOPT_INLINING_ID &&
rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID);
// CODE_TARGETs can be shared because they aren't patched anymore,
// and we make sure we emit only one reloc info for them (thus delta
// patching will apply the delta only once). At the moment, we do not dedup
// code targets if they are wrapped in a heap object request (value == 0).
bool is_sharable_code_target =
rmode_ == RelocInfo::CODE_TARGET &&
(is_value32() ? (value32() != 0) : (value64() != 0));
return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target;
}
private:
bool is_value32_;
union {
uint64_t value64_;
uint32_t value32_;
};
RelocInfo::Mode rmode_;
};
// Order for pool entries. 64bit entries go first.
inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
if (a.is_value32() < b.is_value32()) return true;
if (a.is_value32() > b.is_value32()) return false;
if (a.rmode() < b.rmode()) return true;
if (a.rmode() > b.rmode()) return false;
if (a.is_value32()) return a.value32() < b.value32();
return a.value64() < b.value64();
}
inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
return false;
}
if (a.is_value32()) return a.value32() == b.value32();
return a.value64() == b.value64();
}
// Constant pool generation
enum class Jump { kOmitted, kRequired };
enum class Emission { kIfNeeded, kForced };
enum class Alignment { kOmitted, kRequired };
enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
// If a long code sequence does not contain unconditional jumps, it is
// necessary to emit the constant pool before the pool gets too far from the
// location it is accessed from. In this case, we emit a jump over the emitted
// constant pool.
// Constants in the pool may be addresses of functions that get relocated;
// if so, a relocation info entry is associated to the constant pool entry.
class ConstantPool {
public:
explicit ConstantPool(Assembler* assm);
~ConstantPool();
// Returns true when we need to write RelocInfo and false when we do not.
RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
size_t Entry32Count() const { return entry32_count_; }
size_t Entry64Count() const { return entry64_count_; }
bool IsEmpty() const { return entries_.empty(); }
// Check if pool will be out of range at {pc_offset}.
bool IsInImmRangeIfEmittedAt(int pc_offset);
// Size in bytes of the constant pool. Depending on parameters, the size will
// include the branch over the pool and alignment padding.
int ComputeSize(Jump require_jump, Alignment require_alignment) const;
// Emit the pool at the current pc with a branch over the pool if requested.
void EmitAndClear(Jump require);
bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
size_t margin = 0);
V8_EXPORT_PRIVATE void MaybeCheck();
void Clear();
// Constant pool emission can be blocked temporarily.
bool IsBlocked() const;
// Repeated checking whether the constant pool should be emitted is expensive;
// only check once a number of instructions have been generated.
void SetNextCheckIn(size_t instructions);
// Scope class for postponing the constant pool generation.
enum class PoolEmissionCheck { kSkip };
class V8_EXPORT_PRIVATE BlockScope {
public:
// BlockScope immediately emits the pool if necessary to ensure that
// during the block scope at least {margin} bytes can be emitted without
// pool emission becoming necessary.
explicit BlockScope(Assembler* pool, size_t margin = 0);
BlockScope(Assembler* pool, PoolEmissionCheck);
~BlockScope();
private:
ConstantPool* pool_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
};
// Hard limit to the const pool which must not be exceeded.
static const size_t kMaxDistToPool32;
static const size_t kMaxDistToPool64;
// Approximate distance where the pool should be emitted.
static const size_t kApproxDistToPool32;
V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
// Approximate distance where the pool may be emitted if
// no jump is required (due to a recent unconditional jump).
static const size_t kOpportunityDistToPool32;
static const size_t kOpportunityDistToPool64;
// PC distance between constant pool checks.
V8_EXPORT_PRIVATE static const size_t kCheckInterval;
// Number of entries in the pool which trigger a check.
static const size_t kApproxMaxEntryCount;
private:
void StartBlock();
void EndBlock();
void EmitEntries();
void EmitPrologue(Alignment require_alignment);
int PrologueSize(Jump require_jump) const;
RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
void Emit(const ConstantPoolKey& key);
void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
const ConstantPoolKey& key);
Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
int pc_offset) const;
Assembler* assm_;
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_use_32_ = -1;
int first_use_64_ = -1;
// We sort not according to insertion order, but since we do not insert
// addresses (for heap objects we insert an index which is created in
// increasing order), the order is deterministic. We map each entry to the
// pc offset of the load. We use a multimap because we need to record the
// pc offset of each load of the same constant so that the immediate of the
// loads can be back-patched when the pool is emitted.
std::multimap<ConstantPoolKey, int> entries_;
size_t entry32_count_ = 0;
size_t entry64_count_ = 0;
int next_check_ = 0;
int blocked_nesting_ = 0;
};
#endif // defined(V8_TARGET_ARCH_ARM64)
} // namespace internal
} // namespace v8


@@ -2400,13 +2400,12 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Adr(temp, &table);
__ Add(temp, temp, Operand(input, UXTW, 2));
__ Br(temp);
{
TurboAssembler::BlockPoolsScope block_pools(tasm());
__ Bind(&table);
for (size_t index = 0; index < case_count; ++index) {
__ B(GetLabel(i.InputRpo(index + 2)));
}
__ StartBlockPools();
__ Bind(&table);
for (size_t index = 0; index < case_count; ++index) {
__ B(GetLabel(i.InputRpo(index + 2)));
}
__ EndBlockPools();
}
void CodeGenerator::FinishFrame(Frame* frame) {
@@ -2656,7 +2655,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {


@@ -172,7 +172,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
patching_assembler.PatchSubSp(bytes);
}
void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }


@@ -103,7 +103,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
JumpToInstructionStream(builtin_target);
ForceConstantPoolEmissionWithoutJump();
CheckConstPool(true, false); // force emit of const pool
}
void JumpTableAssembler::EmitJumpSlot(Address target) {


@@ -6486,90 +6486,76 @@ TEST(ldr_literal) {
#ifdef DEBUG
// These tests rely on functions available in debug mode.
enum LiteralPoolEmitOutcome { EmitExpected, NoEmitExpected };
enum LiteralPoolEmissionAlignment { EmitAtUnaligned, EmitAtAligned };
static void LdrLiteralRangeHelper(
size_t range, LiteralPoolEmitOutcome outcome,
LiteralPoolEmissionAlignment unaligned_emission) {
static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
size_t prepadding = 0) {
SETUP_SIZE(static_cast<int>(range + 1024));
const size_t first_pool_entries = 2;
const size_t first_pool_size_bytes = first_pool_entries * kInt64Size;
size_t code_size = 0;
const size_t pool_entries = 2;
const size_t kEntrySize = 8;
START();
// Force a pool dump so the pool starts off empty.
__ ForceConstantPoolEmissionWithJump();
__ CheckConstPool(true, true);
CHECK_CONSTANT_POOL_SIZE(0);
// Emit prepadding to influence alignment of the pool.
bool currently_aligned = IsAligned(__ pc_offset(), kInt64Size);
if ((unaligned_emission == EmitAtUnaligned && currently_aligned) ||
(unaligned_emission == EmitAtAligned && !currently_aligned)) {
__ Nop();
}
// Emit prepadding to influence alignment of the pool; we don't count this
// into code size.
for (size_t i = 0; i < prepadding; ++i) __ Nop();
int initial_pc_offset = __ pc_offset();
__ Ldr(x0, isolate->factory()->undefined_value());
__ Ldr(x1, isolate->factory()->the_hole_value());
CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
code_size += 2 * kInstrSize;
CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
// Check that the requested range (allowing space for a branch over the pool)
// can be handled by this test.
CHECK_LE(code_size, range);
size_t expected_pool_size = 0;
#if defined(_M_ARM64) && !defined(__clang__)
auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
#else
auto PoolSizeAt = [unaligned_emission](int pc_offset) {
auto PoolSizeAt = [](int pc_offset) {
#endif
// To determine padding, consider the size of the prologue of the pool,
// and the jump around the pool, which we always need.
size_t prologue_size = 2 * kInstrSize + kInstrSize;
size_t pc = pc_offset + prologue_size;
const size_t padding = IsAligned(pc, kInt64Size) ? 0 : kInt32Size;
CHECK_EQ(padding == 0, unaligned_emission == EmitAtAligned);
return prologue_size + first_pool_size_bytes + padding;
const size_t padding = IsAligned(pc, 8) ? 0 : 4;
return prologue_size + pool_entries * kEntrySize + padding;
};
int pc_offset_before_emission = -1;
bool pool_was_emitted = false;
while (__ pc_offset() - initial_pc_offset < static_cast<intptr_t>(range)) {
// Emit NOPs up to 'range'.
while (code_size < range) {
pc_offset_before_emission = __ pc_offset() + kInstrSize;
__ Nop();
if (__ GetConstantPoolEntriesSizeForTesting() == 0) {
pool_was_emitted = true;
break;
}
code_size += kInstrSize;
}
CHECK_EQ(code_size, range);
if (outcome == EmitExpected) {
if (!pool_was_emitted) {
FATAL(
"Pool was not emitted up to pc_offset %d which corresponds to a "
"distance to the first constant of %d bytes",
__ pc_offset(), __ pc_offset() - initial_pc_offset);
}
CHECK_CONSTANT_POOL_SIZE(0);
// Check that the size of the emitted constant pool is as expected.
expected_pool_size = PoolSizeAt(pc_offset_before_emission);
CHECK_EQ(pc_offset_before_emission + expected_pool_size, __ pc_offset());
} else {
CHECK_EQ(outcome, NoEmitExpected);
if (pool_was_emitted) {
FATAL("Pool was unexpectedly emitted at pc_offset %d ",
pc_offset_before_emission);
}
CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
CHECK_EQ(pc_offset_before_emission, __ pc_offset());
}
// Force a pool flush to check that a second pool functions correctly.
__ ForceConstantPoolEmissionWithJump();
__ CheckConstPool(true, true);
CHECK_CONSTANT_POOL_SIZE(0);
// These loads should be after the pool (and will require a new one).
const int second_pool_entries = 2;
__ Ldr(x4, isolate->factory()->true_value());
__ Ldr(x5, isolate->factory()->false_value());
CHECK_CONSTANT_POOL_SIZE(second_pool_entries * kInt64Size);
CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
END();
if (outcome == EmitExpected) {
@@ -6580,12 +6566,9 @@ static void LdrLiteralRangeHelper(
Instruction* marker =
reinterpret_cast<Instruction*>(pool_start + kInstrSize);
CHECK(marker->IsLdrLiteralX());
size_t pool_data_start_offset = pc_offset_before_emission + kInstrSize;
size_t padding =
IsAligned(pool_data_start_offset, kInt64Size) ? 0 : kInt32Size;
size_t marker_size = kInstrSize;
CHECK_EQ((first_pool_size_bytes + marker_size + padding) / kInt32Size,
marker->ImmLLiteral());
const size_t padding =
IsAligned(pc_offset_before_emission + kInstrSize, kEntrySize) ? 0 : 1;
CHECK_EQ(pool_entries * 2 + 1 + padding, marker->ImmLLiteral());
}
RUN();
@@ -6599,34 +6582,28 @@ static void LdrLiteralRangeHelper(
TEST(ldr_literal_range_max_dist_emission_1) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
EmitExpected, EmitAtAligned);
LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
EmitExpected);
}
TEST(ldr_literal_range_max_dist_emission_2) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
EmitExpected, EmitAtUnaligned);
LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
EmitExpected, 1);
}
TEST(ldr_literal_range_max_dist_no_emission_1) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
NoEmitExpected, EmitAtUnaligned);
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
NoEmitExpected);
}
TEST(ldr_literal_range_max_dist_no_emission_2) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
NoEmitExpected, EmitAtAligned);
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
NoEmitExpected, 1);
}
#endif