[arm64] Share constant pool entries in snapshot.

Port c15b3ffc77 and 6a99238b90 to arm64.

This reduces the snapshot size for arm64 by about 5.5%.

BUG=
Review-Url: https://codereview.chromium.org/2937413002
Cr-Commit-Position: refs/heads/master@{#46214}
parent c221758770
commit 118958f517
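The core of the change is replacing the old multimap/vector bookkeeping with a value-to-index map over a single ordered entry list, so repeated constants get one pool slot and, for shareable modes, only the first use writes RelocInfo. Below is a minimal, self-contained sketch of that scheme (not part of the diff; the names mirror ConstPool::AddSharedEntry but the surrounding types are simplified stand-ins, not V8's):

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

int main() {
  // Hypothetical mini-pool mirroring the containers the patch introduces:
  // a value -> index map for shared entries, plus a vector of
  // (value, pc offsets) pairs that preserves insertion order.
  std::map<uint64_t, int> shared;
  std::vector<std::pair<uint64_t, std::vector<int>>> entries;

  // Same shape as ConstPool::AddSharedEntry in the diff below.
  auto add_shared = [&](uint64_t data, int offset) {
    auto existing = shared.find(data);
    if (existing == shared.end()) {
      shared[data] = static_cast<int>(entries.size());
      entries.push_back({data, {offset}});
      return true;   // first use: caller also writes RelocInfo
    }
    entries[existing->second].second.push_back(offset);
    return false;    // duplicate: one pool slot, no extra RelocInfo
  };

  assert(add_shared(0xCAFE, 0));    // new entry
  assert(!add_shared(0xCAFE, 8));   // shared with the first use
  assert(add_shared(0xBEEF, 16));   // another new entry
  assert(entries.size() == 2);      // two pool slots for three loads
  assert(entries[0].second.size() == 2);
  return 0;
}

The patch applies the same idea twice: keyed by the raw value for ordinary shareable constants, and keyed by the handle address for code targets.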
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -333,36 +333,51 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
   return !RelocInfo::IsNone(rmode);
 }
 
+bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
+                               int offset) {
+  auto existing = entry_map.find(data);
+  if (existing == entry_map.end()) {
+    entry_map[data] = static_cast<int>(entries_.size());
+    entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
+    return true;
+  }
+  int index = existing->second;
+  entries_[index].second.push_back(offset);
+  return false;
+}
 
 // Constant Pool.
-void ConstPool::RecordEntry(intptr_t data,
-                            RelocInfo::Mode mode) {
+bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
   DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
          mode != RelocInfo::VENEER_POOL &&
          mode != RelocInfo::CODE_AGE_SEQUENCE &&
         mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
         mode != RelocInfo::DEOPT_INLINING_ID &&
         mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
+
+  bool write_reloc_info = true;
+
   uint64_t raw_data = static_cast<uint64_t>(data);
   int offset = assm_->pc_offset();
   if (IsEmpty()) {
     first_use_ = offset;
   }
 
-  std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
   if (CanBeShared(mode)) {
-    shared_entries_.insert(entry);
-    if (shared_entries_.count(entry.first) == 1) {
-      shared_entries_count++;
-    }
+    write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
+  } else if (mode == RelocInfo::CODE_TARGET &&
+             assm_->IsCodeTargetSharingAllowed()) {
+    write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
   } else {
-    unique_entries_.push_back(entry);
+    entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
   }
 
   if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
     // Request constant pool emission after the next instruction.
     assm_->SetNextConstPoolCheckIn(1);
   }
+
+  return write_reloc_info;
 }
@@ -471,8 +486,8 @@ void ConstPool::Emit(bool require_jump) {
 
 void ConstPool::Clear() {
   shared_entries_.clear();
-  shared_entries_count = 0;
-  unique_entries_.clear();
+  handle_to_index_map_.clear();
+  entries_.clear();
   first_use_ = -1;
 }
 
@@ -482,8 +497,7 @@ bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
   DCHECK(mode != RelocInfo::NONE32);
 
   return RelocInfo::IsNone(mode) ||
-         (!assm_->serializer_enabled() &&
-          (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
+         (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
 }
 
 
@@ -541,43 +555,19 @@ void ConstPool::EmitGuard() {
 void ConstPool::EmitEntries() {
   DCHECK(IsAligned(assm_->pc_offset(), 8));
 
-  typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
-  SharedEntriesIterator value_it;
-  // Iterate through the keys (constant pool values).
-  for (value_it = shared_entries_.begin();
-       value_it != shared_entries_.end();
-       value_it = shared_entries_.upper_bound(value_it->first)) {
-    std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
-    uint64_t data = value_it->first;
-    range = shared_entries_.equal_range(data);
-    SharedEntriesIterator offset_it;
-    // Iterate through the offsets of a given key.
-    for (offset_it = range.first; offset_it != range.second; offset_it++) {
-      Instruction* instr = assm_->InstructionAt(offset_it->second);
+  // Emit entries.
+  for (const auto& entry : entries_) {
+    for (const auto& pc : entry.second) {
+      Instruction* instr = assm_->InstructionAt(pc);
 
       // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
       DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
       instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
     }
-    assm_->dc64(data);
-  }
-  shared_entries_.clear();
-  shared_entries_count = 0;
 
-  // Emit unique entries.
-  std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
-  for (unique_it = unique_entries_.begin();
-       unique_it != unique_entries_.end();
-       unique_it++) {
-    Instruction* instr = assm_->InstructionAt(unique_it->second);
-
-    // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
-    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
-    instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
-    assm_->dc64(unique_it->first);
+    assm_->dc64(entry.first);
   }
-  unique_entries_.clear();
-  first_use_ = -1;
+
+  Clear();
 }
 
@@ -588,22 +578,25 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
       unresolved_branches_() {
   const_pool_blocked_nesting_ = 0;
   veneer_pool_blocked_nesting_ = 0;
+  code_target_sharing_blocked_nesting_ = 0;
   Reset();
 }
 
 
 Assembler::~Assembler() {
   DCHECK(constpool_.IsEmpty());
-  DCHECK(const_pool_blocked_nesting_ == 0);
-  DCHECK(veneer_pool_blocked_nesting_ == 0);
+  DCHECK_EQ(const_pool_blocked_nesting_, 0);
+  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
+  DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
 }
 
 
 void Assembler::Reset() {
 #ifdef DEBUG
   DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
-  DCHECK(const_pool_blocked_nesting_ == 0);
-  DCHECK(veneer_pool_blocked_nesting_ == 0);
+  DCHECK_EQ(const_pool_blocked_nesting_, 0);
+  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
+  DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
   DCHECK(unresolved_branches_.empty());
   memset(buffer_, 0, pc_ - buffer_);
 #endif
@@ -4758,6 +4751,8 @@ void Assembler::GrowBuffer() {
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   // We do not try to reuse pool constants.
   RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+  bool write_reloc_info = true;
+
   if (((rmode >= RelocInfo::COMMENT) &&
        (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
       (rmode == RelocInfo::INTERNAL_REFERENCE) ||
@@ -4773,13 +4768,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
            RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
-    constpool_.RecordEntry(data, rmode);
+    write_reloc_info = constpool_.RecordEntry(data, rmode);
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
   }
 
-  if (!RelocInfo::IsNone(rmode)) {
+  if (!RelocInfo::IsNone(rmode) && write_reloc_info) {
     // Don't record external references unless the heap will be serialized.
     if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
         !serializer_enabled() && !emit_debug_code()) {
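The bool returned by the new RecordEntry is what lets RecordRelocInfo above skip duplicate RelocInfo records. A hedged sketch of that caller contract (not part of the diff; the linear scan and the log line are stand-ins for the real SharedEntryMap lookup and RelocInfoWriter):

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for ConstPool::RecordEntry: returns true only on first use.
bool RecordEntry(std::vector<uint64_t>& pool, uint64_t data) {
  for (uint64_t existing : pool) {
    if (existing == data) return false;  // shared: suppress RelocInfo
  }
  pool.push_back(data);
  return true;
}

int main() {
  std::vector<uint64_t> pool;
  for (uint64_t data : {10u, 20u, 10u}) {
    bool write_reloc_info = RecordEntry(pool, data);
    // Mirrors "if (!RelocInfo::IsNone(rmode) && write_reloc_info)" above:
    // only an entry's first use reaches the RelocInfo writer.
    if (write_reloc_info) std::cout << "reloc for " << data << "\n";
  }
  return 0;  // prints two lines for three recorded uses
}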
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -806,17 +806,11 @@ class MemOperand {
 
 class ConstPool {
  public:
-  explicit ConstPool(Assembler* assm)
-      : assm_(assm),
-        first_use_(-1),
-        shared_entries_count(0) {}
-  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
-  int EntryCount() const {
-    return shared_entries_count + static_cast<int>(unique_entries_.size());
-  }
-  bool IsEmpty() const {
-    return shared_entries_.empty() && unique_entries_.empty();
-  }
+  explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
+  // Returns true when we need to write RelocInfo and false when we do not.
+  bool RecordEntry(intptr_t data, RelocInfo::Mode mode);
+  int EntryCount() const { return static_cast<int>(entries_.size()); }
+  bool IsEmpty() const { return entries_.empty(); }
   // Distance in bytes between the current pc and the first instruction
   // using the pool. If there are no pending entries return kMaxInt.
   int DistanceToFirstUse();
@@ -840,16 +834,29 @@ class ConstPool {
   void EmitGuard();
   void EmitEntries();
 
+  typedef std::map<uint64_t, int> SharedEntryMap;
+  // Adds a shared entry to entries_, using 'entry_map' to determine whether we
+  // already track this entry. Returns true if this is the first time we add
+  // this entry, false otherwise.
+  bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset);
+
   Assembler* assm_;
   // Keep track of the first instruction requiring a constant pool entry
   // since the previous constant pool was emitted.
   int first_use_;
-  // values, pc offset(s) of entries which can be shared.
-  std::multimap<uint64_t, int> shared_entries_;
-  // Number of distinct literal in shared entries.
-  int shared_entries_count;
-  // values, pc offset of entries which cannot be shared.
-  std::vector<std::pair<uint64_t, int> > unique_entries_;
+
+  // Map of data to index in entries_ for shared entries.
+  SharedEntryMap shared_entries_;
+
+  // Map of address of handle to index in entries_. We need to keep track of
+  // code targets separately from other shared entries, as they can be
+  // relocated.
+  SharedEntryMap handle_to_index_map_;
+
+  // Values, pc offset(s) of entries. Use a vector to preserve the order of
+  // insertion, as the serializer expects code target RelocInfo to point to
+  // constant pool addresses in an ascending order.
+  std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
 };
@@ -1011,7 +1018,7 @@ class Assembler : public AssemblerBase {
 
   // Prevent contant pool emission until EndBlockConstPool is called.
   // Call to this function can be nested but must be followed by an equal
-  // number of call to EndBlockConstpool.
+  // number of calls to EndBlockConstpool.
   void StartBlockConstPool();
 
   // Resume constant pool emission. Need to be called as many time as
@@ -1026,7 +1033,7 @@ class Assembler : public AssemblerBase {
 
   // Prevent veneer pool emission until EndBlockVeneerPool is called.
   // Call to this function can be nested but must be followed by an equal
-  // number of call to EndBlockConstpool.
+  // number of calls to EndBlockConstpool.
   void StartBlockVeneerPool();
 
   // Resume constant pool emission. Need to be called as many time as
@@ -3187,6 +3194,34 @@ class Assembler : public AssemblerBase {
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
   };
 
+  // Class for blocking sharing of code targets in constant pool.
+  class BlockCodeTargetSharingScope {
+   public:
+    explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
+      Open(assem);
+    }
+    // This constructor does not initialize the scope. The user needs to
+    // explicitly call Open() before using it.
+    BlockCodeTargetSharingScope() : assem_(nullptr) {}
+    ~BlockCodeTargetSharingScope() { Close(); }
+    void Open(Assembler* assem) {
+      DCHECK_NULL(assem_);
+      DCHECK_NOT_NULL(assem);
+      assem_ = assem;
+      assem_->StartBlockCodeTargetSharing();
+    }
+
+   private:
+    void Close() {
+      if (assem_ != nullptr) {
+        assem_->EndBlockCodeTargetSharing();
+      }
+    }
+    Assembler* assem_;
+
+    DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
+  };
+
  protected:
   inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
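BlockCodeTargetSharingScope is a standard RAII guard, with the default constructor deferring Open() so the scope can be armed conditionally, as the wasm call sites further down in this diff do. A self-contained toy showing both forms (sketch, not part of the diff; ToyAssembler and ToyScope are stand-ins, not V8 classes):

#include <cassert>

// Stand-in carrying only the nesting counter, not the real v8 Assembler.
struct ToyAssembler {
  int code_target_sharing_blocked_nesting_ = 0;
  void StartBlockCodeTargetSharing() { ++code_target_sharing_blocked_nesting_; }
  void EndBlockCodeTargetSharing() { --code_target_sharing_blocked_nesting_; }
  bool IsCodeTargetSharingAllowed() const {
    return code_target_sharing_blocked_nesting_ == 0;
  }
};

// Same shape as BlockCodeTargetSharingScope above.
class ToyScope {
 public:
  ToyScope() = default;  // deferred: call Open() later, if at all
  explicit ToyScope(ToyAssembler* assem) { Open(assem); }
  ~ToyScope() {
    if (assem_ != nullptr) assem_->EndBlockCodeTargetSharing();
  }
  void Open(ToyAssembler* assem) {
    assem_ = assem;
    assem_->StartBlockCodeTargetSharing();
  }

 private:
  ToyAssembler* assem_ = nullptr;
};

int main() {
  ToyAssembler assm;
  {
    ToyScope scope;         // conditional form, as for wasm below
    bool is_wasm = true;    // stand-in for info()->IsWasm()
    if (is_wasm) scope.Open(&assm);
    assert(!assm.IsCodeTargetSharingAllowed());
  }                         // destructor re-enables sharing
  assert(assm.IsCodeTargetSharingAllowed());
  return 0;
}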
@@ -3272,6 +3307,16 @@ class Assembler : public AssemblerBase {
                        Label* label,
                        Instruction* label_veneer = NULL);
 
+  // Prevent sharing of code target constant pool entries until
+  // EndBlockCodeTargetSharing is called. Calls to this function can be nested
+  // but must be followed by an equal number of call to
+  // EndBlockCodeTargetSharing.
+  void StartBlockCodeTargetSharing() { ++code_target_sharing_blocked_nesting_; }
+
+  // Resume sharing of constant pool code target entries. Needs to be called
+  // as many times as StartBlockCodeTargetSharing to have an effect.
+  void EndBlockCodeTargetSharing() { --code_target_sharing_blocked_nesting_; }
+
  private:
   static uint32_t FPToImm8(double imm);
@@ -3453,6 +3498,12 @@ class Assembler : public AssemblerBase {
   // Emission of the veneer pools may be blocked in some code sequences.
   int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.
 
+  // Sharing of code target entries may be blocked in some code sequences.
+  int code_target_sharing_blocked_nesting_;
+  bool IsCodeTargetSharingAllowed() const {
+    return code_target_sharing_blocked_nesting_ == 0;
+  }
+
   // Relocation info generation
   // Each relocation is encoded as a variable size value
   static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -664,6 +664,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
   switch (arch_opcode) {
     case kArchCallCodeObject: {
+      // We must not share code targets for calls to builtins for wasm code, as
+      // they might need to be patched individually.
+      internal::Assembler::BlockCodeTargetSharingScope scope;
+      if (info()->IsWasm()) scope.Open(masm());
+
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
@@ -691,6 +696,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
+      // We must not share code targets for calls to builtins for wasm code, as
+      // they might need to be patched individually.
+      internal::Assembler::BlockCodeTargetSharingScope scope;
+      if (info()->IsWasm()) scope.Open(masm());
+
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -108,6 +108,10 @@ class JumpPatchSite BASE_EMBEDDED {
 // frames-arm.h for its layout.
 void FullCodeGenerator::Generate() {
   CompilationInfo* info = info_;
+  // Block sharing of code target entries. The interrupt checks must be
+  // possible to patch individually, and replacing code with a debug version
+  // relies on RelocInfo not being shared.
+  Assembler::BlockCodeTargetSharingScope block_code_target_sharing(masm_);
   profiling_counter_ = isolate()->factory()->NewCell(
       Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
   SetFunctionPosition(literal());