A64: Move veneer emission checking into the Assembler.

The previous heuristic could break: a significant amount of code could be
generated without ever checking whether veneers needed to be emitted.
Veneer emission is now checked in the Assembler, in much the same way as
constant pool emission.

BUG=v8:3177
LOG=N
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/181873002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19661 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
alexandre.rames@arm.com 2014-03-04 15:54:12 +00:00
parent 827adfe45a
commit 83c7b60293
9 changed files with 285 additions and 184 deletions


@@ -1178,7 +1178,10 @@ inline void Assembler::CheckBuffer() {
if (buffer_space() < kGap) {
GrowBuffer();
}
if (pc_offset() >= next_buffer_check_) {
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(true);
}
if (pc_offset() >= next_constant_pool_check_) {
CheckConstPool(false, true);
}
}
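For context: this check runs on every instruction emitted, so the steady state is two integer compares against precomputed offsets. A minimal standalone sketch of the pattern, with an illustrative Emit() helper and field layout that are not the actual V8 code:

    #include <climits>

    class AssemblerSketch {
     public:
      void Emit() {
        // ... write one instruction, advancing the pc offset ...
        pc_offset_ += 4;  // kInstructionSize on A64
        // Steady state: both "next check" offsets are ahead of the pc, so
        // neither branch is taken and emission stays on the fast path.
        if (pc_offset_ >= next_veneer_pool_check_) CheckVeneerPool(true);
        if (pc_offset_ >= next_constant_pool_check_) CheckConstPool(false, true);
      }

     private:
      void CheckVeneerPool(bool require_jump) { /* emit veneers or reschedule */ }
      void CheckConstPool(bool force_emit, bool require_jump) { /* likewise */ }

      int pc_offset_ = 0;
      int next_veneer_pool_check_ = INT_MAX;  // kMaxInt while no unresolved branches
      int next_constant_pool_check_ = 0;      // checked on the first emission
    };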


@@ -286,6 +286,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
unresolved_branches_(),
positions_recorder_(this) {
const_pool_blocked_nesting_ = 0;
veneer_pool_blocked_nesting_ = 0;
Reset();
}
@@ -293,6 +294,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
ASSERT(num_pending_reloc_info_ == 0);
ASSERT(const_pool_blocked_nesting_ == 0);
ASSERT(veneer_pool_blocked_nesting_ == 0);
}
@@ -300,13 +302,16 @@ void Assembler::Reset() {
#ifdef DEBUG
ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
ASSERT(const_pool_blocked_nesting_ == 0);
ASSERT(veneer_pool_blocked_nesting_ == 0);
ASSERT(unresolved_branches_.empty());
memset(buffer_, 0, pc_ - buffer_);
#endif
pc_ = buffer_;
reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
reinterpret_cast<byte*>(pc_));
num_pending_reloc_info_ = 0;
next_buffer_check_ = 0;
next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
no_const_pool_before_ = 0;
first_const_pool_use_ = -1;
ClearRecordedAstId();
@@ -534,6 +539,11 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
if (unresolved_branches_.empty()) {
ASSERT(next_veneer_pool_check_ == kMaxInt);
return;
}
// Branches to this label will be resolved when the label is bound below.
std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
it = unresolved_branches_.begin();
@@ -544,6 +554,12 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
unresolved_branches_.erase(it_tmp);
}
}
if (unresolved_branches_.empty()) {
next_veneer_pool_check_ = kMaxInt;
} else {
next_veneer_pool_check_ =
unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
}
}
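Once a label binds, its branches drop out and the check offset is recomputed from the earliest remaining limit. A hedged numeric illustration of that formula (next_check() is a made-up name; kVeneerDistanceCheckMargin works out to 2KB per the header changes below):

    // If the most constrained unresolved branch can reach at most offset
    // 100000, the next veneer check fires at 100000 - 2048 = 97952,
    // comfortably before the branch actually goes out of range.
    int next_check(int first_limit, int check_margin = 2 * 1024) {
      return first_limit - check_margin;
    }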
@@ -551,7 +567,7 @@ void Assembler::StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
// Prevent constant pool checks happening by setting the next check to
// the biggest possible offset.
next_buffer_check_ = kMaxInt;
next_constant_pool_check_ = kMaxInt;
}
}
@@ -560,13 +576,13 @@ void Assembler::EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_reloc_info_ == 0) ||
(pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
(pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
// still blocked
// * no_const_pool_before_ < next_buffer_check_ and the next emit will
// trigger a check.
next_buffer_check_ = no_const_pool_before_;
// * no_const_pool_before_ < next_constant_pool_check_ and the next emit
// will trigger a check.
next_constant_pool_check_ = no_const_pool_before_;
}
}
@@ -622,6 +638,20 @@ void Assembler::ConstantPoolGuard() {
}
void Assembler::StartBlockVeneerPool() {
++veneer_pool_blocked_nesting_;
}
void Assembler::EndBlockVeneerPool() {
if (--veneer_pool_blocked_nesting_ == 0) {
// Check the veneer pool hasn't been blocked for too long.
ASSERT(unresolved_branches_.empty() ||
(pc_offset() < unresolved_branches_first_limit()));
}
}
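The veneer blocking counter nests exactly like the constant pool one. A self-contained sketch of the semantics (sketch types, not V8 code):

    #include <cassert>

    // Emission is allowed only at nesting depth zero; the outermost End()
    // re-enables it (the real EndBlockVeneerPool additionally asserts that
    // no branch went out of range while emission was blocked).
    struct VeneerBlockSketch {
      int nesting = 0;
      void Start() { ++nesting; }
      void End() { assert(nesting > 0); --nesting; }
      bool blocked() const { return nesting > 0; }
    };

    int main() {
      VeneerBlockSketch v;
      v.Start(); v.Start();  // nested blocking
      v.End();               // still blocked at depth 1
      assert(v.blocked());
      v.End();               // depth 0: veneer emission is possible again
      assert(!v.blocked());
    }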
void Assembler::br(const Register& xn) {
positions_recorder()->WriteRecordedPositions();
ASSERT(xn.Is64Bits());
@@ -1870,8 +1900,8 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
Serializer::TooLateToEnableNow();
#endif
// The arguments to the debug marker need to be contiguous in memory, so
// make sure we don't try to emit a literal pool.
BlockConstPoolScope scope(this);
// make sure we don't try to emit pools.
BlockPoolsScope scope(this);
Label start;
bind(&start);
@@ -2445,14 +2475,14 @@ void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstructionSize;
if (no_const_pool_before_ < pc_limit) {
// If there are some pending entries, the constant pool cannot be blocked
// further than first_const_pool_use_ + kMaxDistToPool
// further than first_const_pool_use_ + kMaxDistToConstPool
ASSERT((num_pending_reloc_info_ == 0) ||
(pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
(pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
no_const_pool_before_ = pc_limit;
}
if (next_buffer_check_ < no_const_pool_before_) {
next_buffer_check_ = no_const_pool_before_;
if (next_constant_pool_check_ < no_const_pool_before_) {
next_constant_pool_check_ = no_const_pool_before_;
}
}
@@ -2470,42 +2500,47 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// There is nothing to do if there are no pending constant pool entries.
if (num_pending_reloc_info_ == 0) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
return;
}
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance to the first instruction accessing the constant pool is
// kAvgDistToPool or more.
// kAvgDistToConstPool or more.
// * no jump is required and the distance to the first instruction accessing
// the constant pool is at least kMaxDistToPool / 2.
// the constant pool is at least kMaxDistToConstPool / 2.
ASSERT(first_const_pool_use_ >= 0);
int dist = pc_offset() - first_const_pool_use_;
if (!force_emit && dist < kAvgDistToPool &&
(require_jump || (dist < (kMaxDistToPool / 2)))) {
if (!force_emit && dist < kAvgDistToConstPool &&
(require_jump || (dist < (kMaxDistToConstPool / 2)))) {
return;
}
int jump_instr = require_jump ? kInstructionSize : 0;
int size_pool_marker = kInstructionSize;
int size_pool_guard = kInstructionSize;
int pool_size = jump_instr + size_pool_marker + size_pool_guard +
num_pending_reloc_info_ * kPointerSize;
int needed_space = pool_size + kGap;
// Emit veneers for branches that would go out of range during emission of the
// constant pool.
CheckVeneerPool(require_jump, kVeneerDistanceMargin - pool_size);
Label size_check;
bind(&size_check);
// Check that the code buffer is large enough before emitting the constant
// pool (including the jump over the pool, the constant pool marker, the
// constant pool guard, and the gap to the relocation information).
int jump_instr = require_jump ? kInstructionSize : 0;
int size_pool_marker = kInstructionSize;
int size_pool_guard = kInstructionSize;
int pool_size = jump_instr + size_pool_marker + size_pool_guard +
num_pending_reloc_info_ * kPointerSize;
int needed_space = pool_size + kGap;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
{
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
// Block recursive calls to CheckConstPool and protect from veneer pools.
BlockPoolsScope block_pools(this);
RecordComment("[ Constant Pool");
RecordConstPool(pool_size);
@@ -2558,13 +2593,114 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
static_cast<unsigned>(pool_size));
}
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstructionSize;
return pc_offset() > max_reachable_pc - margin - protection_offset -
static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
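The predicate keeps enough headroom for the protecting branch and guard (two instructions) plus worst-case code for every veneer that may be emitted in the same pass. A runnable replica with sample numbers (the free function is ours, mirroring the member above):

    #include <cstdio>

    bool should_emit_veneer(int pc_offset, int max_reachable_pc, int margin,
                            int num_unresolved_branches) {
      const int kInstructionSize = 4;
      const int kMaxVeneerCodeSize = 1 * kInstructionSize;  // one branch per veneer
      int protection_offset = 2 * kInstructionSize;  // branch over pool + guard
      return pc_offset > max_reachable_pc - margin - protection_offset -
                             num_unresolved_branches * kMaxVeneerCodeSize;
    }

    int main() {
      // A tbz can reach +/-32KB. With the branch at offset 0 (limit 32768),
      // pc at 31600, the default 1KB margin and 10 outstanding branches:
      // 31600 > 32768 - 1024 - 8 - 40 = 31696 is false, so not yet.
      std::printf("%d\n", should_emit_veneer(31600, 32768, 1024, 10));
    }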
void Assembler::EmitVeneers(bool need_protection, int margin) {
BlockPoolsScope scope(this);
RecordComment("[ Veneers");
Label end;
if (need_protection) {
b(&end);
}
EmitVeneersGuard();
Label size_check;
std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
it = unresolved_branches_.begin();
while (it != unresolved_branches_.end()) {
if (ShouldEmitVeneer(it->first, margin)) {
Instruction* branch = InstructionAt(it->second.pc_offset_);
Label* label = it->second.label_;
#ifdef DEBUG
bind(&size_check);
#endif
// Patch the branch to point to the current position, and emit a branch
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
branch->SetImmPCOffsetTarget(veneer);
b(label);
#ifdef DEBUG
ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
static_cast<uint64_t>(kMaxVeneerCodeSize));
size_check.Unuse();
#endif
it_to_delete = it++;
unresolved_branches_.erase(it_to_delete);
} else {
++it;
}
}
if (unresolved_branches_.empty()) {
next_veneer_pool_check_ = kMaxInt;
} else {
next_veneer_pool_check_ =
unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
}
bind(&end);
RecordComment("]");
}
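The loop relies on the standard erase-while-iterating idiom for std::multimap: the post-increment hands erase() the old position after the live iterator has already moved on. Minimal standalone form of just that idiom:

    #include <map>

    void prune(std::multimap<int, int>* branches, int threshold) {
      auto it = branches->begin();
      while (it != branches->end()) {
        if (it->first < threshold) {
          auto dead = it++;   // advance first; 'dead' remains valid for erase
          branches->erase(dead);
        } else {
          ++it;
        }
      }
    }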
void Assembler::EmitVeneersGuard() {
if (emit_debug_code()) {
Unreachable();
}
}
void Assembler::CheckVeneerPool(bool require_jump,
int margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
ASSERT(next_veneer_pool_check_ == kMaxInt);
return;
}
ASSERT(pc_offset() < unresolved_branches_first_limit());
// Some short sequences of instructions must not be broken up by veneer pool
// emission; such sequences are protected by calls to BlockVeneerPoolFor and
// BlockVeneerPoolScope.
if (is_veneer_pool_blocked()) {
return;
}
if (!require_jump) {
// Prefer emitting veneers protected by an existing instruction.
margin *= kVeneerNoProtectionFactor;
}
if (ShouldEmitVeneers(margin)) {
EmitVeneers(require_jump, margin);
} else {
next_veneer_pool_check_ =
unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
}
}
void Assembler::RecordComment(const char* msg) {
if (FLAG_code_comments) {
CheckBuffer();


@@ -730,7 +730,7 @@ class Assembler : public AssemblerBase {
void bind(Label* label);
// RelocInfo and constant pool ----------------------------------------------
// RelocInfo and pools ------------------------------------------------------
// Record relocation information for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -841,6 +841,28 @@
void ConstantPoolMarker(uint32_t size);
void ConstantPoolGuard();
// Prevent veneer pool emission until EndBlockVeneerPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockVeneerPool.
void StartBlockVeneerPool();
// Resume veneer pool emission. Needs to be called as many times as
// StartBlockVeneerPool to have an effect.
void EndBlockVeneerPool();
bool is_veneer_pool_blocked() const {
return veneer_pool_blocked_nesting_ > 0;
}
// Block/resume emission of constant pools and veneer pools.
void StartBlockPools() {
StartBlockConstPool();
StartBlockVeneerPool();
}
void EndBlockPools() {
EndBlockConstPool();
EndBlockVeneerPool();
}
// Debugging ----------------------------------------------------------------
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
@@ -1718,6 +1740,44 @@
// Check if it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach the specified pc.
bool ShouldEmitVeneer(int max_reachable_pc,
int margin = kVeneerDistanceMargin);
bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
}
// The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
// Emits veneers for branches that are approaching their maximum range.
// If need_protection is true, the veneers are protected by a branch jumping
// over the code.
void EmitVeneers(bool need_protection, int margin = kVeneerDistanceMargin);
void EmitVeneersGuard();
// Checks whether veneers need to be emitted at this point.
void CheckVeneerPool(bool require_jump, int margin = kVeneerDistanceMargin);
class BlockPoolsScope {
public:
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockPools();
}
~BlockPoolsScope() {
assem_->EndBlockPools();
}
private:
Assembler* assem_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
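Typical use mirrors the call sites updated below (CallCodeGeneric, Call, the debug and printf paths): any sequence whose instruction layout must stay contiguous holds the scope for its duration. A hedged fragment, where RecordSafepoint() stands in for whatever must immediately follow the call:

    {
      Assembler::BlockPoolsScope scope(masm);  // blocks constant and veneer pools
      masm->Call(target);
      RecordSafepoint();  // hypothetical: must stay adjacent to the call
    }                     // scope destructor unblocks both pools here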
// Available for constrained code generation scopes. Prefer
// MacroAssembler::Mov() when possible.
inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
@@ -1903,8 +1963,8 @@
void GrowBuffer();
void CheckBuffer();
// Pc offset of the next buffer check.
int next_buffer_check_;
// Pc offset of the next constant pool check.
int next_constant_pool_check_;
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@@ -1920,15 +1980,16 @@
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
static const int kCheckPoolIntervalInst = 128;
static const int kCheckPoolInterval =
kCheckPoolIntervalInst * kInstructionSize;
static const int kCheckConstPoolIntervalInst = 128;
static const int kCheckConstPoolInterval =
kCheckConstPoolIntervalInst * kInstructionSize;
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
// and the accessed constant.
static const int kMaxDistToPool = 4 * KB;
static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstructionSize;
static const int kMaxDistToConstPool = 4 * KB;
static const int kMaxNumPendingRelocInfo =
kMaxDistToConstPool / kInstructionSize;
// Average distance between a constant pool and the first instruction
@@ -1936,7 +1997,8 @@
// pollution.
// In practice the distance will be smaller since constant pool emission is
// forced after function return and sometimes after unconditional branches.
static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
static const int kAvgDistToConstPool =
kMaxDistToConstPool - kCheckConstPoolInterval;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -1946,6 +2008,9 @@
// since the previous constant pool was emitted.
int first_const_pool_use_;
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -2013,6 +2078,25 @@
// pc_offset() for convenience.
std::multimap<int, FarBranchInfo> unresolved_branches_;
// We generate a veneer for a branch if we reach within this distance of the
// limit of the range.
static const int kVeneerDistanceMargin = 1 * KB;
// The factor of 2 is a finger in the air guess. With a default margin of
// 1KB, that leaves us an additional 256 instructions to avoid generating a
// protective branch.
static const int kVeneerNoProtectionFactor = 2;
static const int kVeneerDistanceCheckMargin =
kVeneerNoProtectionFactor * kVeneerDistanceMargin;
int unresolved_branches_first_limit() const {
ASSERT(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
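Because the multimap is keyed by each branch's maximum reachable pc, begin() always holds the most urgent deadline regardless of emission order. A standalone illustration:

    #include <cassert>
    #include <map>

    int main() {
      // Key: the branch's maximum reachable pc; value: the branch's offset.
      std::multimap<int, int> unresolved;
      unresolved.insert({60 + 1024 * 1024, 60});  // cbz at 60: +/-1MB range
      unresolved.insert({100 + 32 * 1024, 100});  // tbz at 100: +/-32KB range
      // Sorted by limit: the tbz comes first even though it was emitted later.
      assert(unresolved.begin()->first == 100 + 32 * 1024);
      return 0;
    }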
// This is similar to next_constant_pool_check_ and helps reduce the overhead
// of checking for veneer pools.
// It is kept at the closest unresolved branch limit minus the veneer
// distance check margin (or kMaxInt if there are no unresolved branches).
int next_veneer_pool_check_;
private:
// If a veneer is emitted for a branch instruction, that instruction must be
// removed from the associated label's link chain so that the assembler does
@@ -2021,14 +2105,6 @@
void DeleteUnresolvedBranchInfoForLabel(Label* label);
private:
// TODO(jbramley): VIXL uses next_literal_pool_check_ and
// literal_pool_monitor_ to determine when to consider emitting a literal
// pool. V8 doesn't use them, so they should either not be here at all, or
// should replace or be merged with next_buffer_check_ and
// const_pool_blocked_nesting_.
Instruction* next_literal_pool_check_;
unsigned literal_pool_monitor_;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;


@@ -1844,7 +1844,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// checking for constant pool emission, but we do not want to depend on
// that.
{
Assembler::BlockConstPoolScope block_const_pool(masm);
Assembler::BlockPoolsScope block_pools(masm);
__ bind(&handler_entry);
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
@@ -4948,7 +4948,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
// TODO(all): This needs to be reliably consistent with
// kReturnAddressDistanceFromFunctionStart in ::Generate.
Assembler::BlockConstPoolScope no_const_pools(masm);
Assembler::BlockPoolsScope no_pools(masm);
ProfileEntryHookStub stub;
__ Push(lr);
__ CallStub(&stub);


@@ -93,7 +93,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
Assembler::BlockConstPoolScope scope(masm_);
Assembler::BlockPoolsScope scope(masm_);
InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
#ifdef DEBUG
info_emitted_ = true;
@@ -350,7 +350,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
ASSERT(jssp.Is(__ StackPointer()));
Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting back edge code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Assembler::BlockPoolsScope block_const_pool(masm_);
Label ok;
ASSERT(back_edge_target->is_bound());
@@ -2006,7 +2006,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Bind(&stub_call);
BinaryOpICStub stub(op, mode);
{
Assembler::BlockConstPoolScope scope(masm_);
Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
@@ -2092,7 +2092,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockConstPoolScope scope(masm_);
Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
@@ -4116,7 +4116,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
{
Assembler::BlockConstPoolScope scope(masm_);
Assembler::BlockPoolsScope scope(masm_);
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();


@@ -414,7 +414,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
Assembler::BlockConstPoolScope scope(masm_);
Assembler::BlockPoolsScope scope(masm_);
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);


@@ -346,7 +346,7 @@ void MacroAssembler::Asr(const Register& rd,
void MacroAssembler::B(Label* label) {
b(label);
CheckVeneers(false);
CheckVeneerPool(false);
}
@@ -1014,7 +1014,7 @@ void MacroAssembler::Ret(const Register& xn) {
ASSERT(allow_macro_instructions_);
ASSERT(!xn.IsZero());
ret(xn);
CheckVeneers(false);
CheckVeneerPool(false);
}


@@ -558,92 +558,6 @@ void MacroAssembler::Store(const Register& rt,
}
bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstructionSize;
return pc_offset() > max_reachable_pc - margin - protection_offset -
static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
void MacroAssembler::EmitVeneers(bool need_protection) {
RecordComment("[ Veneers");
Label end;
if (need_protection) {
B(&end);
}
EmitVeneersGuard();
{
InstructionAccurateScope scope(this);
Label size_check;
std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
it = unresolved_branches_.begin();
while (it != unresolved_branches_.end()) {
if (ShouldEmitVeneer(it->first)) {
Instruction* branch = InstructionAt(it->second.pc_offset_);
Label* label = it->second.label_;
#ifdef DEBUG
__ bind(&size_check);
#endif
// Patch the branch to point to the current position, and emit a branch
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
branch->SetImmPCOffsetTarget(veneer);
b(label);
#ifdef DEBUG
ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
static_cast<uint64_t>(kMaxVeneerCodeSize));
size_check.Unuse();
#endif
it_to_delete = it++;
unresolved_branches_.erase(it_to_delete);
} else {
++it;
}
}
}
Bind(&end);
RecordComment("]");
}
void MacroAssembler::EmitVeneersGuard() {
if (emit_debug_code()) {
Unreachable();
}
}
void MacroAssembler::CheckVeneers(bool need_protection) {
if (unresolved_branches_.empty()) {
return;
}
CHECK(pc_offset() < unresolved_branches_first_limit());
int margin = kVeneerDistanceMargin;
if (!need_protection) {
// Prefer emitting veneers protected by an existing instruction.
// The 4 divisor is a finger in the air guess. With a default margin of 2KB,
// that leaves 512B = 128 instructions of extra margin to avoid requiring a
// protective branch.
margin += margin / 4;
}
if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) {
EmitVeneers(need_protection);
}
}
bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
Label *label, ImmBranchType b_type) {
bool need_longer_range = false;
@@ -661,6 +575,10 @@ bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
unresolved_branches_.insert(
std::pair<int, FarBranchInfo>(max_reachable_pc,
FarBranchInfo(pc_offset(), label)));
// Also maintain the next pool check.
next_veneer_pool_check_ =
Min(next_veneer_pool_check_,
max_reachable_pc - kVeneerDistanceCheckMargin);
}
return need_longer_range;
}
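The Min is what makes mixed branch types safe: a short-range tbz registered after a long-range b.cond can still impose the nearer deadline. The update, as a hedged one-liner:

    #include <algorithm>

    // Registering a branch may only pull the next veneer check earlier,
    // never push it later.
    int update_next_check(int next_check, int max_reachable_pc, int check_margin) {
      return std::min(next_check, max_reachable_pc - check_margin);
    }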
@@ -696,11 +614,10 @@ void MacroAssembler::B(Label* label, Condition cond) {
if (need_extra_instructions) {
b(&done, InvertCondition(cond));
b(label);
B(label);
} else {
b(label, cond);
}
CheckVeneers(!need_extra_instructions);
bind(&done);
}
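The rewritten path also drops the explicit CheckVeneers call, since the inner B(label) now funnels through the assembler's own bookkeeping. The inverted-condition shape exists because of the range gap between branch encodings; a runnable check of that arithmetic:

    #include <cstdio>

    int main() {
      const int kInstructionSize = 4;
      // b.cond/cbz encode a 19-bit word offset, tbz a 14-bit one, and an
      // unconditional b a 26-bit one, so hopping over an unconditional
      // branch buys several orders of magnitude of reach.
      std::printf("b.cond: +/-%d KB, tbz: +/-%d KB, b: +/-%d KB\n",
                  (1 << 18) * kInstructionSize / 1024,
                  (1 << 13) * kInstructionSize / 1024,
                  (1 << 25) * kInstructionSize / 1024);
    }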
@@ -714,11 +631,10 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
if (need_extra_instructions) {
tbz(rt, bit_pos, &done);
b(label);
B(label);
} else {
tbnz(rt, bit_pos, label);
}
CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -732,11 +648,10 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
if (need_extra_instructions) {
tbnz(rt, bit_pos, &done);
b(label);
B(label);
} else {
tbz(rt, bit_pos, label);
}
CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -750,11 +665,10 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) {
if (need_extra_instructions) {
cbz(rt, &done);
b(label);
B(label);
} else {
cbnz(rt, label);
}
CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -768,11 +682,10 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) {
if (need_extra_instructions) {
cbnz(rt, &done);
b(label);
B(label);
} else {
cbz(rt, label);
}
CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -2009,7 +1922,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Register target) {
BlockConstPoolScope scope(this);
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
@@ -2024,7 +1937,7 @@ void MacroAssembler::Call(Register target) {
void MacroAssembler::Call(Label* target) {
BlockConstPoolScope scope(this);
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
@@ -2041,7 +1954,7 @@ void MacroAssembler::Call(Label* target) {
// MacroAssembler::CallSize is sensitive to changes in this function, as it
// needs to know how many instructions are used to branch to the target.
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockConstPoolScope scope(this);
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
@@ -4679,7 +4592,7 @@ void MacroAssembler::Abort(BailoutReason reason) {
// Emit the message string directly in the instruction stream.
{
BlockConstPoolScope scope(this);
BlockPoolsScope scope(this);
Bind(&msg_address);
EmitStringData(GetBailoutReason(reason));
}
@@ -4860,7 +4773,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Adr(x0, &format_address);
// Emit the format string directly in the instruction stream.
{ BlockConstPoolScope scope(this);
{ BlockPoolsScope scope(this);
Label after_data;
B(&after_data);
Bind(&format_address);
@@ -5025,7 +4938,7 @@ bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
const Label* smi_check) {
Assembler::BlockConstPoolScope scope(masm);
Assembler::BlockPoolsScope scope(masm);
if (reg.IsValid()) {
ASSERT(smi_check->is_bound());
ASSERT(reg.Is64Bits());


@@ -2169,24 +2169,6 @@ class MacroAssembler : public Assembler {
// (!), the mechanism can be extended to generate special veneers for really
// far targets.
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach the specified pc.
bool ShouldEmitVeneer(int max_reachable_pc,
int margin = kVeneerDistanceMargin);
// The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
// Emits veneers for branches that are approaching their maximum range.
// If need_protection is true, the veneers are protected by a branch jumping
// over the code.
void EmitVeneers(bool need_protection);
void EmitVeneersGuard();
// Checks whether veneers need to be emitted at this point.
void CheckVeneers(bool need_protection);
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to later
// be able to emit a veneer for this branch if necessary.
@@ -2197,15 +2179,6 @@ class MacroAssembler : public Assembler {
// This function also checks whether veneers need to be emitted.
bool NeedExtraInstructionsOrRegisterBranch(Label *label,
ImmBranchType branch_type);
private:
// We generate a veneer for a branch if we reach within this distance of the
// limit of the range.
static const int kVeneerDistanceMargin = 4 * KB;
int unresolved_branches_first_limit() const {
ASSERT(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
};