ARM: slightly reduce deopt tables size

BUG=
R=ulan@chromium.org, bmeurer@chromium.org

Review URL: https://codereview.chromium.org/349243002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22305 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: m.m.capewell@googlemail.com
Date:   2014-07-09 14:08:13 +00:00
Parent: c1f6a0306e
Commit: 577984b3d6

2 changed files with 66 additions and 35 deletions
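
The patch shrinks each ARM deoptimization-table entry from 12 to 8 bytes and
rewrites Crankshaft's deopt jump table to load small per-entry offsets from a
common base address. A minimal sketch of how a fixed entry size ties a bailout
id to its entry address; the names (kTableEntrySize, EntryAddress,
DeoptimizationId) are illustrative, not the V8 API, and only the arithmetic
comes from the patch:

#include <cstdint>

// Sketch only: kTableEntrySize mirrors Deoptimizer::table_entry_size_.
const int kTableEntrySize = 8;  // was 12 before this patch

uint8_t* EntryAddress(uint8_t* table_base, int id) {
  // Every entry occupies exactly kTableEntrySize bytes, so the bailout id
  // doubles as an index into the generated entry table.
  return table_base + id * kTableEntrySize;
}

int DeoptimizationId(uint8_t* table_base, uint8_t* entry) {
  // Inverse mapping, as used when emitting the ";;; jump table entry"
  // comments in the second file below.
  return static_cast<int>((entry - table_base) / kTableEntrySize);
}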

src/arm/deoptimizer-arm.cc

@@ -12,7 +12,7 @@
 namespace v8 {
 namespace internal {
 
-const int Deoptimizer::table_entry_size_ = 12;
+const int Deoptimizer::table_entry_size_ = 8;
 
 
 int Deoptimizer::patch_size() {
@@ -328,11 +328,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   for (int i = 0; i < count(); i++) {
     int start = masm()->pc_offset();
     USE(start);
     __ mov(ip, Operand(i));
-    __ push(ip);
     __ b(&done);
     ASSERT(masm()->pc_offset() - start == table_entry_size_);
   }
   __ bind(&done);
+  __ push(ip);
 }
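
The entry size can drop from 12 to 8 bytes because the old per-entry sequence
was three 4-byte ARM instructions (mov, push, b) while the new one is two,
with a single push of ip shared by all entries after the done label. A quick
standalone check of that arithmetic, assuming the ARM Assembler::kInstrSize
of 4:

// Sketch: the constants restate the patch's arithmetic, nothing more.
const int kInstrSize = 4;                  // Assembler::kInstrSize on ARM
const int kOldEntrySize = 3 * kInstrSize;  // mov ip, #i; push ip; b done
const int kNewEntrySize = 2 * kInstrSize;  // mov ip, #i; b done
static_assert(kOldEntrySize == 12, "old table_entry_size_");
static_assert(kNewEntrySize == 8, "new table_entry_size_");
// A table of N entries shrinks by 4 * N bytes; ip still holds the entry
// index when the shared push executes at the done label.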

src/arm/lithium-codegen-arm.cc
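
The hunk below applies the same base-plus-offset idea to the Crankshaft jump
table: since all second-level entries share one bailout type and sit
contiguously, each jump-table slot only needs a small immediate offset, and
the absolute entry address is formed once at the shared call_deopt_entry tail
(followed by blx) rather than loaded from the constant pool once per entry. A
minimal simulation of that addressing scheme, with assumed values (base,
entry count) and no V8 APIs:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t base = 0x40000000;  // stands in for deopt_jump_table_[0].address
  const int kEntrySize = 8;           // the new table_entry_size_
  const int kNumEntries = 4;          // arbitrary

  for (int i = 0; i < kNumEntries; i++) {
    uintptr_t entry = base + static_cast<uintptr_t>(i) * kEntrySize;
    // Per slot: a small immediate, cheap to materialize with a single mov.
    uintptr_t entry_offset = entry - base;
    // Shared tail (call_deopt_entry): add the base once, then call (blx).
    uintptr_t target = base + entry_offset;
    assert(target == entry);
  }
  return 0;
}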

@@ -324,48 +324,79 @@ bool LCodeGen::GenerateDeoptJumpTable() {
   }
 
   if (deopt_jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
     Comment(";;; -------------------- Jump table --------------------");
-  }
-  Label table_start;
-  __ bind(&table_start);
-  Label needs_frame;
-  for (int i = 0; i < deopt_jump_table_.length(); i++) {
+    Address base = deopt_jump_table_[0].address;
+
+    Register entry_offset = scratch0();
+
+    int length = deopt_jump_table_.length();
+    for (int i = 0; i < length; i++) {
       __ bind(&deopt_jump_table_[i].label);
-    Address entry = deopt_jump_table_[i].address;
+
       Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
+      ASSERT(type == deopt_jump_table_[0].bailout_type);
+      Address entry = deopt_jump_table_[i].address;
       int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    if (id == Deoptimizer::kNotDeoptimizationEntry) {
-      Comment(";;; jump table entry %d.", i);
-    } else {
+      ASSERT(id != Deoptimizer::kNotDeoptimizationEntry);
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    }
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
+      __ mov(entry_offset, Operand(entry - base));
+
       if (deopt_jump_table_[i].needs_frame) {
         ASSERT(!info()->saves_caller_doubles());
-      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
         if (needs_frame.is_bound()) {
           __ b(&needs_frame);
         } else {
           __ bind(&needs_frame);
+          Comment(";;; call deopt with frame");
           __ PushFixedFrame();
           // This variant of deopt can only be used with stubs. Since we don't
           // have a function pointer to install in the stack frame that we're
           // building, install a special marker there instead.
           ASSERT(info()->IsStub());
-        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ push(scratch0());
-        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-        __ mov(lr, Operand(pc), LeaveCC, al);
-        __ mov(pc, ip);
+          __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(ip);
+          __ add(fp, sp,
+                 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+          __ bind(&call_deopt_entry);
+          // Add the base address to the offset previously loaded in
+          // entry_offset.
+          __ add(entry_offset, entry_offset,
+                 Operand(ExternalReference::ForDeoptEntry(base)));
+          __ blx(entry_offset);
         }
+
+        masm()->CheckConstPool(false, false);
       } else {
+        // The last entry can fall through into `call_deopt_entry`, avoiding a
+        // branch.
+        bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
+
+        if (need_branch) __ b(&call_deopt_entry);
+
+        masm()->CheckConstPool(false, !need_branch);
+      }
+    }
+
+    if (!call_deopt_entry.is_bound()) {
+      Comment(";;; call deopt");
+      __ bind(&call_deopt_entry);
+
       if (info()->saves_caller_doubles()) {
         ASSERT(info()->IsStub());
         RestoreCallerDoubles();
       }
-      __ mov(lr, Operand(pc), LeaveCC, al);
-      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+
+      // Add the base address to the offset previously loaded in entry_offset.
+      __ add(entry_offset, entry_offset,
+             Operand(ExternalReference::ForDeoptEntry(base)));
+      __ blx(entry_offset);
     }
-    masm()->CheckConstPool(false, false);
   }
 
   // Force constant pool emission at the end of the deopt jump table to make