MIPS: crankshaft implementation

BUG=
TEST=
Review URL: http://codereview.chromium.org/7934002
Patch from Paul Lind <plind44@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9828 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

parent: b5b3345799
commit: f077a41b12
@@ -172,6 +172,9 @@ SOURCES = {
    mips/frames-mips.cc
    mips/full-codegen-mips.cc
    mips/ic-mips.cc
    mips/lithium-codegen-mips.cc
    mips/lithium-gap-resolver-mips.cc
    mips/lithium-mips.cc
    mips/macro-assembler-mips.cc
    mips/regexp-macro-assembler-mips.cc
    mips/stub-cache-mips.cc
@@ -369,7 +369,20 @@ class FrameDescription {
  }

  double GetDoubleFrameSlot(unsigned offset) {
    return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
    intptr_t* ptr = GetFrameSlotPointer(offset);
#if V8_TARGET_ARCH_MIPS
    // Prevent gcc from using load-double (mips ldc1) on (possibly)
    // non-64-bit aligned double. Uses two lwc1 instructions.
    union conversion {
      double d;
      uint32_t u[2];
    } c;
    c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
    c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
    return c.d;
#else
    return *reinterpret_cast<double*>(ptr);
#endif
  }

  void SetFrameSlot(unsigned offset, intptr_t value) {
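The V8_TARGET_ARCH_MIPS branch above exists because the frame slot is only guaranteed to be word aligned, while the MIPS ldc1 instruction traps on a double that is not 8-byte aligned. Below is a minimal standalone sketch of the same trick outside V8 (the function name and plain-pointer interface are illustrative assumptions): going through two 32-bit words keeps the compiler from emitting a single 8-byte load. Because the words are copied in memory order, the sketch behaves the same on little- and big-endian MIPS.

#include <stdint.h>

// Standalone sketch, not V8 code: read a double from a slot that may only be
// 4-byte aligned. Splitting the read into two 32-bit loads stops the compiler
// from emitting ldc1, which would trap on a misaligned address.
double ReadPossiblyUnalignedDouble(const uint32_t* slot) {
  union {
    double d;
    uint32_t u[2];
  } c;
  c.u[0] = slot[0];
  c.u[1] = slot[1];
  return c.d;
}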
@@ -114,11 +114,7 @@ DEFINE_bool(clever_optimizations,
            "Optimize object size, Array shift, DOM strings and string +")

// Flags for Crankshaft.
#ifdef V8_TARGET_ARCH_MIPS
DEFINE_bool(crankshaft, false, "use crankshaft")
#else
DEFINE_bool(crankshaft, true, "use crankshaft")
#endif
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
@@ -326,7 +322,8 @@ DEFINE_bool(strict_mode, true, "allow strict mode directives")

// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
DEFINE_bool(check_icache, false,
            "Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
DEFINE_int(sim_stack_alignment, 8,
           "Stack alignment in bytes in simulator (4 or 8, 8 is default)")
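Per the first hunk header above (11 old lines become 7), the MIPS-only #ifdef that defaulted crankshaft to false is removed, leaving the common DEFINE_bool(crankshaft, true, ...) default for all architectures. The snippet below is a minimal sketch of how such a macro-driven, platform-conditional flag default works; it is an illustration only, not V8's actual flag machinery (name registration, --help output, and command-line parsing are all omitted).

#include <cstdio>

// Illustration only: a DEFINE_bool-style macro expands to a global with a
// default value; platform #ifdefs can select a different default.
#define DEFINE_BOOL_FLAG(name, default_value, comment) \
  bool FLAG_##name = (default_value);

#if defined(__mips__)
DEFINE_BOOL_FLAG(crankshaft, false, "use crankshaft")
#else
DEFINE_BOOL_FLAG(crankshaft, true, "use crankshaft")
#endif

int main() {
  std::printf("crankshaft default: %s\n", FLAG_crankshaft ? "on" : "off");
  return 0;
}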
@@ -1176,24 +1176,93 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
// These functions are called from C++ but cannot be used in live code.
|
||||
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
|
||||
Deoptimizer::BailoutType type) {
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
// Pass the function and deoptimization type to the runtime system.
|
||||
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
|
||||
__ push(a0);
|
||||
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
|
||||
}
|
||||
|
||||
// Get the full codegen state from the stack and untag it -> t2.
|
||||
__ lw(t2, MemOperand(sp, 0 * kPointerSize));
|
||||
__ SmiUntag(t2);
|
||||
// Switch on the state.
|
||||
Label with_tos_register, unknown_state;
|
||||
__ Branch(&with_tos_register,
|
||||
ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
|
||||
__ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
|
||||
__ Ret();
|
||||
|
||||
__ bind(&with_tos_register);
|
||||
__ lw(v0, MemOperand(sp, 1 * kPointerSize));
|
||||
__ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
|
||||
|
||||
__ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
|
||||
__ Ret();
|
||||
|
||||
__ bind(&unknown_state);
|
||||
__ stop("no cases left");
|
||||
}
|
||||
|
||||
|
||||
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
|
||||
__ Abort("Call to unimplemented function in builtins-mips.cc");
|
||||
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
|
||||
}
|
||||
|
||||
|
||||
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
|
||||
__ Abort("Call to unimplemented function in builtins-mips.cc");
|
||||
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
|
||||
}
|
||||
|
||||
|
||||
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
|
||||
__ Abort("Call to unimplemented function in builtins-mips.cc");
|
||||
// For now, we are relying on the fact that Runtime::NotifyOSR
|
||||
// doesn't do any garbage collection which allows us to save/restore
|
||||
// the registers without worrying about which of them contain
|
||||
// pointers. This seems a bit fragile.
|
||||
RegList saved_regs =
|
||||
(kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
|
||||
__ MultiPush(saved_regs);
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ CallRuntime(Runtime::kNotifyOSR, 0);
|
||||
}
|
||||
__ MultiPop(saved_regs);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
|
||||
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
|
||||
__ Abort("Call to unimplemented function in builtins-mips.cc");
|
||||
CpuFeatures::TryForceFeatureScope scope(VFP3);
|
||||
if (!CpuFeatures::IsSupported(FPU)) {
|
||||
__ Abort("Unreachable code: Cannot optimize without FPU support.");
|
||||
return;
|
||||
}
|
||||
|
||||
// Lookup the function in the JavaScript frame and push it as an
|
||||
// argument to the on-stack replacement function.
|
||||
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ push(a0);
|
||||
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
|
||||
}
|
||||
|
||||
// If the result was -1 it means that we couldn't optimize the
|
||||
// function. Just return and continue in the unoptimized version.
|
||||
__ Ret(eq, v0, Operand(Smi::FromInt(-1)));
|
||||
|
||||
// Untag the AST id and push it on the stack.
|
||||
__ SmiUntag(v0);
|
||||
__ push(v0);
|
||||
|
||||
// Generate the code for doing the frame-to-frame translation using
|
||||
// the deoptimizer infrastructure.
|
||||
Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
|
||||
generator.Generate();
|
||||
}
|
||||
|
||||
|
||||
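The builtins above shuttle bailout state and AST ids around as Smis (Smi::FromInt before pushing, SmiUntag after popping). As a reminder of the representation, here is a minimal sketch of the 32-bit Smi encoding; it is an illustration only, not V8's Smi class.

#include <stdint.h>
#include <assert.h>

// Illustration of the 32-bit Smi encoding: a 31-bit payload shifted left by
// one, leaving the low tag bit 0 so Smis can be told apart from tagged
// heap-object pointers (whose low bit is 1).
static const int kSmiTagSize = 1;
static const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

intptr_t SmiFromInt(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}

int32_t SmiUntag(intptr_t smi) {
  assert((smi & kSmiTagMask) == 0);
  return static_cast<int32_t>(smi >> kSmiTagSize);
}

int main() {
  return SmiUntag(SmiFromInt(42)) == 42 ? 0 : 1;
}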
@@ -1395,8 +1464,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
  const int kFunctionOffset = 4 * kPointerSize;

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    FrameScope frame_scope(masm, StackFrame::INTERNAL);
    __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
    __ push(a0);
    __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
@@ -1530,8 +1598,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
    __ InvokeFunction(a1, actual, CALL_FUNCTION,
                      NullCallWrapper(), CALL_AS_METHOD);

    scope.GenerateLeaveFrame();
    frame_scope.GenerateLeaveFrame();
    __ Ret(USE_DELAY_SLOT);
    __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
@@ -32,24 +32,112 @@
|
||||
#include "full-codegen.h"
|
||||
#include "safepoint-table.h"
|
||||
|
||||
// Note: this file was taken from the X64 version. ARM has a partially working
|
||||
// lithium implementation, but for now it is not ported to mips.
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
|
||||
const int Deoptimizer::table_entry_size_ = 10;
|
||||
const int Deoptimizer::table_entry_size_ = 32;
|
||||
|
||||
|
||||
int Deoptimizer::patch_size() {
|
||||
const int kCallInstructionSizeInWords = 3;
|
||||
const int kCallInstructionSizeInWords = 4;
|
||||
return kCallInstructionSizeInWords * Assembler::kInstrSize;
|
||||
}
|
||||
|
||||
|
||||
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
|
||||
// Nothing to do. No new relocation information is written for lazy
|
||||
// deoptimization on MIPS.
|
||||
}
|
||||
|
||||
|
||||
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
|
||||
UNIMPLEMENTED();
|
||||
HandleScope scope;
|
||||
AssertNoAllocation no_allocation;
|
||||
|
||||
if (!function->IsOptimized()) return;
|
||||
|
||||
// Get the optimized code.
|
||||
Code* code = function->code();
|
||||
|
||||
// Invalidate the relocation information, as it will become invalid by the
|
||||
// code patching below, and is not needed any more.
|
||||
code->InvalidateRelocation();
|
||||
|
||||
// For each return after a safepoint insert an absolute call to the
|
||||
// corresponding deoptimization entry.
|
||||
unsigned last_pc_offset = 0;
|
||||
SafepointTable table(function->code());
|
||||
for (unsigned i = 0; i < table.length(); i++) {
|
||||
unsigned pc_offset = table.GetPcOffset(i);
|
||||
SafepointEntry safepoint_entry = table.GetEntry(i);
|
||||
int deoptimization_index = safepoint_entry.deoptimization_index();
|
||||
int gap_code_size = safepoint_entry.gap_code_size();
|
||||
// Check that we did not shoot past next safepoint.
|
||||
CHECK(pc_offset >= last_pc_offset);
|
||||
#ifdef DEBUG
|
||||
// Destroy the code which is not supposed to be run again.
|
||||
int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
|
||||
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
|
||||
instructions);
|
||||
for (int x = 0; x < instructions; x++) {
|
||||
destroyer.masm()->break_(0);
|
||||
}
|
||||
#endif
|
||||
last_pc_offset = pc_offset;
|
||||
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
|
||||
Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
|
||||
deoptimization_index, Deoptimizer::LAZY);
|
||||
last_pc_offset += gap_code_size;
|
||||
int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
|
||||
RelocInfo::NONE);
|
||||
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
|
||||
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
|
||||
ASSERT(call_size_in_bytes <= patch_size());
|
||||
CodePatcher patcher(code->instruction_start() + last_pc_offset,
|
||||
call_size_in_words);
|
||||
patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
|
||||
last_pc_offset += call_size_in_bytes;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
// Destroy the code which is not supposed to be run again.
|
||||
int instructions =
|
||||
(code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
|
||||
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
|
||||
instructions);
|
||||
for (int x = 0; x < instructions; x++) {
|
||||
destroyer.masm()->break_(0);
|
||||
}
|
||||
#endif
|
||||
|
||||
Isolate* isolate = code->GetIsolate();
|
||||
|
||||
// Add the deoptimizing code to the list.
|
||||
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
|
||||
DeoptimizerData* data = isolate->deoptimizer_data();
|
||||
node->set_next(data->deoptimizing_code_list_);
|
||||
data->deoptimizing_code_list_ = node;
|
||||
|
||||
// We might be in the middle of incremental marking with compaction.
|
||||
// Tell collector to treat this code object in a special way and
|
||||
// ignore all slots that might have been recorded on it.
|
||||
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
|
||||
|
||||
// Set the code for the function to non-optimized version.
|
||||
function->ReplaceCode(function->shared()->code());
|
||||
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF("[forced deoptimization: ");
|
||||
function->PrintName();
|
||||
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
|
||||
#ifdef DEBUG
|
||||
if (FLAG_print_code) {
|
||||
code->PrintLn();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -57,7 +145,42 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
                                        Address pc_after,
                                        Code* check_code,
                                        Code* replacement_code) {
  UNIMPLEMENTED();
  const int kInstrSize = Assembler::kInstrSize;
  // This structure comes from FullCodeGenerator::EmitStackCheck.
  // The call of the stack guard check has the following form:
  // sltu at, sp, t0
  // beq at, zero_reg, ok
  // lui t9, <stack guard address> upper
  // ori t9, <stack guard address> lower
  // jalr t9
  // nop
  // ----- pc_after points here

  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));

  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
  patcher.masm()->addiu(at, zero_reg, 1);

  // Replace the stack check address in the load-immediate (lui/ori pair)
  // with the entry address of the replacement code.
  ASSERT(reinterpret_cast<uint32_t>(
      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
      reinterpret_cast<uint32_t>(check_code->entry()));
  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
                                   replacement_code->entry());

  // We patched the code to the following form:
  // addiu at, zero_reg, 1
  // beq at, zero_reg, ok  ;; Not changed
  // lui t9, <on-stack replacement address> upper
  // ori t9, <on-stack replacement address> lower
  // jalr t9  ;; Not changed
  // nop  ;; Not changed
  // ----- pc_after points here

  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
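The fixed negative offsets from pc_after used above fall directly out of the six-instruction sequence emitted by FullCodeGenerator::EmitStackCheck. A standalone sketch of that arithmetic, assuming the 4-byte MIPS32 instruction width (the struct and function are illustrative, not V8 API):

#include <stdint.h>
#include <stdio.h>

// Illustrative sketch: the three interesting spots in the stack-check
// sequence, counted backwards from pc_after.
static const int kInstrSize = 4;  // MIPS32 instructions are 4 bytes wide.

struct StackCheckSite {
  uintptr_t condition;  // pc_after - 6 * kInstrSize: sltu, or addiu once patched.
  uintptr_t branch;     // pc_after - 5 * kInstrSize: beq, never rewritten.
  uintptr_t target;     // pc_after - 4 * kInstrSize: lui/ori pair holding the
                        //   32-bit call target (stack guard or OSR entry).
};

StackCheckSite LocateStackCheckSite(uintptr_t pc_after) {
  StackCheckSite site;
  site.condition = pc_after - 6 * kInstrSize;
  site.branch = pc_after - 5 * kInstrSize;
  site.target = pc_after - 4 * kInstrSize;
  return site;
}

int main() {
  StackCheckSite site = LocateStackCheckSite(0x1000);  // Example pc_after.
  printf("condition at %#lx, branch at %#lx, target at %#lx\n",
         static_cast<unsigned long>(site.condition),
         static_cast<unsigned long>(site.branch),
         static_cast<unsigned long>(site.target));
  return 0;
}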
@@ -65,34 +188,618 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
                                         Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
  UNIMPLEMENTED();
  // Exact opposite of the function above.
  const int kInstrSize = Assembler::kInstrSize;
  ASSERT(Assembler::IsAddImmediate(
      Assembler::instr_at(pc_after - 6 * kInstrSize)));
  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));

  // Restore the sltu instruction so beq can be taken again.
  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
  patcher.masm()->sltu(at, sp, t0);

  // Replace the on-stack replacement address in the load-immediate (lui/ori
  // pair) with the entry address of the normal stack-check code.
  ASSERT(reinterpret_cast<uint32_t>(
      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
      reinterpret_cast<uint32_t>(replacement_code->entry()));
  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
                                   check_code->entry());

  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 4 * kInstrSize, check_code);
}


static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
  ByteArray* translations = data->TranslationByteArray();
  int length = data->DeoptCount();
  for (int i = 0; i < length; i++) {
    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
      TranslationIterator it(translations, data->TranslationIndex(i)->value());
      int value = it.Next();
      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
      // Read the number of frames.
      value = it.Next();
      if (value == 1) return i;
    }
  }
  UNREACHABLE();
  return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() {
|
||||
UNIMPLEMENTED();
|
||||
DeoptimizationInputData* data = DeoptimizationInputData::cast(
|
||||
optimized_code_->deoptimization_data());
|
||||
unsigned ast_id = data->OsrAstId()->value();
|
||||
|
||||
int bailout_id = LookupBailoutId(data, ast_id);
|
||||
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
|
||||
ByteArray* translations = data->TranslationByteArray();
|
||||
|
||||
TranslationIterator iterator(translations, translation_index);
|
||||
Translation::Opcode opcode =
|
||||
static_cast<Translation::Opcode>(iterator.Next());
|
||||
ASSERT(Translation::BEGIN == opcode);
|
||||
USE(opcode);
|
||||
int count = iterator.Next();
|
||||
ASSERT(count == 1);
|
||||
USE(count);
|
||||
|
||||
opcode = static_cast<Translation::Opcode>(iterator.Next());
|
||||
USE(opcode);
|
||||
ASSERT(Translation::FRAME == opcode);
|
||||
unsigned node_id = iterator.Next();
|
||||
USE(node_id);
|
||||
ASSERT(node_id == ast_id);
|
||||
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
|
||||
USE(function);
|
||||
ASSERT(function == function_);
|
||||
unsigned height = iterator.Next();
|
||||
unsigned height_in_bytes = height * kPointerSize;
|
||||
USE(height_in_bytes);
|
||||
|
||||
unsigned fixed_size = ComputeFixedSize(function_);
|
||||
unsigned input_frame_size = input_->GetFrameSize();
|
||||
ASSERT(fixed_size + height_in_bytes == input_frame_size);
|
||||
|
||||
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
|
||||
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
|
||||
unsigned outgoing_size = outgoing_height * kPointerSize;
|
||||
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
|
||||
ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
|
||||
|
||||
if (FLAG_trace_osr) {
|
||||
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
|
||||
reinterpret_cast<intptr_t>(function_));
|
||||
function_->PrintName();
|
||||
PrintF(" => node=%u, frame=%d->%d]\n",
|
||||
ast_id,
|
||||
input_frame_size,
|
||||
output_frame_size);
|
||||
}
|
||||
|
||||
// There's only one output frame in the OSR case.
|
||||
output_count_ = 1;
|
||||
output_ = new FrameDescription*[1];
|
||||
output_[0] = new(output_frame_size) FrameDescription(
|
||||
output_frame_size, function_);
|
||||
#ifdef DEBUG
|
||||
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
|
||||
#endif
|
||||
|
||||
// Clear the incoming parameters in the optimized frame to avoid
|
||||
// confusing the garbage collector.
|
||||
unsigned output_offset = output_frame_size - kPointerSize;
|
||||
int parameter_count = function_->shared()->formal_parameter_count() + 1;
|
||||
for (int i = 0; i < parameter_count; ++i) {
|
||||
output_[0]->SetFrameSlot(output_offset, 0);
|
||||
output_offset -= kPointerSize;
|
||||
}
|
||||
|
||||
// Translate the incoming parameters. This may overwrite some of the
|
||||
// incoming argument slots we've just cleared.
|
||||
int input_offset = input_frame_size - kPointerSize;
|
||||
bool ok = true;
|
||||
int limit = input_offset - (parameter_count * kPointerSize);
|
||||
while (ok && input_offset > limit) {
|
||||
ok = DoOsrTranslateCommand(&iterator, &input_offset);
|
||||
}
|
||||
|
||||
// There are no translation commands for the caller's pc and fp, the
|
||||
// context, and the function. Set them up explicitly.
|
||||
for (int i = StandardFrameConstants::kCallerPCOffset;
|
||||
ok && i >= StandardFrameConstants::kMarkerOffset;
|
||||
i -= kPointerSize) {
|
||||
uint32_t input_value = input_->GetFrameSlot(input_offset);
|
||||
if (FLAG_trace_osr) {
|
||||
const char* name = "UNKNOWN";
|
||||
switch (i) {
|
||||
case StandardFrameConstants::kCallerPCOffset:
|
||||
name = "caller's pc";
|
||||
break;
|
||||
case StandardFrameConstants::kCallerFPOffset:
|
||||
name = "fp";
|
||||
break;
|
||||
case StandardFrameConstants::kContextOffset:
|
||||
name = "context";
|
||||
break;
|
||||
case StandardFrameConstants::kMarkerOffset:
|
||||
name = "function";
|
||||
break;
|
||||
}
|
||||
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
|
||||
output_offset,
|
||||
input_value,
|
||||
input_offset,
|
||||
name);
|
||||
}
|
||||
|
||||
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
|
||||
input_offset -= kPointerSize;
|
||||
output_offset -= kPointerSize;
|
||||
}
|
||||
|
||||
// Translate the rest of the frame.
|
||||
while (ok && input_offset >= 0) {
|
||||
ok = DoOsrTranslateCommand(&iterator, &input_offset);
|
||||
}
|
||||
|
||||
// If translation of any command failed, continue using the input frame.
|
||||
if (!ok) {
|
||||
delete output_[0];
|
||||
output_[0] = input_;
|
||||
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
|
||||
} else {
|
||||
// Setup the frame pointer and the context pointer.
|
||||
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
|
||||
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
|
||||
|
||||
unsigned pc_offset = data->OsrPcOffset()->value();
|
||||
uint32_t pc = reinterpret_cast<uint32_t>(
|
||||
optimized_code_->entry() + pc_offset);
|
||||
output_[0]->SetPc(pc);
|
||||
}
|
||||
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
|
||||
output_[0]->SetContinuation(
|
||||
reinterpret_cast<uint32_t>(continuation->entry()));
|
||||
|
||||
if (FLAG_trace_osr) {
|
||||
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
|
||||
ok ? "finished" : "aborted",
|
||||
reinterpret_cast<intptr_t>(function));
|
||||
function->PrintName();
|
||||
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// This code is very similar to ia32/arm code, but relies on register names
|
||||
// (fp, sp) and how the frame is laid out.
|
||||
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
|
||||
int frame_index) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
// Read the ast node id, function, and frame height for this output frame.
|
||||
Translation::Opcode opcode =
|
||||
static_cast<Translation::Opcode>(iterator->Next());
|
||||
USE(opcode);
|
||||
ASSERT(Translation::FRAME == opcode);
|
||||
int node_id = iterator->Next();
|
||||
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
|
||||
unsigned height = iterator->Next();
|
||||
unsigned height_in_bytes = height * kPointerSize;
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" translating ");
|
||||
function->PrintName();
|
||||
PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
|
||||
}
|
||||
|
||||
// The 'fixed' part of the frame consists of the incoming parameters and
|
||||
// the part described by JavaScriptFrameConstants.
|
||||
unsigned fixed_frame_size = ComputeFixedSize(function);
|
||||
unsigned input_frame_size = input_->GetFrameSize();
|
||||
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
|
||||
|
||||
// Allocate and store the output frame description.
|
||||
FrameDescription* output_frame =
|
||||
new(output_frame_size) FrameDescription(output_frame_size, function);
|
||||
#ifdef DEBUG
|
||||
output_frame->SetKind(Code::FUNCTION);
|
||||
#endif
|
||||
|
||||
bool is_bottommost = (0 == frame_index);
|
||||
bool is_topmost = (output_count_ - 1 == frame_index);
|
||||
ASSERT(frame_index >= 0 && frame_index < output_count_);
|
||||
ASSERT(output_[frame_index] == NULL);
|
||||
output_[frame_index] = output_frame;
|
||||
|
||||
// The top address for the bottommost output frame can be computed from
|
||||
// the input frame pointer and the output frame's height. For all
|
||||
// subsequent output frames, it can be computed from the previous one's
|
||||
// top address and the current frame's size.
|
||||
uint32_t top_address;
|
||||
if (is_bottommost) {
|
||||
// 2 = context and function in the frame.
|
||||
top_address =
|
||||
input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
|
||||
} else {
|
||||
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
|
||||
}
|
||||
output_frame->SetTop(top_address);
|
||||
|
||||
// Compute the incoming parameter translation.
|
||||
int parameter_count = function->shared()->formal_parameter_count() + 1;
|
||||
unsigned output_offset = output_frame_size;
|
||||
unsigned input_offset = input_frame_size;
|
||||
for (int i = 0; i < parameter_count; ++i) {
|
||||
output_offset -= kPointerSize;
|
||||
DoTranslateCommand(iterator, frame_index, output_offset);
|
||||
}
|
||||
input_offset -= (parameter_count * kPointerSize);
|
||||
|
||||
// There are no translation commands for the caller's pc and fp, the
|
||||
// context, and the function. Synthesize their values and set them up
|
||||
// explicitly.
|
||||
//
|
||||
// The caller's pc for the bottommost output frame is the same as in the
|
||||
// input frame. For all subsequent output frames, it can be read from the
|
||||
// previous one. This frame's pc can be computed from the non-optimized
|
||||
// function code and AST id of the bailout.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
intptr_t value;
|
||||
if (is_bottommost) {
|
||||
value = input_->GetFrameSlot(input_offset);
|
||||
} else {
|
||||
value = output_[frame_index - 1]->GetPc();
|
||||
}
|
||||
output_frame->SetFrameSlot(output_offset, value);
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
|
||||
top_address + output_offset, output_offset, value);
|
||||
}
|
||||
|
||||
// The caller's frame pointer for the bottommost output frame is the same
|
||||
// as in the input frame. For all subsequent output frames, it can be
|
||||
// read from the previous one. Also compute and set this frame's frame
|
||||
// pointer.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
if (is_bottommost) {
|
||||
value = input_->GetFrameSlot(input_offset);
|
||||
} else {
|
||||
value = output_[frame_index - 1]->GetFp();
|
||||
}
|
||||
output_frame->SetFrameSlot(output_offset, value);
|
||||
intptr_t fp_value = top_address + output_offset;
|
||||
ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
|
||||
output_frame->SetFp(fp_value);
|
||||
if (is_topmost) {
|
||||
output_frame->SetRegister(fp.code(), fp_value);
|
||||
}
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
|
||||
fp_value, output_offset, value);
|
||||
}
|
||||
|
||||
// For the bottommost output frame the context can be gotten from the input
|
||||
// frame. For all subsequent output frames it can be gotten from the function
|
||||
// so long as we don't inline functions that need local contexts.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
if (is_bottommost) {
|
||||
value = input_->GetFrameSlot(input_offset);
|
||||
} else {
|
||||
value = reinterpret_cast<intptr_t>(function->context());
|
||||
}
|
||||
output_frame->SetFrameSlot(output_offset, value);
|
||||
if (is_topmost) {
|
||||
output_frame->SetRegister(cp.code(), value);
|
||||
}
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
|
||||
top_address + output_offset, output_offset, value);
|
||||
}
|
||||
|
||||
// The function was mentioned explicitly in the BEGIN_FRAME.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
value = reinterpret_cast<uint32_t>(function);
|
||||
// The function for the bottommost output frame should also agree with the
|
||||
// input frame.
|
||||
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
|
||||
output_frame->SetFrameSlot(output_offset, value);
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
|
||||
top_address + output_offset, output_offset, value);
|
||||
}
|
||||
|
||||
// Translate the rest of the frame.
|
||||
for (unsigned i = 0; i < height; ++i) {
|
||||
output_offset -= kPointerSize;
|
||||
DoTranslateCommand(iterator, frame_index, output_offset);
|
||||
}
|
||||
ASSERT(0 == output_offset);
|
||||
|
||||
// Compute this frame's PC, state, and continuation.
|
||||
Code* non_optimized_code = function->shared()->code();
|
||||
FixedArray* raw_data = non_optimized_code->deoptimization_data();
|
||||
DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
|
||||
Address start = non_optimized_code->instruction_start();
|
||||
unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
|
||||
unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
|
||||
uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
|
||||
output_frame->SetPc(pc_value);
|
||||
|
||||
FullCodeGenerator::State state =
|
||||
FullCodeGenerator::StateField::decode(pc_and_state);
|
||||
output_frame->SetState(Smi::FromInt(state));
|
||||
|
||||
|
||||
// Set the continuation for the topmost frame.
|
||||
if (is_topmost && bailout_type_ != DEBUGGER) {
|
||||
Builtins* builtins = isolate_->builtins();
|
||||
Code* continuation = (bailout_type_ == EAGER)
|
||||
? builtins->builtin(Builtins::kNotifyDeoptimized)
|
||||
: builtins->builtin(Builtins::kNotifyLazyDeoptimized);
|
||||
output_frame->SetContinuation(
|
||||
reinterpret_cast<uint32_t>(continuation->entry()));
|
||||
}
|
||||
}
|
||||
|
||||
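DoComputeFrame above derives each output frame's top address exactly as its comments describe: the bottommost frame reuses the memory below the input frame pointer (two words for context and function plus the frame's height), and every later frame sits immediately below the previous one. A standalone sketch of that arithmetic (names are illustrative, not V8 API; 32-bit word size assumed):

#include <stdint.h>

// Standalone sketch of the frame-top arithmetic used above.
static const uint32_t kPointerSize = 4;  // 32-bit MIPS word size.

// Bottommost output frame: the context and function words and the frame's
// locals/expression stack already sit below the input fp, so the top is
// derived from fp directly.
uint32_t BottommostFrameTop(uint32_t input_fp, uint32_t height_in_bytes) {
  return input_fp - 2 * kPointerSize - height_in_bytes;
}

// Every later (inlined) output frame sits immediately below the previous one.
uint32_t NextFrameTop(uint32_t previous_top, uint32_t output_frame_size) {
  return previous_top - output_frame_size;
}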
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  UNIMPLEMENTED();
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}
#define __ masm()->
|
||||
|
||||
|
||||
// This code tries to be close to ia32 code so that any changes can be
|
||||
// easily ported.
|
||||
void Deoptimizer::EntryGenerator::Generate() {
|
||||
UNIMPLEMENTED();
|
||||
GeneratePrologue();
|
||||
|
||||
Isolate* isolate = masm()->isolate();
|
||||
|
||||
CpuFeatures::Scope scope(FPU);
|
||||
// Unlike on ARM we don't save all the registers, just the useful ones.
|
||||
// For the rest, there are gaps on the stack, so the offsets remain the same.
|
||||
const int kNumberOfRegisters = Register::kNumRegisters;
|
||||
|
||||
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
|
||||
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
|
||||
|
||||
const int kDoubleRegsSize =
|
||||
kDoubleSize * FPURegister::kNumAllocatableRegisters;
|
||||
|
||||
// Save all FPU registers before messing with them.
|
||||
__ Subu(sp, sp, Operand(kDoubleRegsSize));
|
||||
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
|
||||
FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
|
||||
int offset = i * kDoubleSize;
|
||||
__ sdc1(fpu_reg, MemOperand(sp, offset));
|
||||
}
|
||||
|
||||
// Push saved_regs (needed to populate FrameDescription::registers_).
|
||||
// Leave gaps for other registers.
|
||||
__ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
|
||||
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
|
||||
if ((saved_regs & (1 << i)) != 0) {
|
||||
__ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
|
||||
}
|
||||
}
|
||||
|
||||
const int kSavedRegistersAreaSize =
|
||||
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
|
||||
|
||||
// Get the bailout id from the stack.
|
||||
__ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
|
||||
|
||||
// Get the address of the location in the code object if possible (a3) (return
|
||||
// address for lazy deoptimization) and compute the fp-to-sp delta in
|
||||
// register t0.
|
||||
if (type() == EAGER) {
|
||||
__ mov(a3, zero_reg);
|
||||
// Correct one word for bailout id.
|
||||
__ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
|
||||
} else if (type() == OSR) {
|
||||
__ mov(a3, ra);
|
||||
// Correct one word for bailout id.
|
||||
__ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
|
||||
} else {
|
||||
__ mov(a3, ra);
|
||||
// Correct two words for bailout id and return address.
|
||||
__ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
|
||||
}
|
||||
|
||||
__ Subu(t0, fp, t0);
|
||||
|
||||
// Allocate a new deoptimizer object.
|
||||
// Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
|
||||
__ PrepareCallCFunction(6, t1);
|
||||
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
||||
__ li(a1, Operand(type())); // bailout type,
|
||||
// a2: bailout id already loaded.
|
||||
// a3: code address or 0 already loaded.
|
||||
__ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
|
||||
__ li(t1, Operand(ExternalReference::isolate_address()));
|
||||
__ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
|
||||
// Call Deoptimizer::New().
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm());
|
||||
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
|
||||
}
|
||||
|
||||
// Preserve "deoptimizer" object in register v0 and get the input
|
||||
// frame descriptor pointer to a1 (deoptimizer->input_);
|
||||
// Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
|
||||
__ mov(a0, v0);
|
||||
__ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
|
||||
|
||||
// Copy core registers into FrameDescription::registers_[kNumRegisters].
|
||||
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
|
||||
for (int i = 0; i < kNumberOfRegisters; i++) {
|
||||
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
|
||||
if ((saved_regs & (1 << i)) != 0) {
|
||||
__ lw(a2, MemOperand(sp, i * kPointerSize));
|
||||
__ sw(a2, MemOperand(a1, offset));
|
||||
} else if (FLAG_debug_code) {
|
||||
__ li(a2, kDebugZapValue);
|
||||
__ sw(a2, MemOperand(a1, offset));
|
||||
}
|
||||
}
|
||||
|
||||
// Copy FPU registers to
|
||||
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
|
||||
int double_regs_offset = FrameDescription::double_registers_offset();
|
||||
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
|
||||
int dst_offset = i * kDoubleSize + double_regs_offset;
|
||||
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
|
||||
__ ldc1(f0, MemOperand(sp, src_offset));
|
||||
__ sdc1(f0, MemOperand(a1, dst_offset));
|
||||
}
|
||||
|
||||
// Remove the bailout id, eventually return address, and the saved registers
|
||||
// from the stack.
|
||||
if (type() == EAGER || type() == OSR) {
|
||||
__ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
|
||||
} else {
|
||||
__ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
|
||||
}
|
||||
|
||||
// Compute a pointer to the unwinding limit in register a2; that is
|
||||
// the first stack slot not part of the input frame.
|
||||
__ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
|
||||
__ Addu(a2, a2, sp);
|
||||
|
||||
// Unwind the stack down to - but not including - the unwinding
|
||||
// limit and copy the contents of the activation frame to the input
|
||||
// frame description.
|
||||
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
|
||||
Label pop_loop;
|
||||
__ bind(&pop_loop);
|
||||
__ pop(t0);
|
||||
__ sw(t0, MemOperand(a3, 0));
|
||||
__ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
|
||||
__ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
|
||||
|
||||
// Compute the output frame in the deoptimizer.
|
||||
__ push(a0); // Preserve deoptimizer object across call.
|
||||
// a0: deoptimizer object; a1: scratch.
|
||||
__ PrepareCallCFunction(1, a1);
|
||||
// Call Deoptimizer::ComputeOutputFrames().
|
||||
{
|
||||
AllowExternalCallThatCantCauseGC scope(masm());
|
||||
__ CallCFunction(
|
||||
ExternalReference::compute_output_frames_function(isolate), 1);
|
||||
}
|
||||
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
|
||||
|
||||
// Replace the current (input) frame with the output frames.
|
||||
Label outer_push_loop, inner_push_loop;
|
||||
// Outer loop state: a0 = current "FrameDescription** output_",
|
||||
// a1 = one past the last FrameDescription**.
|
||||
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
|
||||
__ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
|
||||
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
|
||||
__ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
|
||||
__ bind(&outer_push_loop);
|
||||
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
|
||||
__ lw(a2, MemOperand(a0, 0)); // output_[ix]
|
||||
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
|
||||
__ bind(&inner_push_loop);
|
||||
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
|
||||
__ Addu(t2, a2, Operand(a3));
|
||||
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
|
||||
__ push(t3);
|
||||
__ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
|
||||
|
||||
__ Addu(a0, a0, Operand(kPointerSize));
|
||||
__ Branch(&outer_push_loop, lt, a0, Operand(a1));
|
||||
|
||||
|
||||
// Push state, pc, and continuation from the last output frame.
|
||||
if (type() != OSR) {
|
||||
__ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
|
||||
__ push(t2);
|
||||
}
|
||||
|
||||
__ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
|
||||
__ push(t2);
|
||||
__ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
|
||||
__ push(t2);
|
||||
|
||||
|
||||
// Technically restoring 'at' should work unless zero_reg is also restored
|
||||
// but it's safer to check for this.
|
||||
ASSERT(!(at.bit() & restored_regs));
|
||||
// Restore the registers from the last output frame.
|
||||
__ mov(at, a2);
|
||||
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
|
||||
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
|
||||
if ((restored_regs & (1 << i)) != 0) {
|
||||
__ lw(ToRegister(i), MemOperand(at, offset));
|
||||
}
|
||||
}
|
||||
|
||||
// Set up the roots register.
|
||||
ExternalReference roots_array_start =
|
||||
ExternalReference::roots_array_start(isolate);
|
||||
__ li(roots, Operand(roots_array_start));
|
||||
|
||||
__ pop(at); // Get continuation, leave pc on stack.
|
||||
__ pop(ra);
|
||||
__ Jump(at);
|
||||
__ stop("Unreachable.");
|
||||
}
|
||||
|
||||
|
||||
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  UNIMPLEMENTED();
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());

  // Create a sequence of deoptimization entries. Note that any
  // registers may be still live.

  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    if (type() != EAGER) {
      // Emulate ia32 like call by pushing return address to stack.
      __ push(ra);
    }
    __ li(at, Operand(i));
    __ push(at);
    __ Branch(&done);

    // Pad the rest of the code.
    while (table_entry_size_ > (masm()->pc_offset() - start)) {
      __ nop();
    }

    ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start);
  }
  __ bind(&done);
}

#undef __


} }  // namespace v8::internal
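Every table entry emitted above is padded with nops to exactly table_entry_size_ bytes (32 in this patch, replacing the placeholder value of 10 inherited from the x64 file it was based on), so a bailout id and its entry address can be converted into each other by plain arithmetic. A standalone sketch of that correspondence (function names are illustrative, not V8 API):

#include <stdint.h>
#include <stdio.h>

// Illustrative sketch: with fixed-size, nop-padded entries, the deopt table
// maps between a bailout id and its entry address by arithmetic alone.
static const uintptr_t kTableEntrySize = 32;  // Matches table_entry_size_ above.

uintptr_t DeoptEntryAddress(uintptr_t table_base, int bailout_id) {
  return table_base + static_cast<uintptr_t>(bailout_id) * kTableEntrySize;
}

int BailoutIdForEntry(uintptr_t table_base, uintptr_t entry_address) {
  return static_cast<int>((entry_address - table_base) / kTableEntrySize);
}

int main() {
  const uintptr_t base = 0x40000;  // Example table base address.
  const uintptr_t entry = DeoptEntryAddress(base, 3);
  printf("entry 3 at %#lx maps back to id %d\n",
         static_cast<unsigned long>(entry), BailoutIdForEntry(base, entry));
  return 0;
}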
src/mips/lithium-codegen-mips.cc: new file, 4628 lines added (diff suppressed because it is too large).
@@ -29,35 +29,398 @@
|
||||
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
|
||||
|
||||
#include "mips/lithium-mips.h"
|
||||
|
||||
#include "mips/lithium-gap-resolver-mips.h"
|
||||
#include "deoptimizer.h"
|
||||
#include "safepoint-table.h"
|
||||
#include "scopes.h"
|
||||
|
||||
// Note: this file was taken from the X64 version. ARM has a partially working
|
||||
// lithium implementation, but for now it is not ported to mips.
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// Forward declarations.
|
||||
class LDeferredCode;
|
||||
class SafepointGenerator;
|
||||
|
||||
class LCodeGen BASE_EMBEDDED {
|
||||
public:
|
||||
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
|
||||
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
|
||||
: chunk_(chunk),
|
||||
masm_(assembler),
|
||||
info_(info),
|
||||
current_block_(-1),
|
||||
current_instruction_(-1),
|
||||
instructions_(chunk->instructions()),
|
||||
deoptimizations_(4),
|
||||
deopt_jump_table_(4),
|
||||
deoptimization_literals_(8),
|
||||
inlined_function_count_(0),
|
||||
scope_(info->scope()),
|
||||
status_(UNUSED),
|
||||
deferred_(8),
|
||||
osr_pc_offset_(-1),
|
||||
resolver_(this),
|
||||
expected_safepoint_kind_(Safepoint::kSimple) {
|
||||
PopulateDeoptimizationLiteralsWithInlinedFunctions();
|
||||
}
|
||||
|
||||
|
||||
// Simple accessors.
|
||||
MacroAssembler* masm() const { return masm_; }
|
||||
CompilationInfo* info() const { return info_; }
|
||||
Isolate* isolate() const { return info_->isolate(); }
|
||||
Factory* factory() const { return isolate()->factory(); }
|
||||
Heap* heap() const { return isolate()->heap(); }
|
||||
|
||||
// Support for converting LOperands to assembler types.
|
||||
// LOperand must be a register.
|
||||
Register ToRegister(LOperand* op) const;
|
||||
|
||||
// LOperand is loaded into scratch, unless already a register.
|
||||
Register EmitLoadRegister(LOperand* op, Register scratch);
|
||||
|
||||
// LOperand must be a double register.
|
||||
DoubleRegister ToDoubleRegister(LOperand* op) const;
|
||||
|
||||
// LOperand is loaded into dbl_scratch, unless already a double register.
|
||||
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
|
||||
FloatRegister flt_scratch,
|
||||
DoubleRegister dbl_scratch);
|
||||
int ToInteger32(LConstantOperand* op) const;
|
||||
double ToDouble(LConstantOperand* op) const;
|
||||
Operand ToOperand(LOperand* op);
|
||||
MemOperand ToMemOperand(LOperand* op) const;
|
||||
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
|
||||
MemOperand ToHighMemOperand(LOperand* op) const;
|
||||
|
||||
// Try to generate code for the entire chunk, but it may fail if the
|
||||
// chunk contains constructs we cannot handle. Returns true if the
|
||||
// code generation attempt succeeded.
|
||||
bool GenerateCode() {
|
||||
UNIMPLEMENTED();
|
||||
return false;
|
||||
}
|
||||
bool GenerateCode();
|
||||
|
||||
// Finish the code by setting stack height, safepoint, and bailout
|
||||
// information on it.
|
||||
void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
|
||||
void FinishCode(Handle<Code> code);
|
||||
|
||||
// Deferred code support.
|
||||
template<int T>
|
||||
void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
|
||||
Token::Value op);
|
||||
void DoDeferredNumberTagD(LNumberTagD* instr);
|
||||
void DoDeferredNumberTagI(LNumberTagI* instr);
|
||||
void DoDeferredTaggedToI(LTaggedToI* instr);
|
||||
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
|
||||
void DoDeferredStackCheck(LStackCheck* instr);
|
||||
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
|
||||
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
|
||||
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
|
||||
Label* map_check);
|
||||
|
||||
// Parallel move support.
|
||||
void DoParallelMove(LParallelMove* move);
|
||||
void DoGap(LGap* instr);
|
||||
|
||||
// Emit frame translation commands for an environment.
|
||||
void WriteTranslation(LEnvironment* environment, Translation* translation);
|
||||
|
||||
// Declare methods that deal with the individual node types.
|
||||
#define DECLARE_DO(type) void Do##type(L##type* node);
|
||||
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
|
||||
#undef DECLARE_DO
|
||||
|
||||
private:
|
||||
enum Status {
|
||||
UNUSED,
|
||||
GENERATING,
|
||||
DONE,
|
||||
ABORTED
|
||||
};
|
||||
|
||||
bool is_unused() const { return status_ == UNUSED; }
|
||||
bool is_generating() const { return status_ == GENERATING; }
|
||||
bool is_done() const { return status_ == DONE; }
|
||||
bool is_aborted() const { return status_ == ABORTED; }
|
||||
|
||||
StrictModeFlag strict_mode_flag() const {
|
||||
return info()->strict_mode_flag();
|
||||
}
|
||||
|
||||
LChunk* chunk() const { return chunk_; }
|
||||
Scope* scope() const { return scope_; }
|
||||
HGraph* graph() const { return chunk_->graph(); }
|
||||
|
||||
Register scratch0() { return lithiumScratchReg; }
|
||||
Register scratch1() { return lithiumScratchReg2; }
|
||||
DoubleRegister double_scratch0() { return lithiumScratchDouble; }
|
||||
|
||||
int GetNextEmittedBlock(int block);
|
||||
LInstruction* GetNextInstruction();
|
||||
|
||||
void EmitClassOfTest(Label* if_true,
|
||||
Label* if_false,
|
||||
Handle<String> class_name,
|
||||
Register input,
|
||||
Register temporary,
|
||||
Register temporary2);
|
||||
|
||||
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
|
||||
int GetParameterCount() const { return scope()->num_parameters(); }
|
||||
|
||||
void Abort(const char* format, ...);
|
||||
void Comment(const char* format, ...);
|
||||
|
||||
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
|
||||
|
||||
// Code generation passes. Returns true if code generation should
|
||||
// continue.
|
||||
bool GeneratePrologue();
|
||||
bool GenerateBody();
|
||||
bool GenerateDeferredCode();
|
||||
bool GenerateDeoptJumpTable();
|
||||
bool GenerateSafepointTable();
|
||||
|
||||
enum SafepointMode {
|
||||
RECORD_SIMPLE_SAFEPOINT,
|
||||
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
|
||||
};
|
||||
|
||||
void CallCode(Handle<Code> code,
|
||||
RelocInfo::Mode mode,
|
||||
LInstruction* instr);
|
||||
|
||||
void CallCodeGeneric(Handle<Code> code,
|
||||
RelocInfo::Mode mode,
|
||||
LInstruction* instr,
|
||||
SafepointMode safepoint_mode);
|
||||
|
||||
void CallRuntime(const Runtime::Function* function,
|
||||
int num_arguments,
|
||||
LInstruction* instr);
|
||||
|
||||
void CallRuntime(Runtime::FunctionId id,
|
||||
int num_arguments,
|
||||
LInstruction* instr) {
|
||||
const Runtime::Function* function = Runtime::FunctionForId(id);
|
||||
CallRuntime(function, num_arguments, instr);
|
||||
}
|
||||
|
||||
void CallRuntimeFromDeferred(Runtime::FunctionId id,
|
||||
int argc,
|
||||
LInstruction* instr);
|
||||
|
||||
// Generate a direct call to a known function. Expects the function
|
||||
// to be in a1.
|
||||
void CallKnownFunction(Handle<JSFunction> function,
|
||||
int arity,
|
||||
LInstruction* instr,
|
||||
CallKind call_kind);
|
||||
|
||||
void LoadHeapObject(Register result, Handle<HeapObject> object);
|
||||
|
||||
void RegisterLazyDeoptimization(LInstruction* instr,
|
||||
SafepointMode safepoint_mode);
|
||||
|
||||
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
|
||||
void DeoptimizeIf(Condition cc,
|
||||
LEnvironment* environment,
|
||||
Register src1,
|
||||
const Operand& src2);
|
||||
|
||||
void AddToTranslation(Translation* translation,
|
||||
LOperand* op,
|
||||
bool is_tagged);
|
||||
void PopulateDeoptimizationData(Handle<Code> code);
|
||||
int DefineDeoptimizationLiteral(Handle<Object> literal);
|
||||
|
||||
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
|
||||
|
||||
Register ToRegister(int index) const;
|
||||
DoubleRegister ToDoubleRegister(int index) const;
|
||||
|
||||
// Specific math operations - used from DoUnaryMathOperation.
|
||||
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
|
||||
void DoMathAbs(LUnaryMathOperation* instr);
|
||||
void DoMathFloor(LUnaryMathOperation* instr);
|
||||
void DoMathRound(LUnaryMathOperation* instr);
|
||||
void DoMathSqrt(LUnaryMathOperation* instr);
|
||||
void DoMathPowHalf(LUnaryMathOperation* instr);
|
||||
void DoMathLog(LUnaryMathOperation* instr);
|
||||
void DoMathCos(LUnaryMathOperation* instr);
|
||||
void DoMathSin(LUnaryMathOperation* instr);
|
||||
|
||||
// Support for recording safepoint and position information.
|
||||
void RecordSafepoint(LPointerMap* pointers,
|
||||
Safepoint::Kind kind,
|
||||
int arguments,
|
||||
int deoptimization_index);
|
||||
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
|
||||
void RecordSafepoint(int deoptimization_index);
|
||||
void RecordSafepointWithRegisters(LPointerMap* pointers,
|
||||
int arguments,
|
||||
int deoptimization_index);
|
||||
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
|
||||
int arguments,
|
||||
int deoptimization_index);
|
||||
void RecordPosition(int position);
|
||||
int LastSafepointEnd() {
|
||||
return static_cast<int>(safepoints_.GetPcAfterGap());
|
||||
}
|
||||
|
||||
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
|
||||
void EmitGoto(int block);
|
||||
void EmitBranch(int left_block,
|
||||
int right_block,
|
||||
Condition cc,
|
||||
Register src1,
|
||||
const Operand& src2);
|
||||
void EmitBranchF(int left_block,
|
||||
int right_block,
|
||||
Condition cc,
|
||||
FPURegister src1,
|
||||
FPURegister src2);
|
||||
void EmitCmpI(LOperand* left, LOperand* right);
|
||||
void EmitNumberUntagD(Register input,
|
||||
DoubleRegister result,
|
||||
bool deoptimize_on_undefined,
|
||||
LEnvironment* env);
|
||||
|
||||
// Emits optimized code for typeof x == "y". Modifies input register.
|
||||
// Returns the condition on which a final split to
|
||||
// true and false label should be made, to optimize fallthrough.
|
||||
// Returns two registers in cmp1 and cmp2 that can be used in the
|
||||
// Branch instruction after EmitTypeofIs.
|
||||
Condition EmitTypeofIs(Label* true_label,
|
||||
Label* false_label,
|
||||
Register input,
|
||||
Handle<String> type_name,
|
||||
Register& cmp1,
|
||||
Operand& cmp2);
|
||||
|
||||
// Emits optimized code for %_IsObject(x). Preserves input register.
|
||||
// Returns the condition on which a final split to
|
||||
// true and false label should be made, to optimize fallthrough.
|
||||
Condition EmitIsObject(Register input,
|
||||
Register temp1,
|
||||
Label* is_not_object,
|
||||
Label* is_object);
|
||||
|
||||
// Emits optimized code for %_IsConstructCall().
|
||||
// Caller should branch on equal condition.
|
||||
void EmitIsConstructCall(Register temp1, Register temp2);
|
||||
|
||||
void EmitLoadFieldOrConstantFunction(Register result,
|
||||
Register object,
|
||||
Handle<Map> type,
|
||||
Handle<String> name);
|
||||
|
||||
struct JumpTableEntry {
|
||||
explicit inline JumpTableEntry(Address entry)
|
||||
: label(),
|
||||
address(entry) { }
|
||||
Label label;
|
||||
Address address;
|
||||
};
|
||||
|
||||
LChunk* const chunk_;
|
||||
MacroAssembler* const masm_;
|
||||
CompilationInfo* const info_;
|
||||
|
||||
int current_block_;
|
||||
int current_instruction_;
|
||||
const ZoneList<LInstruction*>* instructions_;
|
||||
ZoneList<LEnvironment*> deoptimizations_;
|
||||
ZoneList<JumpTableEntry> deopt_jump_table_;
|
||||
ZoneList<Handle<Object> > deoptimization_literals_;
|
||||
int inlined_function_count_;
|
||||
Scope* const scope_;
|
||||
Status status_;
|
||||
TranslationBuffer translations_;
|
||||
ZoneList<LDeferredCode*> deferred_;
|
||||
int osr_pc_offset_;
|
||||
|
||||
// Builder that keeps track of safepoints in the code. The table
|
||||
// itself is emitted at the end of the generated code.
|
||||
SafepointTableBuilder safepoints_;
|
||||
|
||||
// Compiler from a set of parallel moves to a sequential list of moves.
|
||||
LGapResolver resolver_;
|
||||
|
||||
Safepoint::Kind expected_safepoint_kind_;
|
||||
|
||||
class PushSafepointRegistersScope BASE_EMBEDDED {
|
||||
public:
|
||||
PushSafepointRegistersScope(LCodeGen* codegen,
|
||||
Safepoint::Kind kind)
|
||||
: codegen_(codegen) {
|
||||
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
|
||||
codegen_->expected_safepoint_kind_ = kind;
|
||||
|
||||
switch (codegen_->expected_safepoint_kind_) {
|
||||
case Safepoint::kWithRegisters:
|
||||
codegen_->masm_->PushSafepointRegisters();
|
||||
break;
|
||||
case Safepoint::kWithRegistersAndDoubles:
|
||||
codegen_->masm_->PushSafepointRegistersAndDoubles();
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
~PushSafepointRegistersScope() {
|
||||
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
|
||||
ASSERT((kind & Safepoint::kWithRegisters) != 0);
|
||||
switch (kind) {
|
||||
case Safepoint::kWithRegisters:
|
||||
codegen_->masm_->PopSafepointRegisters();
|
||||
break;
|
||||
case Safepoint::kWithRegistersAndDoubles:
|
||||
codegen_->masm_->PopSafepointRegistersAndDoubles();
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
|
||||
}
|
||||
|
||||
private:
|
||||
LCodeGen* codegen_;
|
||||
};
|
||||
|
||||
friend class LDeferredCode;
|
||||
friend class LEnvironment;
|
||||
friend class SafepointGenerator;
|
||||
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
|
||||
};
|
||||
|
||||
|
||||
class LDeferredCode: public ZoneObject {
|
||||
public:
|
||||
explicit LDeferredCode(LCodeGen* codegen)
|
||||
: codegen_(codegen),
|
||||
external_exit_(NULL),
|
||||
instruction_index_(codegen->current_instruction_) {
|
||||
codegen->AddDeferredCode(this);
|
||||
}
|
||||
|
||||
virtual ~LDeferredCode() { }
|
||||
virtual void Generate() = 0;
|
||||
virtual LInstruction* instr() = 0;
|
||||
|
||||
void SetExit(Label *exit) { external_exit_ = exit; }
|
||||
Label* entry() { return &entry_; }
|
||||
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
|
||||
int instruction_index() const { return instruction_index_; }
|
||||
|
||||
protected:
|
||||
LCodeGen* codegen() const { return codegen_; }
|
||||
MacroAssembler* masm() const { return codegen_->masm(); }
|
||||
|
||||
private:
|
||||
LCodeGen* codegen_;
|
||||
Label entry_;
|
||||
Label exit_;
|
||||
Label* external_exit_;
|
||||
int instruction_index_;
|
||||
};
|
||||
|
||||
} } // namespace v8::internal
|
||||
src/mips/lithium-gap-resolver-mips.cc: new file, 309 lines added.
@@ -0,0 +1,309 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "mips/lithium-gap-resolver-mips.h"
#include "mips/lithium-codegen-mips.h"

namespace v8 {
namespace internal {

static const Register kSavedValueRegister = lithiumScratchReg;
static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // We can only find a cycle, when doing a depth-first traversal of moves,
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill the spilled value to
  // the stack, to free up the register.
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack allocated local.  Multiple moves can
  // be pending because this function is recursive.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting
  // move.  In this case, we have a cycle, and we save the source of this
  // move to a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index].  After performing all moves in the tree rooted
  // in that move, we save the value to that source.
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kSavedDoubleValueRegister);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kSavedDoubleValueRegister,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ sw(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // 'at' is overwritten while saving the value to the destination.
          // Therefore we can't use 'at'.  It is OK if the read from the source
          // destroys 'at', since that happens before the value is read.
          // This uses only a single reg of the double reg-pair.
          __ lwc1(kSavedDoubleValueRegister, source_operand);
          __ swc1(kSavedDoubleValueRegister, destination_operand);
        } else {
          __ lw(at, source_operand);
          __ sw(at, destination_operand);
        }
      } else {
        __ lw(kSavedValueRegister, source_operand);
        __ sw(kSavedValueRegister, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    Operand source_operand = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      __ li(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ li(kSavedValueRegister, source_operand);
      __ sw(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kSavedDoubleValueRegister was used to break the cycle,
        // but kSavedValueRegister is free.
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ lw(kSavedValueRegister, source_operand);
        __ sw(kSavedValueRegister, destination_operand);
        __ lw(kSavedValueRegister, source_high_operand);
        __ sw(kSavedValueRegister, destination_high_operand);
      } else {
        __ ldc1(kSavedDoubleValueRegister, source_operand);
        __ sdc1(kSavedDoubleValueRegister, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


#undef __

} }  // namespace v8::internal
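Note: a short walkthrough of how the resolver above breaks a cycle, using a two-register swap as the smallest example. The register names a1/a2 are illustrative only; the trace follows the code paths of PerformMove, BreakCycle, RestoreValue, and EmitMove in this file.

// Parallel move forming a cycle: {a1 -> a2, a2 -> a1}.
//
// PerformMove(0): the root move a1 -> a2 is blocked by a2 -> a1, so
// PerformMove(1) runs first; a2 -> a1 is in turn blocked by the pending
// root move, so BreakCycle(1) spills the blocking value and eliminates
// move 1:
//     __ mov(kSavedValueRegister, a2);   // save a2 in the scratch register
// Back in PerformMove(0) the root move is no longer blocked:
//     __ mov(a2, a1);                    // EmitMove(0)
// Finally Resolve() sees in_cycle_ and calls RestoreValue():
//     __ mov(a1, kSavedValueRegister);   // complete the eliminated move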
84  src/mips/lithium-gap-resolver-mips.h  Normal file
@ -0,0 +1,84 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_

#include "v8.h"

#include "lithium.h"

namespace v8 {
namespace internal {

class LCodeGen;
class LGapResolver;

class LGapResolver BASE_EMBEDDED {
 public:
  explicit LGapResolver(LCodeGen* owner);

  // Resolve a set of parallel moves, emitting assembler instructions.
  void Resolve(LParallelMove* parallel_move);

 private:
  // Build the initial list of moves.
  void BuildInitialMoveList(LParallelMove* parallel_move);

  // Perform the move at the moves_ index in question (possibly requiring
  // other moves to satisfy dependencies).
  void PerformMove(int index);

  // If a cycle is found in the series of moves, save the blocking value to
  // a scratch register.  The cycle must be found by hitting the root of the
  // depth-first search.
  void BreakCycle(int index);

  // After a cycle has been resolved, restore the value from the scratch
  // register to its proper destination.
  void RestoreValue();

  // Emit a move and remove it from the move graph.
  void EmitMove(int index);

  // Verify the move list before performing moves.
  void Verify();

  LCodeGen* cgen_;

  // List of moves not yet resolved.
  ZoneList<LMoveOperands> moves_;

  int root_index_;
  bool in_cycle_;
  LOperand* saved_destination_;
};

} }  // namespace v8::internal

#endif  // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
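Note: a minimal sketch of how the gap resolver declared above is driven from the code generator, assuming LCodeGen owns a member named resolver_ and a DoParallelMove handler as on the other ports; this wiring is not shown in the visible hunks of this patch.

// Minimal sketch, assuming a resolver_ member on LCodeGen (not copied from
// this patch).  Each gap's parallel move is forwarded to the resolver, which
// emits the machine code in a safe order and breaks cycles via the scratch
// registers.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}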
2218  src/mips/lithium-mips.cc  Normal file
File diff suppressed because it is too large.