// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/deoptimizer.h"

#include <memory>

#include "src/accessors.h"
#include "src/ast/prettyprinter.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"
#include "src/macro-assembler.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

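// Allocates the executable memory chunk that backs the table of
// deoptimization entry points for one bailout type. The chunk is sized by
// Deoptimizer::GetMaxDeoptTableSize() so that every entry fits.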
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
  return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
                                  MemoryAllocator::GetCommitPageSize(),
                                  EXECUTABLE, NULL);
}


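// Pre-allocates one code chunk per bailout type; the entry code itself is
// generated lazily into these chunks by EnsureCodeForDeoptimizationEntry.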
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
    : allocator_(allocator),
      current_(NULL) {
  for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
    deopt_entry_code_entries_[i] = -1;
    deopt_entry_code_[i] = AllocateCodeChunk(allocator);
  }
}


DeoptimizerData::~DeoptimizerData() {
  for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
    allocator_->Free<MemoryAllocator::kFull>(deopt_entry_code_[i]);
    deopt_entry_code_[i] = NULL;
  }
}


Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
  if (function_->IsHeapObject()) {
    // Search all deoptimizing code in the native context of the function.
    Isolate* isolate = function_->GetIsolate();
    Context* native_context = function_->context()->native_context();
    Object* element = native_context->DeoptimizedCodeListHead();
    while (!element->IsUndefined(isolate)) {
      Code* code = Code::cast(element);
      CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
      if (code->contains(addr)) return code;
      element = code->next_code_link();
    }
  }
  return NULL;
}


// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
                              BailoutType type,
                              unsigned bailout_id,
                              Address from,
                              int fp_to_sp_delta,
                              Isolate* isolate) {
  Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, type,
                                             bailout_id, from, fp_to_sp_delta);
  CHECK(isolate->deoptimizer_data()->current_ == NULL);
  isolate->deoptimizer_data()->current_ = deoptimizer;
  return deoptimizer;
}


// No larger than 2K on all platforms
static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;


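// The deopt table is committed in whole pages: enough pages for
// kMaxNumberOfEntries fixed-size entries, rounded up, plus one extra page to
// leave room for the epilogue code.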
size_t Deoptimizer::GetMaxDeoptTableSize() {
  int entries_size =
      Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
  int commit_page_size = static_cast<int>(MemoryAllocator::GetCommitPageSize());
  int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
                    commit_page_size) + 1;
  return static_cast<size_t>(commit_page_size * page_count);
}


Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
  Deoptimizer* result = isolate->deoptimizer_data()->current_;
  CHECK_NOT_NULL(result);
  result->DeleteFrameDescriptions();
  isolate->deoptimizer_data()->current_ = NULL;
  return result;
}


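// Builds a DeoptimizedFrameInfo for the debugger: translates the optimized
// frame and selects the jsframe_index-th JavaScript frame (including frames
// from inlined functions) out of the translation.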
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
    JavaScriptFrame* frame,
    int jsframe_index,
    Isolate* isolate) {
  CHECK(frame->is_optimized());

  TranslatedState translated_values(frame);
  translated_values.Prepare(false, frame->fp());

  TranslatedState::iterator frame_it = translated_values.end();
  int counter = jsframe_index;
  for (auto it = translated_values.begin(); it != translated_values.end();
       it++) {
    if (it->kind() == TranslatedFrame::kFunction ||
        it->kind() == TranslatedFrame::kInterpretedFunction) {
      if (counter == 0) {
        frame_it = it;
        break;
      }
      counter--;
    }
  }
  CHECK(frame_it != translated_values.end());

  DeoptimizedFrameInfo* info =
      new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);

  return info;
}


void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                int count,
                                                BailoutType type) {
  TableEntryGenerator generator(masm, type, count);
  generator.Generate();
}


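// Walks the context's weak list of optimized JSFunctions, calling the visitor
// on each one and unlinking functions that no longer point to optimized code.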
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
    Context* context, OptimizedFunctionVisitor* visitor) {
  DisallowHeapAllocation no_allocation;

  CHECK(context->IsNativeContext());

  visitor->EnterContext(context);

  // Visit the list of optimized functions, removing elements that
  // no longer refer to optimized code.
  JSFunction* prev = NULL;
  Object* element = context->OptimizedFunctionsListHead();
  Isolate* isolate = context->GetIsolate();
  while (!element->IsUndefined(isolate)) {
    JSFunction* function = JSFunction::cast(element);
    Object* next = function->next_function_link();
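    // The comma operator below first runs the visitor, then re-checks the
    // code kind, because the visitor may have replaced the function's
    // optimized code.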
    if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
        (visitor->VisitFunction(function),
         function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
      // The function no longer refers to optimized code, or the visitor
      // changed the code it refers to so that it is no longer optimized code.
      // Remove the function from this list.
      if (prev != NULL) {
        prev->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
      } else {
        context->SetOptimizedFunctionsListHead(next);
      }
      // The visitor should not alter the link directly.
      CHECK_EQ(function->next_function_link(), next);
      // Set the next function link to undefined to indicate it is no longer
      // in the optimized functions list.
      function->set_next_function_link(context->GetHeap()->undefined_value(),
                                       SKIP_WRITE_BARRIER);
    } else {
      // The visitor should not alter the link directly.
      CHECK_EQ(function->next_function_link(), next);
      // Not removed; preserve this element.
      prev = function;
    }
    element = next;
  }

  visitor->LeaveContext(context);
}


void Deoptimizer::VisitAllOptimizedFunctions(
    Isolate* isolate,
    OptimizedFunctionVisitor* visitor) {
  DisallowHeapAllocation no_allocation;

  // Run through the list of all native contexts.
  Object* context = isolate->heap()->native_contexts_list();
  while (!context->IsUndefined(isolate)) {
    VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
    context = Context::cast(context)->next_context_link();
  }
}


// Unlink functions referring to code marked for deoptimization, then move
// marked code from the optimized code list to the deoptimized code list,
// and patch code for lazy deopt.
void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
  DisallowHeapAllocation no_allocation;

  // A "closure" that unlinks optimized code that is going to be
  // deoptimized from the functions that refer to it.
  class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
   public:
    virtual void EnterContext(Context* context) { }  // Don't care.
    virtual void LeaveContext(Context* context) { }  // Don't care.
    virtual void VisitFunction(JSFunction* function) {
      Code* code = function->code();
      if (!code->marked_for_deoptimization()) return;

      // Unlink this function and evict it from the optimized code map.
      SharedFunctionInfo* shared = function->shared();
      function->set_code(shared->code());

      if (FLAG_trace_deopt) {
        CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
        PrintF(scope.file(), "[deoptimizer unlinked: ");
        function->PrintName(scope.file());
        PrintF(scope.file(),
               " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
      }
    }
  };

  // Unlink all functions that refer to marked code.
  SelectedCodeUnlinker unlinker;
  VisitAllOptimizedFunctionsForContext(context, &unlinker);

  Isolate* isolate = context->GetHeap()->isolate();
#ifdef DEBUG
  Code* topmost_optimized_code = NULL;
  bool safe_to_deopt_topmost_optimized_code = false;
  // Make sure all activations of optimized code can deopt at their current PC.
  // The topmost optimized code has special handling because it cannot be
  // deoptimized due to weak object dependency.
  for (StackFrameIterator it(isolate, isolate->thread_local_top());
       !it.done(); it.Advance()) {
    StackFrame::Type type = it.frame()->type();
    if (type == StackFrame::OPTIMIZED) {
      Code* code = it.frame()->LookupCode();
      JSFunction* function =
          static_cast<OptimizedFrame*>(it.frame())->function();
      if (FLAG_trace_deopt) {
        CodeTracer::Scope scope(isolate->GetCodeTracer());
        PrintF(scope.file(), "[deoptimizer found activation of function: ");
        function->PrintName(scope.file());
        PrintF(scope.file(),
               " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
      }
      SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
      int deopt_index = safepoint.deoptimization_index();
      // Turbofan deopt is checked when we are patching addresses on stack.
      bool turbofanned = code->is_turbofanned() &&
                         function->shared()->asm_function() &&
                         !FLAG_turbo_asm_deoptimization;
      bool safe_to_deopt =
          deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
      bool builtin = code->kind() == Code::BUILTIN;
      CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned ||
            builtin);
      if (topmost_optimized_code == NULL) {
        topmost_optimized_code = code;
        safe_to_deopt_topmost_optimized_code = safe_to_deopt;
      }
    }
  }
#endif

  // Move marked code from the optimized code list to the deoptimized
  // code list, collecting them into a ZoneList.
  Zone zone(isolate->allocator(), ZONE_NAME);
  ZoneList<Code*> codes(10, &zone);

  // Walk over all optimized code objects in this native context.
  Code* prev = NULL;
  Object* element = context->OptimizedCodeListHead();
  while (!element->IsUndefined(isolate)) {
    Code* code = Code::cast(element);
    CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
    Object* next = code->next_code_link();

    if (code->marked_for_deoptimization()) {
      // Put the code into the list for later patching.
      codes.Add(code, &zone);

      if (prev != NULL) {
        // Skip this code in the optimized code list.
        prev->set_next_code_link(next);
      } else {
        // There was no previous node; the next node is the new head.
        context->SetOptimizedCodeListHead(next);
      }

      // Move the code to the _deoptimized_ code list.
      code->set_next_code_link(context->DeoptimizedCodeListHead());
      context->SetDeoptimizedCodeListHead(code);
    } else {
      // Not marked; preserve this element.
      prev = code;
    }
    element = next;
  }

  // We need a handle scope only because of the macro assembler,
  // which is used in code patching in EnsureCodeForDeoptimizationEntry.
  HandleScope scope(isolate);

  // Now patch all the codes for deoptimization.
  for (int i = 0; i < codes.length(); i++) {
#ifdef DEBUG
    if (codes[i] == topmost_optimized_code) {
      DCHECK(safe_to_deopt_topmost_optimized_code);
    }
#endif
    // It is finally time to die, code object.

    // Remove the code from the optimized code map.
    DeoptimizationInputData* deopt_data =
        DeoptimizationInputData::cast(codes[i]->deoptimization_data());
    SharedFunctionInfo* shared =
        SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
    shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");

    // Do platform-specific patching to force any activations to lazy deopt.
    PatchCodeForDeoptimization(isolate, codes[i]);

    // We might be in the middle of incremental marking with compaction.
    // Tell the collector to treat this code object in a special way and
    // ignore all slots that might have been recorded on it.
    isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
  }
}


void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
  RuntimeCallTimerScope runtimeTimer(isolate,
                                     &RuntimeCallStats::DeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  if (FLAG_trace_deopt) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
  }
  DisallowHeapAllocation no_allocation;
  // For all contexts, mark all code, then deoptimize.
  Object* context = isolate->heap()->native_contexts_list();
  while (!context->IsUndefined(isolate)) {
    Context* native_context = Context::cast(context);
    MarkAllCodeForContext(native_context);
    DeoptimizeMarkedCodeForContext(native_context);
    context = native_context->next_context_link();
  }
}


void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
  RuntimeCallTimerScope runtimeTimer(isolate,
                                     &RuntimeCallStats::DeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  if (FLAG_trace_deopt) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
  }
  DisallowHeapAllocation no_allocation;
  // For all contexts, deoptimize code already marked.
  Object* context = isolate->heap()->native_contexts_list();
  while (!context->IsUndefined(isolate)) {
    Context* native_context = Context::cast(context);
    DeoptimizeMarkedCodeForContext(native_context);
    context = native_context->next_context_link();
  }
}


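// Marks every code object on the context's optimized code list for
// deoptimization; the actual unlinking and patching is done separately by
// DeoptimizeMarkedCodeForContext.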
void Deoptimizer::MarkAllCodeForContext(Context* context) {
  Object* element = context->OptimizedCodeListHead();
  Isolate* isolate = context->GetIsolate();
  while (!element->IsUndefined(isolate)) {
    Code* code = Code::cast(element);
    CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
    code->set_marked_for_deoptimization(true);
    element = code->next_code_link();
  }
}


void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
  Isolate* isolate = function->GetIsolate();
  RuntimeCallTimerScope runtimeTimer(isolate,
                                     &RuntimeCallStats::DeoptimizeCode);
  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
  if (code == nullptr) code = function->code();
  if (code->kind() == Code::OPTIMIZED_FUNCTION) {
    // Mark the code for deoptimization and unlink any functions that also
    // refer to that code. The code cannot be shared across native contexts,
    // so we only need to search one.
    code->set_marked_for_deoptimization(true);
    DeoptimizeMarkedCodeForContext(function->context()->native_context());
  }
}


void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
  deoptimizer->DoComputeOutputFrames();
}


bool Deoptimizer::TraceEnabledFor(StackFrame::Type frame_type) {
  return (frame_type == StackFrame::STUB) ? FLAG_trace_stub_failures
                                          : FLAG_trace_deopt;
}


const char* Deoptimizer::MessageFor(BailoutType type) {
  switch (type) {
    case EAGER: return "eager";
    case SOFT: return "soft";
    case LAZY: return "lazy";
  }
  FATAL("Unsupported deopt type");
  return NULL;
}


Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
                         BailoutType type, unsigned bailout_id, Address from,
                         int fp_to_sp_delta)
    : isolate_(isolate),
      function_(function),
      bailout_id_(bailout_id),
      bailout_type_(type),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      deoptimizing_throw_(false),
      catch_handler_data_(-1),
      catch_handler_pc_offset_(-1),
      input_(nullptr),
      output_count_(0),
      jsframe_count_(0),
      output_(nullptr),
      caller_frame_top_(0),
      caller_fp_(0),
      caller_pc_(0),
      caller_constant_pool_(0),
      input_frame_context_(0),
      stack_fp_(0),
      trace_scope_(nullptr) {
  if (isolate->deoptimizer_lazy_throw()) {
    isolate->set_deoptimizer_lazy_throw(false);
    deoptimizing_throw_ = true;
  }

  // For COMPILED_STUBs called from builtins, the function pointer is a SMI
  // indicating an internal frame.
  if (function->IsSmi()) {
    function = nullptr;
  }
  DCHECK(from != nullptr);
  if (function != nullptr && function->IsOptimized()) {
    function->shared()->increment_deopt_count();
    if (bailout_type_ == Deoptimizer::SOFT) {
      isolate->counters()->soft_deopts_executed()->Increment();
      // Soft deopts shouldn't count against the overall re-optimization count
      // that can eventually lead to disabling optimization for a function.
      int opt_count = function->shared()->opt_count();
      if (opt_count > 0) opt_count--;
      function->shared()->set_opt_count(opt_count);
    }
  }
  compiled_code_ = FindOptimizedCode(function);
#if DEBUG
  DCHECK(compiled_code_ != NULL);
  if (type == EAGER || type == SOFT || type == LAZY) {
    DCHECK(compiled_code_->kind() != Code::FUNCTION);
  }
#endif

  StackFrame::Type frame_type = function == NULL
      ? StackFrame::STUB
      : StackFrame::JAVA_SCRIPT;
  trace_scope_ = TraceEnabledFor(frame_type)
                     ? new CodeTracer::Scope(isolate->GetCodeTracer())
                     : NULL;
#ifdef DEBUG
  CHECK(AllowHeapAllocation::IsAllowed());
  disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif  // DEBUG
  if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
    PROFILE(isolate_, CodeDeoptEvent(compiled_code_, from_, fp_to_sp_delta_));
  }
  unsigned size = ComputeInputFrameSize();
  int parameter_count =
      function == nullptr
          ? 0
          : (function->shared()->internal_formal_parameter_count() + 1);
  input_ = new (size) FrameDescription(size, parameter_count);
  input_->SetFrameType(frame_type);
}


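// Returns the code object to deoptimize: the code containing from_ if it is
// already on the deoptimized-code list, otherwise whatever code object the
// isolate finds at that address.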
Code* Deoptimizer::FindOptimizedCode(JSFunction* function) {
  Code* compiled_code = FindDeoptimizingCode(from_);
  return (compiled_code == NULL)
             ? static_cast<Code*>(isolate_->FindCodeObject(from_))
             : compiled_code;
}


void Deoptimizer::PrintFunctionName() {
  if (function_ != nullptr && function_->IsJSFunction()) {
    function_->ShortPrint(trace_scope_->file());
  } else {
    PrintF(trace_scope_->file(),
           "%s", Code::Kind2String(compiled_code_->kind()));
  }
}


Deoptimizer::~Deoptimizer() {
  DCHECK(input_ == NULL && output_ == NULL);
  DCHECK(disallow_heap_allocation_ == NULL);
  delete trace_scope_;
}


void Deoptimizer::DeleteFrameDescriptions() {
  delete input_;
  for (int i = 0; i < output_count_; ++i) {
    if (output_[i] != input_) delete output_[i];
  }
  delete[] output_;
  input_ = NULL;
  output_ = NULL;
#ifdef DEBUG
  CHECK(!AllowHeapAllocation::IsAllowed());
  CHECK(disallow_heap_allocation_ != NULL);
  delete disallow_heap_allocation_;
  disallow_heap_allocation_ = NULL;
#endif  // DEBUG
}


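// Computes the address of the deopt table entry for the given id and bailout
// type, optionally making sure the entry code has been generated first.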
Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
                                            int id,
                                            BailoutType type,
                                            GetEntryMode mode) {
  CHECK_GE(id, 0);
  if (id >= kMaxNumberOfEntries) return NULL;
  if (mode == ENSURE_ENTRY_CODE) {
    EnsureCodeForDeoptimizationEntry(isolate, type, id);
  } else {
    CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
  }
  DeoptimizerData* data = isolate->deoptimizer_data();
  CHECK_LE(type, kLastBailoutType);
  MemoryChunk* base = data->deopt_entry_code_[type];
  return base->area_start() + (id * table_entry_size_);
}


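// The inverse of GetDeoptimizationEntry: maps an address inside the deopt
// table back to its entry id, or returns kNotDeoptimizationEntry when the
// address lies outside the table.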
int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
                                     Address addr,
                                     BailoutType type) {
  DeoptimizerData* data = isolate->deoptimizer_data();
  MemoryChunk* base = data->deopt_entry_code_[type];
  Address start = base->area_start();
  if (addr < start ||
      addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
    return kNotDeoptimizationEntry;
  }
  DCHECK_EQ(0,
            static_cast<int>(addr - start) % table_entry_size_);
  return static_cast<int>(addr - start) / table_entry_size_;
}


int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
                               BailoutId id,
                               SharedFunctionInfo* shared) {
  // TODO(kasperl): For now, we do a simple linear search for the PC
  // offset associated with the given node id. This should probably be
  // changed to a binary search.
  int length = data->DeoptPoints();
  for (int i = 0; i < length; i++) {
    if (data->AstId(i) == id) {
      return data->PcAndState(i)->value();
    }
  }
  OFStream os(stderr);
  os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n"
     << "[method: " << shared->DebugName()->ToCString().get() << "]\n"
     << "[source:\n" << SourceCodeOf(shared) << "\n]" << std::endl;

  shared->GetHeap()->isolate()->PushStackTraceAndDie(0xfefefefe, data, shared,
                                                     0xfefefeff);
  FATAL("unable to find pc offset during deoptimization");
  return -1;
}


int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
  int length = 0;
  // Count all entries in the deoptimizing code list of every context.
  Object* context = isolate->heap()->native_contexts_list();
  while (!context->IsUndefined(isolate)) {
    Context* native_context = Context::cast(context);
    Object* element = native_context->DeoptimizedCodeListHead();
    while (!element->IsUndefined(isolate)) {
      Code* code = Code::cast(element);
      DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
      length++;
      element = code->next_code_link();
    }
    context = Context::cast(context)->next_context_link();
  }
  return length;
}


namespace {

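// Looks up the catch handler (if any) covering the current code position of
// the given translated frame. Returns the handler's pc offset and records
// auxiliary handler data (the operand stack height) in *data_out, or returns
// -1 when there is no handler. Full-codegen frames carry no handler range
// entries, so only interpreted frames can yield a handler here.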
int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
  switch (translated_frame->kind()) {
    case TranslatedFrame::kFunction: {
#ifdef DEBUG
      JSFunction* function =
          JSFunction::cast(translated_frame->begin()->GetRawValue());
      Code* non_optimized_code = function->shared()->code();
      HandlerTable* table =
          HandlerTable::cast(non_optimized_code->handler_table());
      DCHECK_EQ(0, table->NumberOfRangeEntries());
#endif
      break;
    }
    case TranslatedFrame::kInterpretedFunction: {
      int bytecode_offset = translated_frame->node_id().ToInt();
      JSFunction* function =
          JSFunction::cast(translated_frame->begin()->GetRawValue());
      BytecodeArray* bytecode = function->shared()->bytecode_array();
      HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
      return table->LookupRange(bytecode_offset, data_out, nullptr);
    }
    default:
      break;
  }
  return -1;
}

}  // namespace


// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
  base::ElapsedTimer timer;

  // Determine basic deoptimization information. The optimized frame is
  // described by the input data.
  DeoptimizationInputData* input_data =
      DeoptimizationInputData::cast(compiled_code_->deoptimization_data());

  {
    // Read the caller's PC, FP, and constant pool values from the input
    // frame. Compute the caller's frame top address.

    Register fp_reg = JavaScriptFrame::fp_register();
    stack_fp_ = input_->GetRegister(fp_reg.code());

    caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();

    Address fp_address = input_->GetFramePointerAddress();
    caller_fp_ = Memory::intptr_at(fp_address);
    caller_pc_ =
        Memory::intptr_at(fp_address + CommonFrameConstants::kCallerPCOffset);
    input_frame_context_ = Memory::intptr_at(
        fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);

    if (FLAG_enable_embedded_constant_pool) {
      caller_constant_pool_ = Memory::intptr_at(
          fp_address + CommonFrameConstants::kConstantPoolOffset);
    }
  }

  if (trace_scope_ != NULL) {
    timer.Start();
    PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
           MessageFor(bailout_type_));
    PrintFunctionName();
    PrintF(trace_scope_->file(),
           " (opt #%d) @%d, FP to SP delta: %d, caller sp: 0x%08" V8PRIxPTR
           "]\n",
           input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
           caller_frame_top_);
    if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
        (compiled_code_->is_hydrogen_stub())) {
      compiled_code_->PrintDeoptLocation(trace_scope_->file(), from_);
    }
  }

  BailoutId node_id = input_data->AstId(bailout_id_);
  ByteArray* translations = input_data->TranslationByteArray();
  unsigned translation_index =
      input_data->TranslationIndex(bailout_id_)->value();

  TranslationIterator state_iterator(translations, translation_index);
  translated_state_.Init(
      input_->GetFramePointerAddress(), &state_iterator,
      input_data->LiteralArray(), input_->GetRegisterValues(),
      trace_scope_ == nullptr ? nullptr : trace_scope_->file());

  // Do the input frame to output frame(s) translation.
  size_t count = translated_state_.frames().size();
  // If we are supposed to go to the catch handler, find the catching frame
  // for the catch and make sure we only deoptimize up to that frame.
  if (deoptimizing_throw_) {
    size_t catch_handler_frame_index = count;
    for (size_t i = count; i-- > 0;) {
      catch_handler_pc_offset_ = LookupCatchHandler(
          &(translated_state_.frames()[i]), &catch_handler_data_);
      if (catch_handler_pc_offset_ >= 0) {
        catch_handler_frame_index = i;
        break;
      }
    }
    CHECK_LT(catch_handler_frame_index, count);
    count = catch_handler_frame_index + 1;
  }

  DCHECK(output_ == NULL);
  output_ = new FrameDescription*[count];
  for (size_t i = 0; i < count; ++i) {
    output_[i] = NULL;
  }
  output_count_ = static_cast<int>(count);

  // Translate each output frame.
  int frame_index = 0;  // output_frame_index
  for (size_t i = 0; i < count; ++i, ++frame_index) {
    // Read the ast node id, function, and frame height for this output frame.
    TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
    switch (translated_frame->kind()) {
      case TranslatedFrame::kFunction:
        DoComputeJSFrame(translated_frame, frame_index,
                         deoptimizing_throw_ && i == count - 1);
        jsframe_count_++;
        break;
      case TranslatedFrame::kInterpretedFunction:
        DoComputeInterpretedFrame(translated_frame, frame_index,
                                  deoptimizing_throw_ && i == count - 1);
        jsframe_count_++;
        break;
      case TranslatedFrame::kArgumentsAdaptor:
        DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
        break;
      case TranslatedFrame::kTailCallerFunction:
        DoComputeTailCallerFrame(translated_frame, frame_index);
        // Tail caller frame translations do not produce output frames.
        frame_index--;
        output_count_--;
        break;
      case TranslatedFrame::kConstructStub:
        DoComputeConstructStubFrame(translated_frame, frame_index);
        break;
      case TranslatedFrame::kGetter:
        DoComputeAccessorStubFrame(translated_frame, frame_index, false);
        break;
      case TranslatedFrame::kSetter:
        DoComputeAccessorStubFrame(translated_frame, frame_index, true);
        break;
      case TranslatedFrame::kCompiledStub:
        DoComputeCompiledStubFrame(translated_frame, frame_index);
        break;
      case TranslatedFrame::kInvalid:
        FATAL("invalid frame");
        break;
    }
  }

  // Print some helpful diagnostic information.
  if (trace_scope_ != NULL) {
    double ms = timer.Elapsed().InMillisecondsF();
    int index = output_count_ - 1;  // Index of the topmost frame.
    PrintF(trace_scope_->file(), "[deoptimizing (%s): end ",
           MessageFor(bailout_type_));
    PrintFunctionName();
    PrintF(trace_scope_->file(),
           " @%d => node=%d, pc=0x%08" V8PRIxPTR ", caller sp=0x%08" V8PRIxPTR
           ", state=%s, took %0.3f ms]\n",
           bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
           caller_frame_top_, BailoutStateToString(static_cast<BailoutState>(
                                  output_[index]->GetState()->value())),
           ms);
  }
}


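// Translates one optimized (full-codegen) JavaScript frame back into an
// unoptimized frame description, optionally redirecting control to a catch
// handler when deoptimizing a throw.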
void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
                                   int frame_index, bool goto_catch_handler) {
  SharedFunctionInfo* shared = translated_frame->raw_shared_info();

  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  bool is_bottommost = (0 == frame_index);
  bool is_topmost = (output_count_ - 1 == frame_index);
  int input_index = 0;

  BailoutId node_id = translated_frame->node_id();
  unsigned height =
      translated_frame->height() - 1;  // Do not count the context.
  unsigned height_in_bytes = height * kPointerSize;
  if (goto_catch_handler) {
    // Take the stack height from the handler table.
    height = catch_handler_data_;
    // We also make space for the exception itself.
    height_in_bytes = (height + 1) * kPointerSize;
    CHECK(is_topmost);
  }

  JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
  value_iterator++;
  input_index++;
  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(), " translating frame ");
    std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
    PrintF(trace_scope_->file(), "%s", name.get());
    PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
           height_in_bytes, goto_catch_handler ? " (throw)" : "");
  }

  // The 'fixed' part of the frame consists of the incoming parameters and
  // the part described by JavaScriptFrameConstants.
  unsigned fixed_frame_size = ComputeJavascriptFixedSize(shared);
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  int parameter_count = shared->internal_formal_parameter_count() + 1;
  FrameDescription* output_frame = new (output_frame_size)
      FrameDescription(output_frame_size, parameter_count);
  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);

  CHECK(frame_index >= 0 && frame_index < output_count_);
  CHECK_NULL(output_[frame_index]);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top and
  // this frame's size.
  intptr_t top_address;
  if (is_bottommost) {
    top_address = caller_frame_top_ - output_frame_size;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                 output_offset);
  }

  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), " -------------------------\n");
  }

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Synthesize their values and set them up
  // explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame. For all subsequent output frames, it can be read from the
  // previous one. This frame's pc can be computed from the non-optimized
  // function code and AST id of the bailout.
  output_offset -= kPCOnStackSize;
  intptr_t value;
  if (is_bottommost) {
    value = caller_pc_;
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetCallerPc(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame. For all subsequent output frames, it can be
  // read from the previous one. Also compute and set this frame's frame
  // pointer.
  output_offset -= kFPOnStackSize;
  if (is_bottommost) {
    value = caller_fp_;
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    Register fp_reg = JavaScriptFrame::fp_register();
    output_frame->SetRegister(fp_reg.code(), fp_value);
  }
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");

  if (FLAG_enable_embedded_constant_pool) {
    // For the bottommost output frame the constant pool pointer can be gotten
    // from the input frame. For subsequent output frames, it can be read from
    // the previous frame.
    output_offset -= kPointerSize;
    if (is_bottommost) {
      value = caller_constant_pool_;
    } else {
      value = output_[frame_index - 1]->GetConstantPool();
    }
    output_frame->SetCallerConstantPool(output_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_offset,
                         "caller's constant_pool\n");
  }

  // For the bottommost output frame the context can be gotten from the input
  // frame. For all subsequent output frames it can be gotten from the function
  // so long as we don't inline functions that need local contexts.
  output_offset -= kPointerSize;

  // When deoptimizing into a catch block, we need to take the context
  // from just above the top of the operand stack (we push the context
  // at the entry of the try block).
  TranslatedFrame::iterator context_pos = value_iterator;
  int context_input_index = input_index;
  if (goto_catch_handler) {
    for (unsigned i = 0; i < height + 1; ++i) {
      context_pos++;
      context_input_index++;
    }
  }
  // Read the context from the translations.
  Object* context = context_pos->GetRawValue();
  if (context->IsUndefined(isolate_)) {
    // If the context was optimized away, just use the context from
    // the activation. This should only apply to Crankshaft code.
    CHECK(!compiled_code_->is_turbofanned());
    context = is_bottommost ? reinterpret_cast<Object*>(input_frame_context_)
                            : function->context();
  }
  value = reinterpret_cast<intptr_t>(context);
  output_frame->SetContext(value);
  WriteValueToOutput(context, context_input_index, frame_index, output_offset,
                     "context ");
  if (context == isolate_->heap()->arguments_marker()) {
    Address output_address =
        reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
        output_offset;
    values_to_materialize_.push_back({output_address, context_pos});
  }
  value_iterator++;
  input_index++;

  // The function was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  WriteValueToOutput(function, 0, frame_index, output_offset, "function ");

  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), " -------------------------\n");
  }

  // Translate the rest of the frame.
  for (unsigned i = 0; i < height; ++i) {
    output_offset -= kPointerSize;
    WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                 output_offset);
  }
  if (goto_catch_handler) {
    // Write out the exception for the catch handler.
    output_offset -= kPointerSize;
    Object* exception_obj = reinterpret_cast<Object*>(
        input_->GetRegister(FullCodeGenerator::result_register().code()));
    WriteValueToOutput(exception_obj, input_index, frame_index, output_offset,
                       "exception ");
    input_index++;
  }
  CHECK_EQ(0u, output_offset);

  // Update constant pool.
  Code* non_optimized_code = shared->code();
  if (FLAG_enable_embedded_constant_pool) {
    intptr_t constant_pool_value =
        reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
    if (is_topmost) {
      Register constant_pool_reg =
          JavaScriptFrame::constant_pool_pointer_register();
      output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
    }
  }

  // Compute this frame's PC and state.
  FixedArray* raw_data = non_optimized_code->deoptimization_data();
  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
  Address start = non_optimized_code->instruction_start();
  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
  unsigned pc_offset = goto_catch_handler
                           ? catch_handler_pc_offset_
                           : FullCodeGenerator::PcField::decode(pc_and_state);
  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
  output_frame->SetPc(pc_value);

  // If we are going to the catch handler, then the exception lives in
  // the accumulator.
  BailoutState state =
      goto_catch_handler
          ? BailoutState::TOS_REGISTER
          : FullCodeGenerator::BailoutStateField::decode(pc_and_state);
  output_frame->SetState(Smi::FromInt(static_cast<int>(state)));

  // Clear the context register. The context might be a de-materialized object
  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
  // safety we use Smi(0) instead of the potential {arguments_marker} here.
  if (is_topmost) {
    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
    Register context_reg = JavaScriptFrame::context_register();
    output_frame->SetRegister(context_reg.code(), context_value);
  }

  // Set the continuation for the topmost frame.
  if (is_topmost) {
    Builtins* builtins = isolate_->builtins();
    Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
    if (bailout_type_ == LAZY) {
      continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
    } else if (bailout_type_ == SOFT) {
      continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
    } else {
      CHECK_EQ(bailout_type_, EAGER);
    }
    output_frame->SetContinuation(
        reinterpret_cast<intptr_t>(continuation->entry()));
  }
}


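// Translates one interpreted (Ignition) frame from the deoptimizer input into
// an output frame description.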
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
                                            int frame_index,
                                            bool goto_catch_handler) {
  SharedFunctionInfo* shared = translated_frame->raw_shared_info();

  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  bool is_bottommost = (0 == frame_index);
  bool is_topmost = (output_count_ - 1 == frame_index);
  int input_index = 0;

  int bytecode_offset = translated_frame->node_id().ToInt();
  unsigned height = translated_frame->height();
  unsigned height_in_bytes = height * kPointerSize;

  // All translations for interpreted frames contain the accumulator and hence
  // are assumed to be in bailout state {BailoutState::TOS_REGISTER}. However,
  // such a state is only supported for the topmost frame. We need to skip
  // pushing the accumulator for any non-topmost frame.
  if (!is_topmost) height_in_bytes -= kPointerSize;

  JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
  value_iterator++;
  input_index++;
  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(), "  translating interpreted frame ");
    std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
    PrintF(trace_scope_->file(), "%s", name.get());
    PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
           bytecode_offset, height_in_bytes,
           goto_catch_handler ? " (throw)" : "");
  }
  if (goto_catch_handler) {
    bytecode_offset = catch_handler_pc_offset_;
  }

  // The 'fixed' part of the frame consists of the incoming parameters and
  // the part described by InterpreterFrameConstants.
  unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  int parameter_count = shared->internal_formal_parameter_count() + 1;
  FrameDescription* output_frame = new (output_frame_size)
      FrameDescription(output_frame_size, parameter_count);
  output_frame->SetFrameType(StackFrame::INTERPRETED);

  CHECK(frame_index >= 0 && frame_index < output_count_);
  CHECK_NULL(output_[frame_index]);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  intptr_t top_address;
  if (is_bottommost) {
    top_address = caller_frame_top_ - output_frame_size;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);
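
  // From here on, the frame is filled in top-down at decreasing
  // {output_offset}: incoming parameters, caller PC, caller FP, (optionally
  // the caller's constant pool,) context, function, new.target, bytecode
  // array, bytecode offset, the interpreter register file and, for the
  // topmost frame only, the accumulator.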

  // Compute the incoming parameter translation.
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                 output_offset);
  }

  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), "    -------------------------\n");
  }

  // There are no translation commands for the caller's pc and fp, the
  // context, the function, new.target and the bytecode offset. Synthesize
  // their values and set them up explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame. For all subsequent output frames, it can be read from the
  // previous one. This frame's pc can be computed from the non-optimized
  // function code and the AST id of the bailout.
  output_offset -= kPCOnStackSize;
  intptr_t value;
  if (is_bottommost) {
    value = caller_pc_;
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetCallerPc(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame. For all subsequent output frames, it can be
  // read from the previous one. Also compute and set this frame's frame
  // pointer.
  output_offset -= kFPOnStackSize;
  if (is_bottommost) {
    value = caller_fp_;
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    Register fp_reg = InterpretedFrame::fp_register();
    output_frame->SetRegister(fp_reg.code(), fp_value);
  }
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");

  if (FLAG_enable_embedded_constant_pool) {
    // For the bottommost output frame the constant pool pointer can be
    // gotten from the input frame. For subsequent output frames, it can be
    // read from the previous frame.
    output_offset -= kPointerSize;
    if (is_bottommost) {
      value = caller_constant_pool_;
    } else {
      value = output_[frame_index - 1]->GetConstantPool();
    }
    output_frame->SetCallerConstantPool(output_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_offset,
                         "caller's constant_pool\n");
  }

  // For the bottommost output frame the context can be gotten from the input
  // frame. For all subsequent output frames it can be gotten from the
  // function so long as we don't inline functions that need local contexts.
  output_offset -= kPointerSize;

  // When deoptimizing into a catch block, we need to take the context
  // from a register that was specified in the handler table.
  TranslatedFrame::iterator context_pos = value_iterator;
  int context_input_index = input_index;
  if (goto_catch_handler) {
    // Skip to the translated value of the register specified
    // in the handler table.
    for (int i = 0; i < catch_handler_data_ + 1; ++i) {
      context_pos++;
      context_input_index++;
    }
  }
  // Read the context from the translations.
  Object* context = context_pos->GetRawValue();
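  // Note that the context slot may still hold the arguments marker if the
  // context itself is subject to materialization; that case is handled below
  // by recording the slot in {values_to_materialize_} so the materialized
  // object can be written back once it exists.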
  value = reinterpret_cast<intptr_t>(context);
  output_frame->SetContext(value);
  WriteValueToOutput(context, context_input_index, frame_index, output_offset,
                     "context    ");
  if (context == isolate_->heap()->arguments_marker()) {
    Address output_address =
        reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
        output_offset;
    values_to_materialize_.push_back({output_address, context_pos});
  }
  value_iterator++;
  input_index++;

  // The function was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  WriteValueToOutput(function, 0, frame_index, output_offset, "function    ");

  // The new.target slot is only used during function activation, which is
  // before the first deopt point, so it should never be needed. Just set it
  // to undefined.
  output_offset -= kPointerSize;
  Object* new_target = isolate_->heap()->undefined_value();
  WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target ");

  // Set the bytecode array pointer.
  output_offset -= kPointerSize;
  Object* bytecode_array = shared->HasDebugInfo()
                               ? shared->GetDebugInfo()->DebugBytecodeArray()
                               : shared->bytecode_array();
  WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
                     "bytecode array ");

  // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
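  // The interpreter tracks the bytecode offset relative to the start of the
  // BytecodeArray object rather than the first bytecode, so the array header
  // size (minus the heap-object tag) is folded into the Smi stored here.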
  int raw_bytecode_offset =
      BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
  Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
  WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
                     "bytecode offset ");

  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), "    -------------------------\n");
  }

  // Translate the rest of the interpreter registers in the frame.
  for (unsigned i = 0; i < height - 1; ++i) {
    output_offset -= kPointerSize;
    WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                 output_offset);
  }

  // Translate the accumulator register (depending on frame position).
  if (is_topmost) {
    // For the topmost frame, put the accumulator on the stack. The bailout
    // state for interpreted frames is always set to
    // {BailoutState::TOS_REGISTER} and the {NotifyDeoptimized} builtin pops
    // it off the topmost frame (possibly after materialization).
    output_offset -= kPointerSize;
    if (goto_catch_handler) {
      // If we are lazy deopting to a catch handler, we set the accumulator to
      // the exception (which lives in the result register).
      intptr_t accumulator_value =
          input_->GetRegister(FullCodeGenerator::result_register().code());
      WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
                         frame_index, output_offset, "accumulator ");
      value_iterator++;
    } else {
      WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                   output_offset, "accumulator ");
    }
  } else {
    // For non-topmost frames, skip the accumulator translation. For those
    // frames, the return value from the callee will become the accumulator.
    value_iterator++;
    input_index++;
  }
  CHECK_EQ(0u, output_offset);

  // Compute this frame's PC and state. The PC will be a special builtin that
  // continues the bytecode dispatch. Note that non-topmost and lazy-style
  // bailout handlers also advance the bytecode offset before dispatch, hence
  // simulating what normal handlers do upon completion of the operation.
  Builtins* builtins = isolate_->builtins();
  Code* dispatch_builtin =
      (!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
          ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
          : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
  output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
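  // For example, a non-topmost frame resumes at the bytecode following its
  // call site via {InterpreterEnterBytecodeAdvance} (the callee's return
  // value becomes the accumulator), whereas an eager bailout in the topmost
  // frame re-dispatches the current bytecode via
  // {InterpreterEnterBytecodeDispatch}.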
  // Restore accumulator (TOS) register.
  output_frame->SetState(
      Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));

  // Update constant pool.
  if (FLAG_enable_embedded_constant_pool) {
    intptr_t constant_pool_value =
        reinterpret_cast<intptr_t>(dispatch_builtin->constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
    if (is_topmost) {
      Register constant_pool_reg =
          InterpretedFrame::constant_pool_pointer_register();
      output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
    }
  }

  // Clear the context register. The context might be a de-materialized object
  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
  // safety we use Smi(0) instead of the potential {arguments_marker} here.
  if (is_topmost) {
    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
    Register context_reg = JavaScriptFrame::context_register();
    output_frame->SetRegister(context_reg.code(), context_value);
  }

  // Set the continuation for the topmost frame.
  if (is_topmost) {
    Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
    if (bailout_type_ == LAZY) {
      continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
    } else if (bailout_type_ == SOFT) {
      continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
    } else {
      CHECK_EQ(bailout_type_, EAGER);
    }
    output_frame->SetContinuation(
        reinterpret_cast<intptr_t>(continuation->entry()));
  }
}
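
// Builds the output frame for an arguments adaptor, the trampoline frame
// inserted between a caller and a callee whose actual argument count differs
// from the callee's formal parameter count.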
void Deoptimizer::DoComputeArgumentsAdaptorFrame(
    TranslatedFrame* translated_frame, int frame_index) {
  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  bool is_bottommost = (0 == frame_index);
  int input_index = 0;

  unsigned height = translated_frame->height();
  unsigned height_in_bytes = height * kPointerSize;
  JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
  value_iterator++;
  input_index++;
  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(),
           "  translating arguments adaptor => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFixedFrameSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  int parameter_count = height;
  FrameDescription* output_frame = new (output_frame_size)
      FrameDescription(output_frame_size, parameter_count);
  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);

  // An arguments adaptor frame can not be topmost.
  CHECK(frame_index < output_count_ - 1);
  CHECK(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  intptr_t top_address;
  if (is_bottommost) {
    top_address = caller_frame_top_ - output_frame_size;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                 output_offset);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPCOnStackSize;
  intptr_t value;
  if (is_bottommost) {
    value = caller_pc_;
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetCallerPc(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kFPOnStackSize;
  if (is_bottommost) {
    value = caller_fp_;
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");

  if (FLAG_enable_embedded_constant_pool) {
    // Read the caller's constant pool from the previous frame.
    output_offset -= kPointerSize;
    if (is_bottommost) {
      value = caller_constant_pool_;
    } else {
      value = output_[frame_index - 1]->GetConstantPool();
    }
    output_frame->SetCallerConstantPool(output_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_offset,
                         "caller's constant_pool\n");
  }

  // A marker value is used in place of the context.
  output_offset -= kPointerSize;
  intptr_t context = reinterpret_cast<intptr_t>(
      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  output_frame->SetFrameSlot(output_offset, context);
  DebugPrintOutputSlot(context, frame_index, output_offset,
                       "context (adaptor sentinel)\n");

  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  WriteValueToOutput(function, 0, frame_index, output_offset, "function    ");

  // Number of incoming arguments (the translated height includes the
  // receiver, the adaptor's length slot does not).
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "argc ");
  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), "(%d)\n", height - 1);
  }

  DCHECK(0 == output_offset);

  Builtins* builtins = isolate_->builtins();
  Code* adaptor_trampoline =
      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
  intptr_t pc_value = reinterpret_cast<intptr_t>(
      adaptor_trampoline->instruction_start() +
      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
  output_frame->SetPc(pc_value);
  if (FLAG_enable_embedded_constant_pool) {
    intptr_t constant_pool_value =
        reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
  }
}
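
// Handles a frame that performed a tail call. No output frame is emitted;
// instead, if the physical input frame still sits on top of an arguments
// adaptor frame created for the tail caller, that adaptor frame is dropped
// by lifting {caller_frame_top_}, {caller_fp_} and {caller_pc_} past it.
// Only the bottommost output frame can require this.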
void Deoptimizer::DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
                                           int frame_index) {
  SharedFunctionInfo* shared = translated_frame->raw_shared_info();

  bool is_bottommost = (0 == frame_index);
  // Tail caller frame can't be topmost.
  CHECK_NE(output_count_ - 1, frame_index);

  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(), "  translating tail caller frame ");
    std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
    PrintF(trace_scope_->file(), "%s\n", name.get());
  }

  if (!is_bottommost) return;

  // Drop the arguments adaptor frame below the current frame if it exists.
  Address fp_address = input_->GetFramePointerAddress();
  Address adaptor_fp_address =
      Memory::Address_at(fp_address + CommonFrameConstants::kCallerFPOffset);

  if (Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR) !=
      Memory::Object_at(adaptor_fp_address +
                        CommonFrameConstants::kContextOrFrameTypeOffset)) {
    return;
  }

  int caller_params_count =
      Smi::cast(
          Memory::Object_at(adaptor_fp_address +
                            ArgumentsAdaptorFrameConstants::kLengthOffset))
          ->value();

  int callee_params_count =
      function_->shared()->internal_formal_parameter_count();

  // Both the caller's and the callee's parameter count exclude the receiver.
  int offset = (caller_params_count - callee_params_count) * kPointerSize;
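  // E.g. if the caller pushed 4 actual arguments but the callee declares 2
  // formal parameters, offset is 2 * kPointerSize and the adaptor frame is
  // dropped as if the caller had pushed only the 2 expected arguments (plus
  // the receiver).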
  intptr_t new_stack_fp =
      reinterpret_cast<intptr_t>(adaptor_fp_address) + offset;

  intptr_t new_caller_frame_top = new_stack_fp +
                                  (callee_params_count + 1) * kPointerSize +
                                  CommonFrameConstants::kFixedFrameSizeAboveFp;

  intptr_t adaptor_caller_pc = Memory::intptr_at(
      adaptor_fp_address + CommonFrameConstants::kCallerPCOffset);
  intptr_t adaptor_caller_fp = Memory::intptr_at(
      adaptor_fp_address + CommonFrameConstants::kCallerFPOffset);

  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(),
           "    dropping caller arguments adaptor frame: offset=%d, "
           "fp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR
           ", "
           "caller sp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR "\n",
           offset, stack_fp_, new_stack_fp, caller_frame_top_,
           new_caller_frame_top);
  }
  caller_frame_top_ = new_caller_frame_top;
  caller_fp_ = adaptor_caller_fp;
  caller_pc_ = adaptor_caller_pc;
}
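
// Builds the output frame for the generic construct stub, used when a
// constructor call was inlined into optimized code. Such a frame is never
// bottommost, and it is topmost only in the lazy-deopt tail-call case
// handled below.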
void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
                                              int frame_index) {
  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  bool is_topmost = (output_count_ - 1 == frame_index);
  // The construct frame could become topmost only if we inlined a constructor
  // call which does a tail call (otherwise the tail callee's frame would be
  // the topmost one). So it could only be the LAZY case.
  CHECK(!is_topmost || bailout_type_ == LAZY);
  int input_index = 0;

  Builtins* builtins = isolate_->builtins();
  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
  unsigned height = translated_frame->height();
  unsigned height_in_bytes = height * kPointerSize;

  // If the construct frame appears to be topmost we should ensure that the
  // value of result register is preserved during continuation execution.
  // We do this here by "pushing" the result of the constructor function to
  // the top of the reconstructed stack and then using the
  // BailoutState::TOS_REGISTER machinery.
  if (is_topmost) {
    height_in_bytes += kPointerSize;
  }

  // Skip function.
  value_iterator++;
  input_index++;
  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(),
           "  translating construct stub => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new (output_frame_size) FrameDescription(output_frame_size);
  output_frame->SetFrameType(StackFrame::CONSTRUCT);

  // The construct stub frame can not be bottommost.
  DCHECK(frame_index > 0 && frame_index < output_count_);
  DCHECK(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  intptr_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    // The allocated receiver of a construct stub frame is passed as the
    // receiver parameter through the translation. It might be encoding
    // a captured object, so override the slot address for a captured object.
    WriteTranslatedValueToOutput(
        &value_iterator, &input_index, frame_index, output_offset, nullptr,
        (i == 0) ? reinterpret_cast<Address>(top_address) : nullptr);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPCOnStackSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetCallerPc(output_offset, callers_pc);
  DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kFPOnStackSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    Register fp_reg = JavaScriptFrame::fp_register();
    output_frame->SetRegister(fp_reg.code(), fp_value);
  }
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");

  if (FLAG_enable_embedded_constant_pool) {
    // Read the caller's constant pool from the previous frame.
    output_offset -= kPointerSize;
    value = output_[frame_index - 1]->GetConstantPool();
    output_frame->SetCallerConstantPool(output_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_offset,
                         "caller's constant_pool\n");
  }

  // A marker value is used to mark the frame.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
  output_frame->SetFrameSlot(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset,
                       "typed frame marker\n");

  // The context can be gotten from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");

  // Number of incoming arguments.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "argc ");
  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), "(%d)\n", height - 1);
  }

  // The newly allocated object was passed as receiver in the artificial
  // constructor stub environment created by HEnvironment::CopyForInlining().
  output_offset -= kPointerSize;
  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
  output_frame->SetFrameSlot(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset,
                       "allocated receiver\n");

  if (is_topmost) {
    // Ensure the result is restored back when we return to the stub.
    output_offset -= kPointerSize;
    Register result_reg = FullCodeGenerator::result_register();
    value = input_->GetRegister(result_reg.code());
    output_frame->SetFrameSlot(output_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_offset,
                         "constructor result\n");

    output_frame->SetState(
        Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
  }

  CHECK_EQ(0u, output_offset);

  intptr_t pc = reinterpret_cast<intptr_t>(
      construct_stub->instruction_start() +
      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
  if (FLAG_enable_embedded_constant_pool) {
    intptr_t constant_pool_value =
        reinterpret_cast<intptr_t>(construct_stub->constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
    if (is_topmost) {
      Register constant_pool_reg =
          JavaScriptFrame::constant_pool_pointer_register();
      output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
    }
  }

  // Clear the context register. The context might be a de-materialized object
  // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
  // safety we use Smi(0) instead of the potential {arguments_marker} here.
  if (is_topmost) {
    intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
    Register context_reg = JavaScriptFrame::context_register();
    output_frame->SetRegister(context_reg.code(), context_value);
  }

  // Set the continuation for the topmost frame.
  if (is_topmost) {
    Builtins* builtins = isolate_->builtins();
    DCHECK_EQ(LAZY, bailout_type_);
    Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
    output_frame->SetContinuation(
        reinterpret_cast<intptr_t>(continuation->entry()));
  }
}
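
// Builds the output frame for an inlined getter or setter call, reconstructed
// as a StackFrame::INTERNAL frame running the {LoadIC_Getter_ForDeopt} or
// {StoreIC_Setter_ForDeopt} builtin.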
void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
                                             int frame_index,
                                             bool is_setter_stub_frame) {
  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  bool is_topmost = (output_count_ - 1 == frame_index);
  // The accessor frame could become topmost only if we inlined an accessor
  // call which does a tail call (otherwise the tail callee's frame would be
  // the topmost one). So it could only be the LAZY case.
  CHECK(!is_topmost || bailout_type_ == LAZY);
  int input_index = 0;

  // Skip accessor.
  value_iterator++;
  input_index++;
  // The receiver (and the implicit return value, if any) are expected in
  // registers by the LoadIC/StoreIC, so they don't belong to the output stack
  // frame. This means that we have to use a height of 0.
  unsigned height = 0;
  unsigned height_in_bytes = height * kPointerSize;

  // If the accessor frame appears to be topmost we should ensure that the
  // value of result register is preserved during continuation execution.
  // We do this here by "pushing" the result of the accessor function to the
  // top of the reconstructed stack and then using the
  // BailoutState::TOS_REGISTER machinery.
  // We don't need to restore the result in case of a setter call because we
  // have to return the stored value but not the result of the setter function.
  bool should_preserve_result = is_topmost && !is_setter_stub_frame;
  if (should_preserve_result) {
    height_in_bytes += kPointerSize;
  }

  const char* kind = is_setter_stub_frame ? "setter" : "getter";
  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(),
           "  translating %s stub => height=%u\n", kind, height_in_bytes);
  }

  // We need 1 stack entry for the return address and enough entries for the
  // StackFrame::INTERNAL (FP, frame type, context, code object and constant
  // pool (if enabled) - see MacroAssembler::EnterFrame).
  // For a setter stub frame we need one additional entry for the implicit
  // return value, see StoreStubCompiler::CompileStoreViaSetter.
  unsigned fixed_frame_entries =
      (StandardFrameConstants::kFixedFrameSize / kPointerSize) + 1 +
      (is_setter_stub_frame ? 1 : 0);
  unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new (output_frame_size) FrameDescription(output_frame_size);
  output_frame->SetFrameType(StackFrame::INTERNAL);

  // A frame for an accessor stub can not be bottommost.
  CHECK(frame_index > 0 && frame_index < output_count_);
  CHECK_NULL(output_[frame_index]);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top
  // and this frame's size.
  intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  unsigned output_offset = output_frame_size;

  // Read caller's PC from the previous frame.
  output_offset -= kPCOnStackSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetCallerPc(output_offset, callers_pc);
  DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kFPOnStackSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetCallerFp(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    Register fp_reg = JavaScriptFrame::fp_register();
    output_frame->SetRegister(fp_reg.code(), fp_value);
  }
  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");

  if (FLAG_enable_embedded_constant_pool) {
    // Read the caller's constant pool from the previous frame.
    output_offset -= kPointerSize;
    value = output_[frame_index - 1]->GetConstantPool();
    output_frame->SetCallerConstantPool(output_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_offset,
                         "caller's constant_pool\n");
  }

  // Set the frame type.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
  output_frame->SetFrameSlot(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "frame type ");
  if (trace_scope_ != nullptr) {
    PrintF(trace_scope_->file(), "(%s sentinel)\n", kind);
  }

  // Get the code object of the accessor stub.
  output_offset -= kPointerSize;
  Builtins::Name name = is_setter_stub_frame ?
      Builtins::kStoreIC_Setter_ForDeopt :
      Builtins::kLoadIC_Getter_ForDeopt;
  Code* accessor_stub = isolate_->builtins()->builtin(name);
  value = reinterpret_cast<intptr_t>(accessor_stub);
  output_frame->SetFrameSlot(output_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
[runtime] Unify and simplify how frames are marked
Before this CL, various code stubs used different techniques
for marking their frames to enable stack-crawling and other
access to data in the frame. All of them were based on a abuse
of the "standard" frame representation, e.g. storing the a
context pointer immediately below the frame's fp, and a
function pointer after that. Although functional, this approach
tends to make stubs and builtins do an awkward, unnecessary
dance to appear like standard frames, even if they have
nothing to do with JavaScript execution.
This CL attempts to improve this by:
* Ensuring that there are only two fundamentally different
types of frames, a "standard" frame and a "typed" frame.
Standard frames, as before, contain both a context and
function pointer. Typed frames contain only a minimum
of a smi marker in the position immediately below the fp
where the context is in standard frames.
* Only interpreted, full codegen, and optimized Crankshaft and
TurboFan JavaScript frames use the "standard" format. All
other frames use the type frame format with an explicit
marker.
* Typed frames can contain one or more values below the
type marker. There is new magic macro machinery in
frames.h that simplifies defining the offsets of these fields
in typed frames.
* A new flag in the CallDescriptor enables specifying whether
a frame is a standard frame or a typed frame. Secondary
register location spilling is now only enabled for standard
frames.
* A zillion places in the code have been updated to deal with
the fact that most code stubs and internal frames use the
typed frame format. This includes changes in the
deoptimizer, debugger, and liveedit.
* StandardFrameConstants::kMarkerOffset is deprecated,
(CommonFrameConstants::kContextOrFrameTypeOffset
and StandardFrameConstants::kFrameOffset are now used
in its stead).
LOG=N
Review URL: https://codereview.chromium.org/1696043002
Cr-Commit-Position: refs/heads/master@{#34571}
2016-03-08 08:35:44 +00:00
|
|
|
// The context can be gotten from the previous frame.
|
|
|
|
output_offset -= kPointerSize;
|
|
|
|
value = output_[frame_index - 1]->GetContext();
|
|
|
|
output_frame->SetFrameSlot(output_offset, value);
|
|
|
|
DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
|
|
|
|
|
2013-03-01 12:23:24 +00:00
|
|
|
// Skip receiver.
|
2015-06-08 10:04:51 +00:00
|
|
|
value_iterator++;
|
2015-06-15 10:14:28 +00:00
|
|
|
input_index++;
|
2013-03-01 12:23:24 +00:00
|
|
|
|
|
|
|
if (is_setter_stub_frame) {
|
|
|
|
// The implicit return value was part of the artificial setter stub
|
|
|
|
// environment.
|
|
|
|
output_offset -= kPointerSize;
|
2015-06-15 10:14:28 +00:00
|
|
|
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
|
|
|
|
output_offset);
|
2013-03-01 12:23:24 +00:00
|
|
|
}
|
|
|
|
|
2016-05-06 12:36:23 +00:00
|
|
|
if (should_preserve_result) {
|
|
|
|
// Ensure the result is restored back when we return to the stub.
|
|
|
|
output_offset -= kPointerSize;
|
|
|
|
Register result_reg = FullCodeGenerator::result_register();
|
|
|
|
value = input_->GetRegister(result_reg.code());
|
|
|
|
output_frame->SetFrameSlot(output_offset, value);
|
|
|
|
DebugPrintOutputSlot(value, frame_index, output_offset,
|
|
|
|
"accessor result\n");
|
|
|
|
|
2016-05-18 07:50:00 +00:00
|
|
|
output_frame->SetState(
|
|
|
|
Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
|
2016-05-06 12:36:23 +00:00
|
|
|
} else {
|
2016-05-18 07:50:00 +00:00
|
|
|
output_frame->SetState(
|
|
|
|
Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
|
2016-05-06 12:36:23 +00:00
|
|
|
}
|
|
|
|
|
2015-01-30 09:29:25 +00:00
|
|
|
CHECK_EQ(0u, output_offset);
|
2013-03-01 12:23:24 +00:00
|
|
|
|
|
|
|
Smi* offset = is_setter_stub_frame ?
|
|
|
|
isolate_->heap()->setter_stub_deopt_pc_offset() :
|
|
|
|
isolate_->heap()->getter_stub_deopt_pc_offset();
|
|
|
|
intptr_t pc = reinterpret_cast<intptr_t>(
|
|
|
|
accessor_stub->instruction_start() + offset->value());
|
|
|
|
output_frame->SetPc(pc);
|
2015-06-04 14:44:00 +00:00
|
|
|
if (FLAG_enable_embedded_constant_pool) {
|
2014-03-14 15:11:58 +00:00
|
|
|
intptr_t constant_pool_value =
|
|
|
|
reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
|
|
|
|
output_frame->SetConstantPool(constant_pool_value);
|
2016-05-06 12:36:23 +00:00
|
|
|
if (is_topmost) {
|
|
|
|
Register constant_pool_reg =
|
|
|
|
JavaScriptFrame::constant_pool_pointer_register();
|
|
|
|
output_frame->SetRegister(constant_pool_reg.code(), fp_value);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-08 09:51:32 +00:00
|
|
|
// Clear the context register. The context might be a de-materialized object
|
|
|
|
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
|
|
|
|
// safety we use Smi(0) instead of the potential {arguments_marker} here.
|
|
|
|
if (is_topmost) {
|
2016-10-07 13:05:07 +00:00
|
|
|
intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
|
2016-09-08 09:51:32 +00:00
|
|
|
Register context_reg = JavaScriptFrame::context_register();
|
|
|
|
output_frame->SetRegister(context_reg.code(), context_value);
|
|
|
|
}
|
|
|
|
|
2016-05-06 12:36:23 +00:00
|
|
|
// Set the continuation for the topmost frame.
|
|
|
|
if (is_topmost) {
|
|
|
|
Builtins* builtins = isolate_->builtins();
|
|
|
|
DCHECK_EQ(LAZY, bailout_type_);
|
|
|
|
Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
|
|
|
|
output_frame->SetContinuation(
|
|
|
|
reinterpret_cast<intptr_t>(continuation->entry()));
|
2014-03-14 15:11:58 +00:00
|
|
|
}
|
2013-03-01 12:23:24 +00:00
|
|
|
}
|
|
|
|
|
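// For orientation: the writes above fill the accessor frame downwards with
// the caller's constant pool (only with embedded constant pools enabled),
// the StackFrame::INTERNAL marker, the accessor stub's code object, the
// context, and then, depending on the flags, the implicit setter return
// value and the preserved accessor result. output_offset must reach exactly
// zero once the last slot is written, which the CHECK_EQ above enforces.

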
void Deoptimizer::DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
                                             int frame_index) {
  //
  //               FROM                                  TO
  //    |          ....           |          |          ....           |
  //    +-------------------------+          +-------------------------+
  //    | JSFunction continuation |          | JSFunction continuation |
  //    +-------------------------+          +-------------------------+
  // |  |    saved frame (FP)     |          |    saved frame (FP)     |
  // |  +=========================+<-fpreg   +=========================+<-fpreg
  // |  |constant pool (if ool_cp)|          |constant pool (if ool_cp)|
  // |  +-------------------------+          +-------------------------|
  // |  |   JSFunction context    |          |   JSFunction context    |
  // v  +-------------------------+          +-------------------------|
  //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
  //    +-------------------------+          +-------------------------+
  //    |                         |          |  caller args.arguments_ |
  //    | ...                     |          +-------------------------+
  //    |                         |          |  caller args.length_    |
  //    |-------------------------|<-spreg   +-------------------------+
  //                                         |  caller args pointer    |
  //                                         +-------------------------+
  //                                         |  caller stack param 1   |
  //      parameters in registers            +-------------------------+
  //       and spilled to stack              |           ....          |
  //                                         +-------------------------+
  //                                         |  caller stack param n   |
  //                                         +-------------------------+<-spreg
  //                                         reg = number of parameters
  //                                         reg = failure handler address
  //                                         reg = saved frame
  //                                         reg = JSFunction context
  //
  // Caller stack params contain the register parameters to the stub first,
  // and then, if the descriptor specifies a constant number of stack
  // parameters, the stack parameters as well.

  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  int input_index = 0;

  CHECK(compiled_code_->is_hydrogen_stub());
  int major_key = CodeStub::GetMajorKey(compiled_code_);
  CodeStubDescriptor descriptor(isolate_, compiled_code_->stub_key());

  // The output frame must have room for all pushed register parameters
  // and the standard stack frame slots. Include space for an argument
  // object to the callee and optionally the space to pass the argument
  // object to the stub failure handler.
  int param_count = descriptor.GetRegisterParameterCount();
  int stack_param_count = descriptor.GetStackParameterCount();
  // The translated frame contains all of the register parameters
  // plus the context.
  CHECK_EQ(translated_frame->height(), param_count + 1);
  CHECK_GE(param_count, 0);

  int height_in_bytes = kPointerSize * (param_count + stack_param_count);
  int fixed_frame_size = StubFailureTrampolineFrameConstants::kFixedFrameSize;
  int output_frame_size = height_in_bytes + fixed_frame_size;
  if (trace_scope_ != NULL) {
    PrintF(trace_scope_->file(),
           "  translating %s => StubFailureTrampolineStub, height=%d\n",
           CodeStub::MajorName(static_cast<CodeStub::Major>(major_key)),
           height_in_bytes);
  }

  // The stub failure trampoline is a single frame.
  FrameDescription* output_frame =
      new (output_frame_size) FrameDescription(output_frame_size);
  output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
  CHECK_EQ(frame_index, 0);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top and
  // this frame's size.
  intptr_t top_address = caller_frame_top_ - output_frame_size;
  output_frame->SetTop(top_address);

  // Set caller's PC (JSFunction continuation).
  unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
  intptr_t value = caller_pc_;
  output_frame->SetCallerPc(output_frame_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                       "caller's pc\n");

  // Read caller's FP from the input frame, and set this frame's FP.
  value = caller_fp_;
  output_frame_offset -= kFPOnStackSize;
  output_frame->SetCallerFp(output_frame_offset, value);
  intptr_t frame_ptr = top_address + output_frame_offset;
  Register fp_reg = StubFailureTrampolineFrame::fp_register();
  output_frame->SetRegister(fp_reg.code(), frame_ptr);
  output_frame->SetFp(frame_ptr);
  DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                       "caller's fp\n");

  if (FLAG_enable_embedded_constant_pool) {
    // Read the caller's constant pool from the input frame.
    value = caller_constant_pool_;
    output_frame_offset -= kPointerSize;
    output_frame->SetCallerConstantPool(output_frame_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                         "caller's constant_pool\n");
  }

  // The marker for the typed stack frame.
  output_frame_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(
      Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
  output_frame->SetFrameSlot(output_frame_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                       "function (stub failure sentinel)\n");

  intptr_t caller_arg_count = stack_param_count;
  bool arg_count_known = !descriptor.stack_parameter_count().is_valid();

  // Build the Arguments object for the caller's parameters and a pointer to
  // it.
  output_frame_offset -= kPointerSize;
  int args_arguments_offset = output_frame_offset;
  intptr_t the_hole = reinterpret_cast<intptr_t>(
      isolate_->heap()->the_hole_value());
  if (arg_count_known) {
    value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
        (caller_arg_count - 1) * kPointerSize;
  } else {
    value = the_hole;
  }

  output_frame->SetFrameSlot(args_arguments_offset, value);
  DebugPrintOutputSlot(
      value, frame_index, args_arguments_offset,
      arg_count_known ? "args.arguments\n" : "args.arguments (the hole)\n");

  output_frame_offset -= kPointerSize;
  int length_frame_offset = output_frame_offset;
  value = arg_count_known ? caller_arg_count : the_hole;
  output_frame->SetFrameSlot(length_frame_offset, value);
  DebugPrintOutputSlot(
      value, frame_index, length_frame_offset,
      arg_count_known ? "args.length\n" : "args.length (the hole)\n");

  output_frame_offset -= kPointerSize;
  value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
      (output_frame_size - output_frame_offset) + kPointerSize;
  output_frame->SetFrameSlot(output_frame_offset, value);
  DebugPrintOutputSlot(value, frame_index, output_frame_offset, "args*\n");

  // Copy the register parameters to the failure frame.
  int arguments_length_offset = -1;
  for (int i = 0; i < param_count; ++i) {
    output_frame_offset -= kPointerSize;
    WriteTranslatedValueToOutput(&value_iterator, &input_index, 0,
                                 output_frame_offset);

    if (!arg_count_known &&
        descriptor.GetRegisterParameter(i)
            .is(descriptor.stack_parameter_count())) {
      arguments_length_offset = output_frame_offset;
    }
  }

  Object* maybe_context = value_iterator->GetRawValue();
  CHECK(maybe_context->IsContext());
  Register context_reg = StubFailureTrampolineFrame::context_register();
  value = reinterpret_cast<intptr_t>(maybe_context);
  output_frame->SetRegister(context_reg.code(), value);
  ++value_iterator;

  // Copy constant stack parameters to the failure frame. If the number of
  // stack parameters is not known in the descriptor, the arguments object is
  // the way to access them.
  for (int i = 0; i < stack_param_count; i++) {
    output_frame_offset -= kPointerSize;
    Object** stack_parameter = reinterpret_cast<Object**>(
        frame_ptr + StandardFrameConstants::kCallerSPOffset +
        (stack_param_count - i - 1) * kPointerSize);
    value = reinterpret_cast<intptr_t>(*stack_parameter);
    output_frame->SetFrameSlot(output_frame_offset, value);
    DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                         "stack parameter\n");
  }

  CHECK_EQ(0u, output_frame_offset);

  if (!arg_count_known) {
    CHECK_GE(arguments_length_offset, 0);
    // We know it's a smi because 1) the code stub guarantees the stack
    // parameter count is in smi range, and 2) the DoTranslateCommand in the
    // parameter loop above translated that to a tagged value.
    Smi* smi_caller_arg_count = reinterpret_cast<Smi*>(
        output_frame->GetFrameSlot(arguments_length_offset));
    caller_arg_count = smi_caller_arg_count->value();
    output_frame->SetFrameSlot(length_frame_offset, caller_arg_count);
    DebugPrintOutputSlot(caller_arg_count, frame_index, length_frame_offset,
                         "args.length\n");
    value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
        (caller_arg_count - 1) * kPointerSize;
    output_frame->SetFrameSlot(args_arguments_offset, value);
    DebugPrintOutputSlot(value, frame_index, args_arguments_offset,
                         "args.arguments\n");
  }

  // Copy the double registers from the input into the output frame.
  CopyDoubleRegisters(output_frame);

  // Fill registers containing handler and number of parameters.
  SetPlatformCompiledStubRegisters(output_frame, &descriptor);

  // Compute this frame's PC, state, and continuation.
  Code* trampoline = NULL;
  StubFunctionMode function_mode = descriptor.function_mode();
  StubFailureTrampolineStub(isolate_, function_mode)
      .FindCodeInCache(&trampoline);
  DCHECK(trampoline != NULL);
  output_frame->SetPc(reinterpret_cast<intptr_t>(
      trampoline->instruction_start()));
  if (FLAG_enable_embedded_constant_pool) {
    Register constant_pool_reg =
        StubFailureTrampolineFrame::constant_pool_pointer_register();
    intptr_t constant_pool_value =
        reinterpret_cast<intptr_t>(trampoline->constant_pool());
    output_frame->SetConstantPool(constant_pool_value);
    output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
  }
  output_frame->SetState(
      Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
  Code* notify_failure =
      isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
  output_frame->SetContinuation(
      reinterpret_cast<intptr_t>(notify_failure->entry()));
}


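// Note on the !arg_count_known path above: when the descriptor passes the
// stack parameter count in a register rather than as a constant, the real
// count only becomes known while the register parameters are being copied.
// args.length and args.arguments are therefore written twice: first as
// the-hole placeholders, then back-patched from the translated smi once the
// copy loop has located it.

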
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
  // Walk to the last JavaScript output frame to find out if it has
  // adapted arguments.
  for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
    if (frame_index != 0) it->Advance();
  }
  translated_state_.Prepare(it->frame()->has_adapted_arguments(),
                            reinterpret_cast<Address>(stack_fp_));

  for (auto& materialization : values_to_materialize_) {
    Handle<Object> value = materialization.value_->GetValue();

    if (trace_scope_ != nullptr) {
      PrintF(trace_scope_->file(),
             "Materialization [0x%08" V8PRIxPTR "] <- 0x%08" V8PRIxPTR " ; ",
             reinterpret_cast<intptr_t>(materialization.output_slot_address_),
             reinterpret_cast<intptr_t>(*value));
      value->ShortPrint(trace_scope_->file());
      PrintF(trace_scope_->file(), "\n");
    }

    *(reinterpret_cast<intptr_t*>(materialization.output_slot_address_)) =
        reinterpret_cast<intptr_t>(*value);
  }

  isolate_->materialized_object_store()->Remove(
      reinterpret_cast<Address>(stack_fp_));
}


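// A minimal sketch (not from the original source; the helper name is
// hypothetical) of the patching step in the loop above: the placeholder word
// in the recorded output slot is simply overwritten with the tagged pointer
// of the freshly materialized object.
static inline void PatchMaterializedSlot(Address slot, Object* materialized) {
  *reinterpret_cast<intptr_t*>(slot) =
      reinterpret_cast<intptr_t>(materialized);
}

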
void Deoptimizer::WriteTranslatedValueToOutput(
    TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
    unsigned output_offset, const char* debug_hint_string,
    Address output_address_for_materialization) {
  Object* value = (*iterator)->GetRawValue();

  WriteValueToOutput(value, *input_index, frame_index, output_offset,
                     debug_hint_string);

  if (value == isolate_->heap()->arguments_marker()) {
    Address output_address =
        reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
        output_offset;
    if (output_address_for_materialization == nullptr) {
      output_address_for_materialization = output_address;
    }
    values_to_materialize_.push_back(
        {output_address_for_materialization, *iterator});
  }

  (*iterator)++;
  (*input_index)++;
}


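// A slot that still holds the arguments marker cannot be filled in here,
// because the object it stands for may not have been created yet. The
// {address, translated value} pair pushed above is consumed later by
// MaterializeHeapObjects(), which overwrites the placeholder once the object
// exists.

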
void Deoptimizer::WriteValueToOutput(Object* value, int input_index,
                                     int frame_index, unsigned output_offset,
                                     const char* debug_hint_string) {
  output_[frame_index]->SetFrameSlot(output_offset,
                                     reinterpret_cast<intptr_t>(value));

  if (trace_scope_ != nullptr) {
    DebugPrintOutputSlot(reinterpret_cast<intptr_t>(value), frame_index,
                         output_offset, debug_hint_string);
    value->ShortPrint(trace_scope_->file());
    PrintF(trace_scope_->file(), " (input #%d)\n", input_index);
  }
}


void Deoptimizer::DebugPrintOutputSlot(intptr_t value, int frame_index,
                                       unsigned output_offset,
                                       const char* debug_hint_string) {
  if (trace_scope_ != nullptr) {
    Address output_address =
        reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
        output_offset;
    PrintF(trace_scope_->file(),
           "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s",
           reinterpret_cast<intptr_t>(output_address), output_offset, value,
           debug_hint_string == nullptr ? "" : debug_hint_string);
  }
}


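// For reference, a line produced by this helper looks like the following
// (the address and value here are made up):
//     0x7ffc9a10: [top + 16] <- 0x0000002a ; caller's pc

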
unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
  unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
  if (!function_->IsSmi()) {
    fixed_size += ComputeIncomingArgumentSize(function_->shared());
  }
  return fixed_size;
}


unsigned Deoptimizer::ComputeInputFrameSize() const {
  // The fp-to-sp delta already takes the context, constant pool pointer and
  // the function into account so we have to avoid double counting them.
  unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize();
  unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
  if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
    unsigned stack_slots = compiled_code_->stack_slots();
    unsigned outgoing_size =
        ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
    CHECK_EQ(fixed_size_above_fp + (stack_slots * kPointerSize) -
                 CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
             result);
  }
  return result;
}


// static
unsigned Deoptimizer::ComputeJavascriptFixedSize(SharedFunctionInfo* shared) {
  // The fixed part of the frame consists of the return address, frame
  // pointer, function, context, and all the incoming arguments.
  return ComputeIncomingArgumentSize(shared) +
         StandardFrameConstants::kFixedFrameSize;
}


// static
unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
  // The fixed part of the frame consists of the return address, frame
  // pointer, function, context, new.target, bytecode offset and all the
  // incoming arguments.
  return ComputeIncomingArgumentSize(shared) +
         InterpreterFrameConstants::kFixedFrameSize;
}


// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
  return (shared->internal_formal_parameter_count() + 1) * kPointerSize;
}


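// Example: on a 64-bit target (kPointerSize == 8), a function declared with
// two formal parameters yields (2 + 1) * 8 = 24 bytes of incoming argument
// space; the additional slot accounts for the receiver.

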
// static
unsigned Deoptimizer::ComputeOutgoingArgumentSize(Code* code,
                                                  unsigned bailout_id) {
  DeoptimizationInputData* data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  unsigned height = data->ArgumentsStackHeight(bailout_id)->value();
  return height * kPointerSize;
}


void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
                                                   BailoutType type,
                                                   int max_entry_id) {
  // We cannot run this if the serializer is enabled because this will
  // cause us to emit relocation information for the external
  // references. This is fine because the deoptimizer's code section
  // isn't meant to be serialized at all.
  CHECK(type == EAGER || type == SOFT || type == LAZY);
  DeoptimizerData* data = isolate->deoptimizer_data();
  int entry_count = data->deopt_entry_code_entries_[type];
  if (max_entry_id < entry_count) return;
  entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
  while (max_entry_id >= entry_count) entry_count *= 2;
  CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries);

  MacroAssembler masm(isolate, NULL, 16 * KB, CodeObjectRequired::kYes);
  masm.set_emit_debug_code(false);
  GenerateDeoptimizationEntries(&masm, entry_count, type);
  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  MemoryChunk* chunk = data->deopt_entry_code_[type];
  CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
        desc.instr_size);
  if (!chunk->CommitArea(desc.instr_size)) {
    V8::FatalProcessOutOfMemory(
        "Deoptimizer::EnsureCodeForDeoptimizationEntry");
  }
  CopyBytes(chunk->area_start(), desc.buffer,
            static_cast<size_t>(desc.instr_size));
  Assembler::FlushICache(isolate, chunk->area_start(), desc.instr_size);

  data->deopt_entry_code_entries_[type] = entry_count;
}


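// A minimal sketch (not part of the original source; names are hypothetical)
// of the table-growth policy above, in isolation: starting from the current
// entry count, clamped to at least kMinNumberOfEntries, the count doubles
// until it covers max_entry_id.
static int GrownDeoptEntryCount(int current_count, int max_entry_id,
                                int min_entries) {
  int entry_count = current_count < min_entries ? min_entries : current_count;
  while (max_entry_id >= entry_count) entry_count *= 2;
  return entry_count;
}
// E.g. with current_count = 16 and max_entry_id = 70, the count grows
// 16 -> 32 -> 64 -> 128.

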
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
    : frame_size_(frame_size),
      parameter_count_(parameter_count),
      top_(kZapUint32),
      pc_(kZapUint32),
      fp_(kZapUint32),
      context_(kZapUint32),
      constant_pool_(kZapUint32) {
  // Zap all the registers.
  for (int r = 0; r < Register::kNumRegisters; r++) {
    // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
    // isn't used before the next safepoint, the GC will try to scan it as a
    // tagged value. kZapUint32 looks like a valid tagged pointer, but it
    // isn't.
    SetRegister(r, kZapUint32);
  }

  // Zap all the slots.
  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
    SetFrameSlot(o, kZapUint32);
  }
}


void TranslationBuffer::Add(int32_t value) {
  // This wouldn't handle kMinInt correctly if it ever encountered it.
  DCHECK(value != kMinInt);
  // Encode the sign bit in the least significant bit.
  bool is_negative = (value < 0);
  uint32_t bits = ((is_negative ? -value : value) << 1) |
                  static_cast<int32_t>(is_negative);
  // Encode the individual bytes using the least significant bit of
  // each byte to indicate whether or not more bytes follow.
  do {
    uint32_t next = bits >> 7;
    contents_.push_back(((bits << 1) & 0xFF) | (next != 0));
    bits = next;
  } while (bits != 0);
}


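// Worked example of the encoding above: Add(-3) folds the sign bit in,
// giving bits = (3 << 1) | 1 = 7; one byte suffices, and the byte written is
// ((7 << 1) & 0xFF) | 0 = 0x0E. Add(200) gives bits = 400: the first byte is
// ((400 << 1) & 0xFF) | 1 = 0x21 (low seven bits, "more bytes" flag set) and
// the second is ((400 >> 7) << 1) | 0 = 0x06.

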
int32_t TranslationIterator::Next() {
  // Run through the bytes until we reach one with a least significant
  // bit of zero (marks the end).
  uint32_t bits = 0;
  for (int i = 0; true; i += 7) {
    DCHECK(HasNext());
    uint8_t next = buffer_->get(index_++);
    bits |= (next >> 1) << i;
    if ((next & 1) == 0) break;
  }
  // The bits encode the sign in the least significant bit.
  bool is_negative = (bits & 1) == 1;
  int32_t result = bits >> 1;
  return is_negative ? -result : result;
}


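// Worked example, inverting the encoding in TranslationBuffer::Add: the byte
// sequence {0x21, 0x06} reassembles as
// bits = (0x21 >> 1) | ((0x06 >> 1) << 7) = 16 | 384 = 400; bit 0 of 400 is
// clear, so the decoded value is +(400 >> 1) = 200.

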
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
  Handle<ByteArray> result = factory->NewByteArray(CurrentIndex(), TENURED);
  contents_.CopyTo(result->GetDataStartAddress());
  return result;
}


void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
  buffer_->Add(CONSTRUCT_STUB_FRAME);
  buffer_->Add(literal_id);
  buffer_->Add(height);
}


void Translation::BeginGetterStubFrame(int literal_id) {
  buffer_->Add(GETTER_STUB_FRAME);
  buffer_->Add(literal_id);
}


void Translation::BeginSetterStubFrame(int literal_id) {
  buffer_->Add(SETTER_STUB_FRAME);
  buffer_->Add(literal_id);
}


void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
  buffer_->Add(literal_id);
  buffer_->Add(height);
}


void Translation::BeginTailCallerFrame(int literal_id) {
  buffer_->Add(TAIL_CALLER_FRAME);
  buffer_->Add(literal_id);
}


void Translation::BeginJSFrame(BailoutId node_id,
                               int literal_id,
                               unsigned height) {
  buffer_->Add(JS_FRAME);
  buffer_->Add(node_id.ToInt());
  buffer_->Add(literal_id);
  buffer_->Add(height);
}


void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
                                        int literal_id, unsigned height) {
  buffer_->Add(INTERPRETED_FRAME);
  buffer_->Add(bytecode_offset.ToInt());
  buffer_->Add(literal_id);
  buffer_->Add(height);
}


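// For reference, each Begin* method above simply appends its opcode followed
// by its operands as sign-folded VLQs; a JS frame entry, for instance, is
// laid out in the byte stream as [JS_FRAME, node_id, literal_id, height].
// The operand counts are kept in sync by Translation::NumberOfOperandsFor
// below.

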
void Translation::BeginCompiledStubFrame(int height) {
  buffer_->Add(COMPILED_STUB_FRAME);
  buffer_->Add(height);
}


void Translation::BeginArgumentsObject(int args_length) {
  buffer_->Add(ARGUMENTS_OBJECT);
  buffer_->Add(args_length);
}


void Translation::BeginCapturedObject(int length) {
  buffer_->Add(CAPTURED_OBJECT);
  buffer_->Add(length);
}


void Translation::DuplicateObject(int object_index) {
  buffer_->Add(DUPLICATED_OBJECT);
  buffer_->Add(object_index);
}


void Translation::StoreRegister(Register reg) {
  buffer_->Add(REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreInt32Register(Register reg) {
  buffer_->Add(INT32_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreUint32Register(Register reg) {
  buffer_->Add(UINT32_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreBoolRegister(Register reg) {
  buffer_->Add(BOOL_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreFloatRegister(FloatRegister reg) {
  buffer_->Add(FLOAT_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreDoubleRegister(DoubleRegister reg) {
  buffer_->Add(DOUBLE_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreStackSlot(int index) {
  buffer_->Add(STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreInt32StackSlot(int index) {
  buffer_->Add(INT32_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreUint32StackSlot(int index) {
  buffer_->Add(UINT32_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreBoolStackSlot(int index) {
  buffer_->Add(BOOL_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreFloatStackSlot(int index) {
  buffer_->Add(FLOAT_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreDoubleStackSlot(int index) {
  buffer_->Add(DOUBLE_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreLiteral(int literal_id) {
  buffer_->Add(LITERAL);
  buffer_->Add(literal_id);
}


void Translation::StoreArgumentsObject(bool args_known,
                                       int args_index,
                                       int args_length) {
  buffer_->Add(ARGUMENTS_OBJECT);
  buffer_->Add(args_known);
  buffer_->Add(args_index);
  buffer_->Add(args_length);
}


void Translation::StoreJSFrameFunction() {
  StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
                  StandardFrameConstants::kFunctionOffset) /
                 kPointerSize);
}


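// The quotient above is the function slot's position expressed in whole
// slots: the byte distance between a standard frame's caller-PC slot and its
// function slot, divided by the slot size. This lets the function be recorded
// through the same STACK_SLOT machinery as any other spilled value.

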
int Translation::NumberOfOperandsFor(Opcode opcode) {
  switch (opcode) {
    case GETTER_STUB_FRAME:
    case SETTER_STUB_FRAME:
    case DUPLICATED_OBJECT:
    case ARGUMENTS_OBJECT:
    case CAPTURED_OBJECT:
    case REGISTER:
    case INT32_REGISTER:
    case UINT32_REGISTER:
    case BOOL_REGISTER:
    case FLOAT_REGISTER:
    case DOUBLE_REGISTER:
    case STACK_SLOT:
    case INT32_STACK_SLOT:
    case UINT32_STACK_SLOT:
    case BOOL_STACK_SLOT:
    case FLOAT_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
    case LITERAL:
    case COMPILED_STUB_FRAME:
    case TAIL_CALLER_FRAME:
      return 1;
    case BEGIN:
    case ARGUMENTS_ADAPTOR_FRAME:
    case CONSTRUCT_STUB_FRAME:
      return 2;
    case JS_FRAME:
    case INTERPRETED_FRAME:
      return 3;
  }
  FATAL("Unexpected translation type");
  return -1;
}


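// A minimal sketch (not part of the original source; the helper is
// hypothetical) of how the operand counts above are typically consumed:
// advancing over one complete translation entry means reading the opcode and
// then discarding exactly that many operands.
static void SkipTranslationEntry(TranslationIterator* iterator) {
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  for (int i = 0; i < Translation::NumberOfOperandsFor(opcode); ++i) {
    iterator->Next();  // Each operand is one sign-folded VLQ.
  }
}

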
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)

const char* Translation::StringFor(Opcode opcode) {
#define TRANSLATION_OPCODE_CASE(item) case item: return #item;
  switch (opcode) {
    TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
  }
#undef TRANSLATION_OPCODE_CASE
  UNREACHABLE();
  return "";
}

#endif


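// The MaterializedObjectStore keeps, per stack frame, the array of objects
// that have been materialized for that frame, keyed by the frame's FP
// address. Argument materialization and deoptimization consult this store
// first so that each captured object is materialized at most once;
// deoptimization removes the entry again (see Remove below).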
Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
  int index = StackIdToIndex(fp);
  if (index == -1) {
    return Handle<FixedArray>::null();
  }
  Handle<FixedArray> array = GetStackEntries();
  CHECK_GT(array->length(), index);
  return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate()));
}


void MaterializedObjectStore::Set(Address fp,
                                  Handle<FixedArray> materialized_objects) {
  int index = StackIdToIndex(fp);
  if (index == -1) {
    index = frame_fps_.length();
    frame_fps_.Add(fp);
  }

  Handle<FixedArray> array = EnsureStackEntries(index + 1);
  array->set(index, *materialized_objects);
}


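// Drops the entry for the given frame and compacts the backing array so
// that it stays index-aligned with frame_fps_.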
bool MaterializedObjectStore::Remove(Address fp) {
  int index = StackIdToIndex(fp);
  if (index == -1) {
    return false;
  }
  CHECK_GE(index, 0);

  frame_fps_.Remove(index);
  FixedArray* array = isolate()->heap()->materialized_objects();
  CHECK_LT(index, array->length());
  for (int i = index; i < frame_fps_.length(); i++) {
    array->set(i, array->get(i + 1));
  }
  array->set(frame_fps_.length(), isolate()->heap()->undefined_value());
  return true;
}


int MaterializedObjectStore::StackIdToIndex(Address fp) {
  for (int i = 0; i < frame_fps_.length(); i++) {
    if (frame_fps_[i] == fp) {
      return i;
    }
  }
  return -1;
}


Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
  return Handle<FixedArray>(isolate()->heap()->materialized_objects());
}


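// Makes sure the backing store can hold at least |length| entries. The
// growth policy below allocates no fewer than 10 slots and otherwise
// doubles the current length; newly added slots are filled with undefined.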
Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
  Handle<FixedArray> array = GetStackEntries();
  if (array->length() >= length) {
    return array;
  }

  int new_length = length > 10 ? length : 10;
  if (new_length < 2 * array->length()) {
    new_length = 2 * array->length();
  }

  Handle<FixedArray> new_array =
      isolate()->factory()->NewFixedArray(new_length, TENURED);
  for (int i = 0; i < array->length(); i++) {
    new_array->set(i, array->get(i));
  }
  for (int i = array->length(); i < length; i++) {
    new_array->set(i, isolate()->heap()->undefined_value());
  }
  isolate()->heap()->SetRootMaterializedObjects(*new_array);
  return new_array;
}


namespace {

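// Fetches a translated value on behalf of the debugger, substituting
// undefined for values that are not materialized yet and that the debugger
// may not materialize itself (see TranslatedValue::IsMaterializableByDebugger).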
Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
                                   Isolate* isolate) {
  if (it->GetRawValue() == isolate->heap()->arguments_marker()) {
    if (!it->IsMaterializableByDebugger()) {
      return isolate->factory()->undefined_value();
    }
  }
  return it->GetValue();
}

}  // namespace


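// Builds the debugger's view of a deoptimized frame: the function, the
// incoming parameters (taken from the arguments adaptor frame below, if
// present), the context and the expression stack.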
DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
                                           TranslatedState::iterator frame_it,
                                           Isolate* isolate) {
  // If the previous frame is an adaptor frame, we will take the parameters
  // from there.
  TranslatedState::iterator parameter_frame = frame_it;
  if (parameter_frame != state->begin()) {
    parameter_frame--;
  }
  int parameter_count;
  if (parameter_frame->kind() == TranslatedFrame::kArgumentsAdaptor) {
    parameter_count = parameter_frame->height() - 1;  // Ignore the receiver.
  } else {
    parameter_frame = frame_it;
    parameter_count =
        frame_it->shared_info()->internal_formal_parameter_count();
  }
  TranslatedFrame::iterator parameter_it = parameter_frame->begin();
  parameter_it++;  // Skip the function.
  parameter_it++;  // Skip the receiver.

  // Figure out whether there is a construct stub frame on top of
  // the parameter frame.
  has_construct_stub_ =
      parameter_frame != state->begin() &&
      (parameter_frame - 1)->kind() == TranslatedFrame::kConstructStub;

  if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
    source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray(
        *frame_it->shared_info(), frame_it->node_id());
  } else {
    DCHECK_EQ(TranslatedFrame::kFunction, frame_it->kind());
    source_position_ = Deoptimizer::ComputeSourcePositionFromBaselineCode(
        *frame_it->shared_info(), frame_it->node_id());
  }

  TranslatedFrame::iterator value_it = frame_it->begin();
  // Get the function. Note that this might materialize the function.
  // In case the debugger mutates this value, we should deoptimize
  // the function and remember the value in the materialized value store.
  function_ = Handle<JSFunction>::cast(value_it->GetValue());

  parameters_.resize(static_cast<size_t>(parameter_count));
  for (int i = 0; i < parameter_count; i++) {
    Handle<Object> parameter = GetValueForDebugger(parameter_it, isolate);
    SetParameter(i, parameter);
    parameter_it++;
  }

  // Skip the function, the receiver and the arguments.
  int skip_count =
      frame_it->shared_info()->internal_formal_parameter_count() + 2;
  TranslatedFrame::iterator stack_it = frame_it->begin();
  for (int i = 0; i < skip_count; i++) {
    stack_it++;
  }

  // Get the context.
  context_ = GetValueForDebugger(stack_it, isolate);
  stack_it++;

  // Get the expression stack.
  int stack_height = frame_it->height();
  if (frame_it->kind() == TranslatedFrame::kFunction ||
      frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
    // For full-code frames, we should not count the context.
    // For interpreter frames, we should not count the accumulator.
    // TODO(jarin): Clean up the indexing in translated frames.
    stack_height--;
  }
  expression_stack_.resize(static_cast<size_t>(stack_height));
  for (int i = 0; i < stack_height; i++) {
    Handle<Object> expression = GetValueForDebugger(stack_it, isolate);
    SetExpression(i, expression);
    stack_it++;
  }

  // For interpreter frames, skip the accumulator.
  if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
    stack_it++;
  }
  CHECK(stack_it == frame_it->end());
}


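// Scans the relocation info of |code| for the deopt position, reason and
// id recorded closest before |pc|. The iterator visits entries in address
// order, so the last values seen before |pc| are the ones in effect there.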
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
  SourcePosition last_position = SourcePosition::Unknown();
  DeoptimizeReason last_reason = DeoptimizeReason::kNoReason;
  int last_deopt_id = kNoDeoptimizationId;
  int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
             RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
             RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) |
             RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
  for (RelocIterator it(code, mask); !it.done(); it.next()) {
    RelocInfo* info = it.rinfo();
    if (info->pc() >= pc) {
      return DeoptInfo(last_position, last_reason, last_deopt_id);
    }
    if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
      int script_offset = static_cast<int>(info->data());
      it.next();
      DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID);
      int inlining_id = static_cast<int>(it.rinfo()->data());
      last_position = SourcePosition(script_offset, inlining_id);
    } else if (info->rmode() == RelocInfo::DEOPT_ID) {
      last_deopt_id = static_cast<int>(info->data());
    } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
      last_reason = static_cast<DeoptimizeReason>(info->data());
    }
  }
  return DeoptInfo(SourcePosition::Unknown(), DeoptimizeReason::kNoReason, -1);
}


// static
int Deoptimizer::ComputeSourcePositionFromBaselineCode(
    SharedFunctionInfo* shared, BailoutId node_id) {
  DCHECK(shared->HasBaselineCode());
  Code* code = shared->code();
  FixedArray* raw_data = code->deoptimization_data();
  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
  unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, shared);
  int code_offset =
      static_cast<int>(FullCodeGenerator::PcField::decode(pc_and_state));
  return AbstractCode::cast(code)->SourcePosition(code_offset);
}


// static
int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
    SharedFunctionInfo* shared, BailoutId node_id) {
  DCHECK(shared->HasBytecodeArray());
  return AbstractCode::cast(shared->bytecode_array())
      ->SourcePosition(node_id.ToInt());
}


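// The static constructors below tag a raw payload (literal, register or
// stack value) with its TranslatedValue kind; the payload is read back by
// the matching accessor and boxed, if needed, by MaterializeSimple.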
// static
TranslatedValue TranslatedValue::NewArgumentsObject(TranslatedState* container,
                                                    int length,
                                                    int object_index) {
  TranslatedValue slot(container, kArgumentsObject);
  slot.materialization_info_ = {object_index, length};
  return slot;
}


// static
TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container,
                                                   int length,
                                                   int object_index) {
  TranslatedValue slot(container, kCapturedObject);
  slot.materialization_info_ = {object_index, length};
  return slot;
}


// static
TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
                                                    int id) {
  TranslatedValue slot(container, kDuplicatedObject);
  slot.materialization_info_ = {id, -1};
  return slot;
}


// static
TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
                                          float value) {
  TranslatedValue slot(container, kFloat);
  slot.float_value_ = value;
  return slot;
}


// static
TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
                                           double value) {
  TranslatedValue slot(container, kDouble);
  slot.double_value_ = value;
  return slot;
}


// static
TranslatedValue TranslatedValue::NewInt32(TranslatedState* container,
                                          int32_t value) {
  TranslatedValue slot(container, kInt32);
  slot.int32_value_ = value;
  return slot;
}


// static
TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container,
                                           uint32_t value) {
  TranslatedValue slot(container, kUInt32);
  slot.uint32_value_ = value;
  return slot;
}


// static
TranslatedValue TranslatedValue::NewBool(TranslatedState* container,
                                         uint32_t value) {
  TranslatedValue slot(container, kBoolBit);
  slot.uint32_value_ = value;
  return slot;
}


// static
TranslatedValue TranslatedValue::NewTagged(TranslatedState* container,
                                           Object* literal) {
  TranslatedValue slot(container, kTagged);
  slot.raw_literal_ = literal;
  return slot;
}


// static
TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) {
  return TranslatedValue(container, kInvalid);
}


Isolate* TranslatedValue::isolate() const { return container_->isolate(); }


Object* TranslatedValue::raw_literal() const {
  DCHECK_EQ(kTagged, kind());
  return raw_literal_;
}


int32_t TranslatedValue::int32_value() const {
  DCHECK_EQ(kInt32, kind());
  return int32_value_;
}


uint32_t TranslatedValue::uint32_value() const {
  DCHECK(kind() == kUInt32 || kind() == kBoolBit);
  return uint32_value_;
}


float TranslatedValue::float_value() const {
  DCHECK_EQ(kFloat, kind());
  return float_value_;
}


double TranslatedValue::double_value() const {
  DCHECK_EQ(kDouble, kind());
  return double_value_;
}


int TranslatedValue::object_length() const {
  DCHECK(kind() == kArgumentsObject || kind() == kCapturedObject);
  return materialization_info_.length_;
}


int TranslatedValue::object_index() const {
  DCHECK(kind() == kArgumentsObject || kind() == kCapturedObject ||
         kind() == kDuplicatedObject);
  return materialization_info_.id_;
}


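// Returns the value without allocating where that is possible (tagged
// literals, smis and booleans); otherwise returns the arguments marker as
// a sentinel meaning "materialization would need to allocate".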
Object* TranslatedValue::GetRawValue() const {
  // If we have a value, return it.
  Handle<Object> result_handle;
  if (value_.ToHandle(&result_handle)) {
    return *result_handle;
  }

  // Otherwise, do a best effort to get the value without allocation.
  switch (kind()) {
    case kTagged:
      return raw_literal();

    case kInt32: {
      bool is_smi = Smi::IsValid(int32_value());
      if (is_smi) {
        return Smi::FromInt(int32_value());
      }
      break;
    }

    case kUInt32: {
      bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue));
      if (is_smi) {
        return Smi::FromInt(static_cast<int32_t>(uint32_value()));
      }
      break;
    }

    case kBoolBit: {
      if (uint32_value() == 0) {
        return isolate()->heap()->false_value();
      } else {
        CHECK_EQ(1U, uint32_value());
        return isolate()->heap()->true_value();
      }
    }

    default:
      break;
  }

  // If we could not get the value without allocation, return the arguments
  // marker.
  return isolate()->heap()->arguments_marker();
}


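// Allocating counterpart of GetRawValue: materializes the value on first
// use and caches it in value_.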
Handle<Object> TranslatedValue::GetValue() {
  Handle<Object> result;
  // If we already have a value, then get it.
  if (value_.ToHandle(&result)) return result;

  // Otherwise we have to materialize.
  switch (kind()) {
    case TranslatedValue::kTagged:
    case TranslatedValue::kInt32:
    case TranslatedValue::kUInt32:
    case TranslatedValue::kBoolBit:
    case TranslatedValue::kFloat:
    case TranslatedValue::kDouble: {
      MaterializeSimple();
      return value_.ToHandleChecked();
    }

    case TranslatedValue::kArgumentsObject:
    case TranslatedValue::kCapturedObject:
    case TranslatedValue::kDuplicatedObject:
      return container_->MaterializeObjectAt(object_index());

    case TranslatedValue::kInvalid:
      FATAL("unexpected case");
      return Handle<Object>::null();
  }

  FATAL("internal error: value missing");
  return Handle<Object>::null();
}


void TranslatedValue::MaterializeSimple() {
  // If we have already materialized, return.
  if (!value_.is_null()) return;

  Object* raw_value = GetRawValue();
  if (raw_value != isolate()->heap()->arguments_marker()) {
    // We can get the value without allocation, just return it here.
    value_ = Handle<Object>(raw_value, isolate());
    return;
  }

  switch (kind()) {
    case kInt32: {
      value_ = Handle<Object>(isolate()->factory()->NewNumber(int32_value()));
      return;
    }

    case kUInt32:
      value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
      return;

    case kFloat:
      value_ = Handle<Object>(isolate()->factory()->NewNumber(float_value()));
      return;

    case kDouble:
      value_ = Handle<Object>(isolate()->factory()->NewNumber(double_value()));
      return;

    case kCapturedObject:
    case kDuplicatedObject:
    case kArgumentsObject:
    case kInvalid:
    case kTagged:
    case kBoolBit:
      FATAL("internal error: unexpected materialization.");
      break;
  }
}


bool TranslatedValue::IsMaterializedObject() const {
  switch (kind()) {
    case kCapturedObject:
    case kDuplicatedObject:
    case kArgumentsObject:
      return true;
    default:
      return false;
  }
}


bool TranslatedValue::IsMaterializableByDebugger() const {
  // At the moment, we only allow materialization of doubles.
  return (kind() == kDouble);
}


int TranslatedValue::GetChildrenCount() const {
  if (kind() == kCapturedObject || kind() == kArgumentsObject) {
    return object_length();
  } else {
    return 0;
  }
}


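// Reads a 32-bit value out of a pointer-sized stack slot. On big-endian
// 64-bit targets the 32-bit payload lives in the higher-addressed half of
// the slot, hence the extra kIntSize offset.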
uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
  Address address = fp + slot_offset;
#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
  return Memory::uint32_at(address + kIntSize);
#else
  return Memory::uint32_at(address);
#endif
}


void TranslatedValue::Handlify() {
  if (kind() == kTagged) {
    value_ = Handle<Object>(raw_literal(), isolate());
    raw_literal_ = nullptr;
  }
}


TranslatedFrame TranslatedFrame::JSFrame(BailoutId node_id,
                                         SharedFunctionInfo* shared_info,
                                         int height) {
  TranslatedFrame frame(kFunction, shared_info->GetIsolate(), shared_info,
                        height);
  frame.node_id_ = node_id;
  return frame;
}


TranslatedFrame TranslatedFrame::InterpretedFrame(
    BailoutId bytecode_offset, SharedFunctionInfo* shared_info, int height) {
  TranslatedFrame frame(kInterpretedFunction, shared_info->GetIsolate(),
                        shared_info, height);
  frame.node_id_ = bytecode_offset;
  return frame;
}


TranslatedFrame TranslatedFrame::AccessorFrame(
    Kind kind, SharedFunctionInfo* shared_info) {
  DCHECK(kind == kSetter || kind == kGetter);
  return TranslatedFrame(kind, shared_info->GetIsolate(), shared_info);
}


TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
    SharedFunctionInfo* shared_info, int height) {
  return TranslatedFrame(kArgumentsAdaptor, shared_info->GetIsolate(),
                         shared_info, height);
}


TranslatedFrame TranslatedFrame::TailCallerFrame(
    SharedFunctionInfo* shared_info) {
  return TranslatedFrame(kTailCallerFunction, shared_info->GetIsolate(),
                         shared_info, 0);
}


TranslatedFrame TranslatedFrame::ConstructStubFrame(
    SharedFunctionInfo* shared_info, int height) {
  return TranslatedFrame(kConstructStub, shared_info->GetIsolate(), shared_info,
                         height);
}


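// Returns the number of TranslatedValue entries this frame contributes;
// the per-kind comments below spell out what is counted on top of the
// frame height.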
int TranslatedFrame::GetValueCount() {
  switch (kind()) {
    case kFunction: {
      int parameter_count =
          raw_shared_info_->internal_formal_parameter_count() + 1;
      // + 1 for function.
      return height_ + parameter_count + 1;
    }

    case kInterpretedFunction: {
      int parameter_count =
          raw_shared_info_->internal_formal_parameter_count() + 1;
      // + 2 for function and context.
      return height_ + parameter_count + 2;
    }

    case kGetter:
      return 2;  // Function and receiver.

    case kSetter:
      return 3;  // Function, receiver and the value to set.

    case kArgumentsAdaptor:
    case kConstructStub:
      return 1 + height_;

    case kTailCallerFunction:
      return 1;  // Function.

    case kCompiledStub:
      return height_;

    case kInvalid:
      UNREACHABLE();
      break;
  }
  UNREACHABLE();
  return -1;
}


void TranslatedFrame::Handlify() {
  if (raw_shared_info_ != nullptr) {
    shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_);
    raw_shared_info_ = nullptr;
  }
  for (auto& value : values_) {
    value.Handlify();
  }
}


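// Decodes the next frame descriptor from the translation and returns the
// corresponding (still value-less) TranslatedFrame; the frame's individual
// values are decoded separately (see CreateNextTranslatedValue below). A
// value opcode reaching this switch means the deopt info is malformed,
// hence the FATAL at the end.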
TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
    TranslationIterator* iterator, FixedArray* literal_array, Address fp,
    FILE* trace_file) {
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  switch (opcode) {
    case Translation::JS_FRAME: {
      BailoutId node_id = BailoutId(iterator->Next());
      SharedFunctionInfo* shared_info =
          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
        PrintF(trace_file, " reading input frame %s", name.get());
        int arg_count = shared_info->internal_formal_parameter_count() + 1;
        PrintF(trace_file, " => node=%d, args=%d, height=%d; inputs:\n",
               node_id.ToInt(), arg_count, height);
      }
      return TranslatedFrame::JSFrame(node_id, shared_info, height);
    }

    case Translation::INTERPRETED_FRAME: {
      BailoutId bytecode_offset = BailoutId(iterator->Next());
      SharedFunctionInfo* shared_info =
          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
        PrintF(trace_file, " reading input frame %s", name.get());
        int arg_count = shared_info->internal_formal_parameter_count() + 1;
        PrintF(trace_file,
               " => bytecode_offset=%d, args=%d, height=%d; inputs:\n",
               bytecode_offset.ToInt(), arg_count, height);
      }
      return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info,
                                               height);
    }

    case Translation::ARGUMENTS_ADAPTOR_FRAME: {
      SharedFunctionInfo* shared_info =
          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
        PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
        PrintF(trace_file, " => height=%d; inputs:\n", height);
      }
      return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
    }

    case Translation::TAIL_CALLER_FRAME: {
      SharedFunctionInfo* shared_info =
          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
        PrintF(trace_file, " reading tail caller frame marker %s\n",
               name.get());
      }
      return TranslatedFrame::TailCallerFrame(shared_info);
    }

    case Translation::CONSTRUCT_STUB_FRAME: {
      SharedFunctionInfo* shared_info =
          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
      int height = iterator->Next();
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
        PrintF(trace_file, " reading construct stub frame %s", name.get());
        PrintF(trace_file, " => height=%d; inputs:\n", height);
      }
      return TranslatedFrame::ConstructStubFrame(shared_info, height);
    }

    case Translation::GETTER_STUB_FRAME: {
      SharedFunctionInfo* shared_info =
          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
        PrintF(trace_file, " reading getter frame %s; inputs:\n", name.get());
      }
      return TranslatedFrame::AccessorFrame(TranslatedFrame::kGetter,
                                            shared_info);
    }

    case Translation::SETTER_STUB_FRAME: {
      SharedFunctionInfo* shared_info =
          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
      if (trace_file != nullptr) {
        std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
        PrintF(trace_file, " reading setter frame %s; inputs:\n", name.get());
      }
      return TranslatedFrame::AccessorFrame(TranslatedFrame::kSetter,
                                            shared_info);
    }

    case Translation::COMPILED_STUB_FRAME: {
      int height = iterator->Next();
      if (trace_file != nullptr) {
        PrintF(trace_file,
               " reading compiler stub frame => height=%d; inputs:\n", height);
      }
      return TranslatedFrame::CompiledStubFrame(height,
                                                literal_array->GetIsolate());
    }

    case Translation::BEGIN:
    case Translation::DUPLICATED_OBJECT:
    case Translation::ARGUMENTS_OBJECT:
    case Translation::CAPTURED_OBJECT:
    case Translation::REGISTER:
    case Translation::INT32_REGISTER:
    case Translation::UINT32_REGISTER:
    case Translation::BOOL_REGISTER:
    case Translation::FLOAT_REGISTER:
    case Translation::DOUBLE_REGISTER:
    case Translation::STACK_SLOT:
    case Translation::INT32_STACK_SLOT:
    case Translation::UINT32_STACK_SLOT:
    case Translation::BOOL_STACK_SLOT:
    case Translation::FLOAT_STACK_SLOT:
    case Translation::DOUBLE_STACK_SLOT:
    case Translation::LITERAL:
      break;
  }
  FATAL("We should never get here - unexpected deopt info.");
  return TranslatedFrame::InvalidFrame();
}


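// Advances the iterator past one value together with all of its children,
// i.e. skips a whole subtree of the pre-order encoding used for captured
// objects.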
// static
void TranslatedFrame::AdvanceIterator(
    std::deque<TranslatedValue>::iterator* iter) {
  int values_to_skip = 1;
  while (values_to_skip > 0) {
    // Consume the current element.
    values_to_skip--;
    // Add all the children.
    values_to_skip += (*iter)->GetChildrenCount();

    (*iter)++;
  }
}


// We can't intermix stack decoding and allocations because
|
|
|
|
// deoptimization infrastracture is not GC safe.
|
|
|
|
// Thus we build a temporary structure in malloced space.
TranslatedValue TranslatedState::CreateNextTranslatedValue(
    int frame_index, int value_index, TranslationIterator* iterator,
    FixedArray* literal_array, Address fp, RegisterValues* registers,
    FILE* trace_file) {
  disasm::NameConverter converter;

  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  switch (opcode) {
    case Translation::BEGIN:
    case Translation::JS_FRAME:
    case Translation::INTERPRETED_FRAME:
    case Translation::ARGUMENTS_ADAPTOR_FRAME:
    case Translation::TAIL_CALLER_FRAME:
    case Translation::CONSTRUCT_STUB_FRAME:
    case Translation::GETTER_STUB_FRAME:
    case Translation::SETTER_STUB_FRAME:
    case Translation::COMPILED_STUB_FRAME:
      // Peeled off before getting here.
      break;
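
    // Opcodes that allocate an object (ARGUMENTS_OBJECT, CAPTURED_OBJECT)
    // record their {frame_index, value_index} position in object_positions_;
    // DUPLICATED_OBJECT re-registers the position of the object it refers
    // to, so object ids index into this table.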
    case Translation::DUPLICATED_OBJECT: {
      int object_id = iterator->Next();
      if (trace_file != nullptr) {
        PrintF(trace_file, "duplicated object #%d", object_id);
      }
      object_positions_.push_back(object_positions_[object_id]);
      return TranslatedValue::NewDuplicateObject(this, object_id);
    }

    case Translation::ARGUMENTS_OBJECT: {
      int arg_count = iterator->Next();
      int object_index = static_cast<int>(object_positions_.size());
      if (trace_file != nullptr) {
        PrintF(trace_file, "arguments object #%d (length = %d)", object_index,
               arg_count);
      }
      object_positions_.push_back({frame_index, value_index});
      return TranslatedValue::NewArgumentsObject(this, arg_count, object_index);
    }

    case Translation::CAPTURED_OBJECT: {
      int field_count = iterator->Next();
      int object_index = static_cast<int>(object_positions_.size());
      if (trace_file != nullptr) {
        PrintF(trace_file, "captured object #%d (length = %d)", object_index,
               field_count);
      }
      object_positions_.push_back({frame_index, value_index});
      return TranslatedValue::NewDeferredObject(this, field_count,
                                                object_index);
    }
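
    // Register-based values can only be decoded when the caller supplies a
    // RegisterValues snapshot (as the deoptimizer does); when the translation
    // is read off a stack frame with registers == nullptr, these cases yield
    // invalid values.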
    case Translation::REGISTER: {
      int input_reg = iterator->Next();
      if (registers == nullptr) return TranslatedValue::NewInvalid(this);
      intptr_t value = registers->GetRegister(input_reg);
      if (trace_file != nullptr) {
        PrintF(trace_file, "0x%08" V8PRIxPTR " ; %s ", value,
               converter.NameOfCPURegister(input_reg));
        reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
      }
      return TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
    }

    case Translation::INT32_REGISTER: {
      int input_reg = iterator->Next();
      if (registers == nullptr) return TranslatedValue::NewInvalid(this);
      intptr_t value = registers->GetRegister(input_reg);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%" V8PRIdPTR " ; %s ", value,
               converter.NameOfCPURegister(input_reg));
      }
      return TranslatedValue::NewInt32(this, static_cast<int32_t>(value));
    }

    case Translation::UINT32_REGISTER: {
      int input_reg = iterator->Next();
      if (registers == nullptr) return TranslatedValue::NewInvalid(this);
      intptr_t value = registers->GetRegister(input_reg);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint)", value,
               converter.NameOfCPURegister(input_reg));
      }
      return TranslatedValue::NewUInt32(this, static_cast<uint32_t>(value));
    }

    case Translation::BOOL_REGISTER: {
      int input_reg = iterator->Next();
      if (registers == nullptr) return TranslatedValue::NewInvalid(this);
      intptr_t value = registers->GetRegister(input_reg);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%" V8PRIdPTR " ; %s (bool)", value,
               converter.NameOfCPURegister(input_reg));
      }
      return TranslatedValue::NewBool(this, static_cast<uint32_t>(value));
    }

    case Translation::FLOAT_REGISTER: {
      int input_reg = iterator->Next();
      if (registers == nullptr) return TranslatedValue::NewInvalid(this);
      float value = registers->GetFloatRegister(input_reg);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%e ; %s (float)", value,
               RegisterConfiguration::Crankshaft()->GetFloatRegisterName(
                   input_reg));
      }
      return TranslatedValue::NewFloat(this, value);
    }

    case Translation::DOUBLE_REGISTER: {
      int input_reg = iterator->Next();
      if (registers == nullptr) return TranslatedValue::NewInvalid(this);
      double value = registers->GetDoubleRegister(input_reg);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%e ; %s (double)", value,
               RegisterConfiguration::Crankshaft()->GetDoubleRegisterName(
                   input_reg));
      }
      return TranslatedValue::NewDouble(this, value);
    }
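
    // Note on stack slots (summarizing the slot-numbering scheme): the
    // translation stores a slot index that StackSlotOffsetRelativeToFp turns
    // into a byte offset from fp. Negative indices address caller stack
    // parameters (slot -1 is the last parameter); indices >= 0 address the
    // callee frame, where slots 0-3 hold the return address, saved frame
    // pointer, context and frame marker/JSFunction, and slots 4 and above
    // are spill slots.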
    case Translation::STACK_SLOT: {
      int slot_offset =
          OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
      intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
      if (trace_file != nullptr) {
        PrintF(trace_file, "0x%08" V8PRIxPTR " ; [fp %c %d] ", value,
               slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
        reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
      }
      return TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
    }

    case Translation::INT32_STACK_SLOT: {
      int slot_offset =
          OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
      uint32_t value = GetUInt32Slot(fp, slot_offset);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%d ; (int) [fp %c %d] ",
               static_cast<int32_t>(value), slot_offset < 0 ? '-' : '+',
               std::abs(slot_offset));
      }
      return TranslatedValue::NewInt32(this, value);
    }

    case Translation::UINT32_STACK_SLOT: {
      int slot_offset =
          OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
      uint32_t value = GetUInt32Slot(fp, slot_offset);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%u ; (uint) [fp %c %d] ", value,
               slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
      }
      return TranslatedValue::NewUInt32(this, value);
    }

    case Translation::BOOL_STACK_SLOT: {
      int slot_offset =
          OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
      uint32_t value = GetUInt32Slot(fp, slot_offset);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%u ; (bool) [fp %c %d] ", value,
               slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
      }
      return TranslatedValue::NewBool(this, value);
    }

    case Translation::FLOAT_STACK_SLOT: {
      int slot_offset =
          OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
      float value = ReadFloatValue(fp + slot_offset);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%e ; (float) [fp %c %d] ", value,
               slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
      }
      return TranslatedValue::NewFloat(this, value);
    }

    case Translation::DOUBLE_STACK_SLOT: {
      int slot_offset =
          OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
      double value = ReadDoubleValue(fp + slot_offset);
      if (trace_file != nullptr) {
        PrintF(trace_file, "%e ; (double) [fp %c %d] ", value,
               slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
      }
      return TranslatedValue::NewDouble(this, value);
    }

    case Translation::LITERAL: {
      int literal_index = iterator->Next();
      Object* value = literal_array->get(literal_index);
      if (trace_file != nullptr) {
        PrintF(trace_file, "0x%08" V8PRIxPTR " ; (literal %d) ",
               reinterpret_cast<intptr_t>(value), literal_index);
        reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
      }

      return TranslatedValue::NewTagged(this, value);
    }
  }

  FATAL("We should never get here - unexpected deopt info.");
  return TranslatedValue(nullptr, TranslatedValue::kInvalid);
}
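

// Reads the translation for an optimized frame that is still on the stack.
// No register snapshot is available in this case, so register-based slots
// decode as invalid values (see CreateNextTranslatedValue).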
TranslatedState::TranslatedState(JavaScriptFrame* frame)
    : isolate_(nullptr),
      stack_frame_pointer_(nullptr),
      has_adapted_arguments_(false) {
  int deopt_index = Safepoint::kNoDeoptimizationIndex;
  DeoptimizationInputData* data =
      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
  DCHECK(data != nullptr && deopt_index != Safepoint::kNoDeoptimizationIndex);
  TranslationIterator it(data->TranslationByteArray(),
                         data->TranslationIndex(deopt_index)->value());
  Init(frame->fp(), &it, data->LiteralArray(), nullptr /* registers */,
       nullptr /* trace file */);
}


TranslatedState::TranslatedState()
    : isolate_(nullptr),
      stack_frame_pointer_(nullptr),
      has_adapted_arguments_(false) {}
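

// Init drives the decoding: it reads the BEGIN header, then each frame
// descriptor followed by that frame's values in a flat pre-order stream.
// nested_counts tracks how many siblings remain in each enclosing captured
// object. Example: the value list {a, object(2 fields), b} arrives as
// a, object, field0, field1, b; reading the object pushes the remaining
// top-level count (1, for b) and switches to its two children, and popping
// resumes at b.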
void TranslatedState::Init(Address input_frame_pointer,
                           TranslationIterator* iterator,
                           FixedArray* literal_array, RegisterValues* registers,
                           FILE* trace_file) {
  DCHECK(frames_.empty());

  isolate_ = literal_array->GetIsolate();
  // Read out the 'header' translation.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  CHECK(opcode == Translation::BEGIN);

  int count = iterator->Next();
  iterator->Next();  // Drop JS frames count.

  frames_.reserve(count);

  std::stack<int> nested_counts;

  // Read the frames.
  for (int i = 0; i < count; i++) {
    // Read the frame descriptor.
    frames_.push_back(CreateNextTranslatedFrame(
        iterator, literal_array, input_frame_pointer, trace_file));
    TranslatedFrame& frame = frames_.back();

    // Read the values.
    int values_to_process = frame.GetValueCount();
    while (values_to_process > 0 || !nested_counts.empty()) {
      if (trace_file != nullptr) {
        if (nested_counts.empty()) {
          // For top level values, print the value number.
          PrintF(trace_file, "    %3i: ",
                 frame.GetValueCount() - values_to_process);
        } else {
          // Take care of indenting for nested values.
          PrintF(trace_file, "         ");
          for (size_t j = 0; j < nested_counts.size(); j++) {
            PrintF(trace_file, "  ");
          }
        }
      }

      TranslatedValue value = CreateNextTranslatedValue(
          i, static_cast<int>(frame.values_.size()), iterator, literal_array,
          input_frame_pointer, registers, trace_file);
      frame.Add(value);

      if (trace_file != nullptr) {
        PrintF(trace_file, "\n");
      }

      // Update the value count and resolve the nesting.
      values_to_process--;
      int children_count = value.GetChildrenCount();
      if (children_count > 0) {
        nested_counts.push(values_to_process);
        values_to_process = children_count;
      } else {
        while (values_to_process == 0 && !nested_counts.empty()) {
          values_to_process = nested_counts.top();
          nested_counts.pop();
        }
      }
    }
  }

  CHECK(!iterator->HasNext() ||
        static_cast<Translation::Opcode>(iterator->Next()) ==
            Translation::BEGIN);
}


void TranslatedState::Prepare(bool has_adapted_arguments,
                              Address stack_frame_pointer) {
  for (auto& frame : frames_) frame.Handlify();

  stack_frame_pointer_ = stack_frame_pointer;
  has_adapted_arguments_ = has_adapted_arguments;

  UpdateFromPreviouslyMaterializedObjects();
}
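

// MaterializeAt consumes the value at *value_index (advancing the index as
// it goes) and recurses into the children of captured objects, so one call
// materializes the whole object graph rooted at that slot.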
Handle<Object> TranslatedState::MaterializeAt(int frame_index,
                                              int* value_index) {
  TranslatedFrame* frame = &(frames_[frame_index]);
  CHECK(static_cast<size_t>(*value_index) < frame->values_.size());

  TranslatedValue* slot = &(frame->values_[*value_index]);
  (*value_index)++;

  switch (slot->kind()) {
    case TranslatedValue::kTagged:
    case TranslatedValue::kInt32:
    case TranslatedValue::kUInt32:
    case TranslatedValue::kBoolBit:
    case TranslatedValue::kFloat:
    case TranslatedValue::kDouble: {
      slot->MaterializeSimple();
      Handle<Object> value = slot->GetValue();
      if (value->IsMutableHeapNumber()) {
        HeapNumber::cast(*value)->set_map(isolate()->heap()->heap_number_map());
      }
      return value;
    }

    case TranslatedValue::kArgumentsObject: {
      int length = slot->GetChildrenCount();
      Handle<JSObject> arguments;
      if (GetAdaptedArguments(&arguments, frame_index)) {
        // Store the materialized object and consume the nested values.
        for (int i = 0; i < length; ++i) {
          MaterializeAt(frame_index, value_index);
        }
      } else {
        Handle<JSFunction> function =
            Handle<JSFunction>::cast(frame->front().GetValue());
        arguments = isolate_->factory()->NewArgumentsObject(function, length);
        Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
        DCHECK_EQ(array->length(), length);
        arguments->set_elements(*array);
        for (int i = 0; i < length; ++i) {
          Handle<Object> value = MaterializeAt(frame_index, value_index);
          array->set(i, *value);
        }
      }
      slot->value_ = arguments;
      return arguments;
    }

    case TranslatedValue::kCapturedObject: {
      int length = slot->GetChildrenCount();

      // The map must be a tagged object.
      CHECK(frame->values_[*value_index].kind() == TranslatedValue::kTagged);

      Handle<Object> result;
      if (slot->value_.ToHandle(&result)) {
        // This has been previously materialized, return the previous value.
        // We still need to skip all the nested objects.
        for (int i = 0; i < length; i++) {
          MaterializeAt(frame_index, value_index);
        }

        return result;
      }

      Handle<Object> map_object = MaterializeAt(frame_index, value_index);
      Handle<Map> map =
          Map::GeneralizeAllFieldRepresentations(Handle<Map>::cast(map_object));
      switch (map->instance_type()) {
        case MUTABLE_HEAP_NUMBER_TYPE:
        case HEAP_NUMBER_TYPE: {
          // Reuse the HeapNumber value directly as it is already properly
          // tagged and skip materializing the HeapNumber explicitly.
          Handle<Object> object = MaterializeAt(frame_index, value_index);
          slot->value_ = object;
          // On 32-bit architectures, there is an extra slot there because
          // the escape analysis calculates the number of slots as
          // object-size/pointer-size. To account for this, we read out
          // any extra slots.
          for (int i = 0; i < length - 2; i++) {
            MaterializeAt(frame_index, value_index);
          }
          return object;
        }
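        // The children of a captured object follow a fixed layout: the map
        // (already consumed above), then the properties and elements backing
        // stores, then the in-object fields; hence the "length - 3" loop
        // bound in the object cases below.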
        case JS_OBJECT_TYPE:
        case JS_ERROR_TYPE:
        case JS_ARGUMENTS_TYPE: {
          Handle<JSObject> object =
              isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
          slot->value_ = object;
          Handle<Object> properties = MaterializeAt(frame_index, value_index);
          Handle<Object> elements = MaterializeAt(frame_index, value_index);
          object->set_properties(FixedArray::cast(*properties));
          object->set_elements(FixedArrayBase::cast(*elements));
          for (int i = 0; i < length - 3; ++i) {
            Handle<Object> value = MaterializeAt(frame_index, value_index);
            FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
            object->FastPropertyAtPut(index, *value);
          }
          return object;
        }
        case JS_ARRAY_TYPE: {
          Handle<JSArray> object = Handle<JSArray>::cast(
              isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
          slot->value_ = object;
          Handle<Object> properties = MaterializeAt(frame_index, value_index);
          Handle<Object> elements = MaterializeAt(frame_index, value_index);
          Handle<Object> length = MaterializeAt(frame_index, value_index);
          object->set_properties(FixedArray::cast(*properties));
          object->set_elements(FixedArrayBase::cast(*elements));
          object->set_length(*length);
          return object;
        }
        case JS_FUNCTION_TYPE: {
          Handle<SharedFunctionInfo> temporary_shared =
              isolate_->factory()->NewSharedFunctionInfo(
                  isolate_->factory()->empty_string(), MaybeHandle<Code>(),
                  false);
          Handle<JSFunction> object =
              isolate_->factory()->NewFunctionFromSharedFunctionInfo(
                  map, temporary_shared, isolate_->factory()->undefined_value(),
                  NOT_TENURED);
          slot->value_ = object;
          Handle<Object> properties = MaterializeAt(frame_index, value_index);
          Handle<Object> elements = MaterializeAt(frame_index, value_index);
          Handle<Object> prototype = MaterializeAt(frame_index, value_index);
          Handle<Object> shared = MaterializeAt(frame_index, value_index);
          Handle<Object> context = MaterializeAt(frame_index, value_index);
          Handle<Object> literals = MaterializeAt(frame_index, value_index);
          Handle<Object> entry = MaterializeAt(frame_index, value_index);
          Handle<Object> next_link = MaterializeAt(frame_index, value_index);
          object->ReplaceCode(*isolate_->builtins()->CompileLazy());
          object->set_map(*map);
          object->set_properties(FixedArray::cast(*properties));
          object->set_elements(FixedArrayBase::cast(*elements));
          object->set_prototype_or_initial_map(*prototype);
          object->set_shared(SharedFunctionInfo::cast(*shared));
          object->set_context(Context::cast(*context));
          object->set_literals(LiteralsArray::cast(*literals));
          CHECK(entry->IsNumber());  // Entry to compile lazy stub.
          CHECK(next_link->IsUndefined(isolate_));
          return object;
        }
        case CONS_STRING_TYPE: {
          Handle<ConsString> object = Handle<ConsString>::cast(
              isolate_->factory()
                  ->NewConsString(isolate_->factory()->undefined_string(),
                                  isolate_->factory()->undefined_string())
                  .ToHandleChecked());
          slot->value_ = object;
          Handle<Object> hash = MaterializeAt(frame_index, value_index);
          Handle<Object> length = MaterializeAt(frame_index, value_index);
          Handle<Object> first = MaterializeAt(frame_index, value_index);
          Handle<Object> second = MaterializeAt(frame_index, value_index);
          object->set_map(*map);
          object->set_length(Smi::cast(*length)->value());
          object->set_first(String::cast(*first));
          object->set_second(String::cast(*second));
          CHECK(hash->IsNumber());  // The {Name::kEmptyHashField} value.
          return object;
        }
        case CONTEXT_EXTENSION_TYPE: {
          Handle<ContextExtension> object =
              isolate_->factory()->NewContextExtension(
                  isolate_->factory()->NewScopeInfo(1),
                  isolate_->factory()->undefined_value());
          slot->value_ = object;
          Handle<Object> scope_info = MaterializeAt(frame_index, value_index);
          Handle<Object> extension = MaterializeAt(frame_index, value_index);
          object->set_scope_info(ScopeInfo::cast(*scope_info));
          object->set_extension(*extension);
          return object;
        }
        case FIXED_ARRAY_TYPE: {
          Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
          int32_t length = 0;
          CHECK(lengthObject->ToInt32(&length));
          Handle<FixedArray> object =
              isolate_->factory()->NewFixedArray(length);
          // We need to set the map, because the fixed array we are
          // materializing could be a context or an arguments object,
          // in which case we must retain that information.
          object->set_map(*map);
          slot->value_ = object;
          for (int i = 0; i < length; ++i) {
            Handle<Object> value = MaterializeAt(frame_index, value_index);
            object->set(i, *value);
          }
          return object;
        }
        case FIXED_DOUBLE_ARRAY_TYPE: {
          DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
          Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
          int32_t length = 0;
          CHECK(lengthObject->ToInt32(&length));
          Handle<FixedArrayBase> object =
              isolate_->factory()->NewFixedDoubleArray(length);
          slot->value_ = object;
          if (length > 0) {
            Handle<FixedDoubleArray> double_array =
                Handle<FixedDoubleArray>::cast(object);
            for (int i = 0; i < length; ++i) {
              Handle<Object> value = MaterializeAt(frame_index, value_index);
              CHECK(value->IsNumber());
              double_array->set(i, value->Number());
            }
          }
          return object;
        }
        default:
          PrintF(stderr, "[couldn't handle instance type %d]\n",
                 map->instance_type());
          FATAL("unreachable");
          return Handle<Object>::null();
      }
      UNREACHABLE();
      break;
    }

    case TranslatedValue::kDuplicatedObject: {
      int object_index = slot->object_index();
      TranslatedState::ObjectPosition pos = object_positions_[object_index];

      // Make sure the duplicate is referring to a previous object.
      CHECK(pos.frame_index_ < frame_index ||
            (pos.frame_index_ == frame_index &&
             pos.value_index_ < *value_index - 1));

      Handle<Object> object =
          frames_[pos.frame_index_].values_[pos.value_index_].GetValue();

      // The object should have a (non-sentinel) value.
      CHECK(!object.is_null() &&
            !object.is_identical_to(isolate_->factory()->arguments_marker()));

      slot->value_ = object;
      return object;
    }

    case TranslatedValue::kInvalid:
      UNREACHABLE();
      break;
  }

  FATAL("We should never get here - unexpected deopt slot kind.");
  return Handle<Object>::null();
}


Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
  TranslatedState::ObjectPosition pos = object_positions_[object_index];
  return MaterializeAt(pos.frame_index_, &(pos.value_index_));
}
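

// GetAdaptedArguments returns the adaptor-provided arguments for the function
// at frame_index: for the outermost frame (index 0) they are read from the
// actual stack, for inlined frames from the arguments adaptor translation
// that precedes the function's frame.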
bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
                                          int frame_index) {
  if (frame_index == 0) {
    // Top level frame -> we need to go to the parent frame on the stack.
    if (!has_adapted_arguments_) return false;

    // This is top level frame, so we need to go to the stack to get
    // this function's arguments. (Note that this relies on not inlining
    // recursive functions!)
    Handle<JSFunction> function =
        Handle<JSFunction>::cast(frames_[frame_index].front().GetValue());
    *result = Accessors::FunctionGetArguments(function);
    return true;
  } else {
    TranslatedFrame* previous_frame = &(frames_[frame_index - 1]);
    if (previous_frame->kind() != TranslatedFrame::kArgumentsAdaptor) {
      return false;
    }
    // We get the adapted arguments from the parent translation.
    int length = previous_frame->height();
    Handle<JSFunction> function =
        Handle<JSFunction>::cast(previous_frame->front().GetValue());
    Handle<JSObject> arguments =
        isolate_->factory()->NewArgumentsObject(function, length);
    Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
    arguments->set_elements(*array);
    TranslatedFrame::iterator arg_iterator = previous_frame->begin();
    arg_iterator++;  // Skip function.
    for (int i = 0; i < length; ++i) {
      Handle<Object> value = arg_iterator->GetValue();
      array->set(i, *value);
      arg_iterator++;
    }
    CHECK(arg_iterator == previous_frame->end());
    *result = arguments;
    return true;
  }
}
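

// The returned args_count includes the receiver: without an arguments
// adaptor frame it is internal_formal_parameter_count() + 1.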
TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
    int jsframe_index, int* args_count) {
  for (size_t i = 0; i < frames_.size(); i++) {
    if (frames_[i].kind() == TranslatedFrame::kFunction ||
        frames_[i].kind() == TranslatedFrame::kInterpretedFunction) {
      if (jsframe_index > 0) {
        jsframe_index--;
      } else {
        // We have the JS function frame, now check if it has an
        // arguments adaptor.
        if (i > 0 &&
            frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
          *args_count = frames_[i - 1].height();
          return &(frames_[i - 1]);
        }
        *args_count =
            frames_[i].shared_info()->internal_formal_parameter_count() + 1;
        return &(frames_[i]);
      }
    }
  }
  return nullptr;
}
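

// Publishes the objects materialized for this frame into the materialized
// object store (keyed by frame pointer) and, if a fresh store was written
// with changed values, deoptimizes the function so it observes them.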
void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
  MaterializedObjectStore* materialized_store =
      isolate_->materialized_object_store();
  Handle<FixedArray> previously_materialized_objects =
      materialized_store->Get(stack_frame_pointer_);

  Handle<Object> marker = isolate_->factory()->arguments_marker();

  int length = static_cast<int>(object_positions_.size());
  bool new_store = false;
  if (previously_materialized_objects.is_null()) {
    previously_materialized_objects =
        isolate_->factory()->NewFixedArray(length);
    for (int i = 0; i < length; i++) {
      previously_materialized_objects->set(i, *marker);
    }
    new_store = true;
  }

  CHECK_EQ(length, previously_materialized_objects->length());

  bool value_changed = false;
  for (int i = 0; i < length; i++) {
    TranslatedState::ObjectPosition pos = object_positions_[i];
    TranslatedValue* value_info =
        &(frames_[pos.frame_index_].values_[pos.value_index_]);

    CHECK(value_info->IsMaterializedObject());

    Handle<Object> value(value_info->GetRawValue(), isolate_);

    if (!value.is_identical_to(marker)) {
      if (previously_materialized_objects->get(i) == *marker) {
        previously_materialized_objects->set(i, *value);
        value_changed = true;
      } else {
        CHECK(previously_materialized_objects->get(i) == *value);
      }
    }
  }
  if (new_store && value_changed) {
    materialized_store->Set(stack_frame_pointer_,
                            previously_materialized_objects);
    CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
          frames_[0].kind() == TranslatedFrame::kInterpretedFunction ||
          frames_[0].kind() == TranslatedFrame::kTailCallerFunction);
    CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
    Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
  }
}
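

// If some objects for this frame were materialized before (and stored in the
// materialized object store), inject them into the translated values so that
// repeated materialization preserves object identity.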
void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
  MaterializedObjectStore* materialized_store =
      isolate_->materialized_object_store();
  Handle<FixedArray> previously_materialized_objects =
      materialized_store->Get(stack_frame_pointer_);

  // If we have no previously materialized objects, there is nothing to do.
  if (previously_materialized_objects.is_null()) return;

  Handle<Object> marker = isolate_->factory()->arguments_marker();

  int length = static_cast<int>(object_positions_.size());
  CHECK_EQ(length, previously_materialized_objects->length());

  for (int i = 0; i < length; i++) {
    // For previously materialized objects, inject their value into the
    // translated values.
    if (previously_materialized_objects->get(i) != *marker) {
      TranslatedState::ObjectPosition pos = object_positions_[i];
      TranslatedValue* value_info =
          &(frames_[pos.frame_index_].values_[pos.value_index_]);
      CHECK(value_info->IsMaterializedObject());

      value_info->value_ =
          Handle<Object>(previously_materialized_objects->get(i), isolate_);
    }
  }
}

}  // namespace internal
}  // namespace v8