2012-01-24 08:43:12 +00:00
|
|
|
// Copyright 2012 the V8 project authors. All rights reserved.
|
2014-04-29 06:42:26 +00:00
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file.
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
|
|
#ifndef V8_DEOPTIMIZER_H_
|
|
|
|
#define V8_DEOPTIMIZER_H_
|
|
|
|
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/v8.h"
|
2010-12-07 11:31:57 +00:00
|
|
|
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/allocation.h"
|
|
|
|
#include "src/macro-assembler.h"
|
|
|
|
#include "src/zone-inl.h"
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
|
|
|
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
|
|
|
|
2013-05-27 17:12:48 +00:00
|
|
|
|
|
|
|
// Reads a double from |p| without assuming 8-byte alignment of the address.
static inline double read_double_value(Address p) {
#ifdef V8_HOST_CAN_READ_UNALIGNED
  return Memory::double_at(p);
#else  // V8_HOST_CAN_READ_UNALIGNED
  // Prevent gcc from using load-double (mips ldc1) on (possibly)
  // non-64-bit aligned address.
  union conversion {
    double d;
    uint32_t u[2];
  } pun;
  const uint32_t* words = reinterpret_cast<uint32_t*>(p);
  pun.u[0] = words[0];
  pun.u[1] = words[1];
  return pun.d;
#endif  // V8_HOST_CAN_READ_UNALIGNED
}
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
class FrameDescription;
|
|
|
|
class TranslationIterator;
|
2011-06-29 13:02:00 +00:00
|
|
|
class DeoptimizedFrameInfo;
|
2010-12-07 11:31:57 +00:00
|
|
|
|
2013-10-16 03:30:06 +00:00
|
|
|
// Describes one heap number that must be materialized after deoptimization:
// the raw double value and the location it should be written to. The
// destination type T is either an Address (a frame slot to patch) or an
// int index (a position in a deferred-values list) — see the two
// instantiations used by the Deoptimizer below.
template<typename T>
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
 public:
  HeapNumberMaterializationDescriptor(T destination, double value)
      : destination_(destination), value_(value) { }

  // Where the materialized HeapNumber should be stored.
  T destination() const { return destination_; }
  // The double value to wrap in a HeapNumber.
  double value() const { return value_; }

 private:
  T destination_;
  double value_;
};
|
|
|
|
|
|
|
|
|
2013-06-12 14:22:49 +00:00
|
|
|
// Describes one captured (escape-analyzed) object or arguments object that
// must be rebuilt on the heap after deoptimization. Plain data holder; the
// actual materialization logic lives in the Deoptimizer.
class ObjectMaterializationDescriptor BASE_EMBEDDED {
 public:
  ObjectMaterializationDescriptor(
      Address slot_address, int frame, int length, int duplicate, bool is_args)
      : slot_address_(slot_address),
        jsframe_index_(frame),
        object_length_(length),
        duplicate_object_(duplicate),
        is_arguments_(is_args) { }

  // The frame slot into which the materialized object is written back.
  Address slot_address() const { return slot_address_; }
  // Index of the JS frame the object belongs to (used e.g. to find the
  // right function for an arguments object).
  int jsframe_index() const { return jsframe_index_; }
  // Number of fields of the captured object.
  int object_length() const { return object_length_; }
  // Index of a previously described object this one duplicates, or a
  // negative value if it is not a duplicate — TODO confirm sentinel
  // convention against the .cc file.
  int duplicate_object() const { return duplicate_object_; }
  // True if this describes an arguments object rather than a captured
  // literal/allocated object.
  bool is_arguments() const { return is_arguments_; }

  // Only used for allocated receivers in DoComputeConstructStubFrame.
  void patch_slot_address(intptr_t slot) {
    slot_address_ = reinterpret_cast<Address>(slot);
  }

 private:
  Address slot_address_;
  int jsframe_index_;
  int object_length_;
  int duplicate_object_;
  bool is_arguments_;
};
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// Callback interface for enumerating the optimized JSFunctions of one or
// more native contexts (see Deoptimizer::VisitAllOptimizedFunctions and
// VisitAllOptimizedFunctionsForContext).
class OptimizedFunctionVisitor BASE_EMBEDDED {
 public:
  virtual ~OptimizedFunctionVisitor() {}

  // Function which is called before iteration of any optimized functions
  // from given native context.
  virtual void EnterContext(Context* context) = 0;

  // Called once per optimized function found in the context.
  virtual void VisitFunction(JSFunction* function) = 0;

  // Function which is called after iteration of all optimized functions
  // from given native context.
  virtual void LeaveContext(Context* context) = 0;
};
|
|
|
|
|
|
|
|
|
|
|
|
// Drives deoptimization: given an optimized frame (the "input" frame), it
// reconstructs the corresponding unoptimized frame(s) (the "output" frames)
// from the translation data recorded at compile time, and materializes any
// heap numbers and captured objects that only existed virtually in the
// optimized code.
class Deoptimizer : public Malloced {
 public:
  enum BailoutType {
    EAGER,
    LAZY,
    SOFT,
    // This last bailout type is not really a bailout, but used by the
    // debugger to deoptimize stack frames to allow inspection.
    DEBUGGER
  };

  // Number of bailout types that have generated entry code (DEBUGGER
  // excluded); sizes the per-type tables in DeoptimizerData.
  static const int kBailoutTypesWithCodeEntry = SOFT + 1;

  // One entry of the per-code-object deopt jump table built by the
  // architecture-specific code generators.
  struct JumpTableEntry : public ZoneObject {
    inline JumpTableEntry(Address entry,
                          Deoptimizer::BailoutType type,
                          bool frame)
        : label(),
          address(entry),
          bailout_type(type),
          needs_frame(frame) { }
    Label label;
    Address address;
    Deoptimizer::BailoutType bailout_type;
    bool needs_frame;
  };

  static bool TraceEnabledFor(BailoutType deopt_type,
                              StackFrame::Type frame_type);
  // Human-readable name for a bailout type, for tracing.
  static const char* MessageFor(BailoutType type);

  int output_count() const { return output_count_; }

  Handle<JSFunction> function() const { return Handle<JSFunction>(function_); }
  Handle<Code> compiled_code() const { return Handle<Code>(compiled_code_); }
  BailoutType bailout_type() const { return bailout_type_; }

  // Number of created JS frames. Not all created frames are necessarily JS.
  int jsframe_count() const { return jsframe_count_; }

  // Creates a Deoptimizer and parks it in the isolate's DeoptimizerData so
  // that generated code can pick it up with Grab() after switching stacks.
  static Deoptimizer* New(JSFunction* function,
                          BailoutType type,
                          unsigned bailout_id,
                          Address from,
                          int fp_to_sp_delta,
                          Isolate* isolate);
  // Retrieves (and takes ownership of) the Deoptimizer created by New().
  static Deoptimizer* Grab(Isolate* isolate);

  // The returned object with information on the optimized frame needs to be
  // freed before another one can be generated.
  static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
                                                        int jsframe_index,
                                                        Isolate* isolate);
  static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
                                             Isolate* isolate);

  // Makes sure that there is enough room in the relocation
  // information of a code object to perform lazy deoptimization
  // patching. If there is not enough room a new relocation
  // information object is allocated and comments are added until it
  // is big enough.
  static void EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code);

  // Deoptimize the function now. Its current optimized code will never be run
  // again and any activations of the optimized code will get deoptimized when
  // execution returns.
  static void DeoptimizeFunction(JSFunction* function);

  // Deoptimize all code in the given isolate.
  static void DeoptimizeAll(Isolate* isolate);

  // Deoptimize code associated with the given global object.
  static void DeoptimizeGlobalObject(JSObject* object);

  // Deoptimizes all optimized code that has been previously marked
  // (via code->set_marked_for_deoptimization) and unlinks all functions that
  // refer to that code.
  static void DeoptimizeMarkedCode(Isolate* isolate);

  // Visit all the known optimized functions in a given isolate.
  static void VisitAllOptimizedFunctions(
      Isolate* isolate, OptimizedFunctionVisitor* visitor);

  // The size in bytes of the code required at a lazy deopt patch site.
  static int patch_size();

  ~Deoptimizer();

  void MaterializeHeapObjects(JavaScriptFrameIterator* it);

  void MaterializeHeapNumbersForDebuggerInspectableFrame(
      Address parameters_top,
      uint32_t parameters_size,
      Address expressions_top,
      uint32_t expressions_size,
      DeoptimizedFrameInfo* info);

  // Entry point called from generated code to build the output frames.
  static void ComputeOutputFrames(Deoptimizer* deoptimizer);

  // Whether GetDeoptimizationEntry may generate the entry code on demand
  // or must only compute the address of already-generated code.
  enum GetEntryMode {
    CALCULATE_ENTRY_ADDRESS,
    ENSURE_ENTRY_CODE
  };

  static Address GetDeoptimizationEntry(
      Isolate* isolate,
      int id,
      BailoutType type,
      GetEntryMode mode = ENSURE_ENTRY_CODE);
  // Reverse mapping of GetDeoptimizationEntry; returns
  // kNotDeoptimizationEntry if |addr| is not an entry of the given type.
  static int GetDeoptimizationId(Isolate* isolate,
                                 Address addr,
                                 BailoutType type);
  static int GetOutputInfo(DeoptimizationOutputData* data,
                           BailoutId node_id,
                           SharedFunctionInfo* shared);

  // Code generation support.
  static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
  static int output_count_offset() {
    return OFFSET_OF(Deoptimizer, output_count_);
  }
  static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }

  static int has_alignment_padding_offset() {
    return OFFSET_OF(Deoptimizer, has_alignment_padding_);
  }

  static int GetDeoptimizedCodeCount(Isolate* isolate);

  static const int kNotDeoptimizationEntry = -1;

  // Generators for the deoptimization entry code.
  class EntryGenerator BASE_EMBEDDED {
   public:
    EntryGenerator(MacroAssembler* masm, BailoutType type)
        : masm_(masm), type_(type) { }
    virtual ~EntryGenerator() { }

    void Generate();

   protected:
    MacroAssembler* masm() const { return masm_; }
    BailoutType type() const { return type_; }
    Isolate* isolate() const { return masm_->isolate(); }

    // Hook for subclasses to emit code before the common entry sequence.
    virtual void GeneratePrologue() { }

   private:
    MacroAssembler* masm_;
    Deoptimizer::BailoutType type_;
  };

  // Generates a table of |count| deoptimization entries.
  class TableEntryGenerator : public EntryGenerator {
   public:
    TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
        : EntryGenerator(masm, type), count_(count) { }

   protected:
    virtual void GeneratePrologue();

   private:
    int count() const { return count_; }

    int count_;
  };

  int ConvertJSFrameIndexToFrameIndex(int jsframe_index);

  static size_t GetMaxDeoptTableSize();

  static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
                                               BailoutType type,
                                               int max_entry_id);

  Isolate* isolate() const { return isolate_; }

 private:
  // Bounds on the number of pre-generated deoptimization entries.
  static const int kMinNumberOfEntries = 64;
  static const int kMaxNumberOfEntries = 16384;

  Deoptimizer(Isolate* isolate,
              JSFunction* function,
              BailoutType type,
              unsigned bailout_id,
              Address from,
              int fp_to_sp_delta,
              Code* optimized_code);
  Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
  void PrintFunctionName();
  void DeleteFrameDescriptions();

  void DoComputeOutputFrames();
  void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
  void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
                                      int frame_index);
  void DoComputeConstructStubFrame(TranslationIterator* iterator,
                                   int frame_index);
  void DoComputeAccessorStubFrame(TranslationIterator* iterator,
                                  int frame_index,
                                  bool is_setter_stub_frame);
  void DoComputeCompiledStubFrame(TranslationIterator* iterator,
                                  int frame_index);

  // Translate object, store the result into an auxiliary array
  // (deferred_objects_tagged_values_).
  void DoTranslateObject(TranslationIterator* iterator,
                         int object_index,
                         int field_index);

  // Translate value, store the result into the given frame slot.
  void DoTranslateCommand(TranslationIterator* iterator,
                          int frame_index,
                          unsigned output_offset);

  // Translate object, do not store the result anywhere (but do update
  // the deferred materialization array).
  void DoTranslateObjectAndSkip(TranslationIterator* iterator);

  unsigned ComputeInputFrameSize() const;
  unsigned ComputeFixedSize(JSFunction* function) const;

  unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
  unsigned ComputeOutgoingArgumentSize() const;

  Object* ComputeLiteral(int index) const;

  void AddObjectStart(intptr_t slot_address, int argc, bool is_arguments);
  void AddObjectDuplication(intptr_t slot, int object_index);
  void AddObjectTaggedValue(intptr_t value);
  void AddObjectDoubleValue(double value);
  void AddDoubleValue(intptr_t slot_address, double value);

  // Whether the arguments object at |object_index| belongs to a frame that
  // went through an arguments adaptor. Note the index into
  // jsframe_has_adapted_arguments_ is reversed relative to jsframe_index.
  bool ArgumentsObjectIsAdapted(int object_index) {
    ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
    int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
    return jsframe_has_adapted_arguments_[reverse_jsframe_index];
  }

  // The function owning the arguments object at |object_index|.
  Handle<JSFunction> ArgumentsObjectFunction(int object_index) {
    ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
    int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
    return jsframe_functions_[reverse_jsframe_index];
  }

  // Helper function for heap object materialization.
  Handle<Object> MaterializeNextHeapObject();
  Handle<Object> MaterializeNextValue();

  static void GenerateDeoptimizationEntries(
      MacroAssembler* masm, int count, BailoutType type);

  // Marks all the code in the given context for deoptimization.
  static void MarkAllCodeForContext(Context* native_context);

  // Visit all the known optimized functions in a given context.
  static void VisitAllOptimizedFunctionsForContext(
      Context* context, OptimizedFunctionVisitor* visitor);

  // Deoptimizes all code marked in the given context.
  static void DeoptimizeMarkedCodeForContext(Context* native_context);

  // Patch the given code so that it will deoptimize itself.
  static void PatchCodeForDeoptimization(Isolate* isolate, Code* code);

  // Searches the list of known deoptimizing code for a Code object
  // containing the given address (which is supposedly faster than
  // searching all code objects).
  Code* FindDeoptimizingCode(Address addr);

  // Fill the input from from a JavaScript frame. This is used when
  // the debugger needs to inspect an optimized frame. For normal
  // deoptimizations the input frame is filled in generated code.
  void FillInputFrame(Address tos, JavaScriptFrame* frame);

  // Fill the given output frame's registers to contain the failure handler
  // address and the number of parameters for a stub failure trampoline.
  void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
                                        CodeStubInterfaceDescriptor* desc);

  // Fill the given output frame's double registers with the original values
  // from the input frame's double registers.
  void CopyDoubleRegisters(FrameDescription* output_frame);

  // Determines whether the input frame contains alignment padding by looking
  // at the dynamic alignment state slot inside the frame.
  bool HasAlignmentPadding(JSFunction* function);

  Isolate* isolate_;
  JSFunction* function_;
  Code* compiled_code_;
  unsigned bailout_id_;
  BailoutType bailout_type_;
  // PC in the optimized code we are deoptimizing from.
  Address from_;
  int fp_to_sp_delta_;
  int has_alignment_padding_;

  // Input frame description.
  FrameDescription* input_;
  // Number of output frames.
  int output_count_;
  // Number of output js frames.
  int jsframe_count_;
  // Array of output frame descriptions.
  FrameDescription** output_;

  // Deferred values to be materialized.
  List<Object*> deferred_objects_tagged_values_;
  List<HeapNumberMaterializationDescriptor<int> >
      deferred_objects_double_values_;
  List<ObjectMaterializationDescriptor> deferred_objects_;
  List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;

  // Key for lookup of previously materialized objects
  Address stack_fp_;
  Handle<FixedArray> previously_materialized_objects_;
  int prev_materialized_count_;

  // Output frame information. Only used during heap object materialization.
  List<Handle<JSFunction> > jsframe_functions_;
  List<bool> jsframe_has_adapted_arguments_;

  // Materialized objects. Only used during heap object materialization.
  List<Handle<Object> >* materialized_values_;
  List<Handle<Object> >* materialized_objects_;
  int materialization_value_index_;
  int materialization_object_index_;

#ifdef DEBUG
  DisallowHeapAllocation* disallow_heap_allocation_;
#endif  // DEBUG

  CodeTracer::Scope* trace_scope_;

  // Size of each deoptimization entry in the generated entry table;
  // defined per architecture.
  static const int table_entry_size_;

  friend class FrameDescription;
  friend class DeoptimizedFrameInfo;
};
|
|
|
|
|
|
|
|
|
|
|
|
// Raw description of one stack frame (registers, fixed slots, and a
// variable-size slot area) used as both the deoptimizer's input frame and
// its output frames. Allocated over-sized via the placement operator new so
// that frame_content_ extends to hold the whole frame; generated code reads
// and writes it through the static *_offset() accessors, so the member
// layout is part of the contract with the code generators.
class FrameDescription {
 public:
  FrameDescription(uint32_t frame_size,
                   JSFunction* function);

  void* operator new(size_t size, uint32_t frame_size) {
    // Subtracts kPointerSize, as the member frame_content_ already supplies
    // the first element of the area to store the frame.
    return malloc(size + frame_size - kPointerSize);
  }

  // Matching placement delete, called only if the constructor throws.
  void operator delete(void* pointer, uint32_t frame_size) {
    free(pointer);
  }

  void operator delete(void* description) {
    free(description);
  }

  uint32_t GetFrameSize() const {
    // frame_size_ is stored as uintptr_t only for alignment (see below);
    // it must always fit in 32 bits.
    ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
    return static_cast<uint32_t>(frame_size_);
  }

  JSFunction* GetFunction() const { return function_; }

  unsigned GetOffsetFromSlotIndex(int slot_index);

  intptr_t GetFrameSlot(unsigned offset) {
    return *GetFrameSlotPointer(offset);
  }

  double GetDoubleFrameSlot(unsigned offset) {
    intptr_t* ptr = GetFrameSlotPointer(offset);
    // Slots are only pointer-aligned, so use the alignment-safe reader.
    return read_double_value(reinterpret_cast<Address>(ptr));
  }

  void SetFrameSlot(unsigned offset, intptr_t value) {
    *GetFrameSlotPointer(offset) = value;
  }

  void SetCallerPc(unsigned offset, intptr_t value);

  void SetCallerFp(unsigned offset, intptr_t value);

  void SetCallerConstantPool(unsigned offset, intptr_t value);

  intptr_t GetRegister(unsigned n) const {
#if DEBUG
    // This convoluted ASSERT is needed to work around a gcc problem that
    // improperly detects an array bounds overflow in optimized debug builds
    // when using a plain ASSERT.
    if (n >= ARRAY_SIZE(registers_)) {
      ASSERT(false);
      return 0;
    }
#endif
    return registers_[n];
  }

  double GetDoubleRegister(unsigned n) const {
    ASSERT(n < ARRAY_SIZE(double_registers_));
    return double_registers_[n];
  }

  void SetRegister(unsigned n, intptr_t value) {
    ASSERT(n < ARRAY_SIZE(registers_));
    registers_[n] = value;
  }

  void SetDoubleRegister(unsigned n, double value) {
    ASSERT(n < ARRAY_SIZE(double_registers_));
    double_registers_[n] = value;
  }

  intptr_t GetTop() const { return top_; }
  void SetTop(intptr_t top) { top_ = top; }

  intptr_t GetPc() const { return pc_; }
  void SetPc(intptr_t pc) { pc_ = pc; }

  intptr_t GetFp() const { return fp_; }
  void SetFp(intptr_t fp) { fp_ = fp; }

  intptr_t GetContext() const { return context_; }
  void SetContext(intptr_t context) { context_ = context; }

  intptr_t GetConstantPool() const { return constant_pool_; }
  void SetConstantPool(intptr_t constant_pool) {
    constant_pool_ = constant_pool;
  }

  Smi* GetState() const { return state_; }
  void SetState(Smi* state) { state_ = state; }

  void SetContinuation(intptr_t pc) { continuation_ = pc; }

  StackFrame::Type GetFrameType() const { return type_; }
  void SetFrameType(StackFrame::Type type) { type_ = type; }

  // Get the incoming arguments count.
  int ComputeParametersCount();

  // Get a parameter value for an unoptimized frame.
  Object* GetParameter(int index);

  // Get the expression stack height for a unoptimized frame.
  unsigned GetExpressionCount();

  // Get the expression stack value for an unoptimized frame.
  Object* GetExpression(int index);

  // Member offsets used by generated code to access this structure directly.
  static int registers_offset() {
    return OFFSET_OF(FrameDescription, registers_);
  }

  static int double_registers_offset() {
    return OFFSET_OF(FrameDescription, double_registers_);
  }

  static int frame_size_offset() {
    return OFFSET_OF(FrameDescription, frame_size_);
  }

  static int pc_offset() {
    return OFFSET_OF(FrameDescription, pc_);
  }

  static int state_offset() {
    return OFFSET_OF(FrameDescription, state_);
  }

  static int continuation_offset() {
    return OFFSET_OF(FrameDescription, continuation_);
  }

  static int frame_content_offset() {
    return OFFSET_OF(FrameDescription, frame_content_);
  }

 private:
  static const uint32_t kZapUint32 = 0xbeeddead;

  // Frame_size_ must hold a uint32_t value. It is only a uintptr_t to
  // keep the variable-size array frame_content_ of type intptr_t at
  // the end of the structure aligned.
  uintptr_t frame_size_;  // Number of bytes.
  JSFunction* function_;
  intptr_t registers_[Register::kNumRegisters];
  double double_registers_[DoubleRegister::kMaxNumRegisters];
  intptr_t top_;
  intptr_t pc_;
  intptr_t fp_;
  intptr_t context_;
  intptr_t constant_pool_;
  StackFrame::Type type_;
  Smi* state_;

  // Continuation is the PC where the execution continues after
  // deoptimizing.
  intptr_t continuation_;

  // This must be at the end of the object as the object is allocated larger
  // than it's definition indicate to extend this array.
  intptr_t frame_content_[1];

  intptr_t* GetFrameSlotPointer(unsigned offset) {
    ASSERT(offset < frame_size_);
    return reinterpret_cast<intptr_t*>(
        reinterpret_cast<Address>(this) + frame_content_offset() + offset);
  }

  int ComputeFixedSize();
};
|
|
|
|
|
|
|
|
|
2013-05-14 11:45:33 +00:00
|
|
|
// Per-isolate bookkeeping for the deoptimizer: the lazily generated
// deoptimization entry code chunks (one per bailout type) and the
// Deoptimizer instance currently being handed from Deoptimizer::New to
// Deoptimizer::Grab, if any.
class DeoptimizerData {
 public:
  explicit DeoptimizerData(MemoryAllocator* allocator);
  ~DeoptimizerData();

  // GC support — presumably visits heap pointers reachable from the
  // pending deoptimized_frame_info_; verify against the .cc file.
  void Iterate(ObjectVisitor* v);

 private:
  MemoryAllocator* allocator_;
  // Number of entries already generated, per bailout type.
  int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
  // Memory chunks holding the generated entry code, per bailout type.
  MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];

  DeoptimizedFrameInfo* deoptimized_frame_info_;

  // The Deoptimizer created by New() and not yet claimed by Grab().
  Deoptimizer* current_;

  friend class Deoptimizer;

  DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
};
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// Zone-backed byte buffer into which Translation records are serialized;
// the finished contents are copied to a heap ByteArray that is attached to
// the optimized code's deoptimization data.
class TranslationBuffer BASE_EMBEDDED {
 public:
  explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }

  // Current write position; used as the start index of the next translation.
  int CurrentIndex() const { return contents_.length(); }
  // Appends one 32-bit value (the variable-length encoding is implemented
  // in the .cc file).
  void Add(int32_t value, Zone* zone);

  // Copies the accumulated bytes into a newly allocated ByteArray.
  Handle<ByteArray> CreateByteArray(Factory* factory);

 private:
  ZoneList<uint8_t> contents_;
};
|
|
|
|
|
|
|
|
|
|
|
|
class TranslationIterator BASE_EMBEDDED {
|
|
|
|
public:
|
|
|
|
TranslationIterator(ByteArray* buffer, int index)
|
|
|
|
: buffer_(buffer), index_(index) {
|
|
|
|
ASSERT(index >= 0 && index < buffer->length());
|
|
|
|
}
|
|
|
|
|
|
|
|
int32_t Next();
|
|
|
|
|
2011-08-08 07:17:01 +00:00
|
|
|
bool HasNext() const { return index_ < buffer_->length(); }
|
2010-12-07 11:31:57 +00:00
|
|
|
|
|
|
|
void Skip(int n) {
|
|
|
|
for (int i = 0; i < n; i++) Next();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
ByteArray* buffer_;
|
|
|
|
int index_;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2013-10-01 11:18:30 +00:00
|
|
|
// List of all translation opcodes. Kept in one place so that the
// Translation::Opcode enum and its string table (StringFor) stay in sync.
#define TRANSLATION_OPCODE_LIST(V) \
  V(BEGIN)                         \
  V(JS_FRAME)                      \
  V(CONSTRUCT_STUB_FRAME)          \
  V(GETTER_STUB_FRAME)             \
  V(SETTER_STUB_FRAME)             \
  V(ARGUMENTS_ADAPTOR_FRAME)       \
  V(COMPILED_STUB_FRAME)           \
  V(DUPLICATED_OBJECT)             \
  V(ARGUMENTS_OBJECT)              \
  V(CAPTURED_OBJECT)               \
  V(REGISTER)                      \
  V(INT32_REGISTER)                \
  V(UINT32_REGISTER)               \
  V(DOUBLE_REGISTER)               \
  V(STACK_SLOT)                    \
  V(INT32_STACK_SLOT)              \
  V(UINT32_STACK_SLOT)             \
  V(DOUBLE_STACK_SLOT)             \
  V(LITERAL)
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
class Translation BASE_EMBEDDED {
|
|
|
|
public:
|
2013-10-01 11:18:30 +00:00
|
|
|
#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
|
2010-12-07 11:31:57 +00:00
|
|
|
enum Opcode {
|
2013-10-01 11:18:30 +00:00
|
|
|
TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM)
|
|
|
|
LAST = LITERAL
|
2010-12-07 11:31:57 +00:00
|
|
|
};
|
2013-10-01 11:18:30 +00:00
|
|
|
#undef DECLARE_TRANSLATION_OPCODE_ENUM
|
2010-12-07 11:31:57 +00:00
|
|
|
|
2012-06-04 14:42:58 +00:00
|
|
|
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
|
|
|
|
Zone* zone)
|
2010-12-07 11:31:57 +00:00
|
|
|
: buffer_(buffer),
|
2012-06-04 14:42:58 +00:00
|
|
|
index_(buffer->CurrentIndex()),
|
|
|
|
zone_(zone) {
|
|
|
|
buffer_->Add(BEGIN, zone);
|
|
|
|
buffer_->Add(frame_count, zone);
|
|
|
|
buffer_->Add(jsframe_count, zone);
|
2010-12-07 11:31:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int index() const { return index_; }
|
|
|
|
|
|
|
|
// Commands.
|
2012-08-06 14:13:09 +00:00
|
|
|
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
|
2012-12-18 16:25:45 +00:00
|
|
|
void BeginCompiledStubFrame();
|
2012-01-24 08:43:12 +00:00
|
|
|
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
|
2012-02-28 09:05:55 +00:00
|
|
|
void BeginConstructStubFrame(int literal_id, unsigned height);
|
2012-09-07 09:01:54 +00:00
|
|
|
void BeginGetterStubFrame(int literal_id);
|
2012-08-17 10:43:32 +00:00
|
|
|
void BeginSetterStubFrame(int literal_id);
|
2013-06-12 14:22:49 +00:00
|
|
|
void BeginArgumentsObject(int args_length);
|
2013-08-07 11:24:14 +00:00
|
|
|
void BeginCapturedObject(int length);
|
|
|
|
void DuplicateObject(int object_index);
|
2010-12-07 11:31:57 +00:00
|
|
|
void StoreRegister(Register reg);
|
|
|
|
void StoreInt32Register(Register reg);
|
2012-08-22 15:44:17 +00:00
|
|
|
void StoreUint32Register(Register reg);
|
2010-12-07 11:31:57 +00:00
|
|
|
void StoreDoubleRegister(DoubleRegister reg);
|
|
|
|
void StoreStackSlot(int index);
|
|
|
|
void StoreInt32StackSlot(int index);
|
2012-08-22 15:44:17 +00:00
|
|
|
void StoreUint32StackSlot(int index);
|
2010-12-07 11:31:57 +00:00
|
|
|
void StoreDoubleStackSlot(int index);
|
|
|
|
void StoreLiteral(int literal_id);
|
2013-06-26 08:43:27 +00:00
|
|
|
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
|
2010-12-07 11:31:57 +00:00
|
|
|
|
2012-06-11 12:42:31 +00:00
|
|
|
Zone* zone() const { return zone_; }
|
2012-06-04 14:42:58 +00:00
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
static int NumberOfOperandsFor(Opcode opcode);
|
|
|
|
|
2011-06-16 07:58:47 +00:00
|
|
|
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
|
2010-12-07 11:31:57 +00:00
|
|
|
static const char* StringFor(Opcode opcode);
|
|
|
|
#endif
|
|
|
|
|
2012-06-14 14:06:22 +00:00
|
|
|
// A literal id which refers to the JSFunction itself.
|
|
|
|
static const int kSelfLiteralId = -239;
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
private:
|
|
|
|
TranslationBuffer* buffer_;
|
|
|
|
int index_;
|
2012-06-04 14:42:58 +00:00
|
|
|
Zone* zone_;
|
2010-12-07 11:31:57 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2011-04-01 11:41:36 +00:00
|
|
|
class SlotRef BASE_EMBEDDED {
|
|
|
|
public:
|
|
|
|
enum SlotRepresentation {
|
|
|
|
UNKNOWN,
|
|
|
|
TAGGED,
|
|
|
|
INT32,
|
2012-08-22 15:44:17 +00:00
|
|
|
UINT32,
|
2011-04-01 11:41:36 +00:00
|
|
|
DOUBLE,
|
The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=danno@chromium.org, mstarzinger@chromium.org
LOG=N
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=18918
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18936 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-30 10:33:53 +00:00
|
|
|
LITERAL,
|
|
|
|
DEFERRED_OBJECT, // Object captured by the escape analysis.
|
|
|
|
// The number of nested objects can be obtained
|
|
|
|
// with the DeferredObjectLength() method
|
|
|
|
// (the SlotRefs of the nested objects follow
|
|
|
|
// this SlotRef in the depth-first order.)
|
2014-02-27 15:12:12 +00:00
|
|
|
DUPLICATE_OBJECT, // Duplicated object of a deferred object.
|
|
|
|
ARGUMENTS_OBJECT // Arguments object - only used to keep indexing
|
|
|
|
// in sync, it should not be materialized.
|
2011-04-01 11:41:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
SlotRef()
|
|
|
|
: addr_(NULL), representation_(UNKNOWN) { }
|
|
|
|
|
|
|
|
SlotRef(Address addr, SlotRepresentation representation)
|
|
|
|
: addr_(addr), representation_(representation) { }
|
|
|
|
|
2013-02-25 14:46:09 +00:00
|
|
|
SlotRef(Isolate* isolate, Object* literal)
|
|
|
|
: literal_(literal, isolate), representation_(LITERAL) { }
|
2011-04-01 11:41:36 +00:00
|
|
|
|
2014-02-27 15:12:12 +00:00
|
|
|
static SlotRef NewArgumentsObject(int length) {
|
|
|
|
SlotRef slot;
|
|
|
|
slot.representation_ = ARGUMENTS_OBJECT;
|
|
|
|
slot.deferred_object_length_ = length;
|
|
|
|
return slot;
|
|
|
|
}
|
|
|
|
|
The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=danno@chromium.org, mstarzinger@chromium.org
LOG=N
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=18918
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18936 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-30 10:33:53 +00:00
|
|
|
static SlotRef NewDeferredObject(int length) {
|
|
|
|
SlotRef slot;
|
|
|
|
slot.representation_ = DEFERRED_OBJECT;
|
|
|
|
slot.deferred_object_length_ = length;
|
|
|
|
return slot;
|
This is a preview of the captured arguments object materialization,
mostly to make sure that it is going in the right direction. The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is there a simpler/more correct way to store the already-materialized
objects? (At the moment there is a custom root reference to JSArray
containing frames' FixedArrays with their captured objects.)
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=mstarzinger@chromium.org, danno@chromium.org
LOG=N
BUG=
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18918 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-29 15:14:15 +00:00
|
|
|
}
|
|
|
|
|
The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=danno@chromium.org, mstarzinger@chromium.org
LOG=N
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=18918
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18936 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-30 10:33:53 +00:00
|
|
|
SlotRepresentation Representation() { return representation_; }
|
|
|
|
|
|
|
|
static SlotRef NewDuplicateObject(int id) {
|
|
|
|
SlotRef slot;
|
|
|
|
slot.representation_ = DUPLICATE_OBJECT;
|
|
|
|
slot.duplicate_object_id_ = id;
|
|
|
|
return slot;
|
|
|
|
}
|
|
|
|
|
2014-02-27 15:12:12 +00:00
|
|
|
int GetChildrenCount() {
|
|
|
|
if (representation_ == DEFERRED_OBJECT ||
|
|
|
|
representation_ == ARGUMENTS_OBJECT) {
|
|
|
|
return deferred_object_length_;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=danno@chromium.org, mstarzinger@chromium.org
LOG=N
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=18918
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18936 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-30 10:33:53 +00:00
|
|
|
|
|
|
|
int DuplicateObjectId() { return duplicate_object_id_; }
|
|
|
|
|
|
|
|
Handle<Object> GetValue(Isolate* isolate);
|
This is a preview of the captured arguments object materialization,
mostly to make sure that it is going in the right direction. The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is there a simpler/more correct way to store the already-materialized
objects? (At the moment there is a custom root reference to JSArray
containing frames' FixedArrays with their captured objects.)
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=mstarzinger@chromium.org, danno@chromium.org
LOG=N
BUG=
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18918 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-29 15:14:15 +00:00
|
|
|
|
|
|
|
private:
|
2014-01-29 15:49:48 +00:00
|
|
|
Address addr_;
|
|
|
|
Handle<Object> literal_;
|
|
|
|
SlotRepresentation representation_;
|
The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=danno@chromium.org, mstarzinger@chromium.org
LOG=N
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=18918
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18936 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-30 10:33:53 +00:00
|
|
|
int deferred_object_length_;
|
|
|
|
int duplicate_object_id_;
|
|
|
|
};
|
|
|
|
|
|
|
|
class SlotRefValueBuilder BASE_EMBEDDED {
|
|
|
|
public:
|
|
|
|
SlotRefValueBuilder(
|
|
|
|
JavaScriptFrame* frame,
|
|
|
|
int inlined_frame_index,
|
|
|
|
int formal_parameter_count);
|
|
|
|
|
|
|
|
void Prepare(Isolate* isolate);
|
|
|
|
Handle<Object> GetNext(Isolate* isolate, int level);
|
|
|
|
void Finish(Isolate* isolate);
|
|
|
|
|
|
|
|
int args_length() { return args_length_; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
List<Handle<Object> > materialized_objects_;
|
|
|
|
Handle<FixedArray> previously_materialized_objects_;
|
|
|
|
int prev_materialized_count_;
|
|
|
|
Address stack_frame_id_;
|
|
|
|
List<SlotRef> slot_refs_;
|
|
|
|
int current_slot_;
|
|
|
|
int args_length_;
|
|
|
|
int first_slot_index_;
|
|
|
|
|
|
|
|
static SlotRef ComputeSlotForNextArgument(
|
|
|
|
Translation::Opcode opcode,
|
|
|
|
TranslationIterator* iterator,
|
|
|
|
DeoptimizationInputData* data,
|
|
|
|
JavaScriptFrame* frame);
|
|
|
|
|
|
|
|
Handle<Object> GetPreviouslyMaterialized(Isolate* isolate, int length);
|
2011-04-01 11:41:36 +00:00
|
|
|
|
|
|
|
static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
|
|
|
|
if (slot_index >= 0) {
|
|
|
|
const int offset = JavaScriptFrameConstants::kLocal0Offset;
|
|
|
|
return frame->fp() + offset - (slot_index * kPointerSize);
|
|
|
|
} else {
|
|
|
|
const int offset = JavaScriptFrameConstants::kLastParameterOffset;
|
|
|
|
return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=danno@chromium.org, mstarzinger@chromium.org
LOG=N
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=18918
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18936 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-30 10:33:53 +00:00
|
|
|
Handle<Object> GetDeferredObject(Isolate* isolate);
|
|
|
|
};
|
This is a preview of the captured arguments object materialization,
mostly to make sure that it is going in the right direction. The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optmized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is there a simpler/more correct way to store the already-materialized
objects? (At the moment there is a custom root reference to JSArray
containing frames' FixedArrays with their captured objects.)
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=mstarzinger@chromium.org, danno@chromium.org
LOG=N
BUG=
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18918 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-29 15:14:15 +00:00
|
|
|
|
The current
version is passing all the existing test + a bunch of new tests
(packaged in the change list, too).
The patch extends the SlotRef object to describe captured and duplicated
objects. Since the SlotRefs are not independent of each other anymore,
there is a new SlotRefValueBuilder class that stores the SlotRefs and
later materializes the objects from the SlotRefs.
Note that unlike the previous implementation of SlotRefs, we now build
the SlotRef entries for the entire frame, not just the particular
function. This is because duplicate objects might refer to previous
captured objects (that might live inside other inlined function's part
of the frame).
We also need to store the materialized objects between other potential
invocations of the same arguments object so that we materialize each
captured object at most once. The materialized objects of frames live
in the new MaterielizedObjectStore object (contained in Isolate),
indexed by the frame's FP address. Each argument materialization (and
deoptimization) tries to lookup its captured objects in the store before
building new ones. Deoptimization also removes the materialized objects
from the store. We also schedule a lazy deopt to be sure that we always
get rid of the materialized objects and that the optimized function
adopts the materialized objects (instead of happily computing with its
captured representations).
Concerns:
- Is the FP address the right key for a frame? (Note that deoptimizer's
representation of frame is different from the argument object
materializer's one - it is not easy to find common ground.)
- Performance is suboptimal in several places, but a quick local run of
benchmarks does not seem to show a perf hit. Examples of possible
improvements: smarter generation of SlotRefs (build other functions'
SlotRefs only for captured objects and only if necessary), smarter
lookup of stored materialized objects.
- Ideally, we would like to share the code for argument materialization
with deoptimizer's materializer. However, the supporting data structures
(mainly the frame descriptor) are quite different in each case, so it
looks more like a separate project.
Thanks for any feedback.
R=danno@chromium.org, mstarzinger@chromium.org
LOG=N
BUG=
Committed: https://code.google.com/p/v8/source/detail?r=18918
Review URL: https://codereview.chromium.org/103243005
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18936 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
2014-01-30 10:33:53 +00:00
|
|
|
class MaterializedObjectStore {
|
|
|
|
public:
|
|
|
|
explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {
|
|
|
|
}
|
|
|
|
|
|
|
|
Handle<FixedArray> Get(Address fp);
|
|
|
|
void Set(Address fp, Handle<FixedArray> materialized_objects);
|
|
|
|
void Remove(Address fp);
|
|
|
|
|
|
|
|
private:
|
|
|
|
Isolate* isolate() { return isolate_; }
|
|
|
|
Handle<FixedArray> GetStackEntries();
|
|
|
|
Handle<FixedArray> EnsureStackEntries(int size);
|
|
|
|
|
|
|
|
int StackIdToIndex(Address fp);
|
|
|
|
|
|
|
|
Isolate* isolate_;
|
|
|
|
List<Address> frame_fps_;
|
2011-04-01 11:41:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2011-06-29 13:02:00 +00:00
|
|
|
// Class used to represent an unoptimized frame when the debugger
|
|
|
|
// needs to inspect a frame that is part of an optimized frame. The
|
|
|
|
// internally used FrameDescription objects are not GC safe so for use
|
|
|
|
// by the debugger frame information is copied to an object of this type.
|
2012-01-24 08:43:12 +00:00
|
|
|
// Represents parameters in unadapted form so their number might mismatch
|
|
|
|
// formal parameter count.
|
2011-06-29 13:02:00 +00:00
|
|
|
class DeoptimizedFrameInfo : public Malloced {
|
|
|
|
public:
|
2012-01-24 08:43:12 +00:00
|
|
|
DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
|
|
|
|
int frame_index,
|
2012-02-28 09:05:55 +00:00
|
|
|
bool has_arguments_adaptor,
|
|
|
|
bool has_construct_stub);
|
2011-06-29 13:02:00 +00:00
|
|
|
virtual ~DeoptimizedFrameInfo();
|
|
|
|
|
|
|
|
// GC support.
|
|
|
|
void Iterate(ObjectVisitor* v);
|
|
|
|
|
2011-07-07 14:29:16 +00:00
|
|
|
// Return the number of incoming arguments.
|
|
|
|
int parameters_count() { return parameters_count_; }
|
|
|
|
|
2011-06-29 13:02:00 +00:00
|
|
|
// Return the height of the expression stack.
|
|
|
|
int expression_count() { return expression_count_; }
|
|
|
|
|
2011-07-06 13:02:17 +00:00
|
|
|
// Get the frame function.
|
|
|
|
JSFunction* GetFunction() {
|
|
|
|
return function_;
|
|
|
|
}
|
|
|
|
|
2012-02-28 09:05:55 +00:00
|
|
|
// Check if this frame is preceded by construct stub frame. The bottom-most
|
|
|
|
// inlined frame might still be called by an uninlined construct stub.
|
|
|
|
bool HasConstructStub() {
|
|
|
|
return has_construct_stub_;
|
|
|
|
}
|
|
|
|
|
2011-07-07 14:29:16 +00:00
|
|
|
// Get an incoming argument.
|
|
|
|
Object* GetParameter(int index) {
|
|
|
|
ASSERT(0 <= index && index < parameters_count());
|
|
|
|
return parameters_[index];
|
|
|
|
}
|
|
|
|
|
2011-06-29 13:02:00 +00:00
|
|
|
// Get an expression from the expression stack.
|
|
|
|
Object* GetExpression(int index) {
|
|
|
|
ASSERT(0 <= index && index < expression_count());
|
|
|
|
return expression_stack_[index];
|
|
|
|
}
|
|
|
|
|
2012-01-31 12:08:33 +00:00
|
|
|
int GetSourcePosition() {
|
|
|
|
return source_position_;
|
2012-01-30 13:07:01 +00:00
|
|
|
}
|
|
|
|
|
2011-06-29 13:02:00 +00:00
|
|
|
private:
|
2011-07-07 14:29:16 +00:00
|
|
|
// Set an incoming argument.
|
|
|
|
void SetParameter(int index, Object* obj) {
|
|
|
|
ASSERT(0 <= index && index < parameters_count());
|
|
|
|
parameters_[index] = obj;
|
|
|
|
}
|
|
|
|
|
2011-06-29 13:02:00 +00:00
|
|
|
// Set an expression on the expression stack.
|
|
|
|
void SetExpression(int index, Object* obj) {
|
|
|
|
ASSERT(0 <= index && index < expression_count());
|
|
|
|
expression_stack_[index] = obj;
|
|
|
|
}
|
|
|
|
|
2011-07-06 13:02:17 +00:00
|
|
|
JSFunction* function_;
|
2012-02-28 09:05:55 +00:00
|
|
|
bool has_construct_stub_;
|
2011-07-07 14:29:16 +00:00
|
|
|
int parameters_count_;
|
2011-06-29 13:02:00 +00:00
|
|
|
int expression_count_;
|
2011-07-07 14:29:16 +00:00
|
|
|
Object** parameters_;
|
2011-06-29 13:02:00 +00:00
|
|
|
Object** expression_stack_;
|
2012-01-31 12:08:33 +00:00
|
|
|
int source_position_;
|
2011-06-29 13:02:00 +00:00
|
|
|
|
|
|
|
friend class Deoptimizer;
|
|
|
|
};
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
} } // namespace v8::internal
|
|
|
|
|
|
|
|
#endif // V8_DEOPTIMIZER_H_
|