// v8/src/compiler.h

// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
#include "src/allocation.h"
#include "src/ast.h"
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
#include "src/signature.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
class AstValueFactory;
class HydrogenCodeStub;
class JavaScriptFrame;
class ParseInfo;
class ScriptData;
// This class encapsulates encoding and decoding of source positions from
// which hydrogen values originated.
// When FLAG_hydrogen_track_positions is set, this object encodes the
// identifier of the inlining and the absolute offset from the start of the
// inlined function.
// When the flag is not set, we simply track the absolute offset from the
// script start.
class SourcePosition {
public:
static SourcePosition Unknown() {
return SourcePosition::FromRaw(kNoPosition);
}
bool IsUnknown() const { return value_ == kNoPosition; }
uint32_t position() const { return PositionField::decode(value_); }
void set_position(uint32_t position) {
if (FLAG_hydrogen_track_positions) {
value_ = static_cast<uint32_t>(PositionField::update(value_, position));
} else {
value_ = position;
}
}
uint32_t inlining_id() const { return InliningIdField::decode(value_); }
void set_inlining_id(uint32_t inlining_id) {
if (FLAG_hydrogen_track_positions) {
value_ =
static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
}
}
uint32_t raw() const { return value_; }
private:
static const uint32_t kNoPosition =
static_cast<uint32_t>(RelocInfo::kNoPosition);
typedef BitField<uint32_t, 0, 9> InliningIdField;
// Offset from the start of the inlined function.
typedef BitField<uint32_t, 9, 23> PositionField;
friend class HPositionInfo;
friend class Deoptimizer;
static SourcePosition FromRaw(uint32_t raw_position) {
SourcePosition position;
position.value_ = raw_position;
return position;
}
// If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
// and PositionField.
// Otherwise contains absolute offset from the script start.
uint32_t value_;
};
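// Usage sketch (illustrative, not part of the original header). With
// FLAG_hydrogen_track_positions set, a SourcePosition packs a 9-bit
// inlining id and a 23-bit offset into a single 32-bit value:
//
//   SourcePosition pos = SourcePosition::Unknown();
//   pos.set_position(42);    // offset 42 within the inlined function
//   pos.set_inlining_id(3);  // id handed out by TraceInlinedFunction
//   uint32_t packed = pos.raw();  // (42 << 9) | 3 while tracking positions
//
// Without the flag, raw() is simply the absolute script offset.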
std::ostream& operator<<(std::ostream& os, const SourcePosition& p);
struct InlinedFunctionInfo {
InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
int script_id, int start_position)
: parent_id(parent_id),
inline_position(inline_position),
script_id(script_id),
start_position(start_position) {}
int parent_id;
SourcePosition inline_position;
int script_id;
int start_position;
std::vector<size_t> deopt_pc_offsets;
static const int kNoParentId = -1;
};
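// The infos form a tree: the top-level function uses kNoParentId, and each
// inlined function stores the index of its caller's entry. A hypothetical
// chain f -> g -> h (g inlined into f, h inlined into g) would be recorded
// roughly as:
//
//   infos[0] = {kNoParentId, pos_f, script_id_f, 0};  // f, the root
//   infos[1] = {0, pos_g, script_id_g, 10};           // g, parent is infos[0]
//   infos[2] = {1, pos_h, script_id_h, 25};           // h, parent is infos[1]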
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo {
public:
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
enum Flag {
kDeferredCalling = 1 << 0,
kNonDeferredCalling = 1 << 1,
kSavesCallerDoubles = 1 << 2,
kRequiresFrame = 1 << 3,
kMustNotHaveEagerFrame = 1 << 4,
kDeoptimizationSupport = 1 << 5,
kDebug = 1 << 6,
kSerializing = 1 << 7,
kFunctionContextSpecializing = 1 << 8,
kFrameSpecializing = 1 << 9,
kNativeContextSpecializing = 1 << 10,
kInliningEnabled = 1 << 11,
kTypingEnabled = 1 << 12,
kDisableFutureOptimization = 1 << 13,
kSplittingEnabled = 1 << 14,
kTypeFeedbackEnabled = 1 << 15,
kDeoptimizationEnabled = 1 << 16,
kSourcePositionsEnabled = 1 << 17,
kFirstCompile = 1 << 18,
};
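// The flags are independent bits and combine bitwise; for example, a debug
// compilation carries both kDebug and kDeoptimizationSupport (see
// MarkAsDebug() below). Conceptually:
//
//   unsigned flags = kDebug | kDeoptimizationSupport;
//   bool is_debug = (flags & kDebug) != 0;  // true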
explicit CompilationInfo(ParseInfo* parse_info);
CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone);
CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
ParseInfo* parse_info() const { return parse_info_; }
// -----------------------------------------------------------
// TODO(titzer): inline and delete accessors of ParseInfo
// -----------------------------------------------------------
Handle<Script> script() const;
bool is_eval() const;
bool is_native() const;
bool is_module() const;
LanguageMode language_mode() const;
Handle<JSFunction> closure() const;
FunctionLiteral* literal() const;
Scope* scope() const;
Handle<Context> context() const;
Handle<SharedFunctionInfo> shared_info() const;
bool has_shared_info() const;
bool has_context() const;
bool has_literal() const;
bool has_scope() const;
// -----------------------------------------------------------
Isolate* isolate() const {
return isolate_;
}
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_ast_id_.IsNone(); }
Handle<Code> code() const { return code_; }
CodeStub* code_stub() const { return code_stub_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_parameters_including_this() const;
bool is_this_defined() const;
int num_heap_slots() const;
void set_parameter_count(int parameter_count) {
DCHECK(IsStub());
parameter_count_ = parameter_count;
}
bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
}
void MarkAsDeferredCalling() { SetFlag(kDeferredCalling); }
bool is_deferred_calling() const { return GetFlag(kDeferredCalling); }
void MarkAsNonDeferredCalling() { SetFlag(kNonDeferredCalling); }
bool is_non_deferred_calling() const { return GetFlag(kNonDeferredCalling); }
void MarkAsSavesCallerDoubles() { SetFlag(kSavesCallerDoubles); }
bool saves_caller_doubles() const { return GetFlag(kSavesCallerDoubles); }
void MarkAsRequiresFrame() { SetFlag(kRequiresFrame); }
bool requires_frame() const { return GetFlag(kRequiresFrame); }
void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
bool GetMustNotHaveEagerFrame() const {
return GetFlag(kMustNotHaveEagerFrame);
}
// Compilations marked as debug produce unoptimized code with debug break
// slots. Inner functions that cannot be compiled without a context are
// compiled eagerly. Deoptimization support is always included to avoid
// having to recompile again later.
void MarkAsDebug() {
SetFlag(kDebug);
SetFlag(kDeoptimizationSupport);
}
bool is_debug() const { return GetFlag(kDebug); }
void PrepareForSerializing() { SetFlag(kSerializing); }
bool will_serialize() const { return GetFlag(kSerializing); }
void MarkAsFunctionContextSpecializing() {
SetFlag(kFunctionContextSpecializing);
}
bool is_function_context_specializing() const {
return GetFlag(kFunctionContextSpecializing);
}
void MarkAsFrameSpecializing() { SetFlag(kFrameSpecializing); }
bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
void MarkAsNativeContextSpecializing() {
SetFlag(kNativeContextSpecializing);
}
bool is_native_context_specializing() const {
return GetFlag(kNativeContextSpecializing);
}
void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
bool is_type_feedback_enabled() const {
return GetFlag(kTypeFeedbackEnabled);
}
void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
bool is_deoptimization_enabled() const {
return GetFlag(kDeoptimizationEnabled);
}
void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
bool is_source_positions_enabled() const {
return GetFlag(kSourcePositionsEnabled);
}
void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
void MarkAsFirstCompile() { SetFlag(kFirstCompile); }
void MarkAsCompiled() { SetFlag(kFirstCompile, false); }
bool is_first_compile() const { return GetFlag(kFirstCompile); }
bool IsCodePreAgingActive() const {
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
!is_debug();
}
void EnsureFeedbackVector();
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
void SetCode(Handle<Code> code) { code_ = code; }
bool ShouldTrapOnDeopt() const {
return (FLAG_trap_on_deopt && IsOptimizing()) ||
(FLAG_trap_on_stub_deopt && IsStub());
}
bool has_global_object() const {
return !closure().is_null() &&
(closure()->context()->global_object() != NULL);
}
GlobalObject* global_object() const {
return has_global_object() ? closure()->context()->global_object() : NULL;
}
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
DCHECK(has_shared_info());
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
optimization_id_ = isolate()->NextOptimizationId();
set_output_code_kind(Code::OPTIMIZED_FUNCTION);
}
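// Illustrative (names are placeholders): a regular optimization passes
// BailoutId::None(), so is_osr() stays false; an OSR compile passes the id
// of the loop to enter:
//
//   info->SetOptimizing(BailoutId::None(), unoptimized_code);  // plain opt
//   info->SetOptimizing(loop_ast_id, unoptimized_code);        // OSR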
void SetFunctionType(Type::FunctionType* function_type) {
function_type_ = function_type;
}
Type::FunctionType* function_type() const { return function_type_; }
void SetStub(CodeStub* code_stub);
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return GetFlag(kDeoptimizationSupport);
}
void EnableDeoptimizationSupport() {
DCHECK_EQ(BASE, mode_);
SetFlag(kDeoptimizationSupport);
}
bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
bool MustReplaceUndefinedReceiverWithGlobalProxy();
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
void set_deferred_handles(DeferredHandles* deferred_handles) {
DCHECK(deferred_handles_ == NULL);
deferred_handles_ = deferred_handles;
}
void ReopenHandlesInNewHandleScope() {
unoptimized_code_ = Handle<Code>(*unoptimized_code_);
}
void AbortOptimization(BailoutReason reason) {
DCHECK(reason != kNoReason);
if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
SetFlag(kDisableFutureOptimization);
}
void RetryOptimization(BailoutReason reason) {
DCHECK(reason != kNoReason);
if (GetFlag(kDisableFutureOptimization)) return;
bailout_reason_ = reason;
}
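// Note the asymmetry (sketch; the reason names are placeholders): aborting
// permanently disables optimization, after which a retry is ignored:
//
//   info->AbortOptimization(kSomeFatalReason);  // sets kDisableFutureOptimization
//   info->RetryOptimization(kSomeOtherReason);  // no-op: already disabled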
BailoutReason bailout_reason() const { return bailout_reason_; }
int prologue_offset() const {
DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
return prologue_offset_;
}
void set_prologue_offset(int prologue_offset) {
DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
prologue_offset_ = prologue_offset;
}
int start_position_for(uint32_t inlining_id) {
return inlined_function_infos_.at(inlining_id).start_position;
}
const std::vector<InlinedFunctionInfo>& inlined_function_infos() {
return inlined_function_infos_;
}
void LogDeoptCallPosition(int pc_offset, int inlining_id);
int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
SourcePosition position, int parent_id);
CompilationDependencies* dependencies() { return &dependencies_; }
bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure());
}
int optimization_id() const { return optimization_id_; }
int osr_expr_stack_height() { return osr_expr_stack_height_; }
void set_osr_expr_stack_height(int height) {
DCHECK(height >= 0);
osr_expr_stack_height_ = height;
}
JavaScriptFrame* osr_frame() const { return osr_frame_; }
void set_osr_frame(JavaScriptFrame* osr_frame) { osr_frame_ = osr_frame; }
#if DEBUG
void PrintAstForTesting();
#endif
bool has_simple_parameters();
typedef std::vector<Handle<SharedFunctionInfo>> InlinedFunctionList;
InlinedFunctionList const& inlined_functions() const {
return inlined_functions_;
}
void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function) {
inlined_functions_.push_back(inlined_function);
}
base::SmartArrayPointer<char> GetDebugName() const;
Code::Kind output_code_kind() const { return output_code_kind_; }
void set_output_code_kind(Code::Kind kind) { output_code_kind_ = kind; }
protected:
ParseInfo* parse_info_;
void DisableFutureOptimization() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
}
private:
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
enum Mode {
BASE,
OPTIMIZE,
STUB
};
CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
const char* debug_name, Mode mode, Isolate* isolate,
Zone* zone);
Isolate* isolate_;
void SetMode(Mode mode) {
mode_ = mode;
}
void SetFlag(Flag flag) { flags_ |= flag; }
void SetFlag(Flag flag, bool value) {
flags_ = value ? flags_ | flag : flags_ & ~flag;
}
bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
unsigned flags_;
Code::Kind output_code_kind_;
// For compiled stubs, the stub object
CodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
// Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
Handle<TypeFeedbackVector> feedback_vector_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
// The unoptimized code we patched for OSR may not be the shared code
// afterwards, since we may need to compile it again to include
// deoptimization data. Keep track of which code we patched.
Handle<Code> unoptimized_code_;
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;
DeferredHandles* deferred_handles_;
// Dependencies for this compilation, e.g. stable maps.
CompilationDependencies dependencies_;
BailoutReason bailout_reason_;
int prologue_offset_;
std::vector<InlinedFunctionInfo> inlined_function_infos_;
bool track_positions_;
InlinedFunctionList inlined_functions_;
// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
int opt_count_;
// Number of parameters used for compilation of stubs that require arguments.
int parameter_count_;
int optimization_id_;
int osr_expr_stack_height_;
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
Type::FunctionType* function_type_;
const char* debug_name_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
class CompilationHandleScope BASE_EMBEDDED {
public:
explicit CompilationHandleScope(CompilationInfo* info)
: deferred_(info->isolate()), info_(info) {}
~CompilationHandleScope() {
info_->set_deferred_handles(deferred_.Detach());
}
private:
DeferredHandleScope deferred_;
CompilationInfo* info_;
};
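// Usage sketch (illustrative): open the scope around a compilation step so
// that handles created inside survive the scope by being detached into the
// CompilationInfo:
//
//   {
//     CompilationHandleScope handle_scope(info);
//     // ... allocate handles; they are moved into info on scope exit ...
//   }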
class HGraph;
class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
// Crankshaft and keeps track of its state. The three phases
// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
class OptimizedCompileJob: public ZoneObject {
public:
explicit OptimizedCompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
last_status_(FAILED),
awaiting_install_(false) { }
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
};
MUST_USE_RESULT Status CreateGraph();
MUST_USE_RESULT Status OptimizeGraph();
MUST_USE_RESULT Status GenerateCode();
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
Status RetryOptimization(BailoutReason reason) {
info_->RetryOptimization(reason);
return SetLastStatus(BAILED_OUT);
}
Status AbortOptimization(BailoutReason reason) {
info_->AbortOptimization(reason);
return SetLastStatus(BAILED_OUT);
}
void WaitForInstall() {
DCHECK(info_->is_osr());
awaiting_install_ = true;
}
bool IsWaitingForInstall() { return awaiting_install_; }
private:
CompilationInfo* info_;
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
base::TimeDelta time_taken_to_create_graph_;
base::TimeDelta time_taken_to_optimize_;
base::TimeDelta time_taken_to_codegen_;
Status last_status_;
bool awaiting_install_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
return last_status_;
}
void RecordOptimizationStats();
struct Timer {
Timer(OptimizedCompileJob* job, base::TimeDelta* location)
: job_(job), location_(location) {
DCHECK(location_ != NULL);
timer_.Start();
}
~Timer() {
*location_ += timer_.Elapsed();
}
OptimizedCompileJob* job_;
base::ElapsedTimer timer_;
base::TimeDelta* location_;
};
};
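// Driver sketch (illustrative): the phases run in order and any status
// other than SUCCEEDED stops the pipeline; last_status() then reports the
// outcome of the phase that ran last:
//
//   OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
//   if (job->CreateGraph() == OptimizedCompileJob::SUCCEEDED &&
//       job->OptimizeGraph() == OptimizedCompileJob::SUCCEEDED &&
//       job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
//     // ... install the generated code ...
//   }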
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function
// without parameters which can then be executed. If the source code contains
// other functions, they will be compiled and allocated as part of the
// compilation of the source code.
// Please note this interface returns shared function infos. This means you
// need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
// real function with a context.
class Compiler : public AllStatic {
public:
MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetStubCode(
Handle<JSFunction> function, CodeStub* stub);
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileDebugCode(Handle<JSFunction> function);
static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
static void CompileForLiveEdit(Handle<Script> script);
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* info);
// Rewrite, analyze scopes, and renumber.
static bool Analyze(ParseInfo* info);
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
// Compile a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int line_offset, int column_offset = 0,
Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
// Compile a String source within a context.
static Handle<SharedFunctionInfo> CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Object> source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
NativesFlag is_natives_code, bool is_module);
static Handle<SharedFunctionInfo> CompileStreamedScript(Handle<Script> script,
ParseInfo* info,
int source_length);
// Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
// Generate and return optimized code or start a concurrent optimization job.
// In the latter case, return the InOptimizationQueue builtin. On failure,
// return the empty handle.
MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
Handle<JSFunction> function, Handle<Code> current_code,
ConcurrencyMode mode, BailoutId osr_ast_id = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr);
// Generate and return code from previously queued optimization job.
// On failure, return the empty handle.
static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
};
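// Usage sketch (illustrative): lazily compile a function, then request
// optimized code for it once it becomes hot:
//
//   Handle<Code> code;
//   if (Compiler::GetLazyCode(function).ToHandle(&code)) {
//     MaybeHandle<Code> optimized = Compiler::GetOptimizedCode(
//         function, code, Compiler::NOT_CONCURRENT);
//   }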
class CompilationPhase BASE_EMBEDDED {
public:
CompilationPhase(const char* name, CompilationInfo* info);
~CompilationPhase();
protected:
bool ShouldProduceTraceOutput() const;
const char* name() const { return name_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
Zone* zone() { return &zone_; }
private:
const char* name_;
CompilationInfo* info_;
Zone zone_;
size_t info_zone_start_allocation_size_;
base::ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
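// Usage sketch (illustrative, with a hypothetical phase name): a phase is
// an RAII scope; construction starts the timer and records the zone size,
// and destruction attributes the elapsed time and allocation to the phase:
//
//   {
//     CompilationPhase phase("V8.ExamplePhase", info);
//     // ... do the work of the phase ...
//   }  // stats recorded here when tracing is enabled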
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_H_