Optimize functions on a second thread.
BUG=
TEST=
Review URL: https://chromiumcodereview.appspot.com/10807024

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12148 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit 693c7643d2 (parent 5b0d3a09eb)
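
In one paragraph, what this change does: with the new --parallel-recompilation flag (off by default), hot functions are no longer optimized synchronously on the execution thread. Graph building still happens on the execution thread, the optimization phase runs on a new OptimizingCompilerThread, and the finished code is installed at the next stack-guard interrupt. A condensed sketch of the round trip, using only names that appear in the diff below (control flow simplified for illustration, not literal code):

    // Execution thread (Compiler::RecompileParallel):
    compiler->CreateGraph();                           // parsing/graph building needs the heap
    optimizing_compiler_thread()->QueueForOptimization(compiler);
    closure->ReplaceCode(builtin(Builtins::kInRecompileQueue));  // keep running full code

    // Compiler thread (OptimizingCompilerThread::Run):
    optimizing_compiler->OptimizeGraph();              // no allocation, no handle creation
    output_queue_.Enqueue(optimizing_compiler);
    isolate_->stack_guard()->RequestCodeReadyEvent();  // raises the CODE_READY interrupt

    // Execution thread, at the next interrupt (Execution::HandleStackGuardInterrupt):
    optimizing_compiler_thread()->InstallOptimizedFunctions();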
@@ -106,6 +106,7 @@ SOURCES = {
    objects-visiting.cc
    objects.cc
    once.cc
    optimizing-compiler-thread.cc
    parser.cc
    preparse-data.cc
    preparser.cc
@@ -697,6 +697,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}


static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ mov(pc, r2);
}


void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
  GenerateTailCallToSharedCode(masm);
}


void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Push a copy of the function onto the stack.
    __ push(r1);
    // Push call kind information.
    __ push(r5);

    __ push(r1);  // Function is also the parameter to the runtime call.
    __ CallRuntime(Runtime::kParallelRecompile, 1);

    // Restore call kind information.
    __ pop(r5);
    // Restore receiver.
    __ pop(r1);

    // Tear down internal frame.
  }

  GenerateTailCallToSharedCode(masm);
}


static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                           bool is_api_function,
                                           bool count_constructions) {
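
A note on the two builtins added above, since the same pattern repeats for ia32 and x64 later in this diff (a summary of the assembly, not new behavior):

    // kParallelRecompile: builds an internal frame, calls
    //   Runtime::kParallelRecompile (which queues the job), tears the
    //   frame down, then falls through.
    // kInRecompileQueue: no runtime call at all.
    // Both end in GenerateTailCallToSharedCode, i.e. they jump straight
    // to the unoptimized code held by the SharedFunctionInfo, so a
    // function whose code pointer is one of these builtins keeps
    // executing at full-code speed instead of blocking on the compiler.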
@@ -37,7 +37,7 @@
#include "list-inl.h"
#include "runtime.h"
#include "small-pointer-list.h"
#include "smart-array-pointer.h"
#include "smart-pointers.h"
#include "token.h"
#include "utils.h"
#include "variables.h"
@@ -66,6 +66,8 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_A(V) \
  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
    Code::kNoExtraICState) \
  V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
    Code::kNoExtraICState) \
  V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
    Code::kNoExtraICState) \
  V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@@ -80,6 +82,8 @@ enum BuiltinExtraArguments {
    Code::kNoExtraICState) \
  V(LazyRecompile, BUILTIN, UNINITIALIZED, \
    Code::kNoExtraICState) \
  V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
    Code::kNoExtraICState) \
  V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
    Code::kNoExtraICState) \
  V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
@@ -347,6 +351,8 @@ class Builtins {
  static void Generate_Adaptor(MacroAssembler* masm,
                               CFunctionId id,
                               BuiltinExtraArguments extra_args);
  static void Generate_InRecompileQueue(MacroAssembler* masm);
  static void Generate_ParallelRecompile(MacroAssembler* masm);
  static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
  static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
  static void Generate_JSConstructStubApi(MacroAssembler* masm);
src/compiler.cc
@@ -207,6 +207,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
  }
  status = compiler.OptimizeGraph();
  if (status != OptimizingCompiler::SUCCEEDED) {
    status = compiler.AbortOptimization();
    return status != OptimizingCompiler::FAILED;
  }
  status = compiler.GenerateAndInstallCode();
@@ -340,17 +341,20 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}

OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
  AssertNoAllocation no_gc;
  NoHandleAllocation no_handles;

  ASSERT(last_status() == SUCCEEDED);
  Timer t(this, &time_taken_to_optimize_);
  ASSERT(graph_ != NULL);
  SmartArrayPointer<char> bailout_reason;
  if (!graph_->Optimize(&bailout_reason)) {
    if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
    return AbortOptimization();
    return SetLastStatus(BAILED_OUT);
  } else {
    chunk_ = LChunk::NewChunk(graph_);
    if (chunk_ == NULL) {
      return AbortOptimization();
      return SetLastStatus(BAILED_OUT);
    }
  }
  return SetLastStatus(SUCCEEDED);
@@ -658,21 +662,91 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
}


bool Compiler::CompileLazy(CompilationInfo* info) {
  Isolate* isolate = info->isolate();

  ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);

  // The VM is in the COMPILER state until exiting this function.
  VMState state(isolate, COMPILER);

  PostponeInterruptsScope postpone(isolate);

static bool InstallFullCode(CompilationInfo* info) {
  // Update the shared function info with the compiled code and the
  // scope info.  Please note, that the order of the shared function
  // info initialization is important since set_scope_info might
  // trigger a GC, causing the ASSERT below to be invalid if the code
  // was flushed. By setting the code object last we avoid this.
  Handle<SharedFunctionInfo> shared = info->shared_info();
  int compiled_size = shared->end_position() - shared->start_position();
  isolate->counters()->total_compile_size()->Increment(compiled_size);
  Handle<Code> code = info->code();
  Handle<JSFunction> function = info->closure();
  Handle<ScopeInfo> scope_info =
      ScopeInfo::Create(info->scope(), info->zone());
  shared->set_scope_info(*scope_info);
  shared->set_code(*code);
  if (!function.is_null()) {
    function->ReplaceCode(*code);
    ASSERT(!function->IsOptimized());
  }

  // Set the expected number of properties for instances.
  FunctionLiteral* lit = info->function();
  int expected = lit->expected_property_count();
  SetExpectedNofPropertiesFromEstimate(shared, expected);

  // Set the optimization hints after performing lazy compilation, as
  // these are not set when the function is set up as a lazily
  // compiled function.
  shared->SetThisPropertyAssignmentsInfo(
      lit->has_only_simple_this_property_assignments(),
      *lit->this_property_assignments());

  // Check the function has compiled code.
  ASSERT(shared->is_compiled());
  shared->set_code_age(0);
  shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
  shared->set_dont_inline(lit->flags()->Contains(kDontInline));
  shared->set_ast_node_count(lit->ast_node_count());

  if (V8::UseCrankshaft() &&
      !function.is_null() &&
      !shared->optimization_disabled()) {
    // If we're asked to always optimize, we compile the optimized
    // version of the function right away - unless the debugger is
    // active as it makes no sense to compile optimized code then.
    if (FLAG_always_opt &&
        !Isolate::Current()->DebuggerHasBreakPoints()) {
      CompilationInfoWithZone optimized(function);
      optimized.SetOptimizing(AstNode::kNoNumber);
      return Compiler::CompileLazy(&optimized);
    }
  }
  return true;
}


static void InstallCodeCommon(CompilationInfo* info) {
  Handle<SharedFunctionInfo> shared = info->shared_info();
  Handle<Code> code = info->code();
  ASSERT(!code.is_null());

  // Set optimizable to false if this is disallowed by the shared
  // function info, e.g., we might have flushed the code and must
  // reset this bit when lazy compiling the code again.
  if (shared->optimization_disabled()) code->set_optimizable(false);

  Handle<JSFunction> function = info->closure();
  Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
}


static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
  Handle<Code> code = info->code();
  Handle<JSFunction> function = info->closure();
  if (FLAG_cache_optimized_code && code->kind() == Code::OPTIMIZED_FUNCTION) {
    Handle<SharedFunctionInfo> shared(function->shared());
    Handle<FixedArray> literals(function->literals());
    Handle<Context> global_context(function->context()->global_context());
    SharedFunctionInfo::AddToOptimizedCodeMap(
        shared, global_context, code, literals);
  }
}


static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
  if (FLAG_cache_optimized_code && info->IsOptimizing()) {
    Handle<SharedFunctionInfo> shared = info->shared_info();
    Handle<JSFunction> function = info->closure();
    ASSERT(!function.is_null());
    Handle<Context> global_context(function->context()->global_context());
@@ -688,6 +762,25 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
      return true;
    }
  }
  return false;
}


bool Compiler::CompileLazy(CompilationInfo* info) {
  Isolate* isolate = info->isolate();

  ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);

  // The VM is in the COMPILER state until exiting this function.
  VMState state(isolate, COMPILER);

  PostponeInterruptsScope postpone(isolate);

  Handle<SharedFunctionInfo> shared = info->shared_info();
  int compiled_size = shared->end_position() - shared->start_position();
  isolate->counters()->total_compile_size()->Increment(compiled_size);

  if (InstallCodeFromOptimizedCodeMap(info)) return true;

  // Generate the AST for the lazily compiled function.
  if (ParserApi::Parse(info, kNoParsingFlags)) {
@@ -707,78 +800,17 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
      isolate->StackOverflow();
    }
  } else {
    ASSERT(!info->code().is_null());
    Handle<Code> code = info->code();
    // Set optimizable to false if this is disallowed by the shared
    // function info, e.g., we might have flushed the code and must
    // reset this bit when lazy compiling the code again.
    if (shared->optimization_disabled()) code->set_optimizable(false);

    Handle<JSFunction> function = info->closure();
    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
    InstallCodeCommon(info);

    if (info->IsOptimizing()) {
      Handle<Code> code = info->code();
      ASSERT(shared->scope_info() != ScopeInfo::Empty());
      function->ReplaceCode(*code);
      if (FLAG_cache_optimized_code &&
          code->kind() == Code::OPTIMIZED_FUNCTION) {
        Handle<SharedFunctionInfo> shared(function->shared());
        Handle<FixedArray> literals(function->literals());
        Handle<Context> global_context(function->context()->global_context());
        SharedFunctionInfo::AddToOptimizedCodeMap(
            shared, global_context, code, literals);
      }
      info->closure()->ReplaceCode(*code);
      InsertCodeIntoOptimizedCodeMap(info);
      return true;
    } else {
      // Update the shared function info with the compiled code and the
      // scope info.  Please note, that the order of the shared function
      // info initialization is important since set_scope_info might
      // trigger a GC, causing the ASSERT below to be invalid if the code
      // was flushed. By setting the code object last we avoid this.
      Handle<ScopeInfo> scope_info =
          ScopeInfo::Create(info->scope(), info->zone());
      shared->set_scope_info(*scope_info);
      shared->set_code(*code);
      if (!function.is_null()) {
        function->ReplaceCode(*code);
        ASSERT(!function->IsOptimized());
      }

      // Set the expected number of properties for instances.
      FunctionLiteral* lit = info->function();
      int expected = lit->expected_property_count();
      SetExpectedNofPropertiesFromEstimate(shared, expected);

      // Set the optimization hints after performing lazy compilation, as
      // these are not set when the function is set up as a lazily
      // compiled function.
      shared->SetThisPropertyAssignmentsInfo(
          lit->has_only_simple_this_property_assignments(),
          *lit->this_property_assignments());

      // Check the function has compiled code.
      ASSERT(shared->is_compiled());
      shared->set_code_age(0);
      shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
      shared->set_dont_inline(lit->flags()->Contains(kDontInline));
      shared->set_dont_cache(lit->flags()->Contains(kDontCache));
      shared->set_ast_node_count(lit->ast_node_count());

      if (V8::UseCrankshaft() &&
          !function.is_null() &&
          !shared->optimization_disabled()) {
        // If we're asked to always optimize, we compile the optimized
        // version of the function right away - unless the debugger is
        // active as it makes no sense to compile optimized code then.
        if (FLAG_always_opt &&
            !Isolate::Current()->DebuggerHasBreakPoints()) {
          CompilationInfoWithZone optimized(function);
          optimized.SetOptimizing(AstNode::kNoNumber);
          return CompileLazy(&optimized);
        }
      }
      return InstallFullCode(info);
    }

      return true;
    }
  }
@@ -787,6 +819,91 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}


void Compiler::RecompileParallel(Handle<JSFunction> closure) {
  ASSERT(closure->IsMarkedForParallelRecompilation());
  if (closure->IsInRecompileQueue()) return;

  Isolate* isolate = closure->GetIsolate();
  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
    if (FLAG_trace_parallel_recompilation) {
      PrintF("  ** Compilation queue, will retry opting on next run.\n");
    }
    return;
  }

  SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
  VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
  PostponeInterruptsScope postpone(isolate);

  Handle<SharedFunctionInfo> shared = info->shared_info();
  int compiled_size = shared->end_position() - shared->start_position();
  isolate->counters()->total_compile_size()->Increment(compiled_size);
  info->SetOptimizing(AstNode::kNoNumber);

  {
    CompilationHandleScope handle_scope(*info);

    if (InstallCodeFromOptimizedCodeMap(*info)) return;

    if (ParserApi::Parse(*info, kNoParsingFlags)) {
      LanguageMode language_mode = info->function()->language_mode();
      info->SetLanguageMode(language_mode);
      shared->set_language_mode(language_mode);
      info->SaveHandles();

      if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
        OptimizingCompiler* compiler =
            new(info->zone()) OptimizingCompiler(*info);
        OptimizingCompiler::Status status = compiler->CreateGraph();
        if (status == OptimizingCompiler::SUCCEEDED) {
          isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
          shared->code()->set_profiler_ticks(0);
          closure->ReplaceCode(isolate->builtins()->builtin(
              Builtins::kInRecompileQueue));
          info.Detach();
        } else if (status == OptimizingCompiler::BAILED_OUT) {
          isolate->clear_pending_exception();
          InstallFullCode(*info);
        }
      }
    }
  }

  if (isolate->has_pending_exception()) {
    isolate->clear_pending_exception();
  }
}


void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
  SmartPointer<CompilationInfo> info(optimizing_compiler->info());
  // If crankshaft succeeded, install the optimized code else install
  // the unoptimized code.
  OptimizingCompiler::Status status = optimizing_compiler->last_status();
  if (status != OptimizingCompiler::SUCCEEDED) {
    status = optimizing_compiler->AbortOptimization();
  } else {
    status = optimizing_compiler->GenerateAndInstallCode();
    ASSERT(status == OptimizingCompiler::SUCCEEDED ||
           status == OptimizingCompiler::BAILED_OUT);
  }

  InstallCodeCommon(*info);
  if (status == OptimizingCompiler::SUCCEEDED) {
    Handle<Code> code = info->code();
    ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
    info->closure()->ReplaceCode(*code);
    if (info->shared_info()->SearchOptimizedCodeMap(
            info->closure()->context()->global_context()) == -1) {
      InsertCodeIntoOptimizedCodeMap(*info);
    }
  } else {
    info->SetCode(Handle<Code>(info->shared_info()->code()));
    InstallFullCode(*info);
  }
}


Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                                       Handle<Script> script) {
  // Precondition: code has been parsed and scopes have been analyzed.
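
RecompileParallel above is the producer side of the pipeline: everything that needs the heap (parsing, scope analysis, graph building) happens here on the execution thread, and only then is the job handed off. A condensed trigger-to-install trace, assembled from the pieces of this diff (not a literal call stack):

    RuntimeProfiler::Optimize()                        // runtime-profiler.cc
      -> JSFunction::MarkForParallelRecompilation()    // code = kParallelRecompile
    ...next call runs Builtins::Generate_ParallelRecompile...
      -> Runtime_ParallelRecompile()                   // runtime.cc
      -> Compiler::RecompileParallel()                 // queue job, code = kInRecompileQueue
    ...compiler thread runs OptimizeGraph(), raises CODE_READY...
      -> Execution::HandleStackGuardInterrupt()
      -> OptimizingCompilerThread::InstallOptimizedFunctions()
      -> Compiler::InstallOptimizedCode()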
@@ -39,7 +39,7 @@ class ScriptDataImpl;

// CompilationInfo encapsulates some information known at compile time.  It
// is constructed based on the resources available at compile-time.
class CompilationInfo BASE_EMBEDDED {
class CompilationInfo {
 public:
  CompilationInfo(Handle<Script> script, Zone* zone);
  CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
@@ -180,6 +180,13 @@ class CompilationInfo BASE_EMBEDDED {
    deferred_handles_ = deferred_handles;
  }

  void SaveHandles() {
    SaveHandle(&closure_);
    SaveHandle(&shared_info_);
    SaveHandle(&calling_context_);
    SaveHandle(&script_);
  }

 private:
  Isolate* isolate_;

@@ -268,6 +275,14 @@ class CompilationInfo BASE_EMBEDDED {

  DeferredHandles* deferred_handles_;

  template<typename T>
  void SaveHandle(Handle<T> *object) {
    if (!object->is_null()) {
      Handle<T> handle(*(*object));
      *object = handle;
    }
  }

  DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};

@@ -346,6 +361,12 @@ class OptimizingCompiler: public ZoneObject {
  Status last_status() const { return last_status_; }
  CompilationInfo* info() const { return info_; }

  MUST_USE_RESULT Status AbortOptimization() {
    info_->AbortOptimization();
    info_->shared_info()->DisableOptimization();
    return SetLastStatus(BAILED_OUT);
  }

 private:
  CompilationInfo* info_;
  TypeFeedbackOracle* oracle_;
@@ -362,11 +383,6 @@ class OptimizingCompiler: public ZoneObject {
    return last_status_;
  }
  void RecordOptimizationStats();
  MUST_USE_RESULT Status AbortOptimization() {
    info_->AbortOptimization();
    info_->shared_info()->DisableOptimization();
    return SetLastStatus(BAILED_OUT);
  }

  struct Timer {
    Timer(OptimizingCompiler* compiler, int64_t* location)
@@ -432,6 +448,8 @@ class Compiler : public AllStatic {
  // success and false if the compilation resulted in a stack overflow.
  static bool CompileLazy(CompilationInfo* info);

  static void RecompileParallel(Handle<JSFunction> function);

  // Compile a shared function info object (the function is possibly lazily
  // compiled).
  static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
@@ -443,6 +461,8 @@ class Compiler : public AllStatic {
                                     bool is_toplevel,
                                     Handle<Script> script);

  static void InstallOptimizedCode(OptimizingCompiler* info);

#ifdef ENABLE_DEBUGGER_SUPPORT
  static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif
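
Why SaveHandles/SaveHandle exist: a Handle created inside a stack-allocated HandleScope dies with that scope, but a queued compilation job outlives every scope on the execution thread's stack. Re-creating each handle inside the CompilationHandleScope (opened in RecompileParallel) puts its slot among the deferred handles that stay alive until the job's CompilationInfo is destroyed. The idiom, reduced to one hypothetical handle h (illustration only, not from the diff):

    Handle<JSFunction> h = ...;  // points into some enclosing HandleScope
    h = Handle<JSFunction>(*h);  // re-created in the *current* scope, i.e. the
                                 // CompilationHandleScope whose handles are
                                 // detached into DeferredHandles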
src/d8.h
@@ -31,7 +31,7 @@
#ifndef V8_SHARED
#include "allocation.h"
#include "hashmap.h"
#include "smart-array-pointer.h"
#include "smart-pointers.h"
#include "v8.h"
#else
#include "../include/v8.h"
@@ -446,6 +446,25 @@ void StackGuard::RequestRuntimeProfilerTick() {
}


void StackGuard::RequestCodeReadyEvent() {
  ASSERT(FLAG_parallel_recompilation);
  if (ExecutionAccess::TryLock(isolate_)) {
    thread_local_.interrupt_flags_ |= CODE_READY;
    if (thread_local_.postpone_interrupts_nesting_ == 0) {
      thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
      isolate_->heap()->SetStackLimits();
    }
    ExecutionAccess::Unlock(isolate_);
  }
}


bool StackGuard::IsCodeReadyEvent() {
  ExecutionAccess access(isolate_);
  return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
}


bool StackGuard::IsGCRequest() {
  ExecutionAccess access(isolate_);
  return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
@@ -911,6 +930,17 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
    stack_guard->Continue(GC_REQUEST);
  }

  if (stack_guard->IsCodeReadyEvent()) {
    ASSERT(FLAG_parallel_recompilation);
    if (FLAG_trace_parallel_recompilation) {
      PrintF("  ** CODE_READY event received.\n");
    }
    stack_guard->Continue(CODE_READY);
  }
  if (!stack_guard->IsTerminateExecution()) {
    isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
  }

  isolate->counters()->stack_interrupts()->Increment();
  // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
  if (FLAG_count_based_interrupts ||
@@ -42,7 +42,8 @@ enum InterruptFlag {
  PREEMPT = 1 << 3,
  TERMINATE = 1 << 4,
  RUNTIME_PROFILER_TICK = 1 << 5,
  GC_REQUEST = 1 << 6
  GC_REQUEST = 1 << 6,
  CODE_READY = 1 << 7
};

@@ -195,6 +196,8 @@ class StackGuard {
  void TerminateExecution();
  bool IsRuntimeProfilerTick();
  void RequestRuntimeProfilerTick();
  bool IsCodeReadyEvent();
  void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
  bool IsDebugBreak();
  void DebugBreak();
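
The CODE_READY flag is the only cross-thread signal in the design: the compiler thread cannot touch handles or run JS, so it merely pokes the stack guard, and the execution thread does the installation at its next interrupt check. Note the asymmetry in locking, summarized as a sketch (condensed from the code above):

    // Compiler thread: must not block on the execution lock, so it only
    // TryLock()s. If the lock is contested, the waiting functions are
    // still picked up, because HandleStackGuardInterrupt installs
    // optimized functions on every interrupt (the !IsTerminateExecution()
    // branch), not only when a CODE_READY flag was actually set.
    if (ExecutionAccess::TryLock(isolate_)) { ... }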
@@ -218,6 +218,12 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
            "optimize functions containing for-in loops")

DEFINE_bool(parallel_recompilation, false,
            "optimizing hot functions asynchronously on a separate thread")
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
DEFINE_int(parallel_recompilation_queue_length, 2,
           "the length of the parallel compilation queue")

// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
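
The feature therefore ships off by default. To try it, the flags above can be passed to d8 (or through an embedder's flag string); a hypothetical invocation, script name invented:

    d8 --parallel-recompilation --trace-parallel-recompilation bench.js

The default queue length of 2 bounds how many jobs may be in flight; IsQueueAvailable() in optimizing-compiler-thread.h enforces it, and a hot function that finds the queue full simply stays unoptimized until a later profiler tick retries.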
@@ -31,7 +31,7 @@
#include "v8.h"

#include "platform.h"
#include "smart-array-pointer.h"
#include "smart-pointers.h"
#include "string-stream.h"
@@ -149,25 +149,31 @@ T** HandleScope::CreateHandle(T* value, Isolate* isolate) {

#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
  Isolate* isolate = Isolate::Current();
  v8::ImplementationUtilities::HandleScopeData* current =
      Isolate::Current()->handle_scope_data();
      isolate->handle_scope_data();

  // Shrink the current handle scope to make it impossible to do
  // handle allocations without an explicit handle scope.
  current->limit = current->next;
  active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
  if (active_) {
    // Shrink the current handle scope to make it impossible to do
    // handle allocations without an explicit handle scope.
    current->limit = current->next;

  level_ = current->level;
  current->level = 0;
    level_ = current->level;
    current->level = 0;
  }
}


inline NoHandleAllocation::~NoHandleAllocation() {
  // Restore state in current handle scope to re-enable handle
  // allocations.
  v8::ImplementationUtilities::HandleScopeData* data =
      Isolate::Current()->handle_scope_data();
  ASSERT_EQ(0, data->level);
  data->level = level_;
  if (active_) {
    // Restore state in current handle scope to re-enable handle
    // allocations.
    v8::ImplementationUtilities::HandleScopeData* data =
        Isolate::Current()->handle_scope_data();
    ASSERT_EQ(0, data->level);
    data->level = level_;
  }
}
#endif
@@ -327,6 +327,7 @@ class NoHandleAllocation BASE_EMBEDDED {
  inline ~NoHandleAllocation();
 private:
  int level_;
  bool active_;
#endif
};
@@ -772,22 +772,30 @@ DisallowAllocationFailure::~DisallowAllocationFailure() {

#ifdef DEBUG
AssertNoAllocation::AssertNoAllocation() {
  old_state_ = HEAP->allow_allocation(false);
  Isolate* isolate = ISOLATE;
  active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
  if (active_) {
    old_state_ = isolate->heap()->allow_allocation(false);
  }
}


AssertNoAllocation::~AssertNoAllocation() {
  HEAP->allow_allocation(old_state_);
  if (active_) HEAP->allow_allocation(old_state_);
}


DisableAssertNoAllocation::DisableAssertNoAllocation() {
  old_state_ = HEAP->allow_allocation(true);
  Isolate* isolate = ISOLATE;
  active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
  if (active_) {
    old_state_ = isolate->heap()->allow_allocation(true);
  }
}


DisableAssertNoAllocation::~DisableAssertNoAllocation() {
  HEAP->allow_allocation(old_state_);
  if (active_) HEAP->allow_allocation(old_state_);
}

#else
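
A recurring pattern in this change: the DEBUG-only guard scopes (NoHandleAllocation, AssertNoAllocation, DisableAssertNoAllocation) become no-ops on the optimizer thread, because they mutate per-isolate state that belongs to the execution thread. The new ASSERT_ALLOCATION_DISABLED macro in hydrogen-instructions.h (below) is the matching relaxation on the checking side. The shape of the pattern, reduced to its core (names from the diff):

    // In each constructor/destructor:
    active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
    if (active_) { /* mutate or restore the execution thread's state */ }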
@@ -155,7 +155,8 @@ Heap::Heap()
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      promotion_queue_(this),
      configured_(false),
      chunks_queued_for_free_(NULL) {
      chunks_queued_for_free_(NULL),
      relocation_mutex_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
@@ -1199,6 +1200,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {


void Heap::Scavenge() {
  RelocationLock relocation_lock(this);
#ifdef DEBUG
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@@ -6156,6 +6158,8 @@ bool Heap::SetUp(bool create_heap_objects) {

  store_buffer()->SetUp();

  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();

  return true;
}

@@ -6241,6 +6245,8 @@ void Heap::TearDown() {

  isolate_->memory_allocator()->TearDown();

  delete relocation_mutex_;

#ifdef DEBUG
  delete debug_utils_;
  debug_utils_ = NULL;
src/heap.h
@@ -1620,6 +1620,25 @@ class Heap {

  void CheckpointObjectStats();

  // We don't use a ScopedLock here since we want to lock the heap
  // only when FLAG_parallel_recompilation is true.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      if (FLAG_parallel_recompilation) {
        heap_->relocation_mutex_->Lock();
      }
    }
    ~RelocationLock() {
      if (FLAG_parallel_recompilation) {
        heap_->relocation_mutex_->Unlock();
      }
    }

   private:
    Heap* heap_;
  };

 private:
  Heap();

@@ -2072,6 +2091,8 @@ class Heap {

  MemoryChunk* chunks_queued_for_free_;

  Mutex* relocation_mutex_;

  friend class Factory;
  friend class GCTracer;
  friend class DisallowAllocationFailure;
@@ -2395,6 +2416,7 @@ class AssertNoAllocation {
#ifdef DEBUG
 private:
  bool old_state_;
  bool active_;
#endif
};

@@ -2407,6 +2429,7 @@ class DisableAssertNoAllocation {
#ifdef DEBUG
 private:
  bool old_state_;
  bool active_;
#endif
};
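
Why the RelocationLock exists: the compiler thread reads heap objects (it takes this lock in OptimizingCompilerThread::Run), and a scavenge or evacuation could relocate them underneath it. The lock serializes those phases; both Heap::Scavenge and MarkCompactCollector::EvacuateNewSpaceAndCandidates take it in this diff. Since the mutex is only created in Heap::SetUp when the flag is on, the guard must also test the flag before dereferencing it; an unconditional ScopedLock would crash on the NULL mutex in the default configuration.

    // Usage, as in Heap::Scavenge() and EvacuateNewSpaceAndCandidates():
    Heap::RelocationLock relocation_lock(this);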
@@ -224,6 +224,16 @@ class LChunkBuilder;
  virtual Opcode opcode() const { return HValue::k##type; }


#ifdef DEBUG
#define ASSERT_ALLOCATION_DISABLED do { \
    OptimizingCompilerThread* thread = \
        ISOLATE->optimizing_compiler_thread(); \
    ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \
  } while (0)
#else
#define ASSERT_ALLOCATION_DISABLED do {} while (0)
#endif

class Range: public ZoneObject {
 public:
  Range()
@@ -2289,7 +2299,7 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
  virtual void PrintDataTo(StringStream* stream);

  virtual intptr_t Hashcode() {
    ASSERT(!HEAP->IsAllocationAllowed());
    ASSERT_ALLOCATION_DISABLED;
    intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
    hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
    return hash;
@@ -2535,7 +2545,7 @@ class HConstant: public HTemplateInstruction<0> {
  bool ToBoolean();

  virtual intptr_t Hashcode() {
    ASSERT(!HEAP->allow_allocation(false));
    ASSERT_ALLOCATION_DISABLED;
    intptr_t hash;

    if (has_int32_value_) {
@@ -3640,7 +3650,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
  virtual void PrintDataTo(StringStream* stream);

  virtual intptr_t Hashcode() {
    ASSERT(!HEAP->allow_allocation(false));
    ASSERT_ALLOCATION_DISABLED;
    return reinterpret_cast<intptr_t>(*cell_);
  }
@@ -1711,7 +1711,10 @@ class HGlobalValueNumberer BASE_EMBEDDED {
        block_side_effects_(graph->blocks()->length(), graph->zone()),
        loop_side_effects_(graph->blocks()->length(), graph->zone()),
        visited_on_paths_(graph->zone(), graph->blocks()->length()) {
    ASSERT(!info->isolate()->heap()->IsAllocationAllowed());
#ifdef DEBUG
    ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
           !info->isolate()->heap()->IsAllocationAllowed());
#endif
    block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
                                 graph_->zone());
    loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
@@ -3018,7 +3021,6 @@ HGraph* HGraphBuilder::CreateGraph() {

  {
    HPhase phase("H_Block building");
    CompilationHandleScope handle_scope(info());
    current_block_ = graph()->entry_block();

    Scope* scope = info()->scope();
@@ -3079,9 +3081,6 @@ HGraph* HGraphBuilder::CreateGraph() {
}

bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
  NoHandleAllocation no_handles;
  AssertNoAllocation no_gc;

  *bailout_reason = SmartArrayPointer<char>();
  OrderBlocks();
  AssignDominators();
@@ -74,6 +74,43 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}


static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
  __ jmp(eax);
}


void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
  GenerateTailCallToSharedCode(masm);
}


void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Push a copy of the function onto the stack.
    __ push(edi);
    // Push call kind information.
    __ push(ecx);

    __ push(edi);  // Function is also the parameter to the runtime call.
    __ CallRuntime(Runtime::kParallelRecompile, 1);

    // Restore call kind information.
    __ pop(ecx);
    // Restore receiver.
    __ pop(edi);

    // Tear down internal frame.
  }

  GenerateTailCallToSharedCode(masm);
}


static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                           bool is_api_function,
                                           bool count_constructions) {
@@ -1493,7 +1493,8 @@ Isolate::Isolate()
      regexp_stack_(NULL),
      date_cache_(NULL),
      context_exit_happened_(false),
      deferred_handles_head_(NULL) {
      deferred_handles_head_(NULL),
      optimizing_compiler_thread_(this) {
  TRACE_ISOLATE(constructor);

  memset(isolate_addresses_, 0,
@@ -1574,6 +1575,8 @@ void Isolate::Deinit() {
  if (state_ == INITIALIZED) {
    TRACE_ISOLATE(deinit);

    if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();

    if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();

    // We must stop the logger before we tear down other components.
@@ -1915,6 +1918,7 @@ bool Isolate::Init(Deserializer* des) {

  state_ = INITIALIZED;
  time_millis_at_init_ = OS::TimeCurrentMillis();
  if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
  return true;
}
@@ -41,6 +41,7 @@
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
#include "optimizing-compiler-thread.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
@@ -1059,6 +1060,10 @@ class Isolate {
  void LinkDeferredHandles(DeferredHandles* deferred_handles);
  void UnlinkDeferredHandles(DeferredHandles* deferred_handles);

  OptimizingCompilerThread* optimizing_compiler_thread() {
    return &optimizing_compiler_thread_;
  }

 private:
  Isolate();

@@ -1283,9 +1288,12 @@ class Isolate {
#endif

  DeferredHandles* deferred_handles_head_;
  OptimizingCompilerThread optimizing_compiler_thread_;

  friend class ExecutionAccess;
  friend class HandleScopeImplementer;
  friend class IsolateInitializer;
  friend class OptimizingCompilerThread;
  friend class ThreadManager;
  friend class Simulator;
  friend class StackGuard;
@@ -3309,6 +3309,8 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {


void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  Heap::RelocationLock relocation_lock(heap());

  bool code_slots_filtering_required;
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    code_slots_filtering_required = MarkInvalidatedCode();
@@ -4250,6 +4250,18 @@ bool JSFunction::IsMarkedForLazyRecompilation() {
}


bool JSFunction::IsMarkedForParallelRecompilation() {
  return code() ==
      GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile);
}


bool JSFunction::IsInRecompileQueue() {
  return code() == GetIsolate()->builtins()->builtin(
      Builtins::kInRecompileQueue);
}


Code* JSFunction::code() {
  return Code::cast(unchecked_code());
}
@@ -7243,6 +7243,18 @@ void JSFunction::MarkForLazyRecompilation() {
  ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}

void JSFunction::MarkForParallelRecompilation() {
  ASSERT(is_compiled() && !IsOptimized());
  ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
  Builtins* builtins = GetIsolate()->builtins();
  ReplaceCode(builtins->builtin(Builtins::kParallelRecompile));

  // Unlike MarkForLazyRecompilation, after queuing a function for
  // recompilation on the compiler thread, we actually tail-call into
  // the full code. We reset the profiler ticks here so that the
  // function doesn't bother the runtime profiler too much.
  shared()->code()->set_profiler_ticks(0);
}

static bool CompileLazyHelper(CompilationInfo* info,
                              ClearExceptionFlag flag) {
@@ -33,7 +33,7 @@
#include "elements-kind.h"
#include "list.h"
#include "property-details.h"
#include "smart-array-pointer.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
@@ -5978,6 +5978,7 @@ class JSFunction: public JSObject {
  // Mark this function for lazy recompilation. The function will be
  // recompiled the next time it is executed.
  void MarkForLazyRecompilation();
  void MarkForParallelRecompilation();

  // Helpers to compile this function. Returns true on success, false on
  // failure (e.g., stack overflow during compilation).
@@ -5992,6 +5993,11 @@ class JSFunction: public JSObject {
  // Tells whether or not the function is already marked for lazy
  // recompilation.
  inline bool IsMarkedForLazyRecompilation();
  inline bool IsMarkedForParallelRecompilation();

  // Tells whether or not the function is on the parallel
  // recompilation queue.
  inline bool IsInRecompileQueue();

  // Check whether or not this function is inlineable.
  bool IsInlineable();
src/optimizing-compiler-thread.cc (new file)
@@ -0,0 +1,107 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "optimizing-compiler-thread.h"

#include "v8.h"

#include "hydrogen.h"
#include "isolate.h"
#include "v8threads.h"

namespace v8 {
namespace internal {


void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  thread_id_ = ThreadId::Current().ToInteger();
#endif
  Isolate::SetIsolateThreadLocals(isolate_, NULL);

  while (true) {
    input_queue_semaphore_->Wait();
    if (Acquire_Load(&stop_thread_)) {
      stop_semaphore_->Signal();
      return;
    }

    Heap::RelocationLock relocation_lock(isolate_->heap());
    OptimizingCompiler* optimizing_compiler = NULL;
    input_queue_.Dequeue(&optimizing_compiler);
    Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));

    ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());

    OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
    ASSERT(status != OptimizingCompiler::FAILED);
    // Prevent an unused-variable error in release mode.
    (void) status;

    output_queue_.Enqueue(optimizing_compiler);
    isolate_->stack_guard()->RequestCodeReadyEvent();
  }
}


void OptimizingCompilerThread::Stop() {
  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
  input_queue_semaphore_->Signal();
  stop_semaphore_->Wait();
}


void OptimizingCompilerThread::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);
  int functions_installed = 0;
  while (!output_queue_.IsEmpty()) {
    OptimizingCompiler* compiler = NULL;
    output_queue_.Dequeue(&compiler);
    Compiler::InstallOptimizedCode(compiler);
    functions_installed++;
  }
  if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
    PrintF("  ** Installed %d function(s).\n", functions_installed);
  }
}


void OptimizingCompilerThread::QueueForOptimization(
    OptimizingCompiler* optimizing_compiler) {
  input_queue_.Enqueue(optimizing_compiler);
  input_queue_semaphore_->Signal();
}

#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
  if (!FLAG_parallel_recompilation) return false;
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif


} }  // namespace v8::internal
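
For orientation, the thread's lifecycle as wired up elsewhere in this change (both call sites appear in the isolate.cc hunks above):

    // Isolate::Init:
    if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
    // Isolate::Deinit:
    if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();

Stop() is a blocking handshake: it raises stop_thread_, signals the input semaphore so Run() wakes even with an empty queue, and then waits on stop_semaphore_ until the loop acknowledges, which guarantees the thread is gone before the isolate is torn down.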
src/optimizing-compiler-thread.h (new file)
@@ -0,0 +1,97 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
#define V8_OPTIMIZING_COMPILER_THREAD_H_

#include "atomicops.h"
#include "platform.h"
#include "flags.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

class HGraphBuilder;
class OptimizingCompiler;

class OptimizingCompilerThread : public Thread {
 public:
  explicit OptimizingCompilerThread(Isolate *isolate) :
      Thread("OptimizingCompilerThread"),
      isolate_(isolate),
      stop_semaphore_(OS::CreateSemaphore(0)),
      input_queue_semaphore_(OS::CreateSemaphore(0)) {
    NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
    NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
  }

  void Run();
  void Stop();
  void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
  void InstallOptimizedFunctions();

  inline bool IsQueueAvailable() {
    // We don't need a barrier since we have a data dependency right
    // after.
    Atomic32 current_length = NoBarrier_Load(&queue_length_);

    // This can be queried only from the execution thread.
    ASSERT(!IsOptimizerThread());
    // Since only the execution thread increments queue_length_ and
    // only one thread can run inside an Isolate at one time, a direct
    // read doesn't introduce a race -- queue_length_ may have decreased
    // in the meantime, but not increased.
    return (current_length < FLAG_parallel_recompilation_queue_length);
  }

#ifdef DEBUG
  bool IsOptimizerThread();
#endif

  ~OptimizingCompilerThread() {
    delete input_queue_semaphore_;
    delete stop_semaphore_;
  }

 private:
  Isolate* isolate_;
  Semaphore* stop_semaphore_;
  Semaphore* input_queue_semaphore_;
  UnboundQueue<OptimizingCompiler*> input_queue_;
  UnboundQueue<OptimizingCompiler*> output_queue_;
  volatile AtomicWord stop_thread_;
  volatile Atomic32 queue_length_;

#ifdef DEBUG
  int thread_id_;
#endif
};

} }  // namespace v8::internal

#endif  // V8_OPTIMIZING_COMPILER_THREAD_H_
@@ -84,6 +84,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
      return gc_entry_;
    case JS:
    case COMPILER:
    case PARALLEL_COMPILER_PROLOGUE:
    // DOM events handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
@@ -151,15 +151,20 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
    PrintF("]\n");
  }

  // The next call to the function will trigger optimization.
  function->MarkForLazyRecompilation();
  if (FLAG_parallel_recompilation) {
    function->MarkForParallelRecompilation();
  } else {
    // The next call to the function will trigger optimization.
    function->MarkForLazyRecompilation();
  }
}


void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
  // Debug::has_break_points().
  ASSERT(function->IsMarkedForLazyRecompilation());
  ASSERT(function->IsMarkedForLazyRecompilation() ||
         function->IsMarkedForParallelRecompilation());
  if (!FLAG_use_osr ||
      isolate_->DebuggerHasBreakPoints() ||
      function->IsBuiltin()) {
@@ -278,7 +283,8 @@ void RuntimeProfiler::OptimizeNow() {

    if (shared_code->kind() != Code::FUNCTION) continue;

    if (function->IsMarkedForLazyRecompilation()) {
    if (function->IsMarkedForLazyRecompilation() ||
        function->IsMarkedForParallelRecompilation()) {
      int nesting = shared_code->allow_osr_at_loop_nesting_level();
      if (nesting == 0) AttemptOnStackReplacement(function);
      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
@@ -54,7 +54,7 @@
#include "runtime-profiler.h"
#include "runtime.h"
#include "scopeinfo.h"
#include "smart-array-pointer.h"
#include "smart-pointers.h"
#include "string-search.h"
#include "stub-cache.h"
#include "v8threads.h"
@@ -8292,6 +8292,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}


RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
  HandleScope handle_scope(isolate);
  ASSERT(FLAG_parallel_recompilation);
  Compiler::RecompileParallel(args.at<JSFunction>(0));
  return *isolate->factory()->undefined_value();
}


class ActivationsFinder : public ThreadVisitor {
 public:
  explicit ActivationsFinder(JSFunction* function)
@@ -8486,6 +8494,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
    return Smi::FromInt(4);  // 4 == "never".
  }
  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
  if (FLAG_parallel_recompilation) {
    if (function->IsMarkedForLazyRecompilation()) {
      return Smi::FromInt(5);
    }
  }
  if (FLAG_always_opt) {
    // We may have always opt, but that is more best-effort than a real
    // promise, so we still say "no" if it is not optimized.
@@ -86,6 +86,7 @@ namespace internal {
  F(NewStrictArgumentsFast, 3, 1) \
  F(LazyCompile, 1, 1) \
  F(LazyRecompile, 1, 1) \
  F(ParallelRecompile, 1, 1) \
  F(NotifyDeoptimized, 1, 1) \
  F(NotifyOSR, 0, 1) \
  F(DeoptimizeFunction, 1, 1) \
@@ -25,34 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SMART_ARRAY_POINTER_H_
#define V8_SMART_ARRAY_POINTER_H_
#ifndef V8_SMART_POINTERS_H_
#define V8_SMART_POINTERS_H_

namespace v8 {
namespace internal {


// A 'scoped array pointer' that calls DeleteArray on its pointer when the
// destructor is called.
template<typename T>
class SmartArrayPointer {
template<typename Deallocator, typename T>
class SmartPointerBase {
 public:
  // Default constructor. Constructs an empty scoped pointer.
  inline SmartArrayPointer() : p_(NULL) {}
  inline SmartPointerBase() : p_(NULL) {}

  // Constructs a scoped pointer from a plain one.
  explicit inline SmartArrayPointer(T* ptr) : p_(ptr) {}
  explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}

  // Copy constructor removes the pointer from the original to avoid double
  // freeing.
  inline SmartArrayPointer(const SmartArrayPointer<T>& rhs) : p_(rhs.p_) {
    const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
  inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
      : p_(rhs.p_) {
    const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
  }

  // When the destructor of the scoped pointer is executed the plain pointer
  // is deleted using DeleteArray. This implies that you must allocate with
  // NewArray.
  inline ~SmartArrayPointer() { if (p_) DeleteArray(p_); }
  inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }

  inline T* operator->() const { return p_; }

@@ -81,10 +80,11 @@ class SmartArrayPointer {
  // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
  // the copy constructor it removes the pointer in the original to avoid
  // double freeing.
  inline SmartArrayPointer& operator=(const SmartArrayPointer<T>& rhs) {
  inline SmartPointerBase<Deallocator, T>& operator=(
      const SmartPointerBase<Deallocator, T>& rhs) {
    ASSERT(is_empty());
    T* tmp = rhs.p_;  // swap to handle self-assignment
    const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
    const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
    p_ = tmp;
    return *this;
  }
@@ -95,6 +95,45 @@ class SmartArrayPointer {
  T* p_;
};

// A 'scoped array pointer' that calls DeleteArray on its pointer when the
// destructor is called.

template<typename T>
struct ArrayDeallocator {
  static void Delete(T* array) {
    DeleteArray(array);
  }
};


template<typename T>
class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
 public:
  inline SmartArrayPointer() { }
  explicit inline SmartArrayPointer(T* ptr)
      : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
  inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
      : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
};


template<typename T>
struct ObjectDeallocator {
  static void Delete(T* array) {
    Malloced::Delete(array);
  }
};


template<typename T>
class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
 public:
  inline SmartPointer() { }
  explicit inline SmartPointer(T* ptr)
      : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
  inline SmartPointer(const SmartPointer<T>& rhs)
      : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
};

} }  // namespace v8::internal

#endif  // V8_SMART_ARRAY_POINTER_H_
#endif  // V8_SMART_POINTERS_H_
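
The refactoring above exists because the parallel path needs scoped ownership of a heap-allocated CompilationInfo (freed with Malloced::Delete), not just of arrays. A short usage sketch mirroring call sites elsewhere in this diff; bailout_reason and closure stand in for whatever the caller holds:

    SmartArrayPointer<char> reason(bailout_reason);   // DeleteArray on scope exit
    SmartPointer<CompilationInfo> info(
        new CompilationInfoWithZone(closure));        // Malloced::Delete on scope exit
    ...
    info.Detach();  // release ownership without freeing; this is the member
                    // Compiler::RecompileParallel uses to hand the job to the queue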
@@ -106,13 +106,16 @@ void V8::TearDown() {

  if (!has_been_set_up_ || has_been_disposed_) return;

  // The isolate has to be torn down before clearing the LOperand
  // caches so that the optimizing compiler thread (if running)
  // doesn't see an inconsistent view of the lithium instructions.
  isolate->TearDown();
  delete isolate;

  ElementsAccessor::TearDown();
  LOperand::TearDownCaches();
  RegisteredExtension::UnregisterAll();

  isolate->TearDown();
  delete isolate;

  is_running_ = false;
  has_been_disposed_ = true;
@@ -359,11 +359,12 @@ struct AccessorDescriptor {
// VMState object leaves a state by popping the current state from the
// stack.

#define STATE_TAG_LIST(V) \
  V(JS)                   \
  V(GC)                   \
  V(COMPILER)             \
  V(OTHER)                \
#define STATE_TAG_LIST(V)       \
  V(JS)                         \
  V(GC)                         \
  V(COMPILER)                   \
  V(PARALLEL_COMPILER_PROLOGUE) \
  V(OTHER)                      \
  V(EXTERNAL)

enum StateTag {
@@ -47,6 +47,8 @@ inline const char* StateToString(StateTag state) {
      return "GC";
    case COMPILER:
      return "COMPILER";
    case PARALLEL_COMPILER_PROLOGUE:
      return "PARALLEL_COMPILER_PROLOGUE";
    case OTHER:
      return "OTHER";
    case EXTERNAL:
@@ -73,6 +73,45 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}


static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
  __ movq(kScratchRegister,
          FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ movq(kScratchRegister,
          FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
  __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
  __ jmp(kScratchRegister);
}


void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
  GenerateTailCallToSharedCode(masm);
}


void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Push a copy of the function onto the stack.
    __ push(rdi);
    // Push call kind information.
    __ push(rcx);

    __ push(rdi);  // Function is also the parameter to the runtime call.
    __ CallRuntime(Runtime::kParallelRecompile, 1);

    // Restore call kind information.
    __ pop(rcx);
    // Restore receiver.
    __ pop(rdi);

    // Tear down internal frame.
  }

  GenerateTailCallToSharedCode(masm);
}


static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                           bool is_api_function,
                                           bool count_constructions) {
@@ -526,7 +526,10 @@ static intptr_t MemoryInUse() {

TEST(BootUpMemoryUse) {
  intptr_t initial_memory = MemoryInUse();
  FLAG_crankshaft = false;  // Avoid flakiness.
  // Avoid flakiness.
  FLAG_crankshaft = false;
  FLAG_parallel_recompilation = false;

  // Only Linux has the proc filesystem and only if it is mapped.  If it's not
  // there we just skip the test.
  if (initial_memory >= 0) {
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc --noparallel-recompilation

// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --allow-natives-syntax
// Flags: --allow-natives-syntax --noparallel-recompilation

/**
 * This class shows how to use %GetOptimizationCount() and
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges --allow-natives-syntax
// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges --allow-natives-syntax --noparallel-recompilation

// Test that OSR works properly when using count-based interrupting/profiling.

@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --expose-debug-as debug --allow-natives-syntax
// Flags: --expose-debug-as debug --allow-natives-syntax --noparallel-recompilation

// This test tests that deoptimization due to debug breaks works for
// inlined functions where the full-code is generated before the
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --expose-debug-as debug --expose-gc
// Flags: --expose-debug-as debug --expose-gc --noparallel-recompilation
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug

@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc --noparallel-recompilation

// Ensure that ElementsKind transitions in various situations are hoisted (or
// not hoisted) correctly, don't change the semantics programs and don't trigger
@@ -149,6 +149,7 @@ var knownProblems = {
  "PushBlockContext": true,
  "LazyCompile": true,
  "LazyRecompile": true,
  "ParallelRecompile": true,
  "NotifyDeoptimized": true,
  "NotifyOSR": true,
  "CreateObjectLiteralBoilerplate": true,
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --harmony-scoping --allow-natives-syntax
// Flags: --harmony-scoping --allow-natives-syntax --noparallel-recompilation

// TODO(ES6): properly activate extended mode
"use strict";
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --allow-natives-syntax --nouse_inlining
// Flags: --allow-natives-syntax --nouse_inlining --noparallel-recompilation

// Test for negative zero that doesn't need bail out

@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --allow-natives-syntax
// Flags: --allow-natives-syntax --noparallel-recompilation

// An exception thrown in a function optimized by on-stack replacement (OSR)
// should be able to construct a receiver from all optimized stack frames.
@@ -27,7 +27,7 @@

// Test dictionary -> double elements -> dictionary elements round trip

// Flags: --allow-natives-syntax --unbox-double-arrays --expose-gc
// Flags: --allow-natives-syntax --unbox-double-arrays --expose-gc --noparallel-recompilation
var large_array_size = 100000;
var approx_dict_to_elements_threshold = 70000;
@@ -386,6 +386,8 @@
        '../../src/objects.h',
        '../../src/once.cc',
        '../../src/once.h',
        '../../src/optimizing-compiler-thread.h',
        '../../src/optimizing-compiler-thread.cc',
        '../../src/parser.cc',
        '../../src/parser.h',
        '../../src/platform-posix.h',
@@ -434,7 +436,7 @@
        '../../src/serialize.cc',
        '../../src/serialize.h',
        '../../src/small-pointer-list.h',
        '../../src/smart-array-pointer.h',
        '../../src/smart-pointers.h',
        '../../src/snapshot-common.cc',
        '../../src/snapshot.h',
        '../../src/spaces-inl.h',