[Interpreter] Clean up runtime-profiler logic for three tier pipeline.

Remove checks for IC hotness from the Ignition tiering-up decision, since
IC hotness is not relevant for full-codegen compilation. Also make the
decision about what tier we are moving to more explicit and visible in
--trace-opt.

BUG=v8:4280
LOG=N

Review-Url: https://codereview.chromium.org/1969773002
Cr-Commit-Position: refs/heads/master@{#36260}
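The visible effect of the TraceRecompile change below is that --trace-opt now
names the target tier in its marking lines. For illustration only (the script
and function names here are hypothetical, and the exact ShortPrint form of a
function differs in practice), a run such as:

    d8 --ignition --trace-opt test.js

could emit marking lines along the lines of:

    [marking <JS Function foo> for baseline recompilation, reason: hot enough for baseline]
    [marking <JS Function bar> for optimized recompilation, reason: hot and stable]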
Commit: 9c6ff18355 (parent: ba76726209)
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -170,7 +170,8 @@ namespace internal {
   V(kOperandNotANumber, "Operand not a number") \
   V(kObjectTagged, "The object is tagged") \
   V(kObjectNotTagged, "The object is not tagged") \
-  V(kOptimizationDisabled, "Optimization is disabled") \
+  V(kOptimizationDisabled, "Optimization disabled") \
+  V(kOptimizationDisabledForTest, "Optimization disabled for test") \
   V(kOptimizedTooManyTimes, "Optimized too many times") \
   V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
     "Out of virtual registers while trying to allocate temp register") \
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -19,6 +19,9 @@ namespace v8 {
 namespace internal {
 
 
+// Number of times a function has to be seen on the stack before it is
+// compiled for baseline.
+static const int kProfilerTicksBeforeBaseline = 2;
 // Number of times a function has to be seen on the stack before it is
 // optimized.
 static const int kProfilerTicksBeforeOptimization = 2;
@@ -88,13 +91,13 @@ static void GetICCounts(SharedFunctionInfo* shared,
   }
 }
 
-void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
+static void TraceRecompile(JSFunction* function, const char* reason,
+                           const char* type) {
   if (FLAG_trace_opt &&
       function->shared()->PassesFilter(FLAG_hydrogen_filter)) {
     PrintF("[marking ");
     function->ShortPrint();
-    PrintF(" for recompilation, reason: %s", reason);
+    PrintF(" for %s recompilation, reason: %s", type, reason);
     if (FLAG_type_info_threshold > 0) {
       int typeinfo, generic, total, type_percentage, generic_percentage;
       GetICCounts(function->shared(), &typeinfo, &generic, &total,
@@ -105,14 +108,27 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
     }
     PrintF("]\n");
   }
 
-  if (function->shared()->HasBytecodeArray()) {
-    function->MarkForBaseline();
-  } else {
-    function->AttemptConcurrentOptimization();
-  }
 }
 
+void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
+  TraceRecompile(function, reason, "optimized");
+
+  // TODO(4280): Fix this to check function is compiled to baseline once we
+  // have a standard way to check that. For now, if baseline code doesn't have
+  // a bytecode array.
+  DCHECK(!function->shared()->HasBytecodeArray());
+  function->AttemptConcurrentOptimization();
+}
+
+void RuntimeProfiler::Baseline(JSFunction* function, const char* reason) {
+  TraceRecompile(function, reason, "baseline");
+
+  // TODO(4280): Fix this to check function is compiled for the interpreter
+  // once we have a standard way to check that. For now function will only
+  // have a bytecode array if compiled for the interpreter.
+  DCHECK(function->shared()->HasBytecodeArray());
+  function->MarkForBaseline();
+}
+
 void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
                                                 int loop_nesting_levels) {
@@ -239,8 +255,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
   }
 }
 
-void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
-                                            bool frame_optimized) {
+void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function) {
   if (function->IsInOptimizationQueue()) return;
 
   SharedFunctionInfo* shared = function->shared();
@@ -251,49 +266,22 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
   // TODO(rmcilroy): Consider whether we should optimize small functions when
   // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
 
-  if (!frame_optimized && (function->IsMarkedForBaseline() ||
-                           function->IsMarkedForOptimization() ||
-                           function->IsMarkedForConcurrentOptimization() ||
-                           function->IsOptimized())) {
+  if (function->IsMarkedForBaseline() || function->IsMarkedForOptimization() ||
+      function->IsMarkedForConcurrentOptimization() ||
+      function->IsOptimized()) {
     // TODO(rmcilroy): Support OSR in these cases.
     return;
   }
 
-  // Do not optimize non-optimizable functions.
-  if (shared->optimization_disabled()) {
-    if (shared->deopt_count() >= FLAG_max_opt_count) {
-      // If optimization was disabled due to many deoptimizations,
-      // then check if the function is hot and try to reenable optimization.
-      if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
-        shared->set_profiler_ticks(0);
-        shared->TryReenableOptimization();
-      }
-    }
+  if (shared->optimization_disabled() &&
+      shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
+    // Don't baseline functions which have been marked by NeverOptimizeFunction
+    // in a test.
     return;
   }
 
-  if (function->IsOptimized()) return;
-
-  if (ticks >= kProfilerTicksBeforeOptimization) {
-    int typeinfo, generic, total, type_percentage, generic_percentage;
-    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
-                &generic_percentage);
-    if (type_percentage >= FLAG_type_info_threshold &&
-        generic_percentage <= FLAG_generic_ic_threshold) {
-      // If this particular function hasn't had any ICs patched for enough
-      // ticks, optimize it now.
-      Optimize(function, "hot and stable");
-    } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
-      Optimize(function, "not much type info but very hot");
-    } else {
-      if (FLAG_trace_opt_verbose) {
-        PrintF("[not yet optimizing ");
-        function->PrintName();
-        PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
-               type_percentage);
-      }
-    }
+  if (ticks >= kProfilerTicksBeforeBaseline) {
+    Baseline(function, "hot enough for baseline");
   }
 }
 
@@ -326,7 +314,8 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
     }
 
     if (frame->is_interpreted()) {
-      MaybeOptimizeIgnition(function, frame->is_optimized());
+      DCHECK(!frame->is_optimized());
+      MaybeOptimizeIgnition(function);
     } else {
       MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized());
     }
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -26,8 +26,9 @@ class RuntimeProfiler {
  private:
   void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count,
                                 bool frame_optimized);
-  void MaybeOptimizeIgnition(JSFunction* function, bool frame_optimized);
+  void MaybeOptimizeIgnition(JSFunction* function);
   void Optimize(JSFunction* function, const char* reason);
+  void Baseline(JSFunction* function, const char* reason);
 
   bool CodeSizeOKForOSR(Code* shared_code);
 
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -178,7 +178,8 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
-  function->shared()->set_disable_optimization_reason(kOptimizationDisabled);
+  function->shared()->set_disable_optimization_reason(
+      kOptimizationDisabledForTest);
   function->shared()->set_optimization_disabled(true);
   return isolate->heap()->undefined_value();
 }
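Taken together, the new Ignition tier-up decision reduces to tick counting
plus a few guards. The following minimal, self-contained C++ sketch models
that shape under stated assumptions: ToyFunction and its collapsed boolean
fields are illustrative stand-ins rather than V8 API; only the
kProfilerTicksBeforeBaseline constant, the guard order, and the trace wording
are taken from the diff above.

#include <cstdio>

// Illustrative constant mirroring the one added to runtime-profiler.cc.
static const int kProfilerTicksBeforeBaseline = 2;

// Toy stand-in for JSFunction state; not the real V8 type.
struct ToyFunction {
  const char* name;
  bool marked_or_optimized;  // collapses the IsMarkedFor*/IsOptimized checks
  bool disabled_for_test;    // set by %NeverOptimizeFunction in tests
  int profiler_ticks;
};

static void Baseline(ToyFunction* f, const char* reason) {
  // Mirrors TraceRecompile(function, reason, "baseline").
  std::printf("[marking %s for baseline recompilation, reason: %s]\n",
              f->name, reason);
}

// Sketch of the simplified MaybeOptimizeIgnition: tiering up to baseline
// depends only on profiler ticks; IC hotness no longer plays a part.
static void MaybeOptimizeIgnition(ToyFunction* f) {
  if (f->marked_or_optimized) return;  // TODO in V8: support OSR here
  if (f->disabled_for_test) return;    // don't baseline test-disabled functions
  if (f->profiler_ticks >= kProfilerTicksBeforeBaseline) {
    Baseline(f, "hot enough for baseline");
  }
}

int main() {
  ToyFunction foo = {"foo", false, false, 2};
  MaybeOptimizeIgnition(&foo);  // prints the baseline marking line
  return 0;
}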