Remove obsolete Code::optimizable flag.

This flag mostly duplicates SharedFunctionInfo::optimization_disabled
and is only queried in places where the original is available. Remove
the brittle and error-prone duplication.

R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/1148043002

Cr-Commit-Position: refs/heads/master@{#28520}
mstarzinger authored 2015-05-20 07:44:58 -07:00, committed by Commit bot
parent c1253668bc, commit 794aa07283
9 changed files with 23 additions and 60 deletions
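
The pattern being removed is worth spelling out before the diff. Below is a minimal standalone sketch (hypothetical types, not V8's actual classes) of the hazard: a flag cached on one object that mirrors state owned by another object must be re-synchronized at every write site or the two copies drift apart, whereas a delegating query cannot go stale.

#include <cassert>

// Hypothetical stand-ins for the two V8 objects involved.
struct SharedFunctionInfo {
  bool optimization_disabled = false;  // Single source of truth.
};

struct Code {
  SharedFunctionInfo* shared;
  // Before: a duplicated "optimizable" byte lived on Code and had to be
  // cleared by hand at every place that disabled optimization.
  // After: the query delegates to the owning SharedFunctionInfo.
  bool optimizable() const { return !shared->optimization_disabled; }
};

int main() {
  SharedFunctionInfo shared;
  Code code{&shared};
  assert(code.optimizable());
  shared.optimization_disabled = true;  // One write; nothing to mirror.
  assert(!code.optimizable());
}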

src/compiler.cc

@@ -674,7 +674,6 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
   // Update the code and feedback vector for the shared function info.
   shared->ReplaceCode(*info->code());
-  if (shared->optimization_disabled()) info->code()->set_optimizable(false);
   shared->set_feedback_vector(*info->feedback_vector());
   return info->code();

src/full-codegen.cc

@@ -310,9 +310,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
-  code->set_optimizable(info->IsOptimizable() &&
-                        !info->function()->dont_optimize() &&
-                        info->function()->scope()->AllowsLazyCompilation());
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());

src/log.cc

@@ -48,12 +48,14 @@ for (int i = 0; i < listeners_.length(); ++i) { \
   } \
 } while (false);
 
-static const char* ComputeMarker(Code* code) {
+// ComputeMarker must only be used when SharedFunctionInfo is known.
+static const char* ComputeMarker(SharedFunctionInfo* shared, Code* code) {
   switch (code->kind()) {
-    case Code::FUNCTION: return code->optimizable() ? "~" : "";
-    case Code::OPTIMIZED_FUNCTION: return "*";
-    default: return "";
+    case Code::FUNCTION:
+      return shared->optimization_disabled() ? "" : "~";
+    case Code::OPTIMIZED_FUNCTION:
+      return "*";
+    default:
+      return "";
   }
 }
@@ -183,7 +185,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                       CompilationInfo* info,
                                       Name* name) {
   name_buffer_->Init(tag);
-  name_buffer_->AppendBytes(ComputeMarker(code));
+  name_buffer_->AppendBytes(ComputeMarker(shared, code));
   name_buffer_->AppendName(name);
   LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
 }
@@ -195,7 +197,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                       CompilationInfo* info,
                                       Name* source, int line, int column) {
   name_buffer_->Init(tag);
-  name_buffer_->AppendBytes(ComputeMarker(code));
+  name_buffer_->AppendBytes(ComputeMarker(shared, code));
   name_buffer_->AppendString(shared->DebugName());
   name_buffer_->AppendByte(' ');
   if (source->IsString()) {
@@ -1199,7 +1201,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
   }
   msg.Append(',');
   msg.AppendAddress(shared->address());
-  msg.Append(",%s", ComputeMarker(code));
+  msg.Append(",%s", ComputeMarker(shared, code));
   msg.WriteToLogFile();
 }
@@ -1233,7 +1235,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
   }
   msg.Append(":%d:%d\",", line, column);
   msg.AppendAddress(shared->address());
-  msg.Append(",%s", ComputeMarker(code));
+  msg.Append(",%s", ComputeMarker(shared, code));
   msg.WriteToLogFile();
 }
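
For readers of the profiler log, the marker convention that the reworked ComputeMarker preserves: "~" prefixes unoptimized code that may still be optimized, "*" prefixes optimized code, and no marker is emitted once optimization is disabled. A standalone sketch (simplified kinds and a plain bool, not V8's types):

#include <cstdio>

enum class CodeKind { FUNCTION, OPTIMIZED_FUNCTION, STUB };

// Same decision table as the new ComputeMarker, with SharedFunctionInfo
// reduced to the one bit the function actually consults.
static const char* ComputeMarker(bool optimization_disabled, CodeKind kind) {
  switch (kind) {
    case CodeKind::FUNCTION:
      return optimization_disabled ? "" : "~";
    case CodeKind::OPTIMIZED_FUNCTION:
      return "*";
    default:
      return "";
  }
}

int main() {
  std::printf("%sfoo\n", ComputeMarker(false, CodeKind::FUNCTION));            // ~foo
  std::printf("%sfoo\n", ComputeMarker(false, CodeKind::OPTIMIZED_FUNCTION));  // *foo
  std::printf("%sfoo\n", ComputeMarker(true, CodeKind::FUNCTION));             // foo
}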

src/objects-inl.h

@@ -4918,18 +4918,6 @@ inline void Code::set_can_have_weak_objects(bool value) {
 }
 
 
-bool Code::optimizable() {
-  DCHECK_EQ(FUNCTION, kind());
-  return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
-}
-
-
-void Code::set_optimizable(bool value) {
-  DCHECK_EQ(FUNCTION, kind());
-  WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
-}
-
-
 bool Code::has_deoptimization_support() {
   DCHECK_EQ(FUNCTION, kind());
   byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
@@ -5741,11 +5729,6 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
   set_compiler_hints(BooleanBit::set(compiler_hints(),
                                      kOptimizationDisabled,
                                      disable));
-  // If disabling optimizations we reflect that in the code object so
-  // it will not be counted as optimizable code.
-  if ((code()->kind() == Code::FUNCTION) && disable) {
-    code()->set_optimizable(false);
-  }
 }
@@ -5986,7 +5969,6 @@ void SharedFunctionInfo::TryReenableOptimization() {
     set_optimization_disabled(false);
     set_opt_count(0);
     set_deopt_count(0);
-    code()->set_optimizable(true);
   }
 }
@@ -6024,7 +6006,7 @@ bool JSFunction::IsOptimized() {
 bool JSFunction::IsOptimizable() {
-  return code()->kind() == Code::FUNCTION && code()->optimizable();
+  return code()->kind() == Code::FUNCTION && !shared()->optimization_disabled();
 }
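
The set_optimization_disabled hunk above stores the flag in the compiler-hints bitfield via BooleanBit. A minimal sketch of that storage scheme, in the spirit of V8's BooleanBit helper but with an illustrative bit index (the real kOptimizationDisabled value is defined elsewhere):

#include <cassert>
#include <cstdint>

namespace BooleanBit {
// Read and write a single bit in a packed hints word.
inline bool get(uint32_t hints, int bit) { return (hints >> bit) & 1u; }
inline uint32_t set(uint32_t hints, int bit, bool value) {
  return value ? (hints | (1u << bit)) : (hints & ~(1u << bit));
}
}  // namespace BooleanBit

constexpr int kOptimizationDisabled = 3;  // Illustrative bit index only.

int main() {
  uint32_t hints = 0;
  hints = BooleanBit::set(hints, kOptimizationDisabled, true);
  assert(BooleanBit::get(hints, kOptimizationDisabled));
  hints = BooleanBit::set(hints, kOptimizationDisabled, false);
  assert(!BooleanBit::get(hints, kOptimizationDisabled));
}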

src/objects.cc

@@ -9807,7 +9807,7 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
 void JSFunction::MarkForOptimization() {
   Isolate* isolate = GetIsolate();
   DCHECK(!IsOptimized());
-  DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+  DCHECK(shared()->allows_lazy_compilation() || IsOptimizable());
   set_code_no_write_barrier(
       isolate->builtins()->builtin(Builtins::kCompileOptimized));
   // No write barrier required, since the builtin is part of the root set.
@@ -9831,7 +9831,7 @@ void JSFunction::AttemptConcurrentOptimization() {
   }
   DCHECK(!IsInOptimizationQueue());
   DCHECK(!IsOptimized());
-  DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+  DCHECK(shared()->allows_lazy_compilation() || IsOptimizable());
   DCHECK(isolate->concurrent_recompilation_enabled());
   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Marking ");
@@ -10729,10 +10729,7 @@ Handle<Object> SharedFunctionInfo::GetSourceCode() {
 bool SharedFunctionInfo::IsInlineable() {
   // Check that the function has a script associated with it.
   if (!script()->IsScript()) return false;
-  if (optimization_disabled()) return false;
-  // If we never ran this (unlikely) then lets try to optimize it.
-  if (code()->kind() != Code::FUNCTION) return true;
-  return code()->optimizable();
+  return !optimization_disabled();
 }
@@ -10834,12 +10831,8 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
   DCHECK(reason != kNoReason);
   set_optimization_disabled(true);
   set_disable_optimization_reason(reason);
-  // Code should be the lazy compilation stub or else unoptimized. If the
-  // latter, disable optimization for the code too.
+  // Code should be the lazy compilation stub or else unoptimized.
   DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
-  if (code()->kind() == Code::FUNCTION) {
-    code()->set_optimizable(false);
-  }
   PROFILE(GetIsolate(), CodeDisableOptEvent(code(), this));
   if (FLAG_trace_opt) {
     PrintF("[disabled optimization for ");
@@ -10921,7 +10914,6 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
       opt_count() >= FLAG_max_opt_count) {
     // Re-enable optimizations if they were disabled due to opt_count limit.
     set_optimization_disabled(false);
-    code()->set_optimizable(true);
   }
   set_opt_count(0);
   set_deopt_count(0);
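
Taken together, the objects.cc hunks shrink the disable/re-enable lifecycle to writes on SharedFunctionInfo alone. A sketch of that shape, with simplified fields and an illustrative re-enable step (the real TryReenableOptimization applies its own heuristics before resetting):

#include <cassert>

struct SharedFunctionInfo {
  bool optimization_disabled = false;
  int opt_count = 0;
  int deopt_count = 0;

  void DisableOptimization() {
    // Previously the Code object's optimizable byte also had to be
    // cleared here; now this single write suffices.
    optimization_disabled = true;
  }

  void TryReenableOptimization() {
    // Illustrative: reset counters and re-enable in one place.
    optimization_disabled = false;
    opt_count = 0;
    deopt_count = 0;
  }
};

int main() {
  SharedFunctionInfo shared;
  shared.DisableOptimization();
  assert(shared.optimization_disabled);
  shared.TryReenableOptimization();
  assert(!shared.optimization_disabled);
}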

src/objects.h

@@ -5341,10 +5341,6 @@ class Code: public HeapObject {
   inline bool can_have_weak_objects();
   inline void set_can_have_weak_objects(bool value);
 
-  // [optimizable]: For FUNCTION kind, tells if it is optimizable.
-  inline bool optimizable();
-  inline void set_optimizable(bool value);
-
   // [has_deoptimization_support]: For FUNCTION kind, tells if it has
   // deoptimization support.
   inline bool has_deoptimization_support();
@@ -5650,9 +5646,7 @@ class Code: public HeapObject {
   STATIC_ASSERT((kConstantPoolOffset & kPointerAlignmentMask) == 0);
 
   // Byte offsets within kKindSpecificFlags1Offset.
-  static const int kOptimizableOffset = kKindSpecificFlags1Offset;
-  static const int kFullCodeFlags = kOptimizableOffset + 1;
+  static const int kFullCodeFlags = kKindSpecificFlags1Offset;
   class FullCodeFlagsHasDeoptimizationSupportField:
       public BitField<bool, 0, 1> {};  // NOLINT
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
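
The objects.h hunk reclaims the byte kOptimizableOffset occupied, so kFullCodeFlags moves down to kKindSpecificFlags1Offset itself. A compilable sketch of the single-bit packing used for that flags byte (the template mirrors the idea of V8's BitField, not its exact definition):

#include <cassert>
#include <cstdint>

// Packs a value of `size` bits at position `shift` within a flags word.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};

// The two single-bit fields named in the hunk above.
using FullCodeFlagsHasDeoptimizationSupportField = BitField<bool, 0, 1>;
using FullCodeFlagsHasDebugBreakSlotsField = BitField<bool, 1, 1>;

int main() {
  uint32_t flags = FullCodeFlagsHasDeoptimizationSupportField::encode(true);
  assert(FullCodeFlagsHasDeoptimizationSupportField::decode(flags));
  assert(!FullCodeFlagsHasDebugBreakSlotsField::decode(flags));
}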

src/runtime-profiler.cc

@@ -117,7 +117,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
   }
 
   // If the code is not optimizable, don't try OSR.
-  if (!shared->code()->optimizable()) return;
+  if (shared->optimization_disabled()) return;
 
   // We are not prepared to do OSR for a function that already has an
   // allocated arguments object. The optimized code would bypass it for

src/runtime/runtime-compiler.cc

@@ -168,10 +168,9 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
 static bool IsSuitableForOnStackReplacement(Isolate* isolate,
-                                            Handle<JSFunction> function,
-                                            Handle<Code> current_code) {
+                                            Handle<JSFunction> function) {
   // Keep track of whether we've succeeded in optimizing.
-  if (!current_code->optimizable()) return false;
+  if (function->shared()->optimization_disabled()) return false;
   // If we are trying to do OSR when there are already optimized
   // activations of the function, it means (a) the function is directly or
   // indirectly recursive and (b) an optimized invocation has been
@@ -253,7 +252,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
       PrintF(" at AST id %d]\n", ast_id.ToInt());
     }
     result = Compiler::GetConcurrentlyOptimizedCode(job);
-  } else if (IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
+  } else if (IsSuitableForOnStackReplacement(isolate, function)) {
     if (FLAG_trace_osr) {
       PrintF("[OSR - Compiling: ");
       function->PrintName();

src/runtime/runtime-test.cc

@@ -86,8 +86,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
   // The following assertion was lifted from the DCHECK inside
   // JSFunction::MarkForOptimization().
   RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
-                 (function->code()->kind() == Code::FUNCTION &&
-                  function->code()->optimizable()));
+                 function->IsOptimizable());
 
   // If the function is already optimized, just return.
   if (function->IsOptimized()) return isolate->heap()->undefined_value();
@@ -131,8 +130,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
   // The following assertion was lifted from the DCHECK inside
   // JSFunction::MarkForOptimization().
   RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
-                 (function->code()->kind() == Code::FUNCTION &&
-                  function->code()->optimizable()));
+                 function->IsOptimizable());
 
   // If the function is already optimized, just return.
   if (function->IsOptimized()) return isolate->heap()->undefined_value();