[compiler] Remove ContinuationForConcurrentOptimization
.. to simplify logic within compiler.cc. GetOrCompileOptimized now only returns a Code object if the requested optimized Code object is available. This change also required updating CompileLazy to install the appropriate Code object before potentially calling CompileOptimized_* runtime functions in order to satisfy the is_compiled precondition. Bug: v8:12161 Change-Id: I991dbcc0ba8f3d635aa1e1f06e4cffd89e08a47b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3562978 Reviewed-by: Leszek Swirski <leszeks@chromium.org> Commit-Queue: Jakob Linke <jgruber@chromium.org> Cr-Commit-Position: refs/heads/main@{#79762}
This commit is contained in:
parent
3f5a3df63b
commit
5f3ed078b6
@ -129,6 +129,10 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
|
||||
// If feedback cell isn't initialized, compile function
|
||||
GotoIf(IsUndefined(feedback_cell_value), &compile_function);
|
||||
|
||||
CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
|
||||
isolate(), CompileLazy))));
|
||||
StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
|
||||
|
||||
Label maybe_use_sfi_code(this);
|
||||
// If there is no feedback, don't check for optimized code.
|
||||
GotoIf(HasInstanceType(feedback_cell_value, CLOSURE_FEEDBACK_CELL_ARRAY_TYPE),
|
||||
@ -145,13 +149,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
|
||||
// optimized Code object (we'd have tail-called it above). A usual case would
|
||||
// be the InterpreterEntryTrampoline to start executing existing bytecode.
|
||||
BIND(&maybe_use_sfi_code);
|
||||
CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
|
||||
isolate(), CompileLazy))));
|
||||
StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
|
||||
|
||||
Label tailcall_code(this);
|
||||
Label baseline(this);
|
||||
|
||||
Label tailcall_code(this), baseline(this);
|
||||
TVARIABLE(CodeT, code);
|
||||
|
||||
// Check if we have baseline code.
|
||||
@ -170,8 +168,8 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
|
||||
function));
|
||||
});
|
||||
Goto(&tailcall_code);
|
||||
|
||||
BIND(&tailcall_code);
|
||||
// Jump to the selected code entry.
|
||||
GenerateTailCallToJSCode(code.value(), function);
|
||||
|
||||
BIND(&compile_function);
|
||||
|
@ -1066,27 +1066,6 @@ bool CompileTurbofan_Concurrent(Isolate* isolate,
|
||||
return true;
|
||||
}
|
||||
|
||||
// Returns the code object at which execution continues after a concurrent
|
||||
// optimization job has been started (but not finished).
|
||||
MaybeHandle<CodeT> ContinuationForConcurrentOptimization(
|
||||
Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset) {
|
||||
if (IsOSR(osr_offset)) {
|
||||
// OSR tierup differs from plain tierup in that we don't simply continue
|
||||
// execution at the returned code. Instead, we must signal unavailability
|
||||
// of OSR'd code by returning the empty handle.
|
||||
return {};
|
||||
}
|
||||
|
||||
DCHECK(!IsOSR(osr_offset));
|
||||
if (function->shared().HasBaselineCode()) {
|
||||
CodeT baseline_code = function->shared().baseline_code(kAcquireLoad);
|
||||
function->set_code(baseline_code);
|
||||
return handle(baseline_code, isolate);
|
||||
}
|
||||
DCHECK(function->ActiveTierIsIgnition());
|
||||
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
|
||||
}
|
||||
|
||||
enum class CompileResultBehavior {
|
||||
// Default behavior, i.e. install the result, insert into caches, etc.
|
||||
kDefault,
|
||||
@ -1136,10 +1115,7 @@ MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
|
||||
|
||||
// Prepare the job and launch concurrent compilation, or compile now.
|
||||
if (IsConcurrent(mode)) {
|
||||
if (CompileTurbofan_Concurrent(isolate, std::move(job))) {
|
||||
return ContinuationForConcurrentOptimization(isolate, function,
|
||||
osr_offset);
|
||||
}
|
||||
if (CompileTurbofan_Concurrent(isolate, std::move(job))) return {};
|
||||
} else {
|
||||
DCHECK(IsSynchronous(mode));
|
||||
if (CompileTurbofan_NotConcurrent(isolate, job.get())) {
|
||||
@ -1192,8 +1168,7 @@ MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
|
||||
// Remember that the function is currently being processed.
|
||||
SetTieringState(*function, osr_offset, TieringState::kInProgress);
|
||||
|
||||
// The code that triggered optimization continues execution here.
|
||||
return ContinuationForConcurrentOptimization(isolate, function, osr_offset);
|
||||
return {};
|
||||
#else // V8_ENABLE_MAGLEV
|
||||
UNREACHABLE();
|
||||
#endif // V8_ENABLE_MAGLEV
|
||||
@ -2256,26 +2231,15 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
|
||||
}
|
||||
|
||||
Handle<CodeT> code;
|
||||
if (!GetOrCompileOptimized(isolate, function, mode, code_kind)
|
||||
.ToHandle(&code)) {
|
||||
// Optimization failed, get the existing code. We could have optimized code
|
||||
// from a lower tier here. Unoptimized code must exist already if we are
|
||||
// optimizing.
|
||||
DCHECK(!isolate->has_pending_exception());
|
||||
DCHECK(function->shared().is_compiled());
|
||||
DCHECK(function->shared().HasBytecodeArray());
|
||||
code = ContinuationForConcurrentOptimization(isolate, function,
|
||||
BytecodeOffset::None())
|
||||
.ToHandleChecked();
|
||||
if (GetOrCompileOptimized(isolate, function, mode, code_kind)
|
||||
.ToHandle(&code)) {
|
||||
function->set_code(*code, kReleaseStore);
|
||||
}
|
||||
|
||||
function->set_code(*code, kReleaseStore);
|
||||
|
||||
#ifdef DEBUG
|
||||
// Check postconditions on success.
|
||||
DCHECK(!isolate->has_pending_exception());
|
||||
DCHECK(function->shared().is_compiled());
|
||||
DCHECK(function->is_compiled());
|
||||
DCHECK(function->shared().HasBytecodeArray());
|
||||
const TieringState tiering_state = function->tiering_state();
|
||||
DCHECK(IsNone(tiering_state) || IsInProgress(tiering_state));
|
||||
DCHECK_IMPLIES(IsInProgress(tiering_state), function->ChecksTieringState());
|
||||
|
@ -31,6 +31,11 @@ namespace {
|
||||
|
||||
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
|
||||
CodeKind target_kind, ConcurrencyMode mode) {
|
||||
// As a pre- and post-condition of CompileOptimized, the function *must* be
|
||||
// compiled, i.e. the installed Code object must not be CompileLazy.
|
||||
IsCompiledScope is_compiled_scope(function->shared(), isolate);
|
||||
DCHECK(is_compiled_scope.is_compiled());
|
||||
|
||||
StackLimitCheck check(isolate);
|
||||
// Concurrent optimization runs on another thread, thus no additional gap.
|
||||
const int gap =
|
||||
@ -39,8 +44,6 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
|
||||
|
||||
Compiler::CompileOptimized(isolate, function, mode, target_kind);
|
||||
|
||||
// As a post-condition of CompileOptimized, the function *must* be compiled,
|
||||
// i.e. the installed Code object must not be the CompileLazy builtin.
|
||||
DCHECK(function->is_compiled());
|
||||
return function->code();
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user