[execution] Extract interrupt scopes and stack guard
Refactor only: this moves the interrupt-scope and stack-guard code into their own dedicated files.

Change-Id: I5723a04786a04bba31a0da54622f3cd0b926ef07
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1655288
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62141}
Parent: b9a128f751
Commit: 6d990aee80
BUILD.gn (4 changed lines)
@@ -2173,6 +2173,8 @@ v8_source_set("v8_base_without_compiler") {
    "src/execution/frames.h",
    "src/execution/futex-emulation.cc",
    "src/execution/futex-emulation.h",
    "src/execution/interrupts-scope.cc",
    "src/execution/interrupts-scope.h",
    "src/execution/isolate-data.h",
    "src/execution/isolate-inl.h",
    "src/execution/isolate.cc",
@@ -2187,6 +2189,8 @@ v8_source_set("v8_base_without_compiler") {
    "src/execution/simulator-base.cc",
    "src/execution/simulator-base.h",
    "src/execution/simulator.h",
    "src/execution/stack-guard.cc",
    "src/execution/stack-guard.h",
    "src/execution/thread-id.cc",
    "src/execution/thread-id.h",
    "src/execution/thread-local-top.cc",
@@ -12,6 +12,7 @@
#include "src/debug/debug-interface.h"
#include "src/debug/interface-types.h"
#include "src/execution/frames.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/debug-objects.h"
src/execution/execution.cc

@@ -5,32 +5,13 @@
#include "src/execution/execution.h"

#include "src/api/api-inl.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {

void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
  DCHECK_NOT_NULL(isolate_);
  thread_local_.set_jslimit(kInterruptLimit);
  thread_local_.set_climit(kInterruptLimit);
  isolate_->heap()->SetStackLimits();
}

void StackGuard::reset_limits(const ExecutionAccess& lock) {
  DCHECK_NOT_NULL(isolate_);
  thread_local_.set_jslimit(thread_local_.real_jslimit_);
  thread_local_.set_climit(thread_local_.real_climit_);
  isolate_->heap()->SetStackLimits();
}

namespace {

Handle<Object> NormalizeReceiver(Isolate* isolate, Handle<Object> receiver) {

@@ -412,315 +393,5 @@ MaybeHandle<Object> Execution::TryRunMicrotasks(
                              exception_out));
}

[Removed in this hunk: the remainder of the StackGuard implementation (stack-limit setters, interrupt-scope handling, thread archiving, HandleInterrupts), which moves verbatim to the new src/execution/stack-guard.cc shown below.]

}  // namespace internal
}  // namespace v8
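Aside (not part of the commit): the set_interrupt_limits/reset_limits pair above works by temporarily lowering the limits to the kInterruptLimit sentinel so the next stack check fails and control drops into the runtime, which then calls HandleInterrupts. A standalone model of that trick, with simulated integer addresses rather than real stack pointers:

// Standalone sketch of the "lower the limit to force a stack-check failure"
// mechanism; Guard is a simplified stand-in, not the V8 StackGuard.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kInterruptLimit = ~uintptr_t{1};  // near-all-ones sentinel

struct Guard {
  uintptr_t real_jslimit = 0x1000;  // genuine limit for a downward-growing stack
  uintptr_t jslimit = 0x1000;       // limit the stack check compares against

  void RequestInterrupt() { jslimit = kInterruptLimit; }  // force a failure
  void ResetLimits() { jslimit = real_jslimit; }          // after handling
  bool StackCheckFails(uintptr_t sp) const { return sp < jslimit; }
};

int main() {
  Guard g;
  uintptr_t sp = 0x8000;           // plenty of stack remaining
  assert(!g.StackCheckFails(sp));  // normal execution continues
  g.RequestInterrupt();
  assert(g.StackCheckFails(sp));   // next check traps into the runtime
  g.ResetLimits();
  assert(!g.StackCheckFails(sp));  // limits restored once interrupts are handled
  return 0;
}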
src/execution/execution.h

@@ -5,7 +5,6 @@
#ifndef V8_EXECUTION_EXECUTION_H_
#define V8_EXECUTION_EXECUTION_H_

#include "src/base/atomicops.h"
#include "src/common/globals.h"

namespace v8 {

@@ -58,173 +57,6 @@ class Execution final : public AllStatic {
                                     MaybeHandle<Object>* exception_out);
};

[Removed in this hunk: the ExecutionAccess/InterruptsScope forward declarations and the complete StackGuard class declaration, which move verbatim to the new src/execution/stack-guard.h shown below.]

}  // namespace internal
}  // namespace v8
src/execution/interrupts-scope.cc (new file, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/interrupts-scope.h"

#include "src/execution/isolate.h"

namespace v8 {
namespace internal {

InterruptsScope::InterruptsScope(Isolate* isolate, int intercept_mask,
                                 Mode mode)
    : stack_guard_(isolate->stack_guard()),
      intercept_mask_(intercept_mask),
      intercepted_flags_(0),
      mode_(mode) {
  if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
}

bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
  InterruptsScope* last_postpone_scope = nullptr;
  for (InterruptsScope* current = this; current; current = current->prev_) {
    // We only consider scopes related to passed flag.
    if (!(current->intercept_mask_ & flag)) continue;
    if (current->mode_ == kRunInterrupts) {
      // If innermost scope is kRunInterrupts scope, prevent interrupt from
      // being intercepted.
      break;
    } else {
      DCHECK_EQ(current->mode_, kPostponeInterrupts);
      last_postpone_scope = current;
    }
  }
  // If there is no postpone scope for passed flag then we should not intercept.
  if (!last_postpone_scope) return false;
  last_postpone_scope->intercepted_flags_ |= flag;
  return true;
}

}  // namespace internal
}  // namespace v8
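For orientation, a standalone model (not part of the commit; simplified stand-in types, not the V8 classes) of the chain walk Intercept() performs above: interception goes to the outermost matching postpone scope, unless the innermost matching scope is a run-interrupts scope.

// Minimal model of the Intercept() walk; compiles on its own.
#include <cassert>

enum Mode { kPostponeInterrupts, kRunInterrupts };

struct Scope {
  Mode mode;
  int intercept_mask;
  int intercepted_flags = 0;
  Scope* prev = nullptr;  // next-outer scope in the chain
};

// Walk from the innermost scope outward; a matching kRunInterrupts scope wins,
// otherwise the outermost matching postpone scope records the flag.
bool Intercept(Scope* innermost, int flag) {
  Scope* last_postpone = nullptr;
  for (Scope* s = innermost; s; s = s->prev) {
    if (!(s->intercept_mask & flag)) continue;
    if (s->mode == kRunInterrupts) break;
    last_postpone = s;
  }
  if (!last_postpone) return false;
  last_postpone->intercepted_flags |= flag;
  return true;
}

int main() {
  const int kGcRequest = 1 << 1;
  Scope outer{kPostponeInterrupts, kGcRequest};
  Scope inner{kRunInterrupts, kGcRequest};
  inner.prev = &outer;
  assert(!Intercept(&inner, kGcRequest));   // inner run-scope shadows the postpone
  assert(Intercept(&outer, kGcRequest));    // postpone scope intercepts and records
  assert(outer.intercepted_flags == kGcRequest);
  return 0;
}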
src/execution/interrupts-scope.h (new file, 72 lines)
@@ -0,0 +1,72 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_EXECUTION_INTERRUPTS_SCOPE_H_
#define V8_EXECUTION_INTERRUPTS_SCOPE_H_

#include "src/execution/stack-guard.h"

namespace v8 {
namespace internal {

class Isolate;

// Scope intercepts only interrupt which is part of its interrupt_mask and does
// not affect other interrupts.
class InterruptsScope {
 public:
  enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };

  V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, int intercept_mask,
                                    Mode mode);

  virtual ~InterruptsScope() {
    if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
  }

  // Find the scope that intercepts this interrupt.
  // It may be outermost PostponeInterruptsScope or innermost
  // SafeForInterruptsScope if any.
  // Return whether the interrupt has been intercepted.
  bool Intercept(StackGuard::InterruptFlag flag);

 private:
  StackGuard* stack_guard_;
  int intercept_mask_;
  int intercepted_flags_;
  Mode mode_;
  InterruptsScope* prev_;

  friend class StackGuard;
};

// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope : public InterruptsScope {
 public:
  PostponeInterruptsScope(Isolate* isolate,
                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : InterruptsScope(isolate, intercept_mask,
                        InterruptsScope::kPostponeInterrupts) {}
  ~PostponeInterruptsScope() override = default;
};

// Support for overriding PostponeInterruptsScope. Interrupt is not ignored if
// innermost scope is SafeForInterruptsScope ignoring any outer
// PostponeInterruptsScopes.
class SafeForInterruptsScope : public InterruptsScope {
 public:
  SafeForInterruptsScope(Isolate* isolate,
                         int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : InterruptsScope(isolate, intercept_mask,
                        InterruptsScope::kRunInterrupts) {}
  ~SafeForInterruptsScope() override = default;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_EXECUTION_INTERRUPTS_SCOPE_H_
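The two RAII helpers declared above are typically nested; a hypothetical usage sketch (not code from this commit) assuming a valid v8::internal::Isolate* and the header added above:

// Hypothetical V8-internal caller showing how the scopes compose.
#include "src/execution/interrupts-scope.h"

namespace v8 {
namespace internal {

void DoUninterruptibleWork(Isolate* isolate) {
  // Interrupts requested while this scope is live are recorded instead of
  // firing, and are re-raised when the outermost postpone scope is left.
  PostponeInterruptsScope postpone(isolate);
  // ... work that must not observe interrupts ...
  {
    // A nested SafeForInterruptsScope overrides the outer postpone scope,
    // so interrupts requested here are serviced normally.
    SafeForInterruptsScope safe(isolate);
    // ... work that may service interrupts ...
  }
}

}  // namespace internal
}  // namespace v8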
src/execution/isolate.cc

@@ -4642,26 +4642,6 @@ AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
    : isolate_(isolate), context_(isolate->context(), isolate) {}
#endif  // DEBUG

[Removed in this hunk: InterruptsScope::Intercept, which moves verbatim to the new src/execution/interrupts-scope.cc shown above.]

#undef TRACE_ISOLATE

}  // namespace internal
src/execution/isolate.h

@@ -23,6 +23,7 @@
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
#include "src/execution/stack-guard.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"

@@ -1985,65 +1986,6 @@ class StackLimitCheck {
    }                                               \
  } while (false)

[Removed in this hunk: the InterruptsScope, PostponeInterruptsScope, and SafeForInterruptsScope class definitions, which move verbatim to the new src/execution/interrupts-scope.h shown above.]

class StackTraceFailureMessage {
 public:
  explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
src/execution/stack-guard.cc (new file, 345 lines)
@@ -0,0 +1,345 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/stack-guard.h"

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/logging/counters.h"
#include "src/roots/roots-inl.h"
#include "src/utils/memcopy.h"
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {

void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
  DCHECK_NOT_NULL(isolate_);
  thread_local_.set_jslimit(kInterruptLimit);
  thread_local_.set_climit(kInterruptLimit);
  isolate_->heap()->SetStackLimits();
}

void StackGuard::reset_limits(const ExecutionAccess& lock) {
  DCHECK_NOT_NULL(isolate_);
  thread_local_.set_jslimit(thread_local_.real_jslimit_);
  thread_local_.set_climit(thread_local_.real_climit_);
  isolate_->heap()->SetStackLimits();
}

void StackGuard::SetStackLimit(uintptr_t limit) {
  ExecutionAccess access(isolate_);
  // If the current limits are special (e.g. due to a pending interrupt) then
  // leave them alone.
  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
  if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
    thread_local_.set_jslimit(jslimit);
  }
  if (thread_local_.climit() == thread_local_.real_climit_) {
    thread_local_.set_climit(limit);
  }
  thread_local_.real_climit_ = limit;
  thread_local_.real_jslimit_ = jslimit;
}

void StackGuard::AdjustStackLimitForSimulator() {
  ExecutionAccess access(isolate_);
  uintptr_t climit = thread_local_.real_climit_;
  // If the current limits are special (e.g. due to a pending interrupt) then
  // leave them alone.
  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
  if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
    thread_local_.set_jslimit(jslimit);
    isolate_->heap()->SetStackLimits();
  }
}

void StackGuard::EnableInterrupts() {
  ExecutionAccess access(isolate_);
  if (has_pending_interrupts(access)) {
    set_interrupt_limits(access);
  }
}

void StackGuard::DisableInterrupts() {
  ExecutionAccess access(isolate_);
  reset_limits(access);
}

void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
  ExecutionAccess access(isolate_);
  DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
  if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
    // Intercept already requested interrupts.
    int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
    scope->intercepted_flags_ = intercepted;
    thread_local_.interrupt_flags_ &= ~intercepted;
  } else {
    DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
    // Restore postponed interrupts.
    int restored_flags = 0;
    for (InterruptsScope* current = thread_local_.interrupt_scopes_;
         current != nullptr; current = current->prev_) {
      restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
      current->intercepted_flags_ &= ~scope->intercept_mask_;
    }
    thread_local_.interrupt_flags_ |= restored_flags;
  }
  if (!has_pending_interrupts(access)) reset_limits(access);
  // Add scope to the chain.
  scope->prev_ = thread_local_.interrupt_scopes_;
  thread_local_.interrupt_scopes_ = scope;
}

void StackGuard::PopInterruptsScope() {
  ExecutionAccess access(isolate_);
  InterruptsScope* top = thread_local_.interrupt_scopes_;
  DCHECK_NE(top->mode_, InterruptsScope::kNoop);
  if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
    // Make intercepted interrupts active.
    DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
    thread_local_.interrupt_flags_ |= top->intercepted_flags_;
  } else {
    DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
    // Postpone existing interupts if needed.
    if (top->prev_) {
      for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
           interrupt = interrupt << 1) {
        InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
        if ((thread_local_.interrupt_flags_ & flag) &&
            top->prev_->Intercept(flag)) {
          thread_local_.interrupt_flags_ &= ~flag;
        }
      }
    }
  }
  if (has_pending_interrupts(access)) set_interrupt_limits(access);
  // Remove scope from chain.
  thread_local_.interrupt_scopes_ = top->prev_;
}

bool StackGuard::CheckInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  return thread_local_.interrupt_flags_ & flag;
}

void StackGuard::RequestInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  // Check the chain of InterruptsScope for interception.
  if (thread_local_.interrupt_scopes_ &&
      thread_local_.interrupt_scopes_->Intercept(flag)) {
    return;
  }

  // Not intercepted. Set as active interrupt flag.
  thread_local_.interrupt_flags_ |= flag;
  set_interrupt_limits(access);

  // If this isolate is waiting in a futex, notify it to wake up.
  isolate_->futex_wait_list_node()->NotifyWake();
}

void StackGuard::ClearInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  // Clear the interrupt flag from the chain of InterruptsScope.
  for (InterruptsScope* current = thread_local_.interrupt_scopes_;
       current != nullptr; current = current->prev_) {
    current->intercepted_flags_ &= ~flag;
  }

  // Clear the interrupt flag from the active interrupt flags.
  thread_local_.interrupt_flags_ &= ~flag;
  if (!has_pending_interrupts(access)) reset_limits(access);
}

int StackGuard::FetchAndClearInterrupts() {
  ExecutionAccess access(isolate_);

  int result = 0;
  if (thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) {
    // The TERMINATE_EXECUTION interrupt is special, since it terminates
    // execution but should leave V8 in a resumable state. If it exists, we only
    // fetch and clear that bit. On resume, V8 can continue processing other
    // interrupts.
    result = TERMINATE_EXECUTION;
    thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
    if (!has_pending_interrupts(access)) reset_limits(access);
  } else {
    result = thread_local_.interrupt_flags_;
    thread_local_.interrupt_flags_ = 0;
    reset_limits(access);
  }

  return result;
}

char* StackGuard::ArchiveStackGuard(char* to) {
  ExecutionAccess access(isolate_);
  MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
  ThreadLocal blank;

  // Set the stack limits using the old thread_local_.
  // TODO(isolates): This was the old semantics of constructing a ThreadLocal
  //                 (as the ctor called SetStackLimits, which looked at the
  //                 current thread_local_ from StackGuard)-- but is this
  //                 really what was intended?
  isolate_->heap()->SetStackLimits();
  thread_local_ = blank;

  return to + sizeof(ThreadLocal);
}

char* StackGuard::RestoreStackGuard(char* from) {
  ExecutionAccess access(isolate_);
  MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
  isolate_->heap()->SetStackLimits();
  return from + sizeof(ThreadLocal);
}

void StackGuard::FreeThreadResources() {
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  per_thread->set_stack_limit(thread_local_.real_climit_);
}

void StackGuard::ThreadLocal::Clear() {
  real_jslimit_ = kIllegalLimit;
  set_jslimit(kIllegalLimit);
  real_climit_ = kIllegalLimit;
  set_climit(kIllegalLimit);
  interrupt_scopes_ = nullptr;
  interrupt_flags_ = 0;
}

bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
  bool should_set_stack_limits = false;
  if (real_climit_ == kIllegalLimit) {
    const uintptr_t kLimitSize = FLAG_stack_size * KB;
    DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
    uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
    real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
    set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
    real_climit_ = limit;
    set_climit(limit);
    should_set_stack_limits = true;
  }
  interrupt_scopes_ = nullptr;
  interrupt_flags_ = 0;
  return should_set_stack_limits;
}

void StackGuard::ClearThread(const ExecutionAccess& lock) {
  thread_local_.Clear();
  isolate_->heap()->SetStackLimits();
}

void StackGuard::InitThread(const ExecutionAccess& lock) {
  if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  uintptr_t stored_limit = per_thread->stack_limit();
  // You should hold the ExecutionAccess lock when you call this.
  if (stored_limit != 0) {
    SetStackLimit(stored_limit);
  }
}

// --- C a l l s   t o   n a t i v e s ---

namespace {

bool TestAndClear(int* bitfield, int mask) {
  bool result = (*bitfield & mask);
  *bitfield &= ~mask;
  return result;
}

class ShouldBeZeroOnReturnScope final {
 public:
#ifndef DEBUG
  explicit ShouldBeZeroOnReturnScope(int*) {}
#else   // DEBUG
  explicit ShouldBeZeroOnReturnScope(int* v) : v_(v) {}
  ~ShouldBeZeroOnReturnScope() { DCHECK_EQ(*v_, 0); }

 private:
  int* v_;
#endif  // DEBUG
};

}  // namespace

Object StackGuard::HandleInterrupts() {
  TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");

  if (FLAG_verify_predictable) {
    // Advance synthetic time by making a time request.
    isolate_->heap()->MonotonicallyIncreasingTimeInMs();
  }

  // Fetch and clear interrupt bits in one go. See comments inside the method
  // for special handling of TERMINATE_EXECUTION.
  int interrupt_flags = FetchAndClearInterrupts();

  // All interrupts should be fully processed when returning from this method.
  ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags);

  if (TestAndClear(&interrupt_flags, TERMINATE_EXECUTION)) {
    TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
    return isolate_->TerminateExecution();
  }

  if (TestAndClear(&interrupt_flags, GC_REQUEST)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
    isolate_->heap()->HandleGCRequest();
  }

  if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
                 "V8.WasmGrowSharedMemory");
    isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
        isolate_);
  }

  if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "V8.GCDeoptMarkedAllocationSites");
    isolate_->heap()->DeoptMarkedAllocationSites();
  }

  if (TestAndClear(&interrupt_flags, INSTALL_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.InstallOptimizedFunctions");
    DCHECK(isolate_->concurrent_recompilation_enabled());
    isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
  }

  if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
    TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
    // Callbacks must be invoked outside of ExecutionAccess lock.
    isolate_->InvokeApiInterruptCallbacks();
  }

  if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
    isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
  }

  if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
    isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
  }

  isolate_->counters()->stack_interrupts()->Increment();
  isolate_->counters()->runtime_profiler_ticks()->Increment();
  isolate_->runtime_profiler()->MarkCandidatesForOptimization();

  return ReadOnlyRoots(isolate_).undefined_value();
}

}  // namespace internal
}  // namespace v8
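HandleInterrupts() above drains the fetched bitfield one flag at a time via the file-local TestAndClear helper. A standalone model of that bit handling (the flag values follow the (1 << id) encoding from INTERRUPT_LIST; everything else is illustrative, not V8 API):

// Minimal model of fetch-then-drain interrupt bit handling; compiles on its own.
#include <cassert>

enum DemoInterruptFlag {
  TERMINATE_EXECUTION = 1 << 0,
  GC_REQUEST = 1 << 1,
  INSTALL_CODE = 1 << 2,
};

bool TestAndClear(int* bitfield, int mask) {
  bool result = (*bitfield & mask) != 0;
  *bitfield &= ~mask;  // clear the handled bits
  return result;
}

int main() {
  int flags = GC_REQUEST | INSTALL_CODE;          // pending interrupts
  assert(!TestAndClear(&flags, TERMINATE_EXECUTION));  // not requested
  assert(TestAndClear(&flags, GC_REQUEST));            // requested once...
  assert(!TestAndClear(&flags, GC_REQUEST));           // ...then cleared
  assert(flags == INSTALL_CODE);                       // remaining work
  return 0;
}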
src/execution/stack-guard.h (new file, 186 lines)
@@ -0,0 +1,186 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_EXECUTION_STACK_GUARD_H_
#define V8_EXECUTION_STACK_GUARD_H_

#include "include/v8-internal.h"
#include "src/base/atomicops.h"

namespace v8 {
namespace internal {

class ExecutionAccess;
class InterruptsScope;
class Isolate;
class Object;

// StackGuard contains the handling of the limits that are used to limit the
// number of nested invocations of JavaScript and the stack size used in each
// invocation.
class V8_EXPORT_PRIVATE StackGuard final {
 public:
  explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}

  // Pass the address beyond which the stack should not grow. The stack
  // is assumed to grow downwards.
  void SetStackLimit(uintptr_t limit);

  // The simulator uses a separate JS stack. Limits on the JS stack might have
  // to be adjusted in order to reflect overflows of the C stack, because we
  // cannot rely on the interleaving of frames on the simulator.
  void AdjustStackLimitForSimulator();

  // Threading support.
  char* ArchiveStackGuard(char* to);
  char* RestoreStackGuard(char* from);
  static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
  void FreeThreadResources();
  // Sets up the default stack guard for this thread if it has not
  // already been set up.
  void InitThread(const ExecutionAccess& lock);
  // Clears the stack guard for this thread so it does not look as if
  // it has been set up.
  void ClearThread(const ExecutionAccess& lock);

#define INTERRUPT_LIST(V)                                         \
  V(TERMINATE_EXECUTION, TerminateExecution, 0)                   \
  V(GC_REQUEST, GC, 1)                                            \
  V(INSTALL_CODE, InstallCode, 2)                                 \
  V(API_INTERRUPT, ApiInterrupt, 3)                               \
  V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
  V(GROW_SHARED_MEMORY, GrowSharedMemory, 5)                      \
  V(LOG_WASM_CODE, LogWasmCode, 6)                                \
  V(WASM_CODE_GC, WasmCodeGC, 7)

#define V(NAME, Name, id)                                    \
  inline bool Check##Name() { return CheckInterrupt(NAME); } \
  inline void Request##Name() { RequestInterrupt(NAME); }    \
  inline void Clear##Name() { ClearInterrupt(NAME); }
  INTERRUPT_LIST(V)
#undef V

  // Flag used to set the interrupt causes.
  enum InterruptFlag {
#define V(NAME, Name, id) NAME = (1 << id),
    INTERRUPT_LIST(V)
#undef V
#define V(NAME, Name, id) NAME |
    ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
#undef V
  };

  uintptr_t climit() { return thread_local_.climit(); }
  uintptr_t jslimit() { return thread_local_.jslimit(); }
  // This provides an asynchronous read of the stack limits for the current
  // thread. There are no locks protecting this, but it is assumed that you
  // have the global V8 lock if you are using multiple V8 threads.
  uintptr_t real_climit() { return thread_local_.real_climit_; }
  uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
  Address address_of_jslimit() {
    return reinterpret_cast<Address>(&thread_local_.jslimit_);
  }
  Address address_of_real_jslimit() {
    return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
  }

  // If the stack guard is triggered, but it is not an actual
  // stack overflow, then handle the interruption accordingly.
  Object HandleInterrupts();

 private:
  bool CheckInterrupt(InterruptFlag flag);
  void RequestInterrupt(InterruptFlag flag);
  void ClearInterrupt(InterruptFlag flag);
  int FetchAndClearInterrupts();

  // You should hold the ExecutionAccess lock when calling this method.
  bool has_pending_interrupts(const ExecutionAccess& lock) {
    return thread_local_.interrupt_flags_ != 0;
  }

  // You should hold the ExecutionAccess lock when calling this method.
  inline void set_interrupt_limits(const ExecutionAccess& lock);

  // Reset limits to actual values. For example after handling interrupt.
  // You should hold the ExecutionAccess lock when calling this method.
  inline void reset_limits(const ExecutionAccess& lock);

  // Enable or disable interrupts.
  void EnableInterrupts();
  void DisableInterrupts();

#if V8_TARGET_ARCH_64_BIT
  static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
  static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
#else
  static const uintptr_t kInterruptLimit = 0xfffffffe;
  static const uintptr_t kIllegalLimit = 0xfffffff8;
#endif

  void PushInterruptsScope(InterruptsScope* scope);
  void PopInterruptsScope();

  class ThreadLocal final {
   public:
    ThreadLocal() { Clear(); }
    // You should hold the ExecutionAccess lock when you call Initialize or
    // Clear.
    void Clear();

    // Returns true if the heap's stack limits should be set, false if not.
    bool Initialize(Isolate* isolate);

    // The stack limit is split into a JavaScript and a C++ stack limit. These
    // two are the same except when running on a simulator where the C++ and
    // JavaScript stacks are separate. Each of the two stack limits have two
    // values. The one with the real_ prefix is the actual stack limit
    // set for the VM. The one without the real_ prefix has the same value as
    // the actual stack limit except when there is an interruption (e.g. debug
    // break or preemption) in which case it is lowered to make stack checks
    // fail. Both the generated code and the runtime system check against the
    // one without the real_ prefix.
    uintptr_t real_jslimit_;  // Actual JavaScript stack limit set for the VM.
    uintptr_t real_climit_;   // Actual C++ stack limit set for the VM.

    // jslimit_ and climit_ can be read without any lock.
    // Writing requires the ExecutionAccess lock.
    base::AtomicWord jslimit_;
    base::AtomicWord climit_;

    uintptr_t jslimit() {
      return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
    }
    void set_jslimit(uintptr_t limit) {
      return base::Relaxed_Store(&jslimit_,
                                 static_cast<base::AtomicWord>(limit));
    }
    uintptr_t climit() {
      return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
    }
    void set_climit(uintptr_t limit) {
      return base::Relaxed_Store(&climit_,
                                 static_cast<base::AtomicWord>(limit));
    }

    InterruptsScope* interrupt_scopes_;
    int interrupt_flags_;
  };

  // TODO(isolates): Technically this could be calculated directly from a
  //                 pointer to StackGuard.
  Isolate* isolate_;
  ThreadLocal thread_local_;

  friend class Isolate;
  friend class StackLimitCheck;
  friend class InterruptsScope;

  DISALLOW_COPY_AND_ASSIGN(StackGuard);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_EXECUTION_STACK_GUARD_H_
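A note on the ALL_INTERRUPTS definition in the header above: the INTERRUPT_LIST X-macro is expanded a second time with V(NAME, Name, id) defined as "NAME |", so the enumerators are OR-ed together, with a trailing 0 closing the expression. A reduced standalone sketch of the same pattern (three entries only; names reused for illustration, not the V8 header):

// Self-contained illustration of the X-macro trick; compiles on its own.
#define DEMO_INTERRUPT_LIST(V)                  \
  V(TERMINATE_EXECUTION, TerminateExecution, 0) \
  V(GC_REQUEST, GC, 1)                          \
  V(INSTALL_CODE, InstallCode, 2)

enum InterruptFlag {
#define V(NAME, Name, id) NAME = (1 << id),
  DEMO_INTERRUPT_LIST(V)
#undef V
#define V(NAME, Name, id) NAME |
  // Expands to: TERMINATE_EXECUTION | GC_REQUEST | INSTALL_CODE | 0
  ALL_INTERRUPTS = DEMO_INTERRUPT_LIST(V) 0
#undef V
};

static_assert(ALL_INTERRUPTS == 7, "all three interrupt bits are set");

int main() { return 0; }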