// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include #include "v8.h" #include "ast.h" #include "bootstrapper.h" #include "codegen.h" #include "compilation-cache.h" #include "cpu-profiler.h" #include "debug.h" #include "deoptimizer.h" #include "heap-profiler.h" #include "hydrogen.h" #include "isolate-inl.h" #include "lithium-allocator.h" #include "log.h" #include "messages.h" #include "platform.h" #include "regexp-stack.h" #include "runtime-profiler.h" #include "sampler.h" #include "scopeinfo.h" #include "serialize.h" #include "simulator.h" #include "spaces.h" #include "stub-cache.h" #include "sweeper-thread.h" #include "utils/random-number-generator.h" #include "version.h" #include "vm-state-inl.h" namespace v8 { namespace internal { Atomic32 ThreadId::highest_thread_id_ = 0; int ThreadId::AllocateThreadId() { int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1); return new_id; } int ThreadId::GetCurrentThreadId() { int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_); if (thread_id == 0) { thread_id = AllocateThreadId(); Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id); } return thread_id; } ThreadLocalTop::ThreadLocalTop() { InitializeInternal(); // This flag may be set using v8::V8::IgnoreOutOfMemoryException() // before an isolate is initialized. The initialize methods below do // not touch it to preserve its value. ignore_out_of_memory_ = false; } void ThreadLocalTop::InitializeInternal() { c_entry_fp_ = 0; handler_ = 0; #ifdef USE_SIMULATOR simulator_ = NULL; #endif js_entry_sp_ = NULL; external_callback_scope_ = NULL; current_vm_state_ = EXTERNAL; try_catch_handler_address_ = NULL; context_ = NULL; thread_id_ = ThreadId::Invalid(); external_caught_exception_ = false; failed_access_check_callback_ = NULL; save_context_ = NULL; catcher_ = NULL; top_lookup_result_ = NULL; // These members are re-initialized later after deserialization // is complete. 
pending_exception_ = NULL; has_pending_message_ = false; rethrowing_message_ = false; pending_message_obj_ = NULL; pending_message_script_ = NULL; scheduled_exception_ = NULL; } void ThreadLocalTop::Initialize() { InitializeInternal(); #ifdef USE_SIMULATOR simulator_ = Simulator::current(isolate_); #endif thread_id_ = ThreadId::Current(); } v8::TryCatch* ThreadLocalTop::TryCatchHandler() { return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address()); } Isolate* Isolate::default_isolate_ = NULL; Thread::LocalStorageKey Isolate::isolate_key_; Thread::LocalStorageKey Isolate::thread_id_key_; Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_; #ifdef DEBUG Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key; #endif // DEBUG Mutex Isolate::process_wide_mutex_; // TODO(dcarney): Remove with default isolate. enum DefaultIsolateStatus { kDefaultIsolateUninitialized, kDefaultIsolateInitialized, kDefaultIsolateCrashIfInitialized }; static DefaultIsolateStatus default_isolate_status_ = kDefaultIsolateUninitialized; Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL; Atomic32 Isolate::isolate_counter_ = 0; Isolate::PerIsolateThreadData* Isolate::FindOrAllocatePerThreadDataForThisThread() { ThreadId thread_id = ThreadId::Current(); PerIsolateThreadData* per_thread = NULL; { LockGuard lock_guard(&process_wide_mutex_); per_thread = thread_data_table_->Lookup(this, thread_id); if (per_thread == NULL) { per_thread = new PerIsolateThreadData(this, thread_id); thread_data_table_->Insert(per_thread); } } ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread); return per_thread; } Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() { ThreadId thread_id = ThreadId::Current(); return FindPerThreadDataForThread(thread_id); } Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread( ThreadId thread_id) { PerIsolateThreadData* per_thread = NULL; { LockGuard lock_guard(&process_wide_mutex_); per_thread = 
thread_data_table_->Lookup(this, thread_id); } return per_thread; } void Isolate::SetCrashIfDefaultIsolateInitialized() { LockGuard lock_guard(&process_wide_mutex_); CHECK(default_isolate_status_ != kDefaultIsolateInitialized); default_isolate_status_ = kDefaultIsolateCrashIfInitialized; } void Isolate::EnsureDefaultIsolate() { LockGuard lock_guard(&process_wide_mutex_); CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized); if (default_isolate_ == NULL) { isolate_key_ = Thread::CreateThreadLocalKey(); thread_id_key_ = Thread::CreateThreadLocalKey(); per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey(); #ifdef DEBUG PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey(); #endif // DEBUG thread_data_table_ = new Isolate::ThreadDataTable(); default_isolate_ = new Isolate(); } // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here // because a non-null thread data may be already set. if (Thread::GetThreadLocal(isolate_key_) == NULL) { Thread::SetThreadLocal(isolate_key_, default_isolate_); } } struct StaticInitializer { StaticInitializer() { Isolate::EnsureDefaultIsolate(); } } static_initializer; #ifdef ENABLE_DEBUGGER_SUPPORT Debugger* Isolate::GetDefaultIsolateDebugger() { EnsureDefaultIsolate(); return default_isolate_->debugger(); } #endif StackGuard* Isolate::GetDefaultIsolateStackGuard() { EnsureDefaultIsolate(); return default_isolate_->stack_guard(); } void Isolate::EnterDefaultIsolate() { EnsureDefaultIsolate(); ASSERT(default_isolate_ != NULL); PerIsolateThreadData* data = CurrentPerIsolateThreadData(); // If not yet in default isolate - enter it. 
if (data == NULL || data->isolate() != default_isolate_) { default_isolate_->Enter(); } } v8::Isolate* Isolate::GetDefaultIsolateForLocking() { EnsureDefaultIsolate(); return reinterpret_cast(default_isolate_); } Address Isolate::get_address_from_id(Isolate::AddressId id) { return isolate_addresses_[id]; } char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) { ThreadLocalTop* thread = reinterpret_cast(thread_storage); Iterate(v, thread); return thread_storage + sizeof(ThreadLocalTop); } void Isolate::IterateThread(ThreadVisitor* v, char* t) { ThreadLocalTop* thread = reinterpret_cast(t); v->VisitThread(this, thread); } void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) { // Visit the roots from the top for a given thread. Object* pending; // The pending exception can sometimes be a failure. We can't show // that to the GC, which only understands objects. if (thread->pending_exception_->ToObject(&pending)) { v->VisitPointer(&pending); thread->pending_exception_ = pending; // In case GC updated it. } v->VisitPointer(&(thread->pending_message_obj_)); v->VisitPointer(BitCast(&(thread->pending_message_script_))); v->VisitPointer(BitCast(&(thread->context_))); Object* scheduled; if (thread->scheduled_exception_->ToObject(&scheduled)) { v->VisitPointer(&scheduled); thread->scheduled_exception_ = scheduled; } for (v8::TryCatch* block = thread->TryCatchHandler(); block != NULL; block = TRY_CATCH_FROM_ADDRESS(block->next_)) { v->VisitPointer(BitCast(&(block->exception_))); v->VisitPointer(BitCast(&(block->message_obj_))); v->VisitPointer(BitCast(&(block->message_script_))); } // Iterate over pointers on native execution stack. for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) { it.frame()->Iterate(v); } // Iterate pointers in live lookup results. 
thread->top_lookup_result_->Iterate(v); } void Isolate::Iterate(ObjectVisitor* v) { ThreadLocalTop* current_t = thread_local_top(); Iterate(v, current_t); } void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) { for (DeferredHandles* deferred = deferred_handles_head_; deferred != NULL; deferred = deferred->next_) { deferred->Iterate(visitor); } } #ifdef DEBUG bool Isolate::IsDeferredHandle(Object** handle) { // Each DeferredHandles instance keeps the handles to one job in the // concurrent recompilation queue, containing a list of blocks. Each block // contains kHandleBlockSize handles except for the first block, which may // not be fully filled. // We iterate through all the blocks to see whether the argument handle // belongs to one of the blocks. If so, it is deferred. for (DeferredHandles* deferred = deferred_handles_head_; deferred != NULL; deferred = deferred->next_) { List* blocks = &deferred->blocks_; for (int i = 0; i < blocks->length(); i++) { Object** block_limit = (i == 0) ? deferred->first_block_limit_ : blocks->at(i) + kHandleBlockSize; if (blocks->at(i) <= handle && handle < block_limit) return true; } } return false; } #endif // DEBUG void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) { // The ARM simulator has a separate JS stack. We therefore register // the C++ try catch handler with the simulator and get back an // address that can be used for comparisons with addresses into the // JS stack. When running without the simulator, the address // returned will be the address of the C++ try catch handler itself. Address address = reinterpret_cast
( SimulatorStack::RegisterCTryCatch(reinterpret_cast(that))); thread_local_top()->set_try_catch_handler_address(address); } void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) { ASSERT(thread_local_top()->TryCatchHandler() == that); thread_local_top()->set_try_catch_handler_address( reinterpret_cast
(that->next_)); thread_local_top()->catcher_ = NULL; SimulatorStack::UnregisterCTryCatch(); } Handle Isolate::StackTraceString() { if (stack_trace_nesting_level_ == 0) { stack_trace_nesting_level_++; HeapStringAllocator allocator; StringStream::ClearMentionedObjectCache(this); StringStream accumulator(&allocator); incomplete_message_ = &accumulator; PrintStack(&accumulator); Handle stack_trace = accumulator.ToString(this); incomplete_message_ = NULL; stack_trace_nesting_level_ = 0; return stack_trace; } else if (stack_trace_nesting_level_ == 1) { stack_trace_nesting_level_++; OS::PrintError( "\n\nAttempt to print stack while printing stack (double fault)\n"); OS::PrintError( "If you are lucky you may find a partial stack dump on stdout.\n\n"); incomplete_message_->OutputToStdOut(); return factory()->empty_string(); } else { OS::Abort(); // Unreachable return factory()->empty_string(); } } void Isolate::PushStackTraceAndDie(unsigned int magic, Object* object, Map* map, unsigned int magic2) { const int kMaxStackTraceSize = 8192; Handle trace = StackTraceString(); uint8_t buffer[kMaxStackTraceSize]; int length = Min(kMaxStackTraceSize - 1, trace->length()); String::WriteToFlat(*trace, buffer, 0, length); buffer[length] = '\0'; // TODO(dcarney): convert buffer to utf8? OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2, static_cast(object), static_cast(map), reinterpret_cast(buffer)); OS::Abort(); } // Determines whether the given stack frame should be displayed in // a stack trace. The caller is the error constructor that asked // for the stack trace to be collected. The first time a construct // call to this function is encountered it is skipped. The seen_caller // in/out parameter is used to remember if the caller has been seen // yet. static bool IsVisibleInStackTrace(StackFrame* raw_frame, Object* caller, bool* seen_caller) { // Only display JS frames. 
if (!raw_frame->is_java_script()) return false; JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); JSFunction* fun = frame->function(); if ((fun == caller) && !(*seen_caller)) { *seen_caller = true; return false; } // Skip all frames until we've seen the caller. if (!(*seen_caller)) return false; // Also, skip non-visible built-in functions and any call with the builtins // object as receiver, so as to not reveal either the builtins object or // an internal function. // The --builtins-in-stack-traces command line flag allows including // internal call sites in the stack trace for debugging purposes. if (!FLAG_builtins_in_stack_traces) { if (frame->receiver()->IsJSBuiltinsObject() || (fun->IsBuiltin() && !fun->shared()->native())) { return false; } } return true; } Handle Isolate::CaptureSimpleStackTrace(Handle error_object, Handle caller, int limit) { limit = Max(limit, 0); // Ensure that limit is not negative. int initial_size = Min(limit, 10); Handle elements = factory()->NewFixedArrayWithHoles(initial_size * 4 + 1); // If the caller parameter is a function we skip frames until we're // under it before starting to collect. bool seen_caller = !caller->IsJSFunction(); // First element is reserved to store the number of non-strict frames. int cursor = 1; int frames_seen = 0; int non_strict_frames = 0; bool encountered_strict_function = false; for (StackFrameIterator iter(this); !iter.done() && frames_seen < limit; iter.Advance()) { StackFrame* raw_frame = iter.frame(); if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) { frames_seen++; JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); // Set initial size to the maximum inlining level + 1 for the outermost // function. 
List frames(FLAG_max_inlining_levels + 1); frame->Summarize(&frames); for (int i = frames.length() - 1; i >= 0; i--) { if (cursor + 4 > elements->length()) { int new_capacity = JSObject::NewElementsCapacity(elements->length()); Handle new_elements = factory()->NewFixedArrayWithHoles(new_capacity); for (int i = 0; i < cursor; i++) { new_elements->set(i, elements->get(i)); } elements = new_elements; } ASSERT(cursor + 4 <= elements->length()); Handle recv = frames[i].receiver(); Handle fun = frames[i].function(); Handle code = frames[i].code(); Handle offset(Smi::FromInt(frames[i].offset()), this); // The stack trace API should not expose receivers and function // objects on frames deeper than the top-most one with a strict // mode function. The number of non-strict frames is stored as // first element in the result array. if (!encountered_strict_function) { if (!fun->shared()->is_classic_mode()) { encountered_strict_function = true; } else { non_strict_frames++; } } elements->set(cursor++, *recv); elements->set(cursor++, *fun); elements->set(cursor++, *code); elements->set(cursor++, *offset); } } } elements->set(0, Smi::FromInt(non_strict_frames)); Handle result = factory()->NewJSArrayWithElements(elements); result->set_length(Smi::FromInt(cursor)); return result; } void Isolate::CaptureAndSetDetailedStackTrace(Handle error_object) { if (capture_stack_trace_for_uncaught_exceptions_) { // Capture stack trace for a detailed exception message. Handle key = factory()->hidden_stack_trace_string(); Handle stack_trace = CaptureCurrentStackTrace( stack_trace_for_uncaught_exceptions_frame_limit_, stack_trace_for_uncaught_exceptions_options_); JSObject::SetHiddenProperty(error_object, key, stack_trace); } } Handle Isolate::CaptureCurrentStackTrace( int frame_limit, StackTrace::StackTraceOptions options) { // Ensure no negative values. 
int limit = Max(frame_limit, 0); Handle stack_trace = factory()->NewJSArray(frame_limit); Handle column_key = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column")); Handle line_key = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber")); Handle script_id_key = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptId")); Handle script_name_key = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName")); Handle script_name_or_source_url_key = factory()->InternalizeOneByteString( STATIC_ASCII_VECTOR("scriptNameOrSourceURL")); Handle function_key = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName")); Handle eval_key = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval")); Handle constructor_key = factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor")); StackTraceFrameIterator it(this); int frames_seen = 0; while (!it.done() && (frames_seen < limit)) { JavaScriptFrame* frame = it.frame(); // Set initial size to the maximum inlining level + 1 for the outermost // function. List frames(FLAG_max_inlining_levels + 1); frame->Summarize(&frames); for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) { // Create a JSObject to hold the information for the StackFrame. Handle stack_frame = factory()->NewJSObject(object_function()); Handle fun = frames[i].function(); Handle