[snapshot] Lazy-deserialize bytecode handlers

Add support for interpreter bytecode handlers that are deserialized
lazily, immediately before their first use.

Design doc: http://goo.gl/QxZBL2

Bug: v8:6624
Change-Id: Id68844ed14e76ca781b0bfe42c25a94b4fed1ae5
Reviewed-on: https://chromium-review.googlesource.com/750982
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49220}
Authored by jgruber on 2017-11-08 12:56:00 +01:00; committed by Commit Bot
parent 17eda5f968
commit b458736986
26 changed files with 438 additions and 54 deletions
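
For orientation, here is a minimal standalone sketch (plain C++, not V8 code; every name in it is invented for illustration) of the mechanism the patches below implement: dispatch-table slots for lazy handlers initially point at a DeserializeLazy trampoline, which deserializes the real handler on first use, writes it back into the table, and then dispatches to it so the current bytecode still executes.

#include <cstdio>
#include <functional>
#include <vector>

using Handler = std::function<void(int bytecode)>;

std::vector<Handler> dispatch_table;       // one slot per bytecode
std::vector<Handler> serialized_handlers;  // stands in for the snapshot

void DeserializeLazyAndDispatch(int bytecode) {
  std::printf("lazy-deserializing handler %d\n", bytecode);
  Handler real_handler = serialized_handlers[bytecode];  // "deserialize"
  dispatch_table[bytecode] = real_handler;               // patch the table
  real_handler(bytecode);                                // re-dispatch
}

int main() {
  const int kNumBytecodes = 3;
  serialized_handlers.assign(kNumBytecodes, [](int bytecode) {
    std::printf("executing handler for bytecode %d\n", bytecode);
  });
  // Every slot starts out pointing at the trampoline.
  dispatch_table.assign(kNumBytecodes, DeserializeLazyAndDispatch);

  dispatch_table[1](1);  // first use: deserialize, patch, then execute
  dispatch_table[1](1);  // second use: goes straight to the real handler
  return 0;
}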

@@ -960,6 +960,10 @@ DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
// snapshot-common.cc
DEFINE_BOOL(lazy_deserialization, true,
"Deserialize code lazily from the snapshot.")
DEFINE_BOOL(lazy_handler_deserialization, false,
"Deserialize bytecode handlers lazily from the snapshot.")
DEFINE_IMPLICATION(lazy_handler_deserialization, lazy_deserialization)
DEFINE_IMPLICATION(future, lazy_handler_deserialization)
DEFINE_BOOL(trace_lazy_deserialization, false, "Trace lazy deserialization.")
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")

@@ -2581,6 +2581,9 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kApiSymbolTableRootIndex:
case kApiPrivateSymbolTableRootIndex:
case kMessageListenersRootIndex:
case kDeserializeLazyHandlerRootIndex:
case kDeserializeLazyHandlerWideRootIndex:
case kDeserializeLazyHandlerExtraWideRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -6245,6 +6248,23 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
bool Heap::IsDeserializeLazyHandler(Code* code) {
return (code == deserialize_lazy_handler() ||
code == deserialize_lazy_handler_wide() ||
code == deserialize_lazy_handler_extra_wide());
}
void Heap::SetDeserializeLazyHandler(Code* code) {
set_deserialize_lazy_handler(code);
}
void Heap::SetDeserializeLazyHandlerWide(Code* code) {
set_deserialize_lazy_handler_wide(code);
}
void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
set_deserialize_lazy_handler_extra_wide(code);
}
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;

@@ -240,6 +240,11 @@ using v8::MemoryPressureLevel;
V(FixedArray, serialized_templates, SerializedTemplates) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
/* DeserializeLazy handlers for lazy bytecode deserialization */ \
V(Object, deserialize_lazy_handler, DeserializeLazyHandler) \
V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide) \
V(Object, deserialize_lazy_handler_extra_wide, \
DeserializeLazyHandlerExtraWide) \
/* JS Entries */ \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode)
@@ -1092,6 +1097,11 @@ class Heap {
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
bool IsDeserializeLazyHandler(Code* code);
void SetDeserializeLazyHandler(Code* code);
void SetDeserializeLazyHandlerWide(Code* code);
void SetDeserializeLazyHandlerExtraWide(Code* code);
// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================

@@ -11,6 +11,7 @@
#include "src/factory.h"
#include "src/heap-symbols.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/layout-descriptor.h"
#include "src/lookup-cache.h"
@@ -621,6 +622,11 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(Smi::kZero);
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
set_deserialize_lazy_handler(Smi::kZero);
set_deserialize_lazy_handler_wide(Smi::kZero);
set_deserialize_lazy_handler_extra_wide(Smi::kZero);
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();

@@ -8,6 +8,7 @@
#include <cstdint>
#include <iosfwd>
#include <string>
#include <vector>
#include "src/globals.h"
#include "src/interpreter/bytecode-operands.h"
@@ -675,6 +676,12 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kDebugBreakWide;
}
// Returns true if the bytecode can be lazily deserialized.
static constexpr bool IsLazy(Bytecode bytecode) {
// Currently, all handlers are deserialized lazily.
return true;
}
// Returns the number of values which |bytecode| returns.
static constexpr size_t ReturnCount(Bytecode bytecode) {
return bytecode == Bytecode::kReturn ? 1 : 0;
@@ -853,6 +860,26 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
}
}
static std::vector<Bytecode> AllBytecodesUsingHandler(Bytecode bytecode) {
Bytecode dummy;
USE(dummy);
switch (bytecode) {
case Bytecode::kLdaContextSlot:
DCHECK(
ReusesExistingHandler(Bytecode::kLdaImmutableContextSlot, &dummy));
DCHECK_EQ(bytecode, dummy);
return {bytecode, Bytecode::kLdaImmutableContextSlot};
case Bytecode::kLdaCurrentContextSlot:
DCHECK(ReusesExistingHandler(Bytecode::kLdaImmutableCurrentContextSlot,
&dummy));
DCHECK_EQ(bytecode, dummy);
return {bytecode, Bytecode::kLdaImmutableCurrentContextSlot};
default:
DCHECK(!ReusesExistingHandler(bytecode, &dummy));
return {bytecode};
}
}
// Returns the size of |operand_type| for |operand_scale|.
static OperandSize SizeOfOperand(OperandType operand_type,
OperandScale operand_scale) {
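
To make the handler aliasing above concrete, here is a sketch of the expected results of AllBytecodesUsingHandler, based on the switch shown in this hunk (the kLdaSmi line is merely an assumed example of a bytecode that does not alias):

// AllBytecodesUsingHandler(Bytecode::kLdaContextSlot)
//     -> {kLdaContextSlot, kLdaImmutableContextSlot}
// AllBytecodesUsingHandler(Bytecode::kLdaCurrentContextSlot)
//     -> {kLdaCurrentContextSlot, kLdaImmutableCurrentContextSlot}
// AllBytecodesUsingHandler(Bytecode::kLdaSmi)   // assumed non-aliased bytecode
//     -> {kLdaSmi}

The interpreter consumes this list when lazily deserializing: after fetching the canonical handler it writes the same Code object into every aliased dispatch-table slot (see the interpreter.cc hunk below).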

@@ -1527,6 +1527,18 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
Dispatch();
}
void InterpreterAssembler::DeserializeLazyAndDispatch() {
Node* context = GetContext();
Node* bytecode_offset = BytecodeOffset();
Node* bytecode = LoadBytecode(bytecode_offset);
Node* target_handler =
CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
SmiTag(bytecode), SmiConstant(operand_scale()));
DispatchToBytecodeHandler(target_handler, bytecode_offset);
}
} // namespace interpreter
} // namespace internal
} // namespace v8

@@ -245,6 +245,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void ToNumberOrNumeric(Object::Conversion mode);
// Lazily deserializes the current bytecode's handler and tail-calls into it.
void DeserializeLazyAndDispatch();
private:
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();

@@ -3243,6 +3243,71 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
return code;
}
namespace {
// DeserializeLazy
//
// Deserialize the bytecode handler, store it in the dispatch table, and
// finally jump there (preserving existing args).
// We manually create a custom assembler instead of using the helper macros
// above since no corresponding bytecode exists.
class DeserializeLazyAssembler : public InterpreterAssembler {
public:
static const Bytecode kFakeBytecode = Bytecode::kIllegal;
explicit DeserializeLazyAssembler(compiler::CodeAssemblerState* state,
OperandScale operand_scale)
: InterpreterAssembler(state, kFakeBytecode, operand_scale) {}
static void Generate(compiler::CodeAssemblerState* state,
OperandScale operand_scale) {
DeserializeLazyAssembler assembler(state, operand_scale);
state->SetInitialDebugInformation("DeserializeLazy", __FILE__, __LINE__);
assembler.GenerateImpl();
}
private:
void GenerateImpl() { DeserializeLazyAndDispatch(); }
DISALLOW_COPY_AND_ASSIGN(DeserializeLazyAssembler);
};
} // namespace
Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale) {
Zone zone(isolate->allocator(), ZONE_NAME);
const size_t return_count = 0;
std::string debug_name = std::string("DeserializeLazy");
if (operand_scale > OperandScale::kSingle) {
Bytecode prefix_bytecode =
Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
debug_name = debug_name.append(Bytecodes::ToString(prefix_bytecode));
}
InterpreterDispatchDescriptor descriptor(isolate);
compiler::CodeAssemblerState state(isolate, &zone, descriptor,
Code::BYTECODE_HANDLER, debug_name.c_str(),
return_count);
DeserializeLazyAssembler::Generate(&state, operand_scale);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PROFILE(isolate,
CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
AbstractCode::cast(*code), debug_name.c_str()));
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
OFStream os(stdout);
code->Disassemble(debug_name.c_str(), os);
os << std::flush;
}
#endif // ENABLE_DISASSEMBLER
return code;
}
} // namespace interpreter
} // namespace internal
} // namespace v8
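
Note that GenerateDeserializeLazyHandler is expected to be called once per operand scale (see the setup-interpreter.cc hunk below), so three trampolines are built in total. Assuming Bytecodes::ToString simply returns the prefix bytecode's name, their debug names would be:

// OperandScale::kSingle     -> "DeserializeLazy"
// OperandScale::kDouble     -> "DeserializeLazyWide"       (assumed name)
// OperandScale::kQuadruple  -> "DeserializeLazyExtraWide"  (assumed name)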

@@ -15,6 +15,9 @@ namespace interpreter {
extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
OperandScale operand_scale);
extern Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale);
} // namespace interpreter
} // namespace internal
} // namespace v8

@@ -19,6 +19,7 @@
#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/setup-isolate.h"
#include "src/snapshot/snapshot.h"
#include "src/visitors.h"
namespace v8 {
@@ -87,6 +88,44 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
}
Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
Bytecode bytecode, OperandScale operand_scale) {
Code* code = GetBytecodeHandler(bytecode, operand_scale);
// Already deserialized? Then just return the handler.
if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code;
DCHECK(FLAG_lazy_handler_deserialization);
if (FLAG_trace_lazy_deserialization) {
PrintF("Lazy-deserializing handler %s\n",
Bytecodes::ToString(bytecode, operand_scale).c_str());
}
// Some handlers are reused for several bytecodes. If we encounter such a
// bytecode, find the canonical handler, deserialize it, and write it into all
// slots in the dispatch table that (re)use it.
Bytecode maybe_reused_bytecode;
const bool reuses_existing_handler =
Bytecodes::ReusesExistingHandler(bytecode, &maybe_reused_bytecode);
Bytecode handler_bytecode =
reuses_existing_handler ? maybe_reused_bytecode : bytecode;
DCHECK(Bytecodes::BytecodeHasHandler(handler_bytecode, operand_scale));
code =
Snapshot::DeserializeHandler(isolate_, handler_bytecode, operand_scale);
DCHECK(code->IsCode());
DCHECK_EQ(code->kind(), Code::BYTECODE_HANDLER);
for (Bytecode b : Bytecodes::AllBytecodesUsingHandler(handler_bytecode)) {
SetBytecodeHandler(b, operand_scale, code);
}
return code;
}
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale) {
DCHECK(IsDispatchTableInitialized());
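
Concretely, with --trace-lazy-deserialization enabled, the first dispatch to a not-yet-deserialized handler should print a line along the lines of (the exact name string comes from Bytecodes::ToString):

Lazy-deserializing handler LdaContextSlot

Because kLdaImmutableContextSlot reuses that handler, the loop at the end of GetAndMaybeDeserializeBytecodeHandler writes the freshly deserialized Code object into both dispatch-table slots, so neither bytecode triggers another runtime call.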

@@ -42,6 +42,11 @@ class Interpreter {
FunctionLiteral* literal,
Isolate* isolate);
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
Code* GetAndMaybeDeserializeBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale);
// Return bytecode handler for |bytecode| and |operand_scale|.
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);

@@ -56,6 +56,18 @@ void SetupInterpreter::InstallBytecodeHandlers(Interpreter* interpreter) {
}
}
// Generate the DeserializeLazy handlers, one for each operand scale.
Heap* heap = interpreter->isolate_->heap();
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler());
heap->SetDeserializeLazyHandler(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kSingle));
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_wide());
heap->SetDeserializeLazyHandlerWide(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kDouble));
DCHECK_EQ(Smi::kZero, heap->deserialize_lazy_handler_extra_wide());
heap->SetDeserializeLazyHandlerExtraWide(*GenerateDeserializeLazyHandler(
interpreter->isolate_, OperandScale::kQuadruple));
// Initialization should have been successful.
DCHECK(interpreter->IsDispatchTableInitialized());
}
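
A note on timing: InstallBytecodeHandlers runs when the interpreter is set up from scratch (typically while building the snapshot), so the three DeserializeLazy trampolines generated here become strong heap roots and are serialized by the StartupSerializer during root iteration (see the builtin-serializer.cc hunk below); at normal isolate startup they are simply deserialized along with the other roots.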

@@ -1598,6 +1598,7 @@ void Logger::LogBytecodeHandlers() {
interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
if (isolate_->heap()->IsDeserializeLazyHandler(code)) continue;
std::string bytecode_name =
interpreter::Bytecodes::ToString(bytecode, operand_scale);
PROFILE(isolate_, CodeCreateEvent(

@@ -21,12 +21,17 @@
#include "src/isolate-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::OperandScale;
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
@@ -47,20 +52,22 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
SharedFunctionInfo* shared = interpreted_frame->function()->shared();
BytecodeArray* bytecode_array = shared->bytecode_array();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
if (bytecode == interpreter::Bytecode::kReturn) {
Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
if (bytecode == Bytecode::kReturn) {
// If we are returning, reset the bytecode array on the interpreted stack
// frame to the non-debug variant so that the interpreter entry trampoline
// sees the return bytecode rather than the DebugBreak.
interpreted_frame->PatchBytecodeArray(bytecode_array);
}
// We do not have to deal with operand scale here. If the bytecode at the
// break is prefixed by operand scaling, we would have patched over the
// scaling prefix. We now simply dispatch to the handler for the prefix.
return MakePair(isolate->debug()->return_value(),
isolate->interpreter()->GetBytecodeHandler(
bytecode, interpreter::OperandScale::kSingle));
OperandScale operand_scale = OperandScale::kSingle;
Code* code = isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
bytecode, operand_scale);
return MakePair(isolate->debug()->return_value(), code);
}
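
The reasoning behind this change: with lazy handler deserialization enabled, the handler for the bytecode under the debug break may not have been deserialized yet (the break can be the very first execution of that bytecode kind), so the debugger path now goes through GetAndMaybeDeserializeBytecodeHandler rather than GetBytecodeHandler.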

@@ -526,7 +526,7 @@ RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
if (FLAG_trace_lazy_deserialization) {
PrintF("Lazy-deserializing %s\n", Builtins::name(builtin_id));
PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
}
Code* code = Snapshot::DeserializeBuiltin(isolate, builtin_id);

@@ -13,12 +13,34 @@
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
RUNTIME_FUNCTION(Runtime_InterpreterDeserializeLazy) {
HandleScope scope(isolate);
DCHECK(FLAG_lazy_handler_deserialization);
DCHECK(FLAG_lazy_deserialization);
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(bytecode_int, 0);
CONVERT_SMI_ARG_CHECKED(operand_scale_int, 1);
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::OperandScale;
Bytecode bytecode = Bytecodes::FromByte(bytecode_int);
OperandScale operand_scale = static_cast<OperandScale>(operand_scale_int);
return isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
bytecode, operand_scale);
}
RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
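
The bytecode and operand scale travel through this runtime call as Smis. A minimal self-contained sketch of the round trip, under the assumption that the OperandScale enumerators are the scale factors themselves:

#include <cstdint>

enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

int main() {
  // SmiConstant(operand_scale) side: encode as a plain integer.
  int encoded = static_cast<int>(OperandScale::kQuadruple);
  // CONVERT_SMI_ARG_CHECKED side: cast the untagged integer back.
  OperandScale decoded = static_cast<OperandScale>(encoded);
  return decoded == OperandScale::kQuadruple ? 0 : 1;
}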

@@ -224,6 +224,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
F(InterpreterDeserializeLazy, 2, 1) \
F(InterpreterNewClosure, 4, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \

@@ -12,15 +12,23 @@
namespace v8 {
namespace internal {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::Interpreter;
using interpreter::OperandScale;
BuiltinDeserializerAllocator::BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer)
: deserializer_(deserializer) {}
BuiltinDeserializerAllocator::~BuiltinDeserializerAllocator() {
delete handler_allocations_;
}
namespace {
int HandlerAllocationIndex(int code_object_id) {
return code_object_id - BuiltinSnapshotUtils::kFirstHandlerIndex;
}
} // namespace
Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
int size) {
const int code_object_id = deserializer()->CurrentCodeObjectId();
@@ -36,18 +44,18 @@ Address BuiltinDeserializerAllocator::Allocate(AllocationSpace space,
DCHECK(Internals::HasHeapObjectTag(obj));
return HeapObject::cast(obj)->address();
} else if (BSU::IsHandlerIndex(code_object_id)) {
Bytecode bytecode;
OperandScale operand_scale;
std::tie(bytecode, operand_scale) = BSU::BytecodeFromIndex(code_object_id);
Address* dispatch_table = isolate()->interpreter()->dispatch_table_;
const size_t index =
Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
Object* obj = HeapObject::FromAddress(dispatch_table[index]);
DCHECK(Internals::HasHeapObjectTag(obj));
return HeapObject::cast(obj)->address();
if (handler_allocation_ != nullptr) {
// Lazy deserialization.
DCHECK_NULL(handler_allocations_);
return handler_allocation_;
} else {
// Eager deserialization.
DCHECK_NULL(handler_allocation_);
DCHECK_NOT_NULL(handler_allocations_);
int index = HandlerAllocationIndex(code_object_id);
DCHECK_NOT_NULL(handler_allocations_->at(index));
return handler_allocations_->at(index);
}
}
UNREACHABLE();
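
The Allocate change above distinguishes the two modes: during eager (startup) deserialization every handler gets a pre-allocated code object recorded in handler_allocations_, indexed by code_object_id minus BSU::kFirstHandlerIndex, while during lazy deserialization exactly one object is pre-allocated and kept in handler_allocation_. A small worked example of the index computation (hypothetical numbers):

// If kFirstHandlerIndex were 1500, the handler with code_object_id 1502
// would be looked up at handler_allocations_->at(2).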
@@ -59,8 +67,8 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
// Reservations for builtins.
// DeserializeLazy is always the first reservation (to simplify logic in
// InitializeBuiltinsTable).
// DeserializeLazy is always the first builtin reservation (to simplify logic
// in InitializeBuiltinsTable).
{
DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
uint32_t builtin_size =
@@ -73,7 +81,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
if (i == Builtins::kDeserializeLazy) continue;
// Skip lazy builtins. These will be replaced by the DeserializeLazy code
// object in InitializeBuiltinsTable and thus require no reserved space.
// object in InitializeFromReservations and thus require no reserved space.
if (deserializer()->IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
continue;
}
@@ -87,9 +95,17 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltinsAndHandlers() {
BSU::ForEachBytecode(
[=, &result](Bytecode bytecode, OperandScale operand_scale) {
// TODO(jgruber): Replace with DeserializeLazy handler.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) {
// Bytecodes without a handler don't require a reservation.
return;
} else if (FLAG_lazy_handler_deserialization &&
deserializer()->IsLazyDeserializationEnabled() &&
Bytecodes::IsLazy(bytecode)) {
// Skip lazy handlers. These will be replaced by the DeserializeLazy
// code object in InitializeFromReservations and thus require no
// reserved space.
return;
}
const int index = BSU::BytecodeToIndex(bytecode, operand_scale);
uint32_t handler_size = deserializer()->ExtractCodeObjectSize(index);
@@ -124,14 +140,10 @@ void BuiltinDeserializerAllocator::InitializeHandlerFromReservation(
SkipList::Update(chunk.start, chunk.size);
Address* dispatch_table = isolate()->interpreter()->dispatch_table_;
const size_t index =
Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
// At this point, the HeapObject is not yet a Code object, and thus we don't
// initialize with code->entry() here. Once deserialization completes, this
// is overwritten with the final code->entry() value.
dispatch_table[index] = chunk.start;
DCHECK_NOT_NULL(handler_allocations_);
const int index =
HandlerAllocationIndex(BSU::BytecodeToIndex(bytecode, operand_scale));
handler_allocations_->at(index) = chunk.start;
#ifdef DEBUG
RegisterCodeObjectReservation(BSU::BytecodeToIndex(bytecode, operand_scale));
@@ -169,13 +181,23 @@ void BuiltinDeserializerAllocator::InitializeFromReservations(
}
}
// Initialize the interpreter dispatch table.
// Initialize interpreter bytecode handler reservations.
DCHECK_NULL(handler_allocations_);
handler_allocations_ = new std::vector<Address>(BSU::kNumberOfHandlers);
BSU::ForEachBytecode(
[=, &reservation_index](Bytecode bytecode, OperandScale operand_scale) {
// TODO(jgruber): Replace with DeserializeLazy handler.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) {
// Bytecodes without a handler don't have a reservation.
return;
} else if (FLAG_lazy_handler_deserialization &&
deserializer()->IsLazyDeserializationEnabled() &&
Bytecodes::IsLazy(bytecode)) {
// Likewise, bytecodes with lazy handlers don't have one either.
return;
}
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
InitializeHandlerFromReservation(reservation[reservation_index],
bytecode, operand_scale);
reservation_index++;
@@ -214,6 +236,28 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
#endif
}
void BuiltinDeserializerAllocator::ReserveForHandler(
Bytecode bytecode, OperandScale operand_scale) {
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
const int code_object_id = BSU::BytecodeToIndex(bytecode, operand_scale);
const uint32_t handler_size =
deserializer()->ExtractCodeObjectSize(code_object_id);
DCHECK_LE(handler_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
handler_allocation_ =
isolate()->factory()->NewCodeForDeserialization(handler_size)->address();
// Note: After this point and until deserialization finishes, heap allocation
// is disallowed. We currently can't safely assert this since we'd need to
// pass the DisallowHeapAllocation scope out of this function.
#ifdef DEBUG
RegisterCodeObjectReservation(code_object_id);
#endif
}
#ifdef DEBUG
void BuiltinDeserializerAllocator::RegisterCodeObjectReservation(
int code_object_id) {

@@ -9,6 +9,7 @@
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
@@ -22,11 +23,15 @@ class BuiltinSnapshotUtils;
class BuiltinDeserializerAllocator final {
using BSU = BuiltinSnapshotUtils;
using Bytecode = interpreter::Bytecode;
using OperandScale = interpreter::OperandScale;
public:
BuiltinDeserializerAllocator(
Deserializer<BuiltinDeserializerAllocator>* deserializer);
~BuiltinDeserializerAllocator();
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
@@ -37,8 +42,16 @@ class BuiltinDeserializerAllocator final {
// deserialization) in order to avoid having to patch builtin references
// later on. See also the kBuiltin case in deserializer.cc.
//
// There are three ways we reserve / allocate space. In all cases, the
// required objects are requested from the GC prior to deserialization:
// 1. pre-allocated builtin code objects are written into the builtins table
// (this makes deserialization of builtin references easier);
// 2. pre-allocated handler code objects for eager deserialization are stored
// in the {handler_allocations_} vector;
// 3. the single pre-allocated handler code object for lazy deserialization is
// stored in {handler_allocation_}.
//
// Allocate simply returns the pre-allocated object prepared by
// InitializeBuiltinsTable.
// InitializeFromReservations.
Address Allocate(AllocationSpace space, int size);
void MoveToNextChunk(AllocationSpace space) { UNREACHABLE(); }
@@ -69,6 +82,10 @@ class BuiltinDeserializerAllocator final {
// lazily deserializing a single builtin.
void ReserveAndInitializeBuiltinsTableForBuiltin(int builtin_id);
// Pre-allocates a code object in preparation for lazily deserializing a single
// handler.
void ReserveForHandler(Bytecode bytecode, OperandScale operand_scale);
#ifdef DEBUG
bool ReservationsAreFullyUsed() const;
#endif
@@ -105,6 +122,13 @@ class BuiltinDeserializerAllocator final {
// construction since that makes vtable-based checks fail.
Deserializer<BuiltinDeserializerAllocator>* const deserializer_;
// Stores allocated space for bytecode handlers during eager deserialization.
std::vector<Address>* handler_allocations_ = nullptr;
// Stores the allocated space for a single handler during lazy
// deserialization.
Address handler_allocation_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
};

@@ -12,10 +12,8 @@
namespace v8 {
namespace internal {
using interpreter::Bytecode;
using interpreter::Bytecodes;
using interpreter::Interpreter;
using interpreter::OperandScale;
// Tracks the code object currently being deserialized (required for
// allocation).
@@ -60,7 +58,7 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
for (int i = 0; i < BSU::kNumberOfBuiltins; i++) {
if (IsLazyDeserializationEnabled() && Builtins::IsLazy(i)) {
// Do nothing. These builtins have been replaced by DeserializeLazy in
// InitializeBuiltinsTable.
// InitializeFromReservations.
DCHECK_EQ(builtins->builtin(Builtins::kDeserializeLazy),
builtins->builtin(i));
} else {
@@ -77,17 +75,21 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
// Deserialize bytecode handlers.
// The dispatch table has been initialized during memory reservation.
Interpreter* interpreter = isolate()->interpreter();
DCHECK(isolate()->interpreter()->IsDispatchTableInitialized());
DCHECK(!isolate()->interpreter()->IsDispatchTableInitialized());
BSU::ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
// TODO(jgruber): Replace with DeserializeLazy handler.
// Bytecodes without a dedicated handler are patched up in a second pass.
if (!BSU::BytecodeHasDedicatedHandler(bytecode, operand_scale)) return;
Code* code = DeserializeHandlerRaw(bytecode, operand_scale);
// If lazy-deserialization is enabled and the current bytecode is lazy,
// we write the generic DeserializeLazy handler into the dispatch table
// and deserialize later upon first use.
Code* code = (FLAG_lazy_handler_deserialization &&
IsLazyDeserializationEnabled() && Bytecodes::IsLazy(bytecode))
? GetDeserializeLazyHandler(operand_scale)
: DeserializeHandlerRaw(bytecode, operand_scale);
interpreter->SetBytecodeHandler(bytecode, operand_scale, code);
});
@@ -123,6 +125,13 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
return DeserializeBuiltinRaw(builtin_id);
}
Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
OperandScale operand_scale) {
allocator()->ReserveForHandler(bytecode, operand_scale);
DisallowHeapAllocation no_gc;
return DeserializeHandlerRaw(bytecode, operand_scale);
}
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK(Builtins::IsBuiltinId(builtin_id));
@@ -190,5 +199,20 @@ uint32_t BuiltinDeserializer::ExtractCodeObjectSize(int code_object_id) {
return result;
}
Code* BuiltinDeserializer::GetDeserializeLazyHandler(
interpreter::OperandScale operand_scale) const {
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
switch (operand_scale) {
case OperandScale::kSingle:
return Code::cast(isolate()->heap()->deserialize_lazy_handler());
case OperandScale::kDouble:
return Code::cast(isolate()->heap()->deserialize_lazy_handler_wide());
case OperandScale::kQuadruple:
return Code::cast(
isolate()->heap()->deserialize_lazy_handler_extra_wide());
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8

@@ -19,6 +19,8 @@ class BuiltinSnapshotData;
class BuiltinDeserializer final
: public Deserializer<BuiltinDeserializerAllocator> {
using BSU = BuiltinSnapshotUtils;
using Bytecode = interpreter::Bytecode;
using OperandScale = interpreter::OperandScale;
public:
BuiltinDeserializer(Isolate* isolate, const BuiltinSnapshotData* data);
@@ -36,6 +38,10 @@ class BuiltinDeserializer final
// lazily deserialized at runtime.
Code* DeserializeBuiltin(int builtin_id);
// Deserializes the single given handler. This is used whenever a handler is
// lazily deserialized at runtime.
Code* DeserializeHandler(Bytecode bytecode, OperandScale operand_scale);
private:
// Deserializes the single given builtin. Assumes that reservations have
// already been allocated.
@@ -43,8 +49,7 @@ class BuiltinDeserializer final
// Deserializes the single given bytecode handler. Assumes that reservations
// have already been allocated.
Code* DeserializeHandlerRaw(interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
Code* DeserializeHandlerRaw(Bytecode bytecode, OperandScale operand_scale);
// Extracts the size of builtin Code objects (baked into the snapshot).
uint32_t ExtractCodeObjectSize(int builtin_id);
@@ -57,6 +62,9 @@ class BuiltinDeserializer final
int CurrentCodeObjectId() const { return current_code_object_id_; }
// Convenience function to grab the handler off the heap's strong root list.
Code* GetDeserializeLazyHandler(OperandScale operand_scale) const;
private:
// Stores the code object currently being deserialized. The
// {current_code_object_id} stores the index of the currently-deserialized

@@ -45,6 +45,16 @@ void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
isolate()->interpreter()->GetBytecodeHandler(bytecode, operand_scale));
});
STATIC_ASSERT(BSU::kFirstHandlerIndex + BSU::kNumberOfHandlers ==
BSU::kNumberOfCodeObjects);
// The DeserializeLazy handlers are serialized by the StartupSerializer
// during strong root iteration.
DCHECK(isolate()->heap()->deserialize_lazy_handler()->IsCode());
DCHECK(isolate()->heap()->deserialize_lazy_handler_wide()->IsCode());
DCHECK(isolate()->heap()->deserialize_lazy_handler_extra_wide()->IsCode());
// Pad with kNop since GetInt() might read too far.
Pad();
@@ -74,7 +84,7 @@ void BuiltinSerializer::SerializeBuiltin(Code* code) {
}
void BuiltinSerializer::SerializeHandler(Code* code) {
DCHECK_EQ(Code::BYTECODE_HANDLER, code->kind());
DCHECK(ObjectIsBytecodeHandler(code));
ObjectSerializer object_serializer(this, code, &sink_, kPlain,
kStartOfObject);
object_serializer.Serialize();

@@ -212,11 +212,12 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
return true;
}
// static
template <class AllocatorT>
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) {
bool Serializer<AllocatorT>::ObjectIsBytecodeHandler(HeapObject* obj) const {
if (!obj->IsCode()) return false;
return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
Code* code = Code::cast(obj);
if (isolate()->heap()->IsDeserializeLazyHandler(code)) return false;
return (code->kind() == Code::BYTECODE_HANDLER);
}
template <class AllocatorT>

@@ -195,7 +195,7 @@ class Serializer : public SerializerDeserializer {
int skip, BuiltinReferenceSerializationMode mode = kDefault);
// Returns true if the given heap object is a bytecode handler code object.
static bool ObjectIsBytecodeHandler(HeapObject* obj);
bool ObjectIsBytecodeHandler(HeapObject* obj) const;
inline void FlushSkip(int skip) {
if (skip != 0) {

@@ -115,6 +115,36 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
return code;
}
// static
Code* Snapshot::DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData* blob = isolate->snapshot_blob();
Vector<const byte> builtin_data = Snapshot::ExtractBuiltinData(blob);
BuiltinSnapshotData builtin_snapshot_data(builtin_data);
CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
BuiltinDeserializer builtin_deserializer(isolate, &builtin_snapshot_data);
Code* code = builtin_deserializer.DeserializeHandler(bytecode, operand_scale);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = code->Size();
PrintF("[Deserializing handler %s (%d bytes) took %0.3f ms]\n",
interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str(),
bytes, ms);
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
isolate->logger()->LogCodeObject(code);
}
return code;
}
void ProfileDeserialization(
const SnapshotData* startup_snapshot, const SnapshotData* builtin_snapshot,
const std::vector<SnapshotData*>& context_snapshots) {

@@ -98,6 +98,12 @@ class Snapshot : public AllStatic {
// initialized.
static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
// Deserializes a single given handler code object. Intended to be called at
// runtime after the isolate has been fully initialized.
static Code* DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale);
// ---------------- Helper methods ----------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);