[baseline] Use CodeBuilder::TryBuild to handle on heap compilation

Bug: v8:11872
Change-Id: Ibd26b025fc5eb12d90c3a1c932bd9b8473612016
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2978254
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75329}
Victor Gomes authored 2021-06-23 12:11:38 +02:00, committed by V8 LUCI CQ
parent 151668b935
commit 00fb203d58
3 changed files with 121 additions and 147 deletions
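In short: the baseline compiler's special-case FinishBaselineCode path is deleted, and Factory::CodeBuilder::BuildInternal decides by itself whether the code was assembled on the heap. Condensed from the factory.cc hunks below (a paraphrase, not a verbatim excerpt):

    bool code_is_on_heap = code_desc_.origin && code_desc_.origin->IsOnHeap();
    if (code_is_on_heap) {
      // Sparkplug assembled straight into a heap-allocated Code object;
      // reuse it and finish it in place later via FinalizeOnHeapCode().
      code = code_desc_.origin->code().ToHandleChecked();
    } else {
      // Off-heap buffer: allocate a fresh Code object, which may fail.
      if (!AllocateCode(retry_allocation_or_fail).ToHandle(&code)) {
        return MaybeHandle<Code>();
      }
    }

With the dispatch inside the builder, BaselineCompiler::Build can call plain TryBuild() in both cases.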

src/baseline/baseline-compiler.cc

@@ -322,20 +322,9 @@ MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
   // Allocate the bytecode offset table.
   Handle<ByteArray> bytecode_offset_table =
       bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
-  if (masm_.IsOnHeap()) {
-    // We compiled on heap, we need to finalise the code object fields.
-    DCHECK(FLAG_sparkplug_on_heap);
-    // TODO(victorgomes): Use CodeDesc to handle on-heap-ness.
-    // We can then simply call TryBuild() here.
-    return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
-        .set_bytecode_offset_table(bytecode_offset_table)
-        .FinishBaselineCode(masm_.code().ToHandleChecked(),
-                            masm_.buffer_size());
-  } else {
-    return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
-        .set_bytecode_offset_table(bytecode_offset_table)
-        .TryBuild();
-  }
+  return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
+      .set_bytecode_offset_table(bytecode_offset_table)
+      .TryBuild();
 }
 
 int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) {

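For context, the two public builder entry points differ only in retry policy; TryBuild's body is not part of this diff, so the sketch below assumes it forwards to BuildInternal with retries disabled, matching Build() as shown in factory.cc:

    // Sketch; TryBuild's actual body is not shown in this diff.
    MaybeHandle<Code> Factory::CodeBuilder::TryBuild() {
      return BuildInternal(false);  // light retry: empty handle on failure
    }
    Handle<Code> Factory::CodeBuilder::Build() {
      return BuildInternal(true).ToHandleChecked();  // retry or fail hard
    }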
src/heap/factory.cc

@@ -81,36 +81,6 @@ Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
       kind_(kind),
       position_table_(isolate_->factory()->empty_byte_array()) {}
 
-void Factory::CodeBuilder::SetCodeFields(
-    Code raw_code, Handle<ByteArray> reloc_info,
-    Handle<CodeDataContainer> data_container) {
-  DisallowGarbageCollection no_gc;
-  constexpr bool kIsNotOffHeapTrampoline = false;
-  raw_code.set_raw_instruction_size(code_desc_.instruction_size());
-  raw_code.set_raw_metadata_size(code_desc_.metadata_size());
-  raw_code.set_relocation_info(*reloc_info);
-  raw_code.initialize_flags(kind_, is_turbofanned_, stack_slots_,
-                            kIsNotOffHeapTrampoline);
-  raw_code.set_builtin_id(builtin_);
-  // This might impact direct concurrent reads from TF if we are resetting this
-  // field. We currently assume it's immutable thus a relaxed read (after
-  // passing IsPendingAllocation).
-  raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
-  raw_code.set_code_data_container(*data_container, kReleaseStore);
-  raw_code.set_deoptimization_data(*deoptimization_data_);
-  if (kind_ == CodeKind::BASELINE) {
-    raw_code.set_bytecode_offset_table(*position_table_);
-  } else {
-    raw_code.set_source_position_table(*position_table_);
-  }
-  raw_code.set_handler_table_offset(code_desc_.handler_table_offset_relative());
-  raw_code.set_constant_pool_offset(code_desc_.constant_pool_offset_relative());
-  raw_code.set_code_comments_offset(code_desc_.code_comments_offset_relative());
-  raw_code.set_unwinding_info_offset(
-      code_desc_.unwinding_info_offset_relative());
-}
-
 MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
     bool retry_allocation_or_fail) {
   const auto factory = isolate_->factory();
@@ -157,50 +127,65 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
   }
 
   STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
-  const int object_size = Code::SizeFor(code_desc_.body_size());
+
+  Heap* heap = isolate_->heap();
+  CodePageCollectionMemoryModificationScope code_allocation(heap);
 
   Handle<Code> code;
-  {
-    Heap* heap = isolate_->heap();
-    CodePageCollectionMemoryModificationScope code_allocation(heap);
-    HeapObject result;
-    AllocationType allocation_type =
-        V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_
-            ? AllocationType::kCode
-            : AllocationType::kReadOnly;
-    if (retry_allocation_or_fail) {
-      result = heap->AllocateRawWith<Heap::kRetryOrFail>(
-          object_size, allocation_type, AllocationOrigin::kRuntime);
-    } else {
-      result = heap->AllocateRawWith<Heap::kLightRetry>(
-          object_size, allocation_type, AllocationOrigin::kRuntime);
-      // Return an empty handle if we cannot allocate the code object.
-      if (result.is_null()) return MaybeHandle<Code>();
+  bool code_is_on_heap = code_desc_.origin && code_desc_.origin->IsOnHeap();
+  if (code_is_on_heap) {
+    DCHECK(FLAG_sparkplug_on_heap);
+    DCHECK_EQ(kind_, CodeKind::BASELINE);
+    code = code_desc_.origin->code().ToHandleChecked();
+  } else {
+    if (!AllocateCode(retry_allocation_or_fail).ToHandle(&code)) {
+      return MaybeHandle<Code>();
     }
+  }
 
-    // The code object has not been fully initialized yet. We rely on the
-    // fact that no allocation will happen from this point on.
+  {
+    Code raw_code = *code;
+    constexpr bool kIsNotOffHeapTrampoline = false;
     DisallowGarbageCollection no_gc;
-    result.set_map_after_allocation(*factory->code_map(), SKIP_WRITE_BARRIER);
-    Code raw_code = Code::cast(result);
-    code = handle(raw_code, isolate_);
-    if (is_executable_) {
-      DCHECK(IsAligned(code->address(), kCodeAlignment));
-      DCHECK_IMPLIES(
-          !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
-          heap->code_region().contains(code->address()));
+
+    if (code_is_on_heap) {
+      // TODO(victorgomes): we must notify the GC that code layout will change.
+      heap->EnsureSweepingCompleted(code);
     }
-    SetCodeFields(raw_code, reloc_info, data_container);
+
+    raw_code.set_raw_instruction_size(code_desc_.instruction_size());
+    raw_code.set_raw_metadata_size(code_desc_.metadata_size());
+    raw_code.set_relocation_info(*reloc_info);
+    raw_code.initialize_flags(kind_, is_turbofanned_, stack_slots_,
+                              kIsNotOffHeapTrampoline);
+    raw_code.set_builtin_id(builtin_);
+    // This might impact direct concurrent reads from TF if we are resetting
+    // this field. We currently assume it's immutable thus a relaxed read (after
+    // passing IsPendingAllocation).
+    raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
+    raw_code.set_code_data_container(*data_container, kReleaseStore);
+    raw_code.set_deoptimization_data(*deoptimization_data_);
+    if (kind_ == CodeKind::BASELINE) {
+      raw_code.set_bytecode_offset_table(*position_table_);
+    } else {
+      raw_code.set_source_position_table(*position_table_);
+    }
+    raw_code.set_handler_table_offset(
+        code_desc_.handler_table_offset_relative());
+    raw_code.set_constant_pool_offset(
+        code_desc_.constant_pool_offset_relative());
+    raw_code.set_code_comments_offset(
+        code_desc_.code_comments_offset_relative());
+    raw_code.set_unwinding_info_offset(
+        code_desc_.unwinding_info_offset_relative());
 
     // Allow self references to created code object by patching the handle to
     // point to the newly allocated Code object.
     Handle<Object> self_reference;
     if (self_reference_.ToHandle(&self_reference)) {
       DCHECK(self_reference->IsOddball());
-      DCHECK(Oddball::cast(*self_reference).kind() ==
-             Oddball::kSelfReferenceMarker);
+      DCHECK_EQ(Oddball::cast(*self_reference).kind(),
+                Oddball::kSelfReferenceMarker);
+      DCHECK_NE(kind_, CodeKind::BASELINE);
       if (isolate_->IsGeneratingEmbeddedBuiltins()) {
         isolate_->builtins_constants_table_builder()->PatchSelfReference(
             self_reference, code);
@@ -216,13 +201,17 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
           handle(on_heap_profiler_data->counts(), isolate_));
     }
 
-    // Migrate generated code.
-    // The generated code can contain embedded objects (typically from handles)
-    // in a pointer-to-tagged-value format (i.e. with indirection like a handle)
-    // that are dereferenced during the copy to point directly to the actual
-    // heap objects. These pointers can include references to the code object
-    // itself, through the self_reference parameter.
-    raw_code.CopyFromNoFlush(heap, code_desc_);
+    if (code_is_on_heap) {
+      FinalizeOnHeapCode(code);
+    } else {
+      // Migrate generated code.
+      // The generated code can contain embedded objects (typically from
+      // handles) in a pointer-to-tagged-value format (i.e. with indirection
+      // like a handle) that are dereferenced during the copy to point directly
+      // to the actual heap objects. These pointers can include references to
+      // the code object itself, through the self_reference parameter.
+      raw_code.CopyFromNoFlush(heap, code_desc_);
+    }
 
     raw_code.clear_padding();
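Read end to end, the rewritten BuildInternal follows one sequence for both origins (an outline distilled from the hunks above, not verbatim code):

    // 1. Obtain the Code object: reuse code_desc_.origin->code() (on-heap)
    //    or call AllocateCode() (off-heap origin).
    // 2. Under DisallowGarbageCollection, initialize every Code field inline
    //    (sizes, reloc info, flags, offset tables); SetCodeFields is gone.
    // 3. Patch the self-reference marker if present (never for BASELINE).
    // 4. CopyFromNoFlush() to migrate off-heap code, or FinalizeOnHeapCode()
    //    to relocate and trim in place.
    // 5. clear_padding(), then verification and profiling as before.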
@@ -259,6 +248,65 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
   return code;
 }
 
+MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
+    bool retry_allocation_or_fail) {
+  Heap* heap = isolate_->heap();
+  HeapObject result;
+  AllocationType allocation_type = V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_
+                                       ? AllocationType::kCode
+                                       : AllocationType::kReadOnly;
+  const int object_size = Code::SizeFor(code_desc_.body_size());
+  if (retry_allocation_or_fail) {
+    result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+        object_size, allocation_type, AllocationOrigin::kRuntime);
+  } else {
+    result = heap->AllocateRawWith<Heap::kLightRetry>(
+        object_size, allocation_type, AllocationOrigin::kRuntime);
+    // Return an empty handle if we cannot allocate the code object.
+    if (result.is_null()) return MaybeHandle<Code>();
+  }
+
+  // The code object has not been fully initialized yet. We rely on the
+  // fact that no allocation will happen from this point on.
+  DisallowGarbageCollection no_gc;
+  result.set_map_after_allocation(*isolate_->factory()->code_map(),
+                                  SKIP_WRITE_BARRIER);
+  Handle<Code> code = handle(Code::cast(result), isolate_);
+  if (is_executable_) {
+    DCHECK(IsAligned(code->address(), kCodeAlignment));
+    DCHECK_IMPLIES(
+        !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+        heap->code_region().contains(code->address()));
+  }
+  return code;
+}
+
+void Factory::CodeBuilder::FinalizeOnHeapCode(Handle<Code> code) {
+  Heap* heap = isolate_->heap();
+
+  code->CopyRelocInfoToByteArray(code->unchecked_relocation_info(), code_desc_);
+  code->RelocateFromDesc(heap, code_desc_);
+
+  int buffer_size = code_desc_.origin->buffer_size();
+  if (heap->code_lo_space()->Contains(*code)) {
+    // We cannot trim the Code object in CODE_LO_SPACE, so we update the
+    // metadata size to contain the extra bits.
+    code->set_raw_metadata_size(buffer_size - code_desc_.instruction_size());
+  } else {
+    // TODO(v8:11883): add a hook to GC to check if the filler is just before
+    // the current LAB, and if it is, immediately give back the memory.
+    int old_object_size = Code::SizeFor(buffer_size);
+    int new_object_size = Code::SizeFor(code_desc_.instruction_size() +
+                                        code_desc_.metadata_size());
+    int size_to_trim = old_object_size - new_object_size;
+    DCHECK_GE(size_to_trim, 0);
+    if (size_to_trim > 0) {
+      heap->CreateFillerObjectAt(code->address() + new_object_size,
+                                 size_to_trim, ClearRecordedSlots::kNo);
+    }
+  }
+}
+
 MaybeHandle<Code> Factory::NewEmptyCode(CodeKind kind, int buffer_size) {
   STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
   const int object_size = Code::SizeFor(buffer_size);
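To make the trimming arithmetic in FinalizeOnHeapCode concrete, a worked example with assumed sizes (illustrative only; Code::SizeFor(n) is roughly the object header plus an aligned n-byte body):

    // Assume the assembler reserved buffer_size = 4096 bytes and the final
    // code needs instruction_size = 1400 plus metadata_size = 136:
    //   old_object_size = Code::SizeFor(4096)                 // header + 4096
    //   new_object_size = Code::SizeFor(1400 + 136)           // header + 1536
    //   size_to_trim    = old_object_size - new_object_size   // 2560
    // The 2560 trailing bytes become a filler object at
    // code->address() + new_object_size. In CODE_LO_SPACE the object cannot
    // be trimmed, so the slack is accounted to metadata_size instead.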
@@ -299,66 +347,6 @@ Handle<Code> Factory::CodeBuilder::Build() {
   return BuildInternal(true).ToHandleChecked();
 }
 
-Handle<Code> Factory::CodeBuilder::FinishBaselineCode(Handle<Code> code,
-                                                      int buffer_size) {
-  DCHECK_EQ(code->kind(), CodeKind::BASELINE);
-  const auto factory = isolate_->factory();
-  Heap* heap = isolate_->heap();
-
-  // Allocate objects needed for code initialization.
-  Handle<ByteArray> reloc_info =
-      factory->NewByteArray(code_desc_.reloc_size, AllocationType::kOld);
-  Handle<CodeDataContainer> data_container;
-  data_container = factory->NewCodeDataContainer(
-      0, read_only_data_container_ ? AllocationType::kReadOnly
-                                   : AllocationType::kOld);
-  data_container->set_kind_specific_flags(kind_specific_flags_);
-
-  STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
-
-  {
-    DisallowGarbageCollection no_gc;
-    heap->EnsureSweepingCompleted(code);
-    // TODO(victorgomes): we must notify the GC that code layout will change.
-    // However this is currently not supported.
-    SetCodeFields(*code, reloc_info, data_container);
-    code->CopyRelocInfoToByteArray(code->unchecked_relocation_info(),
-                                   code_desc_);
-    code->RelocateFromDesc(heap, code_desc_);
-    if (heap->code_lo_space()->Contains(*code)) {
-      // We cannot trim the Code object in CODE_LO_SPACE, so we update the
-      // metadata size to contain the extra bits.
-      code->set_raw_metadata_size(buffer_size - code_desc_.instruction_size());
-    } else {
-      // Trim the rest of the buffer.
-      // TODO(v8:11883): add a hook to GC to check if the filler is just before
-      // the current LAB, and if it is, immediately give back the memory.
-      int old_object_size = Code::SizeFor(buffer_size);
-      int new_object_size = Code::SizeFor(code_desc_.instruction_size() +
-                                          code_desc_.metadata_size());
-      int size_to_trim = old_object_size - new_object_size;
-      DCHECK_GE(size_to_trim, 0);
-      if (size_to_trim > 0) {
-        heap->CreateFillerObjectAt(code->address() + new_object_size,
-                                   size_to_trim, ClearRecordedSlots::kNo);
-      }
-    }
-    code->clear_padding();
-#ifdef VERIFY_HEAP
-    if (FLAG_verify_heap) code->ObjectVerify(isolate_);
-#endif
-  }
-
-  if (profiler_data_ && FLAG_turbo_profiling_verbose) {
-#ifdef ENABLE_DISASSEMBLER
-    std::ostringstream os;
-    code->Disassemble(nullptr, os, isolate_);
-    profiler_data_->SetCode(os);
-#endif  // ENABLE_DISASSEMBLER
-  }
-
-  return code;
-}
-
 HeapObject Factory::AllocateRaw(int size, AllocationType allocation,
                                 AllocationAlignment alignment) {
   return isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(

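Nothing from the deleted FinishBaselineCode is lost; its pieces move as follows (a mapping inferred from the hunks above):

    // FinishBaselineCode(code, buffer_size)  -> callers now use TryBuild()
    //   reloc_info / data_container setup    -> shared BuildInternal() prologue
    //   EnsureSweepingCompleted(code)        -> BuildInternal(), on-heap branch
    //   SetCodeFields(...)                   -> field setters inlined in BuildInternal()
    //   CopyRelocInfoToByteArray / RelocateFromDesc / trim logic
    //                                        -> FinalizeOnHeapCode(code)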
src/heap/factory.h

@@ -845,9 +845,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
     // Like Build, builds a new code object. May return an empty handle if the
     // allocation fails.
     V8_WARN_UNUSED_RESULT MaybeHandle<Code> TryBuild();
 
-    // Expects a baseline code object and finalizes all its fields.
-    V8_WARN_UNUSED_RESULT Handle<Code> FinishBaselineCode(Handle<Code> code,
-                                                          int buffer_size);
     // Sets the self-reference object in which a reference to the code object is
     // stored. This allows generated code to reference its own Code object by
@@ -930,8 +927,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
 
    private:
     MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
-    void SetCodeFields(Code raw_code, Handle<ByteArray> reloc_info,
-                       Handle<CodeDataContainer> data_container);
+    MaybeHandle<Code> AllocateCode(bool retry_allocation_or_fail);
+    void FinalizeOnHeapCode(Handle<Code> code);
 
     Isolate* const isolate_;
     const CodeDesc& code_desc_;