Remove support for builds without embedded builtins
This CL removes the V8_EMBEDDED_BUILTINS define, FLAG_embedded_builtins, and
all code for supporting non-embedded-builtin builds.

Bug: v8:6666,v8:8519
Change-Id: I2ad7bd6614c7cd404b83d3d2bf5ff91d7b55ff2a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1866569
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64461}
This commit is contained in:
parent bba5f1f43d
commit 9362df561d
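Every hunk below applies the same mechanical transformation: with embedded builtins now the only supported configuration, runtime checks of FLAG_embedded_builtins disappear, the formerly guarded path becomes unconditional, and the non-embedded fallback branch is deleted. A hedged sketch of the pattern, condensed from the TurboAssembler::Move hunks in this diff (illustrative only, not a standalone compilation unit):

// Before this CL: the isolate-independent load was gated on the flag,
// falling back to embedding a relocatable value.
void TurboAssembler::Move(Register dst, ExternalReference reference) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, reference);
      return;
    }
  }
  mov(dst, Operand(reference));
}

// After this CL: the outer flag check is gone; only the meaningful inner
// condition remains.
void TurboAssembler::Move(Register dst, ExternalReference reference) {
  if (root_array_available_ && options().isolate_independent_code) {
    IndirectLoadExternalReference(dst, reference);
    return;
  }
  mov(dst, Operand(reference));
}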
@@ -105,10 +105,8 @@ void Builtins::TearDown() { initialized_ = false; }
 const char* Builtins::Lookup(Address pc) {
   // Off-heap pc's can be looked up through binary search.
-  if (FLAG_embedded_builtins) {
-    Code maybe_builtin = InstructionStream::TryLookupCode(isolate_, pc);
-    if (!maybe_builtin.is_null()) return name(maybe_builtin.builtin_index());
-  }
+  Code maybe_builtin = InstructionStream::TryLookupCode(isolate_, pc);
+  if (!maybe_builtin.is_null()) return name(maybe_builtin.builtin_index());
 
   // May be called during initialization (disassembler).
   if (initialized_) {
@@ -250,13 +248,9 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
 
 // static
 bool Builtins::IsIsolateIndependentBuiltin(const Code code) {
-  if (FLAG_embedded_builtins) {
-    const int builtin_index = code.builtin_index();
-    return Builtins::IsBuiltinId(builtin_index) &&
-           Builtins::IsIsolateIndependent(builtin_index);
-  } else {
-    return false;
-  }
+  const int builtin_index = code.builtin_index();
+  return Builtins::IsBuiltinId(builtin_index) &&
+         Builtins::IsIsolateIndependent(builtin_index);
 }
 
 // static
@@ -276,7 +270,7 @@ bool Builtins::IsWasmRuntimeStub(int index) {
 }
 
 // static
-void Builtins::UpdateBuiltinEntryTable(Isolate* isolate) {
+void Builtins::InitializeBuiltinEntryTable(Isolate* isolate) {
   Heap* heap = isolate->heap();
   Address* builtin_entry_table = isolate->builtin_entry_table();
   for (int i = 0; i < builtin_count; i++) {
@@ -140,9 +140,9 @@ class Builtins {
   // area.
   static bool IsWasmRuntimeStub(int index);
 
-  // Updates the table of builtin entry points based on the current contents of
-  // the builtins table.
-  static void UpdateBuiltinEntryTable(Isolate* isolate);
+  // Initializes the table of builtin entry points based on the current contents
+  // of the builtins table.
+  static void InitializeBuiltinEntryTable(Isolate* isolate);
 
   // Emits a CodeCreateEvent for every builtin.
   static void EmitCodeCreateEvents(Isolate* isolate);
@@ -3251,7 +3251,7 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   __ mflr(r0);
   __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
 
-  if (ABI_USES_FUNCTION_DESCRIPTORS && FLAG_embedded_builtins) {
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
     // AIX/PPC64BE Linux use a function descriptor;
     __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(temp2, kPointerSize));
     __ LoadP(temp2, MemOperand(temp2, 0));  // Instruction address
@@ -326,7 +326,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
 
 void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
   DCHECK(Builtins::IsBuiltinId(builtin_index));
-  DCHECK(FLAG_embedded_builtins);
   RecordCommentForOffHeapTrampoline(builtin_index);
   EmbeddedData d = EmbeddedData::FromBlob();
   Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -447,21 +446,23 @@ void TurboAssembler::Push(Smi smi) {
 void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }
 
 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(dst, value);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(dst, value);
+    return;
+  }
   mov(dst, Operand(value));
 }
 
 void TurboAssembler::Move(Register dst, ExternalReference reference) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(dst, reference);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadExternalReference(dst, reference);
+    return;
+  }
   mov(dst, Operand(reference));
 }
@@ -285,21 +285,22 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
   Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
 
   if (operand.NeedsRelocation(this)) {
-    if (FLAG_embedded_builtins) {
-      if (root_array_available_ && options().isolate_independent_code) {
-        if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
-          Address addr = static_cast<Address>(operand.ImmediateValue());
-          ExternalReference reference = bit_cast<ExternalReference>(addr);
-          IndirectLoadExternalReference(rd, reference);
-          return;
-        } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
-          Handle<HeapObject> x(
-              reinterpret_cast<Address*>(operand.ImmediateValue()));
-          // TODO(v8:9706): Fix-it! This load will always uncompress the value
-          // even when we are loading a compressed embedded object.
-          IndirectLoadConstant(rd.X(), x);
-          return;
-        }
-      }
-    }
+    // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+    // non-isolate-independent code. In many cases it might be cheaper than
+    // embedding the relocatable value.
+    if (root_array_available_ && options().isolate_independent_code) {
+      if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
+        Address addr = static_cast<Address>(operand.ImmediateValue());
+        ExternalReference reference = bit_cast<ExternalReference>(addr);
+        IndirectLoadExternalReference(rd, reference);
+        return;
+      } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
+        Handle<HeapObject> x(
+            reinterpret_cast<Address*>(operand.ImmediateValue()));
+        // TODO(v8:9706): Fix-it! This load will always uncompress the value
+        // even when we are loading a compressed embedded object.
+        IndirectLoadConstant(rd.X(), x);
+        return;
+      }
+    }
     Ldr(dst, operand);
@@ -1950,7 +1951,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
 
 void TurboAssembler::CallBuiltin(int builtin_index) {
   DCHECK(Builtins::IsBuiltinId(builtin_index));
-  DCHECK(FLAG_embedded_builtins);
   RecordCommentForOffHeapTrampoline(builtin_index);
   CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
   UseScratchRegisterScope temps(this);
@@ -162,7 +162,7 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
   bool isolate_independent_code = false;
   // Enables the use of isolate-independent builtins through an off-heap
   // trampoline. (macro assembler feature).
-  bool inline_offheap_trampolines = FLAG_embedded_builtins;
+  bool inline_offheap_trampolines = true;
   // On some platforms, all code is within a given range in the process,
   // and the start of this range is configured here.
   Address code_range_start = 0;
@@ -159,7 +159,6 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
 // TurboAssembler.
 Operand TurboAssembler::ExternalReferenceAddressAsOperand(
     ExternalReference reference) {
-  DCHECK(FLAG_embedded_builtins);
   DCHECK(root_array_available());
   DCHECK(options().isolate_independent_code);
   return Operand(
@@ -170,7 +169,6 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
 // TODO(v8:6666): If possible, refactor into a platform-independent function in
 // TurboAssembler.
 Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
-  DCHECK(FLAG_embedded_builtins);
   DCHECK(root_array_available());
 
   int builtin_index;
@@ -1913,7 +1911,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
 
 void TurboAssembler::CallBuiltin(int builtin_index) {
   DCHECK(Builtins::IsBuiltinId(builtin_index));
-  DCHECK(FLAG_embedded_builtins);
   RecordCommentForOffHeapTrampoline(builtin_index);
   CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
   EmbeddedData d = EmbeddedData::FromBlob();
@@ -1346,21 +1346,23 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
 }
 
 void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(dst, value);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(dst, value);
+    return;
+  }
   li(dst, Operand(value), mode);
 }
 
 void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(dst, value);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadExternalReference(dst, value);
+    return;
+  }
   li(dst, Operand(value), mode);
 }
@@ -3814,41 +3816,41 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           BranchDelaySlot bd) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (FLAG_embedded_builtins) {
-    int builtin_index = Builtins::kNoBuiltinId;
-    bool target_is_isolate_independent_builtin =
-        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-        Builtins::IsIsolateIndependent(builtin_index);
-    if (target_is_isolate_independent_builtin &&
-        options().use_pc_relative_calls_and_jumps) {
-      int32_t code_target_index = AddCodeTarget(code);
-      Label skip;
-      BlockTrampolinePoolScope block_trampoline_pool(this);
-      if (cond != cc_always) {
-        // By using delay slot, we always execute first instruction of
-        // GenPcRelativeJump (which is or_(t8, ra, zero_reg)).
-        Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
-      }
-      GenPCRelativeJump(t8, t9, code_target_index,
-                        RelocInfo::RELATIVE_CODE_TARGET, bd);
-      bind(&skip);
-      return;
-    } else if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(t9, code);
-      Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
-      return;
-    } else if (target_is_isolate_independent_builtin &&
-               options().inline_offheap_trampolines) {
-      // Inline the trampoline.
-      RecordCommentForOffHeapTrampoline(builtin_index);
-      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-      EmbeddedData d = EmbeddedData::FromBlob();
-      Address entry = d.InstructionStartOfBuiltin(builtin_index);
-      li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-      Jump(t9, 0, cond, rs, rt, bd);
-      return;
-    }
-  }
+  int builtin_index = Builtins::kNoBuiltinId;
+  bool target_is_isolate_independent_builtin =
+      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+      Builtins::IsIsolateIndependent(builtin_index);
+  if (target_is_isolate_independent_builtin &&
+      options().use_pc_relative_calls_and_jumps) {
+    int32_t code_target_index = AddCodeTarget(code);
+    Label skip;
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    if (cond != cc_always) {
+      // By using delay slot, we always execute first instruction of
+      // GenPcRelativeJump (which is or_(t8, ra, zero_reg)).
+      Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+    }
+    GenPCRelativeJump(t8, t9, code_target_index,
+                      RelocInfo::RELATIVE_CODE_TARGET, bd);
+    bind(&skip);
+    return;
+  } else if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(t9, code);
+    Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
+    return;
+  } else if (target_is_isolate_independent_builtin &&
+             options().inline_offheap_trampolines) {
+    // Inline the trampoline.
+    RecordCommentForOffHeapTrampoline(builtin_index);
+    CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+    EmbeddedData d = EmbeddedData::FromBlob();
+    Address entry = d.InstructionStartOfBuiltin(builtin_index);
+    li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+    Jump(t9, 0, cond, rs, rt, bd);
+    return;
+  }
 
   Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
 }
@@ -3954,39 +3956,39 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt,
                           BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (FLAG_embedded_builtins) {
-    int builtin_index = Builtins::kNoBuiltinId;
-    bool target_is_isolate_independent_builtin =
-        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-        Builtins::IsIsolateIndependent(builtin_index);
-    if (target_is_isolate_independent_builtin &&
-        options().use_pc_relative_calls_and_jumps) {
-      int32_t code_target_index = AddCodeTarget(code);
-      Label skip;
-      BlockTrampolinePoolScope block_trampoline_pool(this);
-      if (cond != cc_always) {
-        Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
-      }
-      GenPCRelativeJumpAndLink(t8, code_target_index,
-                               RelocInfo::RELATIVE_CODE_TARGET, bd);
-      bind(&skip);
-      return;
-    } else if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(t9, code);
-      Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
-      return;
-    } else if (target_is_isolate_independent_builtin &&
-               options().inline_offheap_trampolines) {
-      // Inline the trampoline.
-      RecordCommentForOffHeapTrampoline(builtin_index);
-      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-      EmbeddedData d = EmbeddedData::FromBlob();
-      Address entry = d.InstructionStartOfBuiltin(builtin_index);
-      li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-      Call(t9, 0, cond, rs, rt, bd);
-      return;
-    }
-  }
+  int builtin_index = Builtins::kNoBuiltinId;
+  bool target_is_isolate_independent_builtin =
+      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+      Builtins::IsIsolateIndependent(builtin_index);
+  if (target_is_isolate_independent_builtin &&
+      options().use_pc_relative_calls_and_jumps) {
+    int32_t code_target_index = AddCodeTarget(code);
+    Label skip;
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    if (cond != cc_always) {
+      Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
+    }
+    GenPCRelativeJumpAndLink(t8, code_target_index,
+                             RelocInfo::RELATIVE_CODE_TARGET, bd);
+    bind(&skip);
+    return;
+  } else if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(t9, code);
+    Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
+    return;
+  } else if (target_is_isolate_independent_builtin &&
+             options().inline_offheap_trampolines) {
+    // Inline the trampoline.
+    RecordCommentForOffHeapTrampoline(builtin_index);
+    CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+    EmbeddedData d = EmbeddedData::FromBlob();
+    Address entry = d.InstructionStartOfBuiltin(builtin_index);
+    li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+    Call(t9, 0, cond, rs, rt, bd);
+    return;
+  }
 
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   Call(code.address(), rmode, cond, rs, rt, bd);
 }
@@ -1556,21 +1556,23 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
 }
 
 void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(dst, value);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(dst, value);
+    return;
+  }
   li(dst, Operand(value), mode);
 }
 
 void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(dst, value);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadExternalReference(dst, value);
+    return;
+  }
   li(dst, Operand(value), mode);
 }
@@ -4176,28 +4178,28 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt,
                           BranchDelaySlot bd) {
   DCHECK(RelocInfo::IsCodeTarget(rmode));
-  if (FLAG_embedded_builtins) {
-    BlockTrampolinePoolScope block_trampoline_pool(this);
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(t9, code);
-      Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Jump(t9, cond, rs, rt, bd);
-      return;
-    } else if (options().inline_offheap_trampolines) {
-      int builtin_index = Builtins::kNoBuiltinId;
-      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-          Builtins::IsIsolateIndependent(builtin_index)) {
-        // Inline the trampoline.
-        RecordCommentForOffHeapTrampoline(builtin_index);
-        CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-        EmbeddedData d = EmbeddedData::FromBlob();
-        Address entry = d.InstructionStartOfBuiltin(builtin_index);
-        li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-        Jump(t9, cond, rs, rt, bd);
-        return;
-      }
-    }
-  }
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(t9, code);
+    Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+    Jump(t9, cond, rs, rt, bd);
+    return;
+  } else if (options().inline_offheap_trampolines) {
+    int builtin_index = Builtins::kNoBuiltinId;
+    if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+        Builtins::IsIsolateIndependent(builtin_index)) {
+      // Inline the trampoline.
+      RecordCommentForOffHeapTrampoline(builtin_index);
+      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+      EmbeddedData d = EmbeddedData::FromBlob();
+      Address entry = d.InstructionStartOfBuiltin(builtin_index);
+      li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+      Jump(t9, cond, rs, rt, bd);
+      return;
+    }
+  }
 
   Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
 }
@@ -4255,27 +4257,27 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond, Register rs, const Operand& rt,
                           BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(t9, code);
-      Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
-      Call(t9, cond, rs, rt, bd);
-      return;
-    } else if (options().inline_offheap_trampolines) {
-      int builtin_index = Builtins::kNoBuiltinId;
-      if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-          Builtins::IsIsolateIndependent(builtin_index)) {
-        // Inline the trampoline.
-        RecordCommentForOffHeapTrampoline(builtin_index);
-        CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-        EmbeddedData d = EmbeddedData::FromBlob();
-        Address entry = d.InstructionStartOfBuiltin(builtin_index);
-        li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-        Call(t9, cond, rs, rt, bd);
-        return;
-      }
-    }
-  }
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(t9, code);
+    Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+    Call(t9, cond, rs, rt, bd);
+    return;
+  } else if (options().inline_offheap_trampolines) {
+    int builtin_index = Builtins::kNoBuiltinId;
+    if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+        Builtins::IsIsolateIndependent(builtin_index)) {
+      // Inline the trampoline.
+      RecordCommentForOffHeapTrampoline(builtin_index);
+      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+      EmbeddedData d = EmbeddedData::FromBlob();
+      Address entry = d.InstructionStartOfBuiltin(builtin_index);
+      li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+      Call(t9, cond, rs, rt, bd);
+      return;
+    }
+  }
 
   DCHECK(RelocInfo::IsCodeTarget(rmode));
   Call(code.address(), rmode, cond, rs, rt, bd);
 }
@@ -317,21 +317,23 @@ void TurboAssembler::Push(Smi smi) {
 }
 
 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(dst, value);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(dst, value);
+    return;
+  }
   mov(dst, Operand(value));
 }
 
 void TurboAssembler::Move(Register dst, ExternalReference reference) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(dst, reference);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadExternalReference(dst, reference);
+    return;
+  }
   mov(dst, Operand(reference));
 }
@@ -287,21 +287,23 @@ void TurboAssembler::Push(Smi smi) {
 }
 
 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(dst, value);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadConstant(dst, value);
+    return;
+  }
   mov(dst, Operand(value));
 }
 
 void TurboAssembler::Move(Register dst, ExternalReference reference) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(dst, reference);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadExternalReference(dst, reference);
+    return;
+  }
   mov(dst, Operand(reference));
 }
@@ -137,11 +137,12 @@ void TurboAssembler::LoadAddress(Register destination,
     }
   }
   // Safe code.
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(destination, source);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadExternalReference(destination, source);
+    return;
+  }
   Move(destination, source);
 }
@@ -1113,11 +1114,12 @@ void TurboAssembler::Move(Register dst, Smi source) {
 }
 
 void TurboAssembler::Move(Register dst, ExternalReference ext) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(dst, ext);
-      return;
-    }
-  }
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    IndirectLoadExternalReference(dst, ext);
+    return;
+  }
   movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
 }
@@ -1473,15 +1475,14 @@ void TurboAssembler::Push(Handle<HeapObject> source) {
 
 void TurboAssembler::Move(Register result, Handle<HeapObject> object,
                           RelocInfo::Mode rmode) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      // TODO(v8:9706): Fix-it! This load will always uncompress the value
-      // even when we are loading a compressed embedded object.
-      IndirectLoadConstant(result, object);
-      return;
-    }
-  }
-  if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+  // non-isolate-independent code. In many cases it might be cheaper than
+  // embedding the relocatable value.
+  if (root_array_available_ && options().isolate_independent_code) {
+    // TODO(v8:9706): Fix-it! This load will always uncompress the value
+    // even when we are loading a compressed embedded object.
+    IndirectLoadConstant(result, object);
+  } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
     EmbeddedObjectIndex index = AddEmbeddedObject(object);
     DCHECK(is_uint32(index));
     movl(result, Immediate(static_cast<int>(index), rmode));
@@ -1641,7 +1642,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
 
 void TurboAssembler::CallBuiltin(int builtin_index) {
   DCHECK(Builtins::IsBuiltinId(builtin_index));
-  DCHECK(FLAG_embedded_builtins);
   RecordCommentForOffHeapTrampoline(builtin_index);
   CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
   EmbeddedData d = EmbeddedData::FromBlob();
@@ -265,7 +265,6 @@ uint32_t Isolate::CurrentEmbeddedBlobSize() {
 
 size_t Isolate::HashIsolateForEmbeddedBlob() {
   DCHECK(builtins_.is_initialized());
-  DCHECK(FLAG_embedded_builtins);
   DCHECK(Builtins::AllBuiltinsAreIsolateIndependent());
 
   DisallowHeapAllocation no_gc;
@@ -3222,7 +3221,6 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
 
 #ifdef DEBUG
 bool IsolateIsCompatibleWithEmbeddedBlob(Isolate* isolate) {
-  if (!FLAG_embedded_builtins) return true;
   EmbeddedData d = EmbeddedData::FromBlob(isolate);
   return (d.IsolateHash() == isolate->HashIsolateForEmbeddedBlob());
 }
@@ -3433,12 +3431,12 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
 
   bootstrapper_->Initialize(create_heap_objects);
 
-  if (FLAG_embedded_builtins && create_heap_objects) {
-    builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
-  }
-  setup_delegate_->SetupBuiltins(this);
-#ifndef V8_TARGET_ARCH_ARM
   if (create_heap_objects) {
+    builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
+
+    setup_delegate_->SetupBuiltins(this);
+
+#ifndef V8_TARGET_ARCH_ARM
     // Store the interpreter entry trampoline on the root list. It is used as a
     // template for further copies that may later be created to help profile
     // interpreted code.
@@ -3449,14 +3447,15 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
     // See also: https://crbug.com/v8/8713.
     heap_.SetInterpreterEntryTrampolineForProfiling(
         heap_.builtin(Builtins::kInterpreterEntryTrampoline));
-  }
 #endif
-  if (FLAG_embedded_builtins && create_heap_objects) {
+
     builtins_constants_table_builder_->Finalize();
     delete builtins_constants_table_builder_;
     builtins_constants_table_builder_ = nullptr;
+
     CreateAndSetEmbeddedBlob();
+  } else {
+    setup_delegate_->SetupBuiltins(this);
   }
 
   // Initialize custom memcopy and memmove functions (must happen after
@@ -3503,7 +3502,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
   delete setup_delegate_;
   setup_delegate_ = nullptr;
 
-  Builtins::UpdateBuiltinEntryTable(this);
+  Builtins::InitializeBuiltinEntryTable(this);
   Builtins::EmitCodeCreateEvents(this);
 
 #ifdef DEBUG
@@ -3518,7 +3517,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
         "snapshots, embedders must ensure they pass the same flags as during "
         "the V8 build process (e.g.: --turbo-instruction-scheduling).");
   }
-  DCHECK_IMPLIES(FLAG_jitless, FLAG_embedded_builtins);
 #endif  // DEBUG
 
 #ifndef V8_TARGET_ARCH_ARM
@@ -1315,11 +1315,8 @@ class Isolate final : private HiddenFactory {
     return &partial_snapshot_cache_;
   }
 
-  // Off-heap builtins cannot embed constants within the code object itself,
-  // and thus need to load them from the root list.
   bool IsGeneratingEmbeddedBuiltins() const {
-    return FLAG_embedded_builtins &&
-           builtins_constants_table_builder() != nullptr;
+    return builtins_constants_table_builder() != nullptr;
   }
 
   BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
@@ -1247,13 +1247,6 @@ DEFINE_GENERIC_IMPLICATION(
         v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
 
 // snapshot-common.cc
-#ifdef V8_EMBEDDED_BUILTINS
-#define V8_EMBEDDED_BUILTINS_BOOL true
-#else
-#define V8_EMBEDDED_BUILTINS_BOOL false
-#endif
-DEFINE_BOOL_READONLY(embedded_builtins, V8_EMBEDDED_BUILTINS_BOOL,
-                     "Embed builtin code into the binary.")
 DEFINE_BOOL(profile_deserialization, false,
             "Print the time it takes to deserialize the snapshot.")
 DEFINE_BOOL(serialization_statistics, false,
@@ -597,9 +597,6 @@ void ComputeFlagListHash() {
 #ifdef DEBUG
   modified_args_as_string << "debug";
 #endif  // DEBUG
-  if (FLAG_embedded_builtins) {
-    modified_args_as_string << "embedded";
-  }
   for (size_t i = 0; i < num_flags; ++i) {
     Flag* current = &flags[i];
     if (current->type() == Flag::TYPE_BOOL &&
@@ -165,10 +165,9 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
     DCHECK(self_reference->IsOddball());
     DCHECK(Oddball::cast(*self_reference).kind() ==
            Oddball::kSelfReferenceMarker);
-    if (FLAG_embedded_builtins) {
-      auto builder = isolate_->builtins_constants_table_builder();
-      if (builder != nullptr)
-        builder->PatchSelfReference(self_reference, code);
+    if (isolate_->IsGeneratingEmbeddedBuiltins()) {
+      isolate_->builtins_constants_table_builder()->PatchSelfReference(
+          self_reference, code);
     }
     *(self_reference.location()) = code->ptr();
   }
@@ -4404,15 +4404,9 @@ void Heap::IterateBuiltins(RootVisitor* v) {
     v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
                         FullObjectSlot(builtin_address(i)));
   }
-#ifdef V8_EMBEDDED_BUILTINS
+
   // The entry table does not need to be updated if all builtins are embedded.
   STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
-#else
-  // If builtins are not embedded, they may move and thus the entry table must
-  // be updated.
-  // TODO(v8:6666): Remove once builtins are embedded unconditionally.
-  Builtins::UpdateBuiltinEntryTable(isolate());
-#endif  // V8_EMBEDDED_BUILTINS
 }
 
 namespace {
@@ -104,11 +104,9 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
 }
 
 void Interpreter::IterateDispatchTable(RootVisitor* v) {
-  if (FLAG_embedded_builtins && !isolate_->serializer_enabled() &&
-      isolate_->embedded_blob() != nullptr) {
-    // If builtins are embedded (and we're not generating a snapshot), then
-    // every bytecode handler will be off-heap, so there's no point iterating
-    // over them.
+  if (!isolate_->serializer_enabled() && isolate_->embedded_blob() != nullptr) {
+    // If we're not generating a snapshot, then every bytecode handler will be
+    // off-heap, so there's no point iterating over them.
 #ifdef DEBUG
     for (int i = 0; i < kDispatchTableSize; i++) {
       Address code_entry = dispatch_table_[i];
@@ -253,10 +253,7 @@ void Code::set_next_code_link(Object value) {
 }
 
 int Code::InstructionSize() const {
-  if (is_off_heap_trampoline()) {
-    DCHECK(FLAG_embedded_builtins);
-    return OffHeapInstructionSize();
-  }
+  if (is_off_heap_trampoline()) return OffHeapInstructionSize();
   return raw_instruction_size();
 }
@@ -265,10 +262,7 @@ Address Code::raw_instruction_start() const {
 }
 
 Address Code::InstructionStart() const {
-  if (is_off_heap_trampoline()) {
-    DCHECK(FLAG_embedded_builtins);
-    return OffHeapInstructionStart();
-  }
+  if (is_off_heap_trampoline()) return OffHeapInstructionStart();
   return raw_instruction_start();
 }
@@ -277,10 +271,7 @@ Address Code::raw_instruction_end() const {
 }
 
 Address Code::InstructionEnd() const {
-  if (is_off_heap_trampoline()) {
-    DCHECK(FLAG_embedded_builtins);
-    return OffHeapInstructionEnd();
-  }
+  if (is_off_heap_trampoline()) return OffHeapInstructionEnd();
   return raw_instruction_end();
 }
@@ -346,7 +337,6 @@ Address Code::entry() const { return raw_instruction_start(); }
 
 bool Code::contains(Address inner_pointer) {
   if (is_off_heap_trampoline()) {
-    DCHECK(FLAG_embedded_builtins);
     if (OffHeapInstructionStart() <= inner_pointer &&
         inner_pointer < OffHeapInstructionEnd()) {
       return true;
@@ -1093,17 +1093,10 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
       ExternalReference::re_check_stack_guard_state(isolate());
   __ mov(ip, Operand(stack_guard_check));
 
-  if (FLAG_embedded_builtins) {
-    EmbeddedData d = EmbeddedData::FromBlob();
-    CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
-    Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
-    __ mov(lr, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-  } else {
-    // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
-    Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
-    __ mov(lr, Operand(reinterpret_cast<intptr_t>(code.location()),
-                       RelocInfo::CODE_TARGET));
-  }
+  EmbeddedData d = EmbeddedData::FromBlob();
+  CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+  Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+  __ mov(lr, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
   __ Call(lr);
 
   // Drop the return address from the stack.
@@ -1412,7 +1412,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
       ExternalReference::re_check_stack_guard_state(isolate());
   __ Mov(scratch, check_stack_guard_state);
 
-  if (FLAG_embedded_builtins) {
+  {
     UseScratchRegisterScope temps(masm_);
     Register scratch = temps.AcquireX();
@@ -1422,10 +1422,6 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
 
     __ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
     __ Call(scratch);
-  } else {
-    // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
-    Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
-    __ Call(code, RelocInfo::CODE_TARGET);
   }
 
   // The input string may have been moved in memory, we need to reload it.
@@ -1128,21 +1128,11 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
       ExternalReference::re_check_stack_guard_state(masm_->isolate());
   __ li(t9, Operand(stack_guard_check));
 
-  if (FLAG_embedded_builtins) {
-    EmbeddedData d = EmbeddedData::FromBlob();
-    CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
-    Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
-    __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-    __ Call(kScratchReg);
-  } else {
-    // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
-    Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
-    __ li(kScratchReg,
-          Operand(reinterpret_cast<intptr_t>(code.location()),
-                  RelocInfo::CODE_TARGET),
-          CONSTANT_SIZE);
-    __ Call(kScratchReg);
-  }
+  EmbeddedData d = EmbeddedData::FromBlob();
+  CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+  Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+  __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+  __ Call(kScratchReg);
 
   // DirectCEntry allocated space for the C argument slots so we have to
   // drop them with the return address from the stack with loading saved sp.
@@ -1165,21 +1165,11 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
      ExternalReference::re_check_stack_guard_state(masm_->isolate());
   __ li(t9, Operand(stack_guard_check));
 
-  if (FLAG_embedded_builtins) {
-    EmbeddedData d = EmbeddedData::FromBlob();
-    CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
-    Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
-    __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-    __ Call(kScratchReg);
-  } else {
-    // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
-    Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
-    __ li(kScratchReg,
-          Operand(reinterpret_cast<intptr_t>(code.location()),
-                  RelocInfo::CODE_TARGET),
-          CONSTANT_SIZE);
-    __ Call(kScratchReg);
-  }
+  EmbeddedData d = EmbeddedData::FromBlob();
+  CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+  Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+  __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+  __ Call(kScratchReg);
 
   // DirectCEntry allocated space for the C argument slots so we have to
   // drop them with the return address from the stack with loading saved sp.
@@ -1137,17 +1137,10 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
       ExternalReference::re_check_stack_guard_state(isolate());
   __ mov(ip, Operand(stack_guard_check));
 
-  if (FLAG_embedded_builtins) {
-    EmbeddedData d = EmbeddedData::FromBlob();
-    CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
-    Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
-    __ mov(r0, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-  } else {
-    // TODO(v8:8519): Remove this once embedded builtins are on unconditionally.
-    Handle<Code> code = BUILTIN_CODE(isolate(), DirectCEntry);
-    __ mov(r0, Operand(reinterpret_cast<intptr_t>(code.location()),
-                       RelocInfo::CODE_TARGET));
-  }
+  EmbeddedData d = EmbeddedData::FromBlob();
+  CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+  Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+  __ mov(r0, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
   __ Call(r0);
 
   // Restore the stack pointer
@@ -844,10 +844,8 @@ RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) {
   // Track the embedded blob size as well.
   {
     int embedded_blob_size = 0;
-    if (FLAG_embedded_builtins) {
-      i::EmbeddedData d = i::EmbeddedData::FromBlob();
-      embedded_blob_size = static_cast<int>(d.size());
-    }
+    i::EmbeddedData d = i::EmbeddedData::FromBlob();
+    embedded_blob_size = static_cast<int>(d.size());
     PrintF("Embedded blob is %d bytes\n", embedded_blob_size);
   }
@@ -469,7 +469,6 @@ void Deserializer::VisitInternalReference(Code host, RelocInfo* rinfo) {
 }
 
 void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
-  DCHECK(FLAG_embedded_builtins);
   byte data = source_.Get();
   CHECK_EQ(data, kOffHeapTarget);
 
@@ -14,12 +14,8 @@ namespace internal {
 
 // static
 bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
-  if (FLAG_embedded_builtins) {
-    const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
-    return start <= pc && pc < start + isolate->embedded_blob_size();
-  } else {
-    return false;
-  }
+  const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
+  return start <= pc && pc < start + isolate->embedded_blob_size();
 }
 
 // static
@@ -254,29 +254,27 @@ int main(int argc, char** argv) {
 
     MaybeSetCounterFunction(isolate);
 
-    if (i::FLAG_embedded_builtins) {
-      // Set code range such that relative jumps for builtins to
-      // builtin calls in the snapshot are possible.
-      i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-      size_t code_range_size_mb =
-          i::kMaximalCodeRangeSize == 0
-              ? i::kMaxPCRelativeCodeRangeInMB
-              : std::min(i::kMaximalCodeRangeSize / i::MB,
-                         i::kMaxPCRelativeCodeRangeInMB);
-      v8::ResourceConstraints constraints;
-      constraints.set_code_range_size_in_bytes(code_range_size_mb * i::MB);
-      i_isolate->heap()->ConfigureHeap(constraints);
-      // The isolate contains data from builtin compilation that needs
-      // to be written out if builtins are embedded.
-      i_isolate->RegisterEmbeddedFileWriter(&embedded_writer);
-    }
+    // Set code range such that relative jumps for builtins to
+    // builtin calls in the snapshot are possible.
+    i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+    size_t code_range_size_mb =
+        i::kMaximalCodeRangeSize == 0
+            ? i::kMaxPCRelativeCodeRangeInMB
+            : std::min(i::kMaximalCodeRangeSize / i::MB,
+                       i::kMaxPCRelativeCodeRangeInMB);
+    v8::ResourceConstraints constraints;
+    constraints.set_code_range_size_in_bytes(code_range_size_mb * i::MB);
+    i_isolate->heap()->ConfigureHeap(constraints);
+    // The isolate contains data from builtin compilation that needs
+    // to be written out if builtins are embedded.
+    i_isolate->RegisterEmbeddedFileWriter(&embedded_writer);
 
     blob = CreateSnapshotDataBlob(isolate, embed_script.get());
-    if (i::FLAG_embedded_builtins) {
-      // At this point, the Isolate has been torn down but the embedded blob
-      // is still alive (we called DisableEmbeddedBlobRefcounting above).
-      // That's fine as far as the embedded file writer is concerned.
-      WriteEmbeddedFile(&embedded_writer);
-    }
+
+    // At this point, the Isolate has been torn down but the embedded blob
+    // is still alive (we called DisableEmbeddedBlobRefcounting above).
+    // That's fine as far as the embedded file writer is concerned.
+    WriteEmbeddedFile(&embedded_writer);
   }
 
   if (warmup_script) {
@@ -747,7 +747,6 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(Code host,
 
 void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
                                                       RelocInfo* rinfo) {
-  DCHECK(FLAG_embedded_builtins);
   STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);
 
   Address addr = rinfo->target_off_heap_target();
@@ -47,7 +47,6 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
 
   if (code.kind() == Code::REGEXP) return false;
   if (!code.is_builtin()) return true;
-  if (!FLAG_embedded_builtins) return false;
   if (code.is_off_heap_trampoline()) return false;
 
   // An on-heap builtin. We only expect this for the interpreter entry
@@ -1125,7 +1125,6 @@ int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
                           const WasmFeatures& enabled_features) {
-  // Disable asynchronous wrapper compilation when builtins are not embedded,
-  // otherwise the isolate might be used after tear down to access builtins.
-#ifdef V8_EMBEDDED_BUILTINS
   std::unordered_set<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>> keys;
   for (auto exp : native_module->module()->export_table) {
     if (exp.kind != kExternalFunction) continue;
@@ -1140,9 +1139,6 @@ int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
   }
 
   return static_cast<int>(keys.size());
-#else
-  return 0;
-#endif
 }
 
 // Returns the number of units added.
@@ -1375,14 +1371,9 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
   CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
   if (thrower->error()) return {};
 
-#ifdef V8_EMBEDDED_BUILTINS
   Impl(native_module->compilation_state())
       ->FinalizeJSToWasmWrappers(isolate, native_module->module(),
                                  export_wrappers_out);
-#else
-  CompileJsToWasmWrappers(isolate, native_module->module(),
-                          export_wrappers_out);
-#endif
 
   // Log the code within the generated module for profiling.
   native_module->LogWasmCodes(isolate);
@@ -1552,17 +1543,10 @@ void AsyncCompileJob::FinishCompile() {
   // TODO(bbudge) Allow deserialization without wrapper compilation, so we can
   // just compile wrappers here.
   if (!is_after_deserialization) {
-#ifdef V8_EMBEDDED_BUILTINS
     Handle<FixedArray> export_wrappers;
     compilation_state->FinalizeJSToWasmWrappers(
         isolate_, module_object_->module(), &export_wrappers);
     module_object_->set_export_wrappers(*export_wrappers);
-#else
-    Handle<FixedArray> export_wrappers;
-    CompileJsToWasmWrappers(isolate_, module_object_->module(),
-                            &export_wrappers);
-    module_object_->set_export_wrappers(*export_wrappers);
-#endif
   }
   // We can only update the feature counts once the entire compile is done.
   compilation_state->PublishDetectedFeatures(isolate_);
@@ -245,13 +245,8 @@ void WasmCode::Validate() const {
       Address target = it.rinfo()->wasm_stub_call_address();
       WasmCode* code = native_module_->Lookup(target);
       CHECK_NOT_NULL(code);
-#ifdef V8_EMBEDDED_BUILTINS
       CHECK_EQ(WasmCode::kJumpTable, code->kind());
       CHECK(code->contains(target));
-#else
-      CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
-      CHECK_EQ(target, code->instruction_start());
-#endif
       break;
     }
     case RelocInfo::INTERNAL_REFERENCE:
@@ -1214,17 +1209,6 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
 void NativeModule::AddCodeSpace(
     base::AddressRegion region,
     const WasmCodeAllocator::OptionalLock& allocator_lock) {
-#ifndef V8_EMBEDDED_BUILTINS
-  // The far jump table contains far jumps to the embedded builtins. This
-  // requires a build with embedded builtins enabled.
-  FATAL(
-      "WebAssembly is not supported in no-embed builds. no-embed builds are "
-      "deprecated. See\n"
-      " - https://groups.google.com/d/msg/v8-users/9F53xqBjpkI/9WmKSbcWBAAJ\n"
-      " - https://crbug.com/v8/8519\n"
-      " - https://crbug.com/v8/8531\n");
-#endif  // V8_EMBEDDED_BUILTINS
-
   // Each code space must be at least twice as large as the overhead per code
   // space. Otherwise, we are wasting too much memory.
   DCHECK_GE(region.size(),
@@ -26436,21 +26436,16 @@ TEST(TestGetUnwindState) {
   v8::MemoryRange builtins_range = unwind_state.embedded_code_range;
 
   // Check that each off-heap builtin is within the builtins code range.
-  if (i::FLAG_embedded_builtins) {
-    for (int id = 0; id < i::Builtins::builtin_count; id++) {
-      if (!i::Builtins::IsIsolateIndependent(id)) continue;
-      i::Code builtin = i_isolate->builtins()->builtin(id);
-      i::Address start = builtin.InstructionStart();
-      i::Address end = start + builtin.InstructionSize();
-
-      i::Address builtins_start =
-          reinterpret_cast<i::Address>(builtins_range.start);
-      CHECK(start >= builtins_start &&
-            end < builtins_start + builtins_range.length_in_bytes);
-    }
-  } else {
-    CHECK_EQ(nullptr, builtins_range.start);
-    CHECK_EQ(0, builtins_range.length_in_bytes);
+  for (int id = 0; id < i::Builtins::builtin_count; id++) {
+    if (!i::Builtins::IsIsolateIndependent(id)) continue;
+    i::Code builtin = i_isolate->builtins()->builtin(id);
+    i::Address start = builtin.InstructionStart();
+    i::Address end = start + builtin.InstructionSize();
+
+    i::Address builtins_start =
+        reinterpret_cast<i::Address>(builtins_range.start);
+    CHECK(start >= builtins_start &&
+          end < builtins_start + builtins_range.length_in_bytes);
   }
 
   v8::JSEntryStub js_entry_stub = unwind_state.js_entry_stub;
@@ -3669,7 +3669,6 @@ TEST(IsDoubleElementsKind) {
 }
 
 TEST(TestCallBuiltinInlineTrampoline) {
-  if (!i::FLAG_embedded_builtins) return;
   Isolate* isolate(CcTest::InitIsolateOnce());
   const int kNumParams = 1;
   CodeAssemblerTester asm_tester(isolate, kNumParams);
@@ -3694,7 +3693,6 @@ TEST(TestCallBuiltinInlineTrampoline) {
 }
 
 TEST(TestCallBuiltinIndirectLoad) {
-  if (!i::FLAG_embedded_builtins) return;
   Isolate* isolate(CcTest::InitIsolateOnce());
   const int kNumParams = 1;
   CodeAssemblerTester asm_tester(isolate, kNumParams);
@@ -27,8 +27,7 @@ static const char* kHeader =
     "# List of known V8 instance types.\n";
 
 // Debug builds emit debug code, affecting code object sizes.
-// Embedded builtins cause objects to be allocated in different locations.
-#if defined(V8_EMBEDDED_BUILTINS) && !defined(DEBUG)
+#ifndef DEBUG
 static const char* kBuild = "shipping";
 #else
 static const char* kBuild = "non-shipping";