Reland "[objects] Introduce {CodeDataContainer} object type."
This is a reland of eeaffa9f33
Original change's description:
> [objects] Introduce {CodeDataContainer} object type.
>
> This introduces the {CodeDataContainer} as a container for all mutable
> fields associated with a {Code} object. For now only the kind-specific
> flags are moved, but more fields can/will be moved gradually. The goal
> is to make all fields in the {Code} header be immutable eventually.
>
> R=jarin@chromium.org
> BUG=v8:6792
>
> Change-Id: I2eeba893afaba877fb6117e1f18371898c3a175e
> Reviewed-on: https://chromium-review.googlesource.com/732987
> Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
> Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#48902}
Bug: v8:6792
Change-Id: I31a127df4bb8ee5fedb4d73755df4deae6e1d352
Reviewed-on: https://chromium-review.googlesource.com/738109
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48928}
parent 06eec1396f
commit 3b67d7a0f4
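The change applies one uniform pattern across every file below: instead of reading or writing the kind-specific flags word directly in the {Code} header, each access first loads the {CodeDataContainer} pointer from the {Code} object and then touches the flags stored in the container. A minimal standalone sketch of that indirection (toy structs for illustration, not the real heap-object classes; bit 0 stands in for kMarkedForDeoptimizationBit):

```cpp
#include <cstdint>

// Toy model of the indirection this commit introduces. The real objects are
// garbage-collected and accessed through V8's accessor macros.
struct CodeDataContainer {
  uint32_t kind_specific_flags;  // mutable, lives on an ordinary writable page
};

struct Code {
  // ... immutable header fields ...
  CodeDataContainer* code_data_container;  // set once when the Code is created

  bool marked_for_deoptimization() const {
    // Before: the flags word sat directly in the Code header.
    // After: one extra dependent load through the container.
    return (code_data_container->kind_specific_flags & 1u) != 0;
  }

  void set_marked_for_deoptimization(bool value) {
    // Writes only ever touch the container, never the Code header, so the
    // header can eventually live on a write-protected page.
    uint32_t& flags = code_data_container->kind_specific_flags;
    flags = value ? (flags | 1u) : (flags & ~1u);
  }
};
```

The cost is one extra load on every flag access, which is why each architecture's MaybeTailCallOptimizedCodeSlot and BailoutIfDeoptimized below grows by a single load instruction.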
@@ -798,7 +798,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // runtime to clear it.
   Label found_deoptimized_code;
   __ ldr(scratch2, FieldMemOperand(optimized_code_entry,
-                                   Code::kKindSpecificFlags1Offset));
+                                   Code::kCodeDataContainerOffset));
+  __ ldr(
+      scratch2,
+      FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   __ b(ne, &found_deoptimized_code);
@@ -827,7 +827,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // runtime to clear it.
   Label found_deoptimized_code;
   __ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
-                                   Code::kKindSpecificFlags1Offset));
+                                   Code::kCodeDataContainerOffset));
+  __ Ldr(
+      scratch2,
+      FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
                            &found_deoptimized_code);
@@ -712,18 +712,20 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ mov(optimized_code_entry,
          FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
   __ JumpIfSmi(optimized_code_entry, &fallthrough);
+  __ push(eax);
+  __ push(edx);
 
   // Check if the optimized code is marked for deopt. If it is, bailout to a
   // given label.
   Label found_deoptimized_code;
-  __ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+  __ mov(eax,
+         FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+  __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
           Immediate(1 << Code::kMarkedForDeoptimizationBit));
   __ j(not_zero, &found_deoptimized_code);
 
   // Optimized code is good, get it into the closure and link the closure into
   // the optimized functions list, then tail call the optimized code.
-  __ push(eax);
-  __ push(edx);
   // The feedback vector is no longer used, so re-use it as a scratch
   // register.
   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
@@ -736,6 +738,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // Optimized code slot contains deoptimized code, evict it and re-enter the
   // closure's code.
   __ bind(&found_deoptimized_code);
+  __ pop(edx);
+  __ pop(eax);
   GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
 }
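On ia32 the old code could test the flags word directly in memory, but the two-step access needs a register to hold the container pointer, which is why the hunk above spills eax (and edx) before the check and the second hunk restores them on the bailout path. A plain C++ analogue of the new two-step check, reusing the toy structs from the sketch near the top:

```cpp
// The former single-instruction "test [code + flags_offset], imm" becomes a
// load of the container pointer followed by the test, so a temporary (eax in
// the hunk above) is required.
bool MarkedForDeoptTwoStep(const Code& code) {
  const CodeDataContainer* tmp = code.code_data_container;  // __ mov(eax, ...)
  return (tmp->kind_specific_flags & 1u) != 0;              // __ test(..., 1)
}
```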
@@ -775,7 +775,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // runtime to clear it.
   Label found_deoptimized_code;
   __ lw(scratch2, FieldMemOperand(optimized_code_entry,
-                                  Code::kKindSpecificFlags1Offset));
+                                  Code::kCodeDataContainerOffset));
+  __ lw(scratch2, FieldMemOperand(
+                      scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   __ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
@@ -774,8 +774,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
   Label found_deoptimized_code;
-  __ Lw(a5, FieldMemOperand(optimized_code_entry,
-                            Code::kKindSpecificFlags1Offset));
+  __ Ld(a5, FieldMemOperand(optimized_code_entry,
+                            Code::kCodeDataContainerOffset));
+  __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
   __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
@@ -796,9 +796,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
   Label found_deoptimized_code;
+  __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
+                                     Code::kCodeDataContainerOffset));
   __ LoadWordArith(
       scratch2,
-      FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
+      FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
   __ bne(&found_deoptimized_code, cr0);
@@ -799,8 +799,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
   Label found_deoptimized_code;
-  __ LoadW(scratch2, FieldMemOperand(optimized_code_entry,
-                                     Code::kKindSpecificFlags1Offset));
+  __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
+                                     Code::kCodeDataContainerOffset));
+  __ LoadW(
+      scratch2,
+      FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
   __ bne(&found_deoptimized_code);
@@ -794,8 +794,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
   Label found_deoptimized_code;
+  __ movp(scratch2,
+          FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
   __ testl(
-      FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+      FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset),
       Immediate(1 << Code::kMarkedForDeoptimizationBit));
   __ j(not_zero, &found_deoptimized_code);
@@ -683,15 +683,16 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
   int pc_offset = __ pc_offset();
   int offset =
-      Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset + 8);
+      Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset + 8);
   // We can use the register pc - 8 for the address of the current instruction.
   __ ldr(ip, MemOperand(pc, offset));
+  __ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
   __ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
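Every BailoutIfDeoptimized hunk that follows performs the same address arithmetic: the generated code materializes its own program counter, and since instruction_start() lies Code::kHeaderSize bytes past the start of the object, subtracting the header size and the pc offset (plus any architecture read-ahead bias, +8 on ARM) turns a field offset within the Code object into a pc-relative displacement. A sketch of the computation (the parameter names are illustrative):

```cpp
// Offset from the (possibly biased) pc back to a field of the enclosing Code
// object, mirroring the "field_offset - (header + pc [+ bias])" expressions
// in the hunks below.
int FieldOffsetFromPc(int field_offset,  // e.g. Code::kCodeDataContainerOffset
                      int header_size,   // Code::kHeaderSize
                      int pc_offset,     // bytes from instruction_start()
                      int pc_bias) {     // e.g. 8 on ARM, where reading pc
                                         // yields the instruction address + 8
  // instruction_start() == object_start + header_size, so the current
  // instruction sits at object_start + header_size + pc_offset (+ bias).
  return field_offset - (header_size + pc_offset + pc_bias);
}
```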
@@ -667,7 +667,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
@@ -676,8 +676,9 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ Adr(x2, &current);
   __ Bind(&current);
   int pc = __ pc_offset();
-  int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
   __ Ldr(x2, MemOperand(x2, offset));
+  __ Ldr(x2, FieldMemOperand(x2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ Tst(x2, Immediate(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
@@ -925,7 +925,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
@@ -937,8 +937,9 @@ void CodeGenerator::BailoutIfDeoptimized() {
   // to use a call and then use a pop, thus pushing the return address to
   // the stack and then popping it into the register.
   __ pop(ecx);
-  int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
-  __ test(Operand(ecx, offset),
+  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
+  __ mov(ecx, Operand(ecx, offset));
+  __ test(FieldOperand(ecx, CodeDataContainer::kKindSpecificFlagsOffset),
           Immediate(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
@@ -745,7 +745,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
@@ -759,9 +759,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ nop();
   int pc = __ pc_offset();
   __ bind(&current);
-  int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
+  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
   __ lw(a2, MemOperand(ra, offset));
   __ pop(ra);
+  __ lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
@@ -788,7 +788,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
@@ -802,9 +802,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ nop();
   int pc = __ pc_offset();
   __ bind(&current);
-  int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
-  __ Lw(a2, MemOperand(ra, offset));
+  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
+  __ Ld(a2, MemOperand(ra, offset));
   __ pop(ra);
+  __ Lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
@@ -966,7 +966,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
@@ -974,9 +974,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ mov_label_addr(r11, &current);
   int pc_offset = __ pc_offset();
   __ bind(&current);
-  int offset =
-      Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset);
-  __ LoadWordArith(r11, MemOperand(r11, offset));
+  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
+  __ LoadP(r11, MemOperand(r11, offset));
+  __ LoadWordArith(
+      r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(r11, Code::kMarkedForDeoptimizationBit);
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
@@ -1174,7 +1174,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
@@ -1182,9 +1182,10 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ larl(r1, &current);
   int pc_offset = __ pc_offset();
   __ bind(&current);
-  int offset =
-      Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset);
-  __ LoadW(ip, MemOperand(r1, offset));
+  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
+  __ LoadP(ip, MemOperand(r1, offset));
+  __ LoadW(ip,
+           FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(ip, Code::kMarkedForDeoptimizationBit);
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
@@ -305,7 +305,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
     case TUPLE3_TYPE:
     case CONTEXT_EXTENSION_TYPE:
     case ASYNC_GENERATOR_REQUEST_TYPE:
-    case UNUSED_AND_RESERVED_TYPE:
+    case CODE_DATA_CONTAINER_TYPE:
       UNREACHABLE();
   }
   UNREACHABLE();
@@ -827,7 +827,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // jumps to CompileLazyDeoptimizedCode builtin. In order to do this we need to:
 // 1. load the address of the current instruction;
 // 2. read from memory the word that contains that bit, which can be found in
-//    the first set of flags ({kKindSpecificFlags1Offset});
+//    the flags in the referenced {CodeDataContainer} object;
 // 3. test kMarkedForDeoptimizationBit in those flags; and
 // 4. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
@@ -837,8 +837,9 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ leaq(rcx, Operand(&current));
   __ bind(&current);
   int pc = __ pc_offset();
-  int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
-  __ testl(Operand(rcx, offset),
+  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
+  __ movp(rcx, Operand(rcx, offset));
+  __ testl(FieldOperand(rcx, CodeDataContainer::kKindSpecificFlagsOffset),
            Immediate(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
       Builtins::kCompileLazyDeoptimizedCode);
@@ -3908,7 +3908,6 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
     case JS_MAP_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
-    case UNUSED_AND_RESERVED_TYPE:
     case JS_PROMISE_TYPE:
     case JS_PROXY_TYPE:
     case MAP_TYPE:
@@ -3947,6 +3946,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
     case WEAK_CELL_TYPE:
     case SMALL_ORDERED_HASH_MAP_TYPE:
     case SMALL_ORDERED_HASH_SET_TYPE:
+    case CODE_DATA_CONTAINER_TYPE:
     case PROTOTYPE_INFO_TYPE:
     case TUPLE2_TYPE:
     case TUPLE3_TYPE:
@@ -1776,6 +1776,13 @@ Handle<JSObject> Factory::NewExternal(void* value) {
   return external;
 }
 
+Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
+  Handle<CodeDataContainer> data_container =
+      New<CodeDataContainer>(code_data_container_map(), OLD_SPACE);
+  data_container->set_kind_specific_flags(flags);
+  data_container->clear_padding();
+  return data_container;
+}
+
 Handle<Code> Factory::NewCodeRaw(int object_size, bool immovable) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -1789,6 +1796,7 @@ Handle<Code> Factory::NewCode(
     MaybeHandle<ByteArray> maybe_source_position_table,
     MaybeHandle<DeoptimizationData> maybe_deopt_data, bool immovable) {
   Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+  Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
 
   Handle<HandlerTable> handler_table =
       maybe_handler_table.is_null() ? HandlerTable::Empty(isolate())
@@ -1828,8 +1836,8 @@ Handle<Code> Factory::NewCode(
   code->set_relocation_info(*reloc_info);
   code->initialize_flags(kind);
   code->set_has_unwinding_info(has_unwinding_info);
-  code->set_raw_kind_specific_flags1(0);
   code->set_safepoint_table_offset(0);
+  code->set_code_data_container(*data_container);
   code->set_has_tagged_params(true);
   code->set_deoptimization_data(*deopt_data);
   code->set_stub_key(0);
@@ -1877,9 +1885,10 @@ Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
 }
 
 Handle<Code> Factory::CopyCode(Handle<Code> code) {
+  Handle<CodeDataContainer> data_container =
+      NewCodeDataContainer(code->code_data_container()->kind_specific_flags());
   CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->CopyCode(*code),
-                     Code);
+                     isolate()->heap()->CopyCode(*code, *data_container), Code);
 }
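Factory::CopyCode above makes the 1-to-1 ownership explicit: a byte-for-byte copy of the {Code} object would alias the original's container, so a fresh container is allocated with the same flags and handed to Heap::CopyCode, which re-points the copy at it. A plain C++ analogue (again using the toy structs from the first sketch, not the real allocation paths):

```cpp
// Why CopyCode takes its own container: the flags are mutable per code
// object, so the copy must get a clone of the flags, never a shared pointer.
Code* CopyCodeSketch(const Code& original) {
  auto* container = new CodeDataContainer{
      original.code_data_container->kind_specific_flags};  // clone the flags
  Code* copy = new Code(original);        // byte copy of the immutable part
  copy->code_data_container = container;  // re-point, do not share
  return copy;
}
```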
@@ -671,6 +671,9 @@ class V8_EXPORT_PRIVATE Factory final {
   // Create an External object for V8's external API.
   Handle<JSObject> NewExternal(void* value);
 
+  // Creates a new CodeDataContainer for a Code object.
+  Handle<CodeDataContainer> NewCodeDataContainer(int flags);
+
   // The reference to the Code object is stored in self_reference.
   // This allows generated code to reference its own Code object
   // by containing this handle.
@@ -3054,8 +3054,7 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
   return code;
 }
 
-AllocationResult Heap::CopyCode(Code* code) {
+AllocationResult Heap::CopyCode(Code* code, CodeDataContainer* data_container) {
   CodeSpaceMemoryModificationScope code_modification(this);
   AllocationResult allocation;
 
@@ -3071,17 +3070,19 @@ AllocationResult Heap::CopyCode(Code* code) {
   CopyBlock(new_addr, old_addr, obj_size);
   Code* new_code = Code::cast(result);
 
+  // Set the {CodeDataContainer}, it cannot be shared.
+  new_code->set_code_data_container(data_container);
+
+  // Clear the trap handler index since they can't be shared between code. We
+  // have to do this before calling Relocate because relocate would adjust the
+  // base pointer for the old code.
+  new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+
   // Relocate the copy.
   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
   DCHECK(!memory_allocator()->code_range()->valid() ||
          memory_allocator()->code_range()->contains(code->address()) ||
          obj_size <= code_space()->AreaSize());
 
-  // Clear the trap handler index since they can't be shared between code. We
-  // have to do this before calling Relocate becauase relocate would adjust the
-  // base pointer for the old code.
-  new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
-
   new_code->Relocate(new_addr - old_addr);
   // We have to iterate over the object and process its pointers when black
   // allocation is on.
@@ -37,6 +37,7 @@ class TestMemoryAllocatorScope;
 }  // namespace heap
 
 class BytecodeArray;
+class CodeDataContainer;
 class JSArrayBuffer;
 
 using v8::MemoryPressureLevel;
@@ -106,6 +107,7 @@ using v8::MemoryPressureLevel;
   V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
   V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
   V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
+  V(Map, code_data_container_map, CodeDataContainerMap) \
   V(Map, message_object_map, JSMessageObjectMap) \
   V(Map, external_map, ExternalMap) \
   V(Map, bytecode_array_map, BytecodeArrayMap) \
@@ -1998,7 +2000,8 @@ class Heap {
   AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
                         int parameter_count, FixedArray* constant_pool);
 
-  MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+  MUST_USE_RESULT AllocationResult CopyCode(Code* code,
+                                            CodeDataContainer* data_container);
 
   MUST_USE_RESULT AllocationResult
   CopyBytecodeArray(BytecodeArray* bytecode_array);
@@ -304,6 +304,9 @@ bool Heap::CreateInitialMaps() {
     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
                  shared_function_info)
 
+    ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
+                 code_data_container)
+
     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
     external_map()->set_is_extensible(false);
@@ -355,7 +355,8 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
   STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
                 kSourcePositionTableOffset);
   STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
-                kNextCodeLinkOffset);
+                kCodeDataContainerOffset);
+  STATIC_ASSERT(kCodeDataContainerOffset + kPointerSize == kNextCodeLinkOffset);
 
   static bool IsValidSlot(HeapObject* obj, int offset) {
     // Slots in code can't be invalid because we never trim code objects.
@@ -537,6 +538,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
                                  p3);
     case HEAP_NUMBER_TYPE:
     case MUTABLE_HEAP_NUMBER_TYPE:
+    case CODE_DATA_CONTAINER_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
     case FREE_SPACE_TYPE:
@@ -238,6 +238,9 @@ void HeapObject::HeapObjectVerify() {
     case SMALL_ORDERED_HASH_MAP_TYPE:
       SmallOrderedHashMap::cast(this)->SmallOrderedHashTableVerify();
      break;
+    case CODE_DATA_CONTAINER_TYPE:
+      CodeDataContainer::cast(this)->CodeDataContainerVerify();
+      break;
 
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
   case NAME##_TYPE:                        \
@@ -832,6 +835,9 @@ void WeakCell::WeakCellVerify() {
   VerifyObjectField(kValueOffset);
 }
 
+void CodeDataContainer::CodeDataContainerVerify() {
+  CHECK(IsCodeDataContainer());
+}
+
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
@@ -172,6 +172,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {  // NOLINT
     case CODE_TYPE:
       Code::cast(this)->CodePrint(os);
      break;
+    case CODE_DATA_CONTAINER_TYPE:
+      CodeDataContainer::cast(this)->CodeDataContainerPrint(os);
+      break;
     case JS_PROXY_TYPE:
       JSProxy::cast(this)->JSProxyPrint(os);
       break;
@@ -1277,6 +1280,11 @@ void Code::CodePrint(std::ostream& os) {  // NOLINT
 #endif
 }
 
+void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "CodeDataContainer");
+  os << "\n - kind_specific_flags: " << kind_specific_flags();
+  os << "\n";
+}
+
 void Foreign::ForeignPrint(std::ostream& os) {  // NOLINT
   os << "foreign address : " << reinterpret_cast<void*>(foreign_address());
@@ -3222,6 +3222,7 @@ VisitorId Map::GetVisitorId(Map* map) {
     case FOREIGN_TYPE:
     case HEAP_NUMBER_TYPE:
     case MUTABLE_HEAP_NUMBER_TYPE:
+    case CODE_DATA_CONTAINER_TYPE:
       return kVisitDataObject;
 
     case BIGINT_TYPE:
@@ -376,7 +376,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
   V(PROPERTY_CELL_TYPE) \
   V(SMALL_ORDERED_HASH_MAP_TYPE) \
   V(SMALL_ORDERED_HASH_SET_TYPE) \
-  V(UNUSED_AND_RESERVED_TYPE) \
+  V(CODE_DATA_CONTAINER_TYPE) \
  \
   V(JS_PROXY_TYPE) \
   V(JS_GLOBAL_OBJECT_TYPE) \
@@ -728,8 +728,7 @@ enum InstanceType : uint8_t {
   PROPERTY_CELL_TYPE,
   SMALL_ORDERED_HASH_MAP_TYPE,
   SMALL_ORDERED_HASH_SET_TYPE,
-  // TODO(mstarzinger,v8:6792): Will be used for code data container.
-  UNUSED_AND_RESERVED_TYPE,
+  CODE_DATA_CONTAINER_TYPE,
 
   // All the following types are subtypes of JSReceiver, which corresponds to
   // objects in the JS sense. The first and the last type in this range are
@@ -1005,6 +1004,7 @@ template <class C> inline bool Is(Object* obj);
   V(CallHandlerInfo) \
   V(Cell) \
   V(Code) \
+  V(CodeDataContainer) \
   V(CompilationCacheTable) \
   V(ConsString) \
   V(ConstantElementsPair) \
@@ -18,10 +18,12 @@ namespace internal {
 
 TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
 TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(CodeDataContainer, CODE_DATA_CONTAINER_TYPE)
 
 CAST_ACCESSOR(AbstractCode)
 CAST_ACCESSOR(BytecodeArray)
 CAST_ACCESSOR(Code)
+CAST_ACCESSOR(CodeDataContainer)
 CAST_ACCESSOR(DependentCode)
 CAST_ACCESSOR(DeoptimizationData)
 CAST_ACCESSOR(HandlerTable)
@@ -155,6 +157,7 @@ CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
 CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
 CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
 CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
+CODE_ACCESSORS(code_data_container, CodeDataContainer, kCodeDataContainerOffset)
 CODE_ACCESSORS(trap_handler_index, Smi, kTrapHandlerIndex)
 CODE_ACCESSORS(next_code_link, Object, kNextCodeLinkOffset)
 #undef CODE_ACCESSORS
@@ -164,6 +167,7 @@ void Code::WipeOutHeader() {
   WRITE_FIELD(this, kHandlerTableOffset, nullptr);
   WRITE_FIELD(this, kDeoptimizationDataOffset, nullptr);
   WRITE_FIELD(this, kSourcePositionTableOffset, nullptr);
+  WRITE_FIELD(this, kCodeDataContainerOffset, nullptr);
   WRITE_FIELD(this, kNextCodeLinkOffset, nullptr);
 }
@@ -286,11 +290,6 @@ void Code::set_kind(Kind kind) {
   WRITE_UINT32_FIELD(this, kFlagsOffset, updated_value);
 }
 
-// For initialization.
-void Code::set_raw_kind_specific_flags1(int value) {
-  WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
-}
-
 inline bool Code::is_interpreter_trampoline_builtin() const {
   Builtins* builtins = GetIsolate()->builtins();
   bool is_interpreter_trampoline =
@@ -345,54 +344,54 @@ inline void Code::set_is_turbofanned(bool value) {
 
 inline bool Code::can_have_weak_objects() const {
   DCHECK(kind() == OPTIMIZED_FUNCTION);
-  return CanHaveWeakObjectsField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+  int flags = code_data_container()->kind_specific_flags();
+  return CanHaveWeakObjectsField::decode(flags);
 }
 
 inline void Code::set_can_have_weak_objects(bool value) {
   DCHECK(kind() == OPTIMIZED_FUNCTION);
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int previous = code_data_container()->kind_specific_flags();
   int updated = CanHaveWeakObjectsField::update(previous, value);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+  code_data_container()->set_kind_specific_flags(updated);
 }
 
 inline bool Code::is_construct_stub() const {
   DCHECK(kind() == BUILTIN);
-  return IsConstructStubField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+  int flags = code_data_container()->kind_specific_flags();
+  return IsConstructStubField::decode(flags);
 }
 
 inline void Code::set_is_construct_stub(bool value) {
   DCHECK(kind() == BUILTIN);
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int previous = code_data_container()->kind_specific_flags();
   int updated = IsConstructStubField::update(previous, value);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+  code_data_container()->set_kind_specific_flags(updated);
 }
 
 inline bool Code::is_promise_rejection() const {
   DCHECK(kind() == BUILTIN);
-  return IsPromiseRejectionField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+  int flags = code_data_container()->kind_specific_flags();
+  return IsPromiseRejectionField::decode(flags);
 }
 
 inline void Code::set_is_promise_rejection(bool value) {
   DCHECK(kind() == BUILTIN);
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int previous = code_data_container()->kind_specific_flags();
   int updated = IsPromiseRejectionField::update(previous, value);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+  code_data_container()->set_kind_specific_flags(updated);
 }
 
 inline bool Code::is_exception_caught() const {
   DCHECK(kind() == BUILTIN);
-  return IsExceptionCaughtField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+  int flags = code_data_container()->kind_specific_flags();
+  return IsExceptionCaughtField::decode(flags);
 }
 
 inline void Code::set_is_exception_caught(bool value) {
   DCHECK(kind() == BUILTIN);
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int previous = code_data_container()->kind_specific_flags();
   int updated = IsExceptionCaughtField::update(previous, value);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+  code_data_container()->set_kind_specific_flags(updated);
 }
 
 inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
@@ -441,30 +440,30 @@ void Code::set_safepoint_table_offset(unsigned offset) {
 
 bool Code::marked_for_deoptimization() const {
   DCHECK(kind() == OPTIMIZED_FUNCTION);
-  return MarkedForDeoptimizationField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+  int flags = code_data_container()->kind_specific_flags();
+  return MarkedForDeoptimizationField::decode(flags);
 }
 
 void Code::set_marked_for_deoptimization(bool flag) {
   DCHECK(kind() == OPTIMIZED_FUNCTION);
   DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int previous = code_data_container()->kind_specific_flags();
   int updated = MarkedForDeoptimizationField::update(previous, flag);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+  code_data_container()->set_kind_specific_flags(updated);
 }
 
 bool Code::deopt_already_counted() const {
   DCHECK(kind() == OPTIMIZED_FUNCTION);
-  return DeoptAlreadyCountedField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+  int flags = code_data_container()->kind_specific_flags();
+  return DeoptAlreadyCountedField::decode(flags);
 }
 
 void Code::set_deopt_already_counted(bool flag) {
   DCHECK(kind() == OPTIMIZED_FUNCTION);
   DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int previous = code_data_container()->kind_specific_flags();
   int updated = DeoptAlreadyCountedField::update(previous, flag);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+  code_data_container()->set_kind_specific_flags(updated);
 }
 
 bool Code::is_stub() const { return kind() == STUB; }
@@ -523,6 +522,12 @@ bool Code::IsWeakObjectInOptimizedCode(Object* object) {
   return false;
 }
 
+INT_ACCESSORS(CodeDataContainer, kind_specific_flags, kKindSpecificFlagsOffset)
+
+void CodeDataContainer::clear_padding() {
+  memset(address() + kUnalignedSize, 0, kSize - kUnalignedSize);
+}
+
 byte BytecodeArray::get(int index) {
   DCHECK(index >= 0 && index < this->length());
   return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -15,6 +15,7 @@ namespace internal {
 
 class ByteArray;
 class BytecodeArray;
+class CodeDataContainer;
 
 // HandlerTable is a fixed array containing entries for exception handlers in
 // the code object it is associated with. The tables comes in two flavors:
@@ -157,9 +158,11 @@ class Code : public HeapObject {
   // [source_position_table]: ByteArray for the source positions table or
   // SourcePositionTableWithFrameCache.
   DECL_ACCESSORS(source_position_table, Object)
 
   inline ByteArray* SourcePositionTable() const;
 
+  // [code_data_container]: A container indirection for all mutable fields.
+  DECL_ACCESSORS(code_data_container, CodeDataContainer)
+
   // [trap_handler_index]: An index into the trap handler's master list of code
   // objects.
   DECL_ACCESSORS(trap_handler_index, Smi)
@@ -190,8 +193,6 @@ class Code : public HeapObject {
   inline bool is_optimized_code() const;
   inline bool is_wasm_code() const;
 
-  inline void set_raw_kind_specific_flags1(int value);
-
   // Testers for interpreter builtins.
   inline bool is_interpreter_trampoline_builtin() const;
@@ -430,13 +431,13 @@ class Code : public HeapObject {
                                         kHandlerTableOffset + kPointerSize;
   static const int kSourcePositionTableOffset =
       kDeoptimizationDataOffset + kPointerSize;
-  static const int kNextCodeLinkOffset =
+  static const int kCodeDataContainerOffset =
       kSourcePositionTableOffset + kPointerSize;
+  static const int kNextCodeLinkOffset =
+      kCodeDataContainerOffset + kPointerSize;
   static const int kInstructionSizeOffset = kNextCodeLinkOffset + kPointerSize;
   static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
-  static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
-  static const int kSafepointTableOffsetOffset =
-      kKindSpecificFlags1Offset + kIntSize;
+  static const int kSafepointTableOffsetOffset = kFlagsOffset + kIntSize;
   static const int kStubKeyOffset = kSafepointTableOffsetOffset + kIntSize;
   static const int kConstantPoolOffset = kStubKeyOffset + kIntSize;
   static const int kBuiltinIndexOffset =
@@ -459,38 +460,32 @@ class Code : public HeapObject {
   class BodyDescriptor;
 
   // Flags layout.  BitField<type, shift, size>.
-  class HasUnwindingInfoField : public BitField<bool, 0, 1> {};
-  class KindField : public BitField<Kind, HasUnwindingInfoField::kNext, 5> {};
-  class HasTaggedStackField : public BitField<bool, KindField::kNext, 1> {};
-  class IsTurbofannedField
-      : public BitField<bool, HasTaggedStackField::kNext, 1> {};
-  class StackSlotsField : public BitField<int, IsTurbofannedField::kNext, 24> {
-  };
+#define CODE_FLAGS_BIT_FIELDS(V, _)    \
+  V(HasUnwindingInfoField, bool, 1, _) \
+  V(KindField, Kind, 5, _)             \
+  V(HasTaggedStackField, bool, 1, _)   \
+  V(IsTurbofannedField, bool, 1, _)    \
+  V(StackSlotsField, int, 24, _)
+  DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
+#undef CODE_FLAGS_BIT_FIELDS
   static_assert(NUMBER_OF_KINDS <= KindField::kMax, "Code::KindField size");
   static_assert(StackSlotsField::kNext <= 32, "Code::flags field exhausted");
 
-  // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
-  static const int kMarkedForDeoptimizationBit = 0;
-  static const int kDeoptAlreadyCountedBit = kMarkedForDeoptimizationBit + 1;
-  static const int kCanHaveWeakObjects = kDeoptAlreadyCountedBit + 1;
-  // Could be moved to overlap previous bits when we need more space.
-  static const int kIsConstructStub = kCanHaveWeakObjects + 1;
-  static const int kIsPromiseRejection = kIsConstructStub + 1;
-  static const int kIsExceptionCaught = kIsPromiseRejection + 1;
-  STATIC_ASSERT(kIsExceptionCaught + 1 <= 32);
+  // KindSpecificFlags layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
+#define CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS(V, _) \
+  V(MarkedForDeoptimizationField, bool, 1, _)     \
+  V(DeoptAlreadyCountedField, bool, 1, _)         \
+  V(CanHaveWeakObjectsField, bool, 1, _)          \
+  V(IsConstructStubField, bool, 1, _)             \
+  V(IsPromiseRejectionField, bool, 1, _)          \
+  V(IsExceptionCaughtField, bool, 1, _)
+  DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
+#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
+  static_assert(IsExceptionCaughtField::kNext <= 32, "KindSpecificFlags full");
 
-  class MarkedForDeoptimizationField
-      : public BitField<bool, kMarkedForDeoptimizationBit, 1> {};  // NOLINT
-  class DeoptAlreadyCountedField
-      : public BitField<bool, kDeoptAlreadyCountedBit, 1> {};  // NOLINT
-  class CanHaveWeakObjectsField
-      : public BitField<bool, kCanHaveWeakObjects, 1> {};  // NOLINT
-  class IsConstructStubField : public BitField<bool, kIsConstructStub, 1> {
-  };  // NOLINT
-  class IsPromiseRejectionField
-      : public BitField<bool, kIsPromiseRejection, 1> {};  // NOLINT
-  class IsExceptionCaughtField : public BitField<bool, kIsExceptionCaught, 1> {
-  };  // NOLINT
+  // The {marked_for_deoptimization} field is accessed from generated code.
+  static const int kMarkedForDeoptimizationBit =
+      MarkedForDeoptimizationField::kShift;
 
   static const int kArgumentsBits = 16;
   static const int kMaxArguments = (1 << kArgumentsBits) - 1;
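DEFINE_BIT_FIELDS expands each V(...) entry into a BitField class whose shift begins at the previous field's kNext, which is what lets kMarkedForDeoptimizationBit be recovered as MarkedForDeoptimizationField::kShift above. Roughly what the expansion amounts to for the first two flags (a hand-written sketch, not the macro's literal output):

```cpp
#include <cstdint>

// Simplified stand-in for V8's BitField template: decode/update operate on a
// packed 32-bit flags word.
template <class T, int shift, int size>
struct BitField {
  static const int kShift = shift;
  static const int kNext = shift + size;  // where the next field starts
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | (static_cast<uint32_t>(value) << shift);
  }
};

// The chained shifts the macro list produces, written out by hand:
using MarkedForDeoptimizationField = BitField<bool, 0, 1>;
using DeoptAlreadyCountedField =
    BitField<bool, MarkedForDeoptimizationField::kNext, 1>;
```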
@@ -504,6 +499,33 @@ class Code : public HeapObject {
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
 };
 
+// CodeDataContainer is a container for all mutable fields associated with its
+// referencing {Code} object. Since {Code} objects reside on write-protected
+// pages within the heap, its header fields need to be immutable. There always
+// is a 1-to-1 relation between {Code} and {CodeDataContainer}, the referencing
+// field {Code::code_data_container} itself is immutable.
+class CodeDataContainer : public HeapObject {
+ public:
+  DECL_INT_ACCESSORS(kind_specific_flags)
+
+  // Clear uninitialized padding space. This ensures that the snapshot content
+  // is deterministic.
+  inline void clear_padding();
+
+  DECL_CAST(CodeDataContainer)
+
+  // Dispatched behavior.
+  DECL_PRINTER(CodeDataContainer)
+  DECL_VERIFIER(CodeDataContainer)
+
+  static const int kKindSpecificFlagsOffset = HeapObject::kHeaderSize;
+  static const int kUnalignedSize = kKindSpecificFlagsOffset + kIntSize;
+  static const int kSize = OBJECT_POINTER_ALIGN(kUnalignedSize);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CodeDataContainer);
+};
+
 class AbstractCode : public HeapObject {
  public:
   // All code kinds and INTERPRETED_FUNCTION.
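The three layout constants at the bottom of the class explain clear_padding(): the int-sized flags field leaves the object size unaligned on 64-bit targets, OBJECT_POINTER_ALIGN rounds it up, and the trailing bytes must be zeroed so snapshot contents stay deterministic. The arithmetic, with assumed 64-bit constants (the real values come from V8's globals, and kHeaderSize is taken to be a single map word):

```cpp
#include <cstring>

constexpr int kPointerSize = 8;  // assumption: 64-bit target
constexpr int kHeaderSize = 8;   // assumption: HeapObject header is one map word
constexpr int kIntSize = 4;

constexpr int AlignToPointer(int size) {
  return (size + kPointerSize - 1) & ~(kPointerSize - 1);
}

constexpr int kKindSpecificFlagsOffset = kHeaderSize;                // 8
constexpr int kUnalignedSize = kKindSpecificFlagsOffset + kIntSize;  // 12
constexpr int kSize = AlignToPointer(kUnalignedSize);                // 16

// clear_padding() zeroes bytes [kUnalignedSize, kSize), the 4 padding bytes
// that would otherwise carry uninitialized data into the heap snapshot.
void ClearPadding(unsigned char* object_address) {
  std::memset(object_address + kUnalignedSize, 0, kSize - kUnalignedSize);
}
```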
@@ -84,8 +84,8 @@ AllocationResult HeapTester::AllocateAfterFailures() {
   // Test that we can allocate in old pointer space and code space.
   heap::SimulateFullSpace(heap->code_space());
   heap->AllocateFixedArray(100, TENURED).ToObjectChecked();
-  heap->CopyCode(CcTest::i_isolate()->builtins()->builtin(
-      Builtins::kIllegal)).ToObjectChecked();
+  Code* illegal = CcTest::i_isolate()->builtins()->builtin(Builtins::kIllegal);
+  heap->CopyCode(illegal, illegal->code_data_container()).ToObjectChecked();
 
   // Return success.
   return heap->true_value();
@@ -200,9 +200,7 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
   Handle<Code> code =
       isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
 
-  Code* tmp = nullptr;
-  heap->CopyCode(*code).To(&tmp);
-  Handle<Code> copy(tmp);
+  Handle<Code> copy = factory->CopyCode(code);
 
   CheckEmbeddedObjectsAreEqual(code, copy);
   CcTest::CollectAllAvailableGarbage();
@@ -83,7 +83,7 @@ INSTANCE_TYPES = {
   179: "PROPERTY_CELL_TYPE",
   180: "SMALL_ORDERED_HASH_MAP_TYPE",
   181: "SMALL_ORDERED_HASH_SET_TYPE",
-  182: "UNUSED_AND_RESERVED_TYPE",
+  182: "CODE_DATA_CONTAINER_TYPE",
   183: "JS_PROXY_TYPE",
   184: "JS_GLOBAL_OBJECT_TYPE",
   185: "JS_GLOBAL_PROXY_TYPE",
@@ -243,32 +243,33 @@ KNOWN_MAPS = {
   0x03ab1: (146, "FixedFloat64ArrayMap"),
   0x03b01: (147, "FixedUint8ClampedArrayMap"),
   0x03b51: (158, "ScriptMap"),
-  0x03ba1: (173, "FeedbackVectorMap"),
-  0x03bf1: (171, "DebugEvaluateContextMap"),
-  0x03c41: (171, "ScriptContextTableMap"),
-  0x03c91: (172, "UnseededNumberDictionaryMap"),
-  0x03ce1: (192, "ExternalMap"),
-  0x03d31: (106, "NativeSourceStringMap"),
-  0x03d81: (165, "Tuple2Map"),
-  0x03dd1: (153, "InterceptorInfoMap"),
-  0x03e21: (150, "AccessorInfoMap"),
-  0x03e71: (151, "AccessorPairMap"),
-  0x03ec1: (152, "AccessCheckInfoMap"),
-  0x03f11: (154, "FunctionTemplateInfoMap"),
-  0x03f61: (155, "ObjectTemplateInfoMap"),
-  0x03fb1: (156, "AllocationSiteMap"),
-  0x04001: (157, "AllocationMementoMap"),
-  0x04051: (159, "AliasedArgumentsEntryMap"),
-  0x040a1: (160, "PromiseResolveThenableJobInfoMap"),
-  0x040f1: (161, "PromiseReactionJobInfoMap"),
-  0x04141: (162, "DebugInfoMap"),
-  0x04191: (163, "StackFrameInfoMap"),
-  0x041e1: (164, "PrototypeInfoMap"),
-  0x04231: (166, "Tuple3Map"),
-  0x04281: (167, "ContextExtensionMap"),
-  0x042d1: (168, "ModuleMap"),
-  0x04321: (169, "ModuleInfoEntryMap"),
-  0x04371: (170, "AsyncGeneratorRequestMap"),
+  0x03ba1: (182, "CodeDataContainerMap"),
+  0x03bf1: (173, "FeedbackVectorMap"),
+  0x03c41: (171, "DebugEvaluateContextMap"),
+  0x03c91: (171, "ScriptContextTableMap"),
+  0x03ce1: (172, "UnseededNumberDictionaryMap"),
+  0x03d31: (192, "ExternalMap"),
+  0x03d81: (106, "NativeSourceStringMap"),
+  0x03dd1: (165, "Tuple2Map"),
+  0x03e21: (153, "InterceptorInfoMap"),
+  0x03e71: (150, "AccessorInfoMap"),
+  0x03ec1: (151, "AccessorPairMap"),
+  0x03f11: (152, "AccessCheckInfoMap"),
+  0x03f61: (154, "FunctionTemplateInfoMap"),
+  0x03fb1: (155, "ObjectTemplateInfoMap"),
+  0x04001: (156, "AllocationSiteMap"),
+  0x04051: (157, "AllocationMementoMap"),
+  0x040a1: (159, "AliasedArgumentsEntryMap"),
+  0x040f1: (160, "PromiseResolveThenableJobInfoMap"),
+  0x04141: (161, "PromiseReactionJobInfoMap"),
+  0x04191: (162, "DebugInfoMap"),
+  0x041e1: (163, "StackFrameInfoMap"),
+  0x04231: (164, "PrototypeInfoMap"),
+  0x04281: (166, "Tuple3Map"),
+  0x042d1: (167, "ContextExtensionMap"),
+  0x04321: (168, "ModuleMap"),
+  0x04371: (169, "ModuleInfoEntryMap"),
+  0x043c1: (170, "AsyncGeneratorRequestMap"),
 }
 
 # List of known V8 objects.