[wasm-gc] Speculative inlining for call_ref (off by default)
This patch adds infrastructure for collecting feedback about call_ref call
targets in Liftoff code, and for using that feedback to turn such calls into
inlineable direct calls when building Turbofan graphs. The feature is
considered experimental quality and is hence off by default;
--wasm-speculative-inlining turns it on.

Bug: v8:7748
Change-Id: I0d0d776f8a71c3dd2c9124d3731f3cb06d4f5821
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3205902
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77287}

parent dee82c85a3
commit 5d75bd1fdb
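The feedback representation this patch introduces is easiest to read as a small
state machine: each call_ref site owns two slots that move from uninitialized
to monomorphic to polymorphic (at most four targets) to megamorphic, with a
call count kept per observed target. The following is a minimal, illustrative
C++ sketch of that state machine only, not V8 code; the names
(CallSiteFeedback, Observe, kMaxPolymorphism) are invented for clarity.

```cpp
// Illustrative sketch of the per-call_ref feedback transitions described in
// this patch (uninitialized -> monomorphic -> polymorphic -> megamorphic).
// Not V8 code; all names are invented for clarity.
#include <cstdint>
#include <vector>

struct CallSiteFeedback {
  enum class State { kUninitialized, kMonomorphic, kPolymorphic, kMegamorphic };
  struct Entry {
    uintptr_t target;  // Call target seen at this site.
    uint32_t count;    // How often it was called.
  };

  static constexpr size_t kMaxPolymorphism = 4;  // 2..4 pairs per polymorphic site.

  State state = State::kUninitialized;
  std::vector<Entry> entries;  // 1 entry if monomorphic, 2..4 if polymorphic.

  void Observe(uintptr_t target) {
    if (state == State::kMegamorphic) return;  // Nothing more to learn.
    for (Entry& e : entries) {
      if (e.target == target) {  // Known target: just bump its count.
        e.count++;
        return;
      }
    }
    if (entries.size() < kMaxPolymorphism) {
      entries.push_back({target, 1});
      state = entries.size() == 1 ? State::kMonomorphic : State::kPolymorphic;
    } else {
      entries.clear();  // Too many distinct targets: give up on this site.
      state = State::kMegamorphic;
    }
  }
};
```

At tier-up time the counts are only acted on when a single target dominates: in
this patch a polymorphic entry is chosen for speculative inlining only if it
accounts for at least 80% of the recorded calls, belongs to the same instance,
and is not an imported function.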
@@ -453,6 +453,111 @@ builtin WasmI64AtomicWait64(
  }
}

// Type feedback collection support for `call_ref`.

extern macro GetCodeEntry(Code): RawPtr;
extern macro GetCodeEntry(CodeDataContainer): RawPtr;

struct TargetAndInstance {
  target: RawPtr;
  instance: HeapObject;  // WasmInstanceObject or Tuple2
}

macro GetTargetAndInstance(funcref: JSFunction): TargetAndInstance {
  const sfi = funcref.shared_function_info;
  dcheck(Is<WasmFunctionData>(sfi.function_data));
  const funcData = UnsafeCast<WasmFunctionData>(sfi.function_data);
  const ref = funcData.ref;
  if (Is<Tuple2>(ref)) {
    const instance: WasmInstanceObject = LoadInstanceFromFrame();
    UnsafeCast<Tuple2>(ref).value1 = instance;
  }
  let target = funcData.foreign_address_ptr;
  if (Signed(target) == IntPtrConstant(0)) {
    const wrapper =
        UnsafeCast<WasmJSFunctionData>(funcData).wasm_to_js_wrapper_code;
    target = GetCodeEntry(wrapper);
  }
  return TargetAndInstance{target: target, instance: ref};
}

// Vector format:
// Two slots per call_ref instruction. These slots' values can be:
// - uninitialized: (undefined, <unused>). Note: we use {undefined} as the
//   sentinel as an optimization, as it's the default value for FixedArrays.
// - monomorphic: (funcref, call_ref_data)
// - polymorphic: (fixed_array, <unused>). In this case, the array
//   contains 2..4 pairs (funcref, call_ref_data) (like monomorphic data).
// - megamorphic: ("megamorphic" sentinel, <unused>)

builtin CallRefIC(
    vector: FixedArray, index: intptr, funcref: JSFunction): TargetAndInstance {
  const value = vector.objects[index];
  if (value == funcref) {
    // Monomorphic hit. Check for this case first to maximize its performance.
    const data = UnsafeCast<CallRefData>(vector.objects[index + 1]);
    data.count = data.count + 1;
    return TargetAndInstance{target: data.target, instance: data.instance};
  }
  // Check for polymorphic hit; its performance is second-most-important.
  if (Is<FixedArray>(value)) {
    const entries = UnsafeCast<FixedArray>(value);
    for (let i: intptr = 0; i < entries.length_intptr; i += 2) {
      if (entries.objects[i] == funcref) {
        // Polymorphic hit.
        const data = UnsafeCast<CallRefData>(entries.objects[i + 1]);
        data.count = data.count + 1;
        return TargetAndInstance{target: data.target, instance: data.instance};
      }
    }
  }
  // All other cases are some sort of miss and must compute the target/
  // instance. They all fall through to returning the computed data.
  const result = GetTargetAndInstance(funcref);
  if (TaggedEqual(value, Undefined)) {
    const data = new
        CallRefData{instance: result.instance, target: result.target, count: 1};
    vector.objects[index] = funcref;
    vector.objects[index + 1] = data;
  } else if (Is<FixedArray>(value)) {
    // Polymorphic miss.
    const entries = UnsafeCast<FixedArray>(value);
    if (entries.length == SmiConstant(8)) {  // 4 entries, 2 slots each.
      vector.objects[index] = ic::kMegamorphicSymbol;
      vector.objects[index + 1] = ic::kMegamorphicSymbol;
    } else {
      const data = new
          CallRefData{instance: result.instance, target: result.target, count: 1};
      const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray(
          ElementsKind::PACKED_ELEMENTS, entries.length_intptr + 2,
          AllocationFlag::kNone));
      for (let i: intptr = 0; i < entries.length_intptr; i++) {
        newEntries.objects[i] = entries.objects[i];
      }
      const newIndex = entries.length_intptr;
      newEntries.objects[newIndex] = funcref;
      newEntries.objects[newIndex + 1] = data;
      vector.objects[index] = newEntries;
    }
  } else if (Is<JSFunction>(value)) {
    // Monomorphic miss.
    const data = new
        CallRefData{instance: result.instance, target: result.target, count: 1};
    const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray(
        ElementsKind::PACKED_ELEMENTS, 4, AllocationFlag::kNone));
    newEntries.objects[0] = value;
    newEntries.objects[1] = vector.objects[index + 1];
    newEntries.objects[2] = funcref;
    newEntries.objects[3] = data;
    vector.objects[index] = newEntries;
    // Clear the old pointer to the first entry's data object; the specific
    // value we write doesn't matter.
    vector.objects[index + 1] = Undefined;
  }
  // The "ic::IsMegamorphic(value)" case doesn't need to do anything.
  return result;
}

extern macro TryHasOwnProperty(HeapObject, Map, InstanceType, Name): never
    labels Found, NotFound, Bailout;
type OnNonExistent constexpr 'OnNonExistent';
@@ -490,6 +490,15 @@ void CodeAssembler::Return(TNode<WordT> value1, TNode<WordT> value2) {
  return raw_assembler()->Return(value1, value2);
}

void CodeAssembler::Return(TNode<WordT> value1, TNode<Object> value2) {
  DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
  DCHECK_EQ(
      MachineType::PointerRepresentation(),
      raw_assembler()->call_descriptor()->GetReturnType(0).representation());
  DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
  return raw_assembler()->Return(value1, value2);
}

void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
  DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
  return raw_assembler()->PopAndReturn(pop, value);

@@ -627,6 +627,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
  void Return(TNode<Float32T> value);
  void Return(TNode<Float64T> value);
  void Return(TNode<WordT> value1, TNode<WordT> value2);
  void Return(TNode<WordT> value1, TNode<Object> value2);
  void PopAndReturn(Node* pop, Node* value);

  void ReturnIf(TNode<BoolT> condition, TNode<Object> value);
@@ -1696,8 +1696,8 @@ struct WasmInliningPhase {
        data->jsgraph()->Dead(), data->observe_node_manager());
    DeadCodeElimination dead(&graph_reducer, data->graph(),
                             data->mcgraph()->common(), temp_zone);
    // For now, hard-code inlining the function at index 0.
    InlineByIndex heuristics({0});
    // For now, inline the first few functions;
    InlineFirstFew heuristics(FLAG_wasm_inlining_budget);
    WasmInliner inliner(&graph_reducer, env, data->source_positions(),
                        data->node_origins(), data->mcgraph(), wire_bytes,
                        &heuristics);
@@ -27,6 +27,11 @@ Reduction WasmInliner::Reduce(Node* node) {
  }
}

#define TRACE(...) \
  if (FLAG_trace_wasm_speculative_inlining) { \
    PrintF(__VA_ARGS__); \
  }

// TODO(12166): Save inlined frames for trap/--trace-wasm purposes. Consider
// tail calls.
// TODO(12166): Inline indirect calls/call_ref.

@@ -40,10 +45,22 @@ Reduction WasmInliner::ReduceCall(Node* call) {
  if (callee->opcode() != reloc_opcode) return NoChange();
  auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
  uint32_t inlinee_index = static_cast<uint32_t>(info.value());
  if (!heuristics_->DoInline(source_positions_->GetSourcePosition(call),
                             inlinee_index)) {
  TRACE("[considering call to %d... ", inlinee_index)
  if (info.rmode() != RelocInfo::WASM_CALL) {
    TRACE("not a wasm call]\n")
    return NoChange();
  }
  if (inlinee_index < module()->num_imported_functions) {
    TRACE("imported function]\n")
    return NoChange();
  }

  if (!heuristics_->DoInline(source_positions_->GetSourcePosition(call),
                             inlinee_index)) {
    TRACE("heuristics say no]\n")
    return NoChange();
  }
  TRACE("inlining!]\n")

  CHECK_LT(inlinee_index, module()->functions.size());
  const wasm::WasmFunction* inlinee = &module()->functions[inlinee_index];
@@ -33,26 +33,18 @@ class SourcePositionTable;
// Parent class for classes that provide heuristics on how to inline in wasm.
class WasmInliningHeuristics {
 public:
  virtual bool DoInline(SourcePosition position,
                        uint32_t function_index) const = 0;
  virtual bool DoInline(SourcePosition position, uint32_t function_index) = 0;
};

// A simple inlining heuristic that inlines all function calls to a set of given
// function indices.
class InlineByIndex : public WasmInliningHeuristics {
class InlineFirstFew : public WasmInliningHeuristics {
 public:
  explicit InlineByIndex(uint32_t function_index)
      : WasmInliningHeuristics(), function_indices_(function_index) {}
  InlineByIndex(std::initializer_list<uint32_t> function_indices)
      : WasmInliningHeuristics(), function_indices_(function_indices) {}

  bool DoInline(SourcePosition position,
                uint32_t function_index) const override {
    return function_indices_.count(function_index) > 0;
  explicit InlineFirstFew(int count) : count_(count) {}
  bool DoInline(SourcePosition position, uint32_t function_index) override {
    return count_-- > 0;
  }

 private:
  std::unordered_set<uint32_t> function_indices_;
  int count_;
};

// The WasmInliner provides the core graph inlining machinery for Webassembly

@@ -65,7 +57,7 @@ class WasmInliner final : public AdvancedReducer {
              SourcePositionTable* source_positions,
              NodeOriginTable* node_origins, MachineGraph* mcgraph,
              const wasm::WireBytesStorage* wire_bytes,
              const WasmInliningHeuristics* heuristics)
              WasmInliningHeuristics* heuristics)
      : AdvancedReducer(editor),
        env_(env),
        source_positions_(source_positions),

@@ -98,7 +90,7 @@ class WasmInliner final : public AdvancedReducer {
  NodeOriginTable* const node_origins_;
  MachineGraph* const mcgraph_;
  const wasm::WireBytesStorage* const wire_bytes_;
  const WasmInliningHeuristics* const heuristics_;
  WasmInliningHeuristics* heuristics_;
};

}  // namespace compiler
@@ -1868,6 +1868,11 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
    os << "\n - managed_native_allocations: "
       << Brief(managed_native_allocations());
  }
  if (has_tags_table()) {
    os << "\n - tags table: " << Brief(tags_table());
  }
  os << "\n - managed object maps: " << Brief(managed_object_maps());
  os << "\n - feedback vectors: " << Brief(feedback_vectors());
  os << "\n - memory_start: " << static_cast<void*>(memory_start());
  os << "\n - memory_size: " << memory_size();
  os << "\n - imported_function_targets: "
@@ -1046,8 +1046,16 @@ DEFINE_BOOL(wasm_math_intrinsics, true,
DEFINE_BOOL(
    wasm_inlining, false,
    "enable inlining of wasm functions into wasm functions (experimental)")
DEFINE_INT(wasm_inlining_budget, 3,
           "maximum number of call targets to inline into a Wasm function")
DEFINE_BOOL(wasm_speculative_inlining, false,
            "enable speculative inlining of call_ref targets (experimental)")
DEFINE_BOOL(trace_wasm_speculative_inlining, false,
            "trace wasm speculative inlining")
DEFINE_IMPLICATION(wasm_speculative_inlining, experimental_wasm_typed_funcref)
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_inlining)
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_dynamic_tiering)
DEFINE_NEG_IMPLICATION(wasm_speculative_inlining, wasm_tier_up)
DEFINE_BOOL(wasm_loop_unrolling, true,
            "enable loop unrolling for wasm functions")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
@@ -208,15 +208,15 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {

  DCHECK(isolate->context().is_null());
  isolate->set_context(instance->native_context());
  Handle<WasmModuleObject> module_object{instance->module_object(), isolate};
  bool success = wasm::CompileLazy(isolate, module_object, func_index);
  bool success = wasm::CompileLazy(isolate, instance, func_index);
  if (!success) {
    DCHECK(isolate->has_pending_exception());
    return ReadOnlyRoots(isolate).exception();
  }

  Address entrypoint =
      module_object->native_module()->GetCallTargetForFunction(func_index);
      instance->module_object().native_module()->GetCallTargetForFunction(
          func_index);

  return Object(entrypoint);
}

@@ -295,7 +295,7 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
  int func_index = frame_finder.frame()->function_index();
  auto* native_module = instance->module_object().native_module();

  wasm::TriggerTierUp(isolate, native_module, func_index);
  wasm::TriggerTierUp(isolate, native_module, func_index, instance);

  return ReadOnlyRoots(isolate).undefined_value();
}
@@ -512,6 +512,12 @@ class LiftoffCompiler {
    return __ GetTotalFrameSlotCountForGC();
  }

  int GetFeedbackVectorSlots() const {
    // The number of instructions is capped by max function size.
    STATIC_ASSERT(kV8MaxWasmFunctionSize < std::numeric_limits<int>::max());
    return static_cast<int>(num_call_ref_instructions_) * 2;
  }

  void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
                   const char* detail) {
    DCHECK_NE(kSuccess, reason);

@@ -754,7 +760,20 @@ class LiftoffCompiler {
    __ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
    // Load the feedback vector and cache it in a stack slot.
    if (FLAG_wasm_speculative_inlining) {
      UNIMPLEMENTED();
      int declared_func_index =
          func_index_ - env_->module->num_imported_functions;
      DCHECK_GE(declared_func_index, 0);
      LiftoffRegList pinned;
      for (auto reg : kGpParamRegisters) pinned.set(reg);
      LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      __ LoadTaggedPointerFromInstance(
          tmp.gp(), kWasmInstanceRegister,
          WASM_INSTANCE_OBJECT_FIELD_OFFSET(FeedbackVectors));
      __ LoadTaggedPointer(tmp.gp(), tmp.gp(), no_reg,
                           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
                               declared_func_index),
                           pinned);
      __ Spill(liftoff::kFeedbackVectorOffset, tmp, kPointerKind);
    } else {
      __ Spill(liftoff::kFeedbackVectorOffset, WasmValue::ForUintPtr(0));
    }

@@ -5910,101 +5929,135 @@ class LiftoffCompiler {
    call_descriptor =
        GetLoweredCallDescriptor(compilation_zone_, call_descriptor);

    // Executing a write barrier needs temp registers; doing this on a
    // conditional branch confuses the LiftoffAssembler's register management.
    // Spill everything up front to work around that.
    __ SpillAllRegisters();
    Register target_reg = no_reg, instance_reg = no_reg;

    // We limit ourselves to four registers:
    // (1) func_data, initially reused for func_ref.
    // (2) instance, initially used as temp.
    // (3) target, initially used as temp.
    // (4) temp.
    LiftoffRegList pinned;
    LiftoffRegister func_ref = pinned.set(__ PopToModifiableRegister(pinned));
    MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type);
    LiftoffRegister instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    if (FLAG_wasm_speculative_inlining) {
      ValueKind kIntPtrKind = kPointerKind;

      // Load the WasmFunctionData.
      LiftoffRegister func_data = func_ref;
      __ LoadTaggedPointer(
          func_data.gp(), func_ref.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset),
          pinned);
      __ LoadTaggedPointer(
          func_data.gp(), func_data.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
          pinned);
      LiftoffRegList pinned;
      LiftoffAssembler::VarState funcref =
          __ cache_state()->stack_state.end()[-1];
      if (funcref.is_reg()) pinned.set(funcref.reg());
      LiftoffRegister vector = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      __ Fill(vector, liftoff::kFeedbackVectorOffset, kPointerKind);
      LiftoffAssembler::VarState vector_var(kPointerKind, vector, 0);
      LiftoffRegister index = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      uintptr_t vector_slot = num_call_ref_instructions_ * 2;
      num_call_ref_instructions_++;
      __ LoadConstant(index, WasmValue::ForUintPtr(vector_slot));
      LiftoffAssembler::VarState index_var(kIntPtrKind, index, 0);

      // Load "ref" (instance or <instance, callable> pair) and target.
      __ LoadTaggedPointer(
          instance.gp(), func_data.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset), pinned);
      // CallRefIC(vector: FixedArray, index: intptr, funcref: JSFunction)
      CallRuntimeStub(WasmCode::kCallRefIC,
                      MakeSig::Returns(kPointerKind, kPointerKind)
                          .Params(kPointerKind, kIntPtrKind, kPointerKind),
                      {vector_var, index_var, funcref}, decoder->position());

      Label load_target, perform_call;
      __ cache_state()->stack_state.pop_back(1);  // Drop funcref.
      target_reg = LiftoffRegister(kReturnRegister0).gp();
      instance_reg = LiftoffRegister(kReturnRegister1).gp();

      // Check if "ref" is a Tuple2.
      {
        LiftoffRegister pair_map = temp;
        LiftoffRegister ref_map = target;
        __ LoadMap(ref_map.gp(), instance.gp());
        LOAD_INSTANCE_FIELD(pair_map.gp(), IsolateRoot, kSystemPointerSize,
                            pinned);
        __ LoadTaggedPointer(pair_map.gp(), pair_map.gp(), no_reg,
                             IsolateData::root_slot_offset(RootIndex::kTuple2Map),
                             pinned);
        __ emit_cond_jump(kUnequal, &load_target, kRef, ref_map.gp(),
                          pair_map.gp());
    } else {  // FLAG_wasm_speculative_inlining
      // Non-feedback-collecting version.
      // Executing a write barrier needs temp registers; doing this on a
      // conditional branch confuses the LiftoffAssembler's register management.
      // Spill everything up front to work around that.
      __ SpillAllRegisters();

        // Overwrite the tuple's "instance" entry with the current instance.
        // TODO(jkummerow): Can we figure out a way to guarantee that the
        // instance field is always precomputed?
        LiftoffRegister current_instance = temp;
        __ FillInstanceInto(current_instance.gp());
        __ StoreTaggedPointer(instance.gp(), no_reg,
                              wasm::ObjectAccess::ToTagged(Tuple2::kValue1Offset),
                              current_instance, pinned);
        // Fall through to {load_target}.
      }
      // Load the call target.
      __ bind(&load_target);
      // We limit ourselves to four registers:
      // (1) func_data, initially reused for func_ref.
      // (2) instance, initially used as temp.
      // (3) target, initially used as temp.
      // (4) temp.
      LiftoffRegList pinned;
      LiftoffRegister func_ref = pinned.set(__ PopToModifiableRegister(pinned));
      MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type);
      LiftoffRegister instance =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));

      // Load the WasmFunctionData.
      LiftoffRegister func_data = func_ref;
      __ LoadTaggedPointer(
          func_data.gp(), func_ref.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset),
          pinned);
      __ LoadTaggedPointer(
          func_data.gp(), func_data.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
          pinned);

      // Load "ref" (instance or <instance, callable> pair) and target.
      __ LoadTaggedPointer(
          instance.gp(), func_data.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset), pinned);

      Label load_target, perform_call;

      // Check if "ref" is a Tuple2.
      {
        LiftoffRegister pair_map = temp;
        LiftoffRegister ref_map = target;
        __ LoadMap(ref_map.gp(), instance.gp());
        LOAD_INSTANCE_FIELD(pair_map.gp(), IsolateRoot, kSystemPointerSize,
                            pinned);
        __ LoadTaggedPointer(
            pair_map.gp(), pair_map.gp(), no_reg,
            IsolateData::root_slot_offset(RootIndex::kTuple2Map), pinned);
        __ emit_cond_jump(kUnequal, &load_target, kRef, ref_map.gp(),
                          pair_map.gp());

        // Overwrite the tuple's "instance" entry with the current instance.
        // TODO(jkummerow): Can we figure out a way to guarantee that the
        // instance field is always precomputed?
        LiftoffRegister current_instance = temp;
        __ FillInstanceInto(current_instance.gp());
        __ StoreTaggedPointer(
            instance.gp(), no_reg,
            wasm::ObjectAccess::ToTagged(Tuple2::kValue1Offset),
            current_instance, pinned);
        // Fall through to {load_target}.
      }
      // Load the call target.
      __ bind(&load_target);

#ifdef V8_HEAP_SANDBOX
      LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
      __ LoadExternalPointer(target.gp(), func_data.gp(),
                             WasmFunctionData::kForeignAddressOffset,
                             kForeignForeignAddressTag, temp.gp());
      LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
      __ LoadExternalPointer(target.gp(), func_data.gp(),
                             WasmFunctionData::kForeignAddressOffset,
                             kForeignForeignAddressTag, temp.gp());
#else
      __ Load(
          target, func_data.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset),
          kPointerLoadType, pinned);
      __ Load(
          target, func_data.gp(), no_reg,
          wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset),
          kPointerLoadType, pinned);
#endif

      LiftoffRegister null_address = temp;
      __ LoadConstant(null_address, WasmValue::ForUintPtr(0));
      __ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(),
                        null_address.gp());
      // The cached target can only be null for WasmJSFunctions.
      __ LoadTaggedPointer(target.gp(), func_data.gp(), no_reg,
                           wasm::ObjectAccess::ToTagged(
                               WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
                           pinned);
      LiftoffRegister null_address = temp;
      __ LoadConstant(null_address, WasmValue::ForUintPtr(0));
      __ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(),
                        null_address.gp());
      // The cached target can only be null for WasmJSFunctions.
      __ LoadTaggedPointer(target.gp(), func_data.gp(), no_reg,
                           wasm::ObjectAccess::ToTagged(
                               WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
                           pinned);
#ifdef V8_EXTERNAL_CODE_SPACE
      __ LoadCodeDataContainerEntry(target.gp(), target.gp());
      __ LoadCodeDataContainerEntry(target.gp(), target.gp());
#else
      __ emit_ptrsize_addi(target.gp(), target.gp(),
                           wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
      __ emit_ptrsize_addi(target.gp(), target.gp(),
                           wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
#endif
      // Fall through to {perform_call}.
      // Fall through to {perform_call}.

      __ bind(&perform_call);
      // Now the call target is in {target}, and the right instance object
      // is in {instance}.
      target_reg = target.gp();
      instance_reg = instance.gp();
    }  // FLAG_wasm_speculative_inlining

    __ bind(&perform_call);
    // Now the call target is in {target}, and the right instance object
    // is in {instance}.
    Register target_reg = target.gp();
    Register instance_reg = instance.gp();
    __ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg);
    if (tail_call) {
      __ PrepareTailCall(

@@ -6273,6 +6326,10 @@ class LiftoffCompiler {
  // Current number of exception refs on the stack.
  int num_exceptions_ = 0;

  // Number of {call_ref} instructions encountered. While compiling, also
  // index of the next {call_ref}. Used for indexing type feedback.
  uintptr_t num_call_ref_instructions_ = 0;

  int32_t* max_steps_;
  int32_t* nondeterminism_;

@@ -6349,6 +6406,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
  if (auto* debug_sidetable = compiler_options.debug_sidetable) {
    *debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable();
  }
  result.feedback_vector_slots = compiler->GetFeedbackVectorSlots();

  DCHECK(result.succeeded());
  return result;
@@ -57,6 +57,7 @@ struct WasmCompilationResult {
  ExecutionTier result_tier;
  Kind kind = kFunction;
  ForDebugging for_debugging = kNoDebugging;
  int feedback_vector_slots = 0;
};

class V8_EXPORT_PRIVATE WasmCompilationUnit final {
@@ -116,12 +116,25 @@ class WasmGraphBuildingInterface {
        inlined_status_(inlined_status) {}

  void StartFunction(FullDecoder* decoder) {
    // Get the branch hints map for this function (if available)
    // Get the branch hints map and type feedback for this function (if
    // available).
    if (decoder->module_) {
      auto branch_hints_it = decoder->module_->branch_hints.find(func_index_);
      if (branch_hints_it != decoder->module_->branch_hints.end()) {
        branch_hints_ = &branch_hints_it->second;
      }
      TypeFeedbackStorage& feedbacks = decoder->module_->type_feedback;
      base::MutexGuard mutex_guard(&feedbacks.mutex);
      auto feedback = feedbacks.feedback_for_function.find(func_index_);
      if (feedback != feedbacks.feedback_for_function.end()) {
        type_feedback_ = std::move(feedback->second);
        // Erasing the map entry means that if the same function later gets
        // inlined, its inlined copy won't have any type feedback available.
        // However, if we don't erase the entry now, we'll be stuck with it
        // forever.
        // TODO(jkummerow): Reconsider our options here.
        feedbacks.feedback_for_function.erase(func_index_);
      }
    }
    // The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
    // instance parameter.

@@ -657,7 +670,15 @@ class WasmGraphBuildingInterface {
  void CallRef(FullDecoder* decoder, const Value& func_ref,
               const FunctionSig* sig, uint32_t sig_index, const Value args[],
               Value returns[]) {
    if (!FLAG_wasm_inlining) {
    int maybe_feedback = -1;
    // TODO(jkummerow): The way we currently prepare type feedback means that
    // we won't have any for inlined functions. Figure out how to change that.
    if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
      DCHECK_LT(feedback_instruction_index_, type_feedback_.size());
      maybe_feedback = type_feedback_[feedback_instruction_index_];
      feedback_instruction_index_++;
    }
    if (maybe_feedback == -1) {
      DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
             sig, args, returns);
      return;

@@ -665,9 +686,14 @@ class WasmGraphBuildingInterface {

    // Check for equality against a function at a specific index, and if
    // successful, just emit a direct call.
    // TODO(12166): For now, we check against function 0. Decide the index based
    // on liftoff feedback.
    const uint32_t expected_function_index = 0;
    DCHECK_GE(maybe_feedback, 0);
    const uint32_t expected_function_index = maybe_feedback;

    if (FLAG_trace_wasm_speculative_inlining) {
      PrintF("[Function #%d call #%d: graph support for inlining target #%d]\n",
             func_index_, feedback_instruction_index_ - 1,
             expected_function_index);
    }

    TFNode* success_control;
    TFNode* failure_control;

@@ -715,7 +741,13 @@ class WasmGraphBuildingInterface {
  void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
                     const FunctionSig* sig, uint32_t sig_index,
                     const Value args[]) {
    if (!FLAG_wasm_inlining) {
    int maybe_feedback = -1;
    if (FLAG_wasm_speculative_inlining) {
      DCHECK_LE(feedback_instruction_index_, type_feedback_.size());
      maybe_feedback = type_feedback_[feedback_instruction_index_];
      feedback_instruction_index_++;
    }
    if (maybe_feedback == -1) {
      DoReturnCall(decoder,
                   CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
                   sig, args);

@@ -724,9 +756,14 @@ class WasmGraphBuildingInterface {

    // Check for equality against a function at a specific index, and if
    // successful, just emit a direct call.
    // TODO(12166): For now, we check against function 0. Decide the index based
    // on liftoff feedback.
    const uint32_t expected_function_index = 0;
    DCHECK_GE(maybe_feedback, 0);
    const uint32_t expected_function_index = maybe_feedback;

    if (FLAG_trace_wasm_speculative_inlining) {
      PrintF("[Function #%d call #%d: graph support for inlining target #%d]\n",
             func_index_, feedback_instruction_index_ - 1,
             expected_function_index);
    }

    TFNode* success_control;
    TFNode* failure_control;

@@ -1251,6 +1288,10 @@ class WasmGraphBuildingInterface {
  // Tracks loop data for loop unrolling.
  std::vector<compiler::WasmLoopInfo> loop_infos_;
  InlinedStatus inlined_status_;
  // The entries in {type_feedback_} are indexed by the position of feedback-
  // consuming instructions (currently only call_ref).
  int feedback_instruction_index_ = 0;
  std::vector<int> type_feedback_;

  TFNode* effect() { return builder_->effect(); }
@@ -1134,8 +1134,9 @@ bool IsLazyModule(const WasmModule* module) {

}  // namespace

bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
                 int func_index) {
  Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
  NativeModule* native_module = module_object->native_module();
  const WasmModule* module = native_module->module();
  auto enabled_features = native_module->enabled_features();

@@ -1197,6 +1198,15 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
    return false;
  }

  // Allocate feedback vector if needed.
  if (result.feedback_vector_slots > 0) {
    DCHECK(FLAG_wasm_speculative_inlining);
    Handle<FixedArray> vector =
        isolate->factory()->NewFixedArray(result.feedback_vector_slots);
    instance->feedback_vectors().set(
        declared_function_index(module, func_index), *vector);
  }

  WasmCodeRefScope code_ref_scope;
  WasmCode* code;
  {

@@ -1228,16 +1238,100 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
  return true;
}

std::vector<int> ProcessTypeFeedback(Isolate* isolate,
                                     Handle<WasmInstanceObject> instance,
                                     int func_index) {
  int which_vector = declared_function_index(instance->module(), func_index);
  Object maybe_feedback = instance->feedback_vectors().get(which_vector);
  if (!maybe_feedback.IsFixedArray()) return {};
  FixedArray feedback = FixedArray::cast(maybe_feedback);
  std::vector<int> result(feedback.length() / 2);
  int imported_functions =
      static_cast<int>(instance->module()->num_imported_functions);
  for (int i = 0; i < feedback.length(); i += 2) {
    Object value = feedback.get(i);
    if (WasmExportedFunction::IsWasmExportedFunction(value)) {
      // Monomorphic. Mark the target for inlining if it's defined in the
      // same module.
      WasmExportedFunction target = WasmExportedFunction::cast(value);
      if (target.instance() == *instance &&
          target.function_index() >= imported_functions) {
        if (FLAG_trace_wasm_speculative_inlining) {
          PrintF("[Function #%d call_ref #%d inlineable (monomorphic)]\n",
                 func_index, i / 2);
        }
        result[i / 2] = target.function_index();
        continue;
      }
    } else if (value.IsFixedArray()) {
      // Polymorphic. Pick a target for inlining if there is one that was
      // seen for most calls, and matches the requirements of the monomorphic
      // case.
      FixedArray polymorphic = FixedArray::cast(value);
      size_t total_count = 0;
      for (int j = 0; j < polymorphic.length(); j += 2) {
        total_count += CallRefData::cast(polymorphic.get(j + 1)).count();
      }
      int found_target = -1;
      double best_frequency = 0;
      for (int j = 0; j < polymorphic.length(); j += 2) {
        uint32_t this_count = CallRefData::cast(polymorphic.get(j + 1)).count();
        double frequency = static_cast<double>(this_count) / total_count;
        if (frequency > best_frequency) best_frequency = frequency;
        if (frequency < 0.8) continue;
        Object maybe_target = polymorphic.get(j);
        if (!WasmExportedFunction::IsWasmExportedFunction(maybe_target)) {
          continue;
        }
        WasmExportedFunction target =
            WasmExportedFunction::cast(polymorphic.get(j));
        if (target.instance() != *instance ||
            target.function_index() < imported_functions) {
          continue;
        }
        found_target = target.function_index();
        if (FLAG_trace_wasm_speculative_inlining) {
          PrintF("[Function #%d call_ref #%d inlineable (polymorphic %f)]\n",
                 func_index, i / 2, frequency);
        }
        break;
      }
      if (found_target >= 0) {
        result[i / 2] = found_target;
        continue;
      } else if (FLAG_trace_wasm_speculative_inlining) {
        PrintF("[Function #%d call_ref #%d: best frequency %f]\n", func_index,
               i / 2, best_frequency);
      }
    }
    // If we fall through to here, then this call isn't eligible for inlining.
    // Possible reasons: uninitialized or megamorphic feedback; or monomorphic
    // or polymorphic that didn't meet our requirements.
    result[i / 2] = -1;
  }
  return result;
}

void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
                   int func_index) {
                   int func_index, Handle<WasmInstanceObject> instance) {
  CompilationStateImpl* compilation_state =
      Impl(native_module->compilation_state());
  WasmCompilationUnit tiering_unit{func_index, ExecutionTier::kTurbofan,
                                   kNoDebugging};

  const WasmModule* module = native_module->module();
  if (FLAG_wasm_speculative_inlining) {
    auto feedback = ProcessTypeFeedback(isolate, instance, func_index);
    base::MutexGuard mutex_guard(&module->type_feedback.mutex);
    // TODO(jkummerow): we could have collisions here if two different instances
    // of the same module schedule tier-ups of the same function at the same
    // time. If that ever becomes a problem, figure out a solution.
    module->type_feedback.feedback_for_function[func_index] =
        std::move(feedback);
  }

  uint32_t* call_array = native_module->num_liftoff_function_calls_array();
  int offset =
      wasm::declared_function_index(native_module->module(), func_index);
  int offset = wasm::declared_function_index(module, func_index);

  size_t priority =
      base::Relaxed_Load(reinterpret_cast<int*>(&call_array[offset]));
@@ -73,9 +73,10 @@ WasmCode* CompileImportWrapper(
// Triggered by the WasmCompileLazy builtin. The return value indicates whether
// compilation was successful. Lazy compilation can fail only if validation is
// also lazy.
bool CompileLazy(Isolate*, Handle<WasmModuleObject>, int func_index);
bool CompileLazy(Isolate*, Handle<WasmInstanceObject>, int func_index);

void TriggerTierUp(Isolate*, NativeModule*, int func_index);
void TriggerTierUp(Isolate*, NativeModule*, int func_index,
                   Handle<WasmInstanceObject> instance);

template <typename Key, typename Hash>
class WrapperQueue {
@@ -711,6 +711,7 @@ class ModuleDecoderImpl : public Decoder {
                            import->index,  // func_index
                            0,              // sig_index
                            {0, 0},         // code
                            0,              // feedback slots
                            true,           // imported
                            false,          // exported
                            false});        // declared

@@ -805,6 +806,7 @@ class ModuleDecoderImpl : public Decoder {
                        func_index,  // func_index
                        0,           // sig_index
                        {0, 0},      // code
                        0,           // feedback slots
                        false,       // imported
                        false,       // exported
                        false});     // declared
@@ -705,6 +705,27 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
    instance->set_managed_object_maps(*maps);
  }

  //--------------------------------------------------------------------------
  // Allocate type feedback vectors for functions.
  //--------------------------------------------------------------------------
  if (FLAG_wasm_speculative_inlining) {
    int num_functions = static_cast<int>(module_->num_declared_functions);
    Handle<FixedArray> vectors =
        isolate_->factory()->NewFixedArray(num_functions, AllocationType::kOld);
    instance->set_feedback_vectors(*vectors);
    for (int i = 0; i < num_functions; i++) {
      int func_index = module_->num_imported_functions + i;
      int slots = module_->functions[func_index].feedback_slots;
      if (slots == 0) continue;
      if (FLAG_trace_wasm_speculative_inlining) {
        PrintF("[Function %d (declared %d): allocating %d feedback slots]\n",
               func_index, i, slots);
      }
      Handle<FixedArray> feedback = isolate_->factory()->NewFixedArray(slots);
      vectors->set(i, *feedback);
    }
  }

  //--------------------------------------------------------------------------
  // Process the initialization for the module's globals.
  //--------------------------------------------------------------------------
@@ -2266,6 +2266,13 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
  for (auto& result : results) {
    DCHECK(result.succeeded());
    total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
    if (result.result_tier == ExecutionTier::kLiftoff) {
      int index = result.func_index;
      DCHECK(module()->functions[index].feedback_slots == 0 ||
             module()->functions[index].feedback_slots ==
                 result.feedback_vector_slots);
      module()->functions[index].feedback_slots = result.feedback_vector_slots;
    }
  }
  base::Vector<byte> code_space;
  NativeModule::JumpTablesRef jump_tables;
@@ -86,6 +86,7 @@ struct WasmModule;
  V(WasmTraceMemory)    \
  V(BigIntToI32Pair)    \
  V(BigIntToI64)        \
  V(CallRefIC)          \
  V(DoubleToI)          \
  V(I32PairToBigInt)    \
  V(I64ToBigInt)        \

@@ -161,7 +161,7 @@ constexpr int kAnonymousFuncIndex = -1;
constexpr uint32_t kGenericWrapperBudget = 1000;

#if V8_TARGET_ARCH_X64
constexpr int32_t kOSRTargetOffset = 3 * kSystemPointerSize;
constexpr int32_t kOSRTargetOffset = 4 * kSystemPointerSize;
#endif

}  // namespace wasm
@@ -9,6 +9,7 @@
#ifndef V8_WASM_WASM_MODULE_H_
#define V8_WASM_WASM_MODULE_H_

#include <map>
#include <memory>

#include "src/base/optional.h"

@@ -62,6 +63,10 @@ struct WasmFunction {
  uint32_t func_index;  // index into the function table.
  uint32_t sig_index;   // index into the signature table.
  WireBytesRef code;    // code of this function.
  // Required number of slots in a feedback vector. Marked {mutable} because
  // this is computed late (by Liftoff compilation), when the rest of the
  // {WasmFunction} is typically considered {const}.
  mutable int feedback_slots;
  bool imported;
  bool exported;
  bool declared;

@@ -257,6 +262,12 @@ struct V8_EXPORT_PRIVATE WasmDebugSymbols {
  WireBytesRef external_url;
};

struct TypeFeedbackStorage {
  std::map<uint32_t, std::vector<int>> feedback_for_function;
  // Accesses to {feedback_for_function} are guarded by this mutex.
  base::Mutex mutex;
};

struct WasmTable;

// End of a chain of explicit supertypes.

@@ -364,6 +375,9 @@ struct V8_EXPORT_PRIVATE WasmModule {
  std::vector<WasmCompilationHint> compilation_hints;
  BranchHintInfo branch_hints;
  SignatureMap signature_map;  // canonicalizing map for signature indexes.
  // Entries in this storage are short-lived: when tier-up of a function is
  // scheduled, an entry is placed; the Turbofan graph builder consumes it.
  mutable TypeFeedbackStorage type_feedback;

  ModuleOrigin origin = kWasmOrigin;  // origin of the module
  LazilyGeneratedNames lazily_generated_names;
@@ -253,6 +253,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_external_functions, FixedArray,
                   kWasmExternalFunctionsOffset)
ACCESSORS(WasmInstanceObject, managed_object_maps, FixedArray,
          kManagedObjectMapsOffset)
ACCESSORS(WasmInstanceObject, feedback_vectors, FixedArray,
          kFeedbackVectorsOffset)

void WasmInstanceObject::clear_padding() {
  if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {

@@ -1311,6 +1311,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
  instance->set_hook_on_function_call_address(
      isolate->debug()->hook_on_function_call_address());
  instance->set_managed_object_maps(*isolate->factory()->empty_fixed_array());
  instance->set_feedback_vectors(*isolate->factory()->empty_fixed_array());
  instance->set_num_liftoff_function_calls_array(
      module_object->native_module()->num_liftoff_function_calls_array());
  instance->set_break_on_entry(module_object->script().break_on_entry());

@@ -349,6 +349,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
  DECL_OPTIONAL_ACCESSORS(tags_table, FixedArray)
  DECL_OPTIONAL_ACCESSORS(wasm_external_functions, FixedArray)
  DECL_ACCESSORS(managed_object_maps, FixedArray)
  DECL_ACCESSORS(feedback_vectors, FixedArray)
  DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
  DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
  DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)

@@ -425,6 +426,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
  V(kTagsTableOffset, kTaggedSize)                                      \
  V(kWasmExternalFunctionsOffset, kTaggedSize)                          \
  V(kManagedObjectMapsOffset, kTaggedSize)                              \
  V(kFeedbackVectorsOffset, kTaggedSize)                                \
  V(kBreakOnEntryOffset, kUInt8Size)                                    \
  /* More padding to make the header pointer-size aligned */            \
  V(kHeaderPaddingOffset, POINTER_SIZE_PADDING(kHeaderPaddingOffset))   \

@@ -460,7 +462,8 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
      kManagedNativeAllocationsOffset,
      kTagsTableOffset,
      kWasmExternalFunctionsOffset,
      kManagedObjectMapsOffset};
      kManagedObjectMapsOffset,
      kFeedbackVectorsOffset};

  const wasm::WasmModule* module();
@@ -157,3 +157,10 @@ extern class WasmArray extends WasmObject {
  @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
  @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
}

@export
class CallRefData extends HeapObject {
  instance: HeapObject;
  target: RawPtr;
  count: uint32;
}
@@ -148,6 +148,7 @@ uint32_t TestingModuleBuilder::AddFunction(const FunctionSig* sig,
      index,    // func_index
      0,        // sig_index
      {0, 0},   // code
      0,        // feedback slots
      false,    // imported
      false,    // exported
      false});  // declared

@@ -4219,6 +4219,7 @@ ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
      0,        // func_index
      0,        // sig_index
      {0, 0},   // code
      0,        // feedback slots
      false,    // imported
      false,    // exported
      false};   // declared

@@ -98,6 +98,7 @@ class TestModuleBuilder {
      static_cast<uint32_t>(mod.functions.size()),  // func_index
      sig_index,                                    // sig_index
      {0, 0},                                       // code
      0,                                            // feedback slots
      false,                                        // import
      false,                                        // export
      declared});                                   // declared
@@ -121,38 +121,39 @@ INSTANCE_TYPES = {
  157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
  158: "WEAK_FIXED_ARRAY_TYPE",
  159: "TRANSITION_ARRAY_TYPE",
  160: "CELL_TYPE",
  161: "CODE_TYPE",
  162: "CODE_DATA_CONTAINER_TYPE",
  163: "COVERAGE_INFO_TYPE",
  164: "EMBEDDER_DATA_ARRAY_TYPE",
  165: "FEEDBACK_METADATA_TYPE",
  166: "FEEDBACK_VECTOR_TYPE",
  167: "FILLER_TYPE",
  168: "FREE_SPACE_TYPE",
  169: "INTERNAL_CLASS_TYPE",
  170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
  171: "MAP_TYPE",
  172: "MEGA_DOM_HANDLER_TYPE",
  173: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
  174: "PREPARSE_DATA_TYPE",
  175: "PROPERTY_ARRAY_TYPE",
  176: "PROPERTY_CELL_TYPE",
  177: "SCOPE_INFO_TYPE",
  178: "SHARED_FUNCTION_INFO_TYPE",
  179: "SMI_BOX_TYPE",
  180: "SMI_PAIR_TYPE",
  181: "SORT_STATE_TYPE",
  182: "SWISS_NAME_DICTIONARY_TYPE",
  183: "WEAK_ARRAY_LIST_TYPE",
  184: "WEAK_CELL_TYPE",
  185: "WASM_ARRAY_TYPE",
  186: "WASM_STRUCT_TYPE",
  187: "JS_PROXY_TYPE",
  160: "CALL_REF_DATA_TYPE",
  161: "CELL_TYPE",
  162: "CODE_TYPE",
  163: "CODE_DATA_CONTAINER_TYPE",
  164: "COVERAGE_INFO_TYPE",
  165: "EMBEDDER_DATA_ARRAY_TYPE",
  166: "FEEDBACK_METADATA_TYPE",
  167: "FEEDBACK_VECTOR_TYPE",
  168: "FILLER_TYPE",
  169: "FREE_SPACE_TYPE",
  170: "INTERNAL_CLASS_TYPE",
  171: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
  172: "MAP_TYPE",
  173: "MEGA_DOM_HANDLER_TYPE",
  174: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
  175: "PREPARSE_DATA_TYPE",
  176: "PROPERTY_ARRAY_TYPE",
  177: "PROPERTY_CELL_TYPE",
  178: "SCOPE_INFO_TYPE",
  179: "SHARED_FUNCTION_INFO_TYPE",
  180: "SMI_BOX_TYPE",
  181: "SMI_PAIR_TYPE",
  182: "SORT_STATE_TYPE",
  183: "SWISS_NAME_DICTIONARY_TYPE",
  184: "WEAK_ARRAY_LIST_TYPE",
  185: "WEAK_CELL_TYPE",
  186: "WASM_ARRAY_TYPE",
  187: "WASM_STRUCT_TYPE",
  188: "JS_PROXY_TYPE",
  1057: "JS_OBJECT_TYPE",
  188: "JS_GLOBAL_OBJECT_TYPE",
  189: "JS_GLOBAL_PROXY_TYPE",
  190: "JS_MODULE_NAMESPACE_TYPE",
  189: "JS_GLOBAL_OBJECT_TYPE",
  190: "JS_GLOBAL_PROXY_TYPE",
  191: "JS_MODULE_NAMESPACE_TYPE",
  1040: "JS_SPECIAL_API_OBJECT_TYPE",
  1041: "JS_PRIMITIVE_WRAPPER_TYPE",
  1058: "JS_API_OBJECT_TYPE",

@@ -236,16 +237,16 @@ INSTANCE_TYPES = {

# List of known V8 maps.
KNOWN_MAPS = {
  ("read_only_space", 0x02119): (171, "MetaMap"),
  ("read_only_space", 0x02119): (172, "MetaMap"),
  ("read_only_space", 0x02141): (67, "NullMap"),
  ("read_only_space", 0x02169): (153, "StrongDescriptorArrayMap"),
  ("read_only_space", 0x02191): (158, "WeakFixedArrayMap"),
  ("read_only_space", 0x021d1): (100, "EnumCacheMap"),
  ("read_only_space", 0x02205): (118, "FixedArrayMap"),
  ("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"),
  ("read_only_space", 0x0229d): (168, "FreeSpaceMap"),
  ("read_only_space", 0x022c5): (167, "OnePointerFillerMap"),
  ("read_only_space", 0x022ed): (167, "TwoPointerFillerMap"),
  ("read_only_space", 0x0229d): (169, "FreeSpaceMap"),
  ("read_only_space", 0x022c5): (168, "OnePointerFillerMap"),
  ("read_only_space", 0x022ed): (168, "TwoPointerFillerMap"),
  ("read_only_space", 0x02315): (67, "UninitializedMap"),
  ("read_only_space", 0x0238d): (67, "UndefinedMap"),
  ("read_only_space", 0x023d1): (66, "HeapNumberMap"),

@@ -256,15 +257,15 @@ KNOWN_MAPS = {
  ("read_only_space", 0x02559): (119, "HashTableMap"),
  ("read_only_space", 0x02581): (64, "SymbolMap"),
  ("read_only_space", 0x025a9): (40, "OneByteStringMap"),
  ("read_only_space", 0x025d1): (177, "ScopeInfoMap"),
  ("read_only_space", 0x025f9): (178, "SharedFunctionInfoMap"),
  ("read_only_space", 0x02621): (161, "CodeMap"),
  ("read_only_space", 0x02649): (160, "CellMap"),
  ("read_only_space", 0x02671): (176, "GlobalPropertyCellMap"),
  ("read_only_space", 0x025d1): (178, "ScopeInfoMap"),
  ("read_only_space", 0x025f9): (179, "SharedFunctionInfoMap"),
  ("read_only_space", 0x02621): (162, "CodeMap"),
  ("read_only_space", 0x02649): (161, "CellMap"),
  ("read_only_space", 0x02671): (177, "GlobalPropertyCellMap"),
  ("read_only_space", 0x02699): (70, "ForeignMap"),
  ("read_only_space", 0x026c1): (159, "TransitionArrayMap"),
  ("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"),
  ("read_only_space", 0x02711): (166, "FeedbackVectorMap"),
  ("read_only_space", 0x02711): (167, "FeedbackVectorMap"),
  ("read_only_space", 0x02749): (67, "ArgumentsMarkerMap"),
  ("read_only_space", 0x027a9): (67, "ExceptionMap"),
  ("read_only_space", 0x02805): (67, "TerminationExceptionMap"),

@@ -272,17 +273,17 @@ KNOWN_MAPS = {
  ("read_only_space", 0x028cd): (67, "StaleRegisterMap"),
  ("read_only_space", 0x0292d): (130, "ScriptContextTableMap"),
  ("read_only_space", 0x02955): (128, "ClosureFeedbackCellArrayMap"),
  ("read_only_space", 0x0297d): (165, "FeedbackMetadataArrayMap"),
  ("read_only_space", 0x0297d): (166, "FeedbackMetadataArrayMap"),
  ("read_only_space", 0x029a5): (118, "ArrayListMap"),
  ("read_only_space", 0x029cd): (65, "BigIntMap"),
  ("read_only_space", 0x029f5): (129, "ObjectBoilerplateDescriptionMap"),
  ("read_only_space", 0x02a1d): (132, "BytecodeArrayMap"),
  ("read_only_space", 0x02a45): (162, "CodeDataContainerMap"),
  ("read_only_space", 0x02a6d): (163, "CoverageInfoMap"),
  ("read_only_space", 0x02a45): (163, "CodeDataContainerMap"),
  ("read_only_space", 0x02a6d): (164, "CoverageInfoMap"),
  ("read_only_space", 0x02a95): (133, "FixedDoubleArrayMap"),
  ("read_only_space", 0x02abd): (121, "GlobalDictionaryMap"),
  ("read_only_space", 0x02ae5): (101, "ManyClosuresCellMap"),
  ("read_only_space", 0x02b0d): (172, "MegaDomHandlerMap"),
  ("read_only_space", 0x02b0d): (173, "MegaDomHandlerMap"),
  ("read_only_space", 0x02b35): (118, "ModuleInfoMap"),
  ("read_only_space", 0x02b5d): (122, "NameDictionaryMap"),
  ("read_only_space", 0x02b85): (101, "NoClosuresCellMap"),

@@ -291,8 +292,8 @@ KNOWN_MAPS = {
  ("read_only_space", 0x02bfd): (124, "OrderedHashMapMap"),
  ("read_only_space", 0x02c25): (125, "OrderedHashSetMap"),
  ("read_only_space", 0x02c4d): (126, "OrderedNameDictionaryMap"),
  ("read_only_space", 0x02c75): (174, "PreparseDataMap"),
  ("read_only_space", 0x02c9d): (175, "PropertyArrayMap"),
  ("read_only_space", 0x02c75): (175, "PreparseDataMap"),
  ("read_only_space", 0x02c9d): (176, "PropertyArrayMap"),
  ("read_only_space", 0x02cc5): (97, "SideEffectCallHandlerInfoMap"),
  ("read_only_space", 0x02ced): (97, "SideEffectFreeCallHandlerInfoMap"),
  ("read_only_space", 0x02d15): (97, "NextCallSideEffectFreeCallHandlerInfoMap"),

@@ -301,16 +302,16 @@ KNOWN_MAPS = {
  ("read_only_space", 0x02d8d): (150, "SmallOrderedHashSetMap"),
  ("read_only_space", 0x02db5): (151, "SmallOrderedNameDictionaryMap"),
  ("read_only_space", 0x02ddd): (154, "SourceTextModuleMap"),
  ("read_only_space", 0x02e05): (182, "SwissNameDictionaryMap"),
  ("read_only_space", 0x02e05): (183, "SwissNameDictionaryMap"),
  ("read_only_space", 0x02e2d): (155, "SyntheticModuleMap"),
  ("read_only_space", 0x02e55): (72, "WasmCapiFunctionDataMap"),
  ("read_only_space", 0x02e7d): (73, "WasmExportedFunctionDataMap"),
  ("read_only_space", 0x02ea5): (74, "WasmJSFunctionDataMap"),
  ("read_only_space", 0x02ecd): (75, "WasmTypeInfoMap"),
  ("read_only_space", 0x02ef5): (183, "WeakArrayListMap"),
  ("read_only_space", 0x02ef5): (184, "WeakArrayListMap"),
  ("read_only_space", 0x02f1d): (120, "EphemeronHashTableMap"),
  ("read_only_space", 0x02f45): (164, "EmbedderDataArrayMap"),
  ("read_only_space", 0x02f6d): (184, "WeakCellMap"),
  ("read_only_space", 0x02f45): (165, "EmbedderDataArrayMap"),
  ("read_only_space", 0x02f6d): (185, "WeakCellMap"),
  ("read_only_space", 0x02f95): (32, "StringMap"),
  ("read_only_space", 0x02fbd): (41, "ConsOneByteStringMap"),
  ("read_only_space", 0x02fe5): (33, "ConsStringMap"),

@@ -368,27 +369,28 @@ KNOWN_MAPS = {
  ("read_only_space", 0x05c11): (152, "DescriptorArrayMap"),
  ("read_only_space", 0x05c39): (157, "UncompiledDataWithoutPreparseDataMap"),
  ("read_only_space", 0x05c61): (156, "UncompiledDataWithPreparseDataMap"),
  ("read_only_space", 0x05c89): (173, "OnHeapBasicBlockProfilerDataMap"),
  ("read_only_space", 0x05cb1): (169, "InternalClassMap"),
  ("read_only_space", 0x05cd9): (180, "SmiPairMap"),
  ("read_only_space", 0x05d01): (179, "SmiBoxMap"),
  ("read_only_space", 0x05c89): (174, "OnHeapBasicBlockProfilerDataMap"),
  ("read_only_space", 0x05cb1): (170, "InternalClassMap"),
  ("read_only_space", 0x05cd9): (181, "SmiPairMap"),
  ("read_only_space", 0x05d01): (180, "SmiBoxMap"),
  ("read_only_space", 0x05d29): (146, "ExportedSubClassBaseMap"),
  ("read_only_space", 0x05d51): (147, "ExportedSubClassMap"),
  ("read_only_space", 0x05d79): (68, "AbstractInternalClassSubclass1Map"),
  ("read_only_space", 0x05da1): (69, "AbstractInternalClassSubclass2Map"),
  ("read_only_space", 0x05dc9): (134, "InternalClassWithSmiElementsMap"),
  ("read_only_space", 0x05df1): (170, "InternalClassWithStructElementsMap"),
  ("read_only_space", 0x05df1): (171, "InternalClassWithStructElementsMap"),
  ("read_only_space", 0x05e19): (148, "ExportedSubClass2Map"),
  ("read_only_space", 0x05e41): (181, "SortStateMap"),
  ("read_only_space", 0x05e69): (90, "AllocationSiteWithWeakNextMap"),
  ("read_only_space", 0x05e91): (90, "AllocationSiteWithoutWeakNextMap"),
  ("read_only_space", 0x05eb9): (81, "LoadHandler1Map"),
  ("read_only_space", 0x05ee1): (81, "LoadHandler2Map"),
  ("read_only_space", 0x05f09): (81, "LoadHandler3Map"),
  ("read_only_space", 0x05f31): (82, "StoreHandler0Map"),
  ("read_only_space", 0x05f59): (82, "StoreHandler1Map"),
  ("read_only_space", 0x05f81): (82, "StoreHandler2Map"),
  ("read_only_space", 0x05fa9): (82, "StoreHandler3Map"),
  ("read_only_space", 0x05e41): (182, "SortStateMap"),
  ("read_only_space", 0x05e69): (160, "CallRefDataMap"),
  ("read_only_space", 0x05e91): (90, "AllocationSiteWithWeakNextMap"),
  ("read_only_space", 0x05eb9): (90, "AllocationSiteWithoutWeakNextMap"),
  ("read_only_space", 0x05ee1): (81, "LoadHandler1Map"),
  ("read_only_space", 0x05f09): (81, "LoadHandler2Map"),
  ("read_only_space", 0x05f31): (81, "LoadHandler3Map"),
  ("read_only_space", 0x05f59): (82, "StoreHandler0Map"),
  ("read_only_space", 0x05f81): (82, "StoreHandler1Map"),
  ("read_only_space", 0x05fa9): (82, "StoreHandler2Map"),
  ("read_only_space", 0x05fd1): (82, "StoreHandler3Map"),
  ("map_space", 0x02119): (1057, "ExternalMap"),
  ("map_space", 0x02141): (2114, "JSMessageObjectMap"),
}