[maglev] Remove BaselineAssembler dep from Maglev
We should not mix the Baseline and Maglev ScratchScope: x14 is considered an
extra scratch register on arm64 for Baseline, but not for Maglev, which has
a more comprehensive way to allocate extra scratch registers.

Bug: v8:7700, chromium:1410970
Change-Id: Ia7eb77ff7fffc3c91d572931aa2ea001c90c1ffc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4212388
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85590}
Parent: caa93f9618
Commit: 12ecfa78cd
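The hazard the commit message describes can be made concrete. Below is a
minimal, self-contained sketch (plain C++, not V8 code; the type, register
names, and pool contents are all illustrative assumptions) of why a Baseline
scratch scope must not run on behalf of Maglev: each tier keeps its own idea
of which registers are free, so a register that is "extra scratch" for one
tier (x14 here) can hold a live value in the other.

    // Illustrative sketch only, assuming made-up names; not V8 code.
    #include <cassert>
    #include <set>
    #include <string>

    struct ScratchPool {
      std::set<std::string> free;  // registers this tier may clobber
      std::string Acquire() {
        assert(!free.empty() && "scratch pool exhausted");
        std::string reg = *free.begin();
        free.erase(free.begin());
        return reg;
      }
    };

    int main() {
      // Baseline on arm64 treats x14 as an extra scratch register...
      ScratchPool baseline_scratches{{"x14", "x15"}};
      // ...but Maglev's register allocator may have a live value in x14.
      std::set<std::string> maglev_live = {"x14"};

      // A Baseline helper invoked from Maglev-generated code acquires x14
      // and would silently clobber Maglev's live value:
      std::string scratch = baseline_scratches.Acquire();
      assert(maglev_live.count(scratch) == 1);  // the hazard this CL removes
      return 0;
    }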
src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -430,27 +430,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 FeedbackSlot slot,
                                                 Label* on_result,
                                                 Label::Distance) {
-  Label fallthrough, clear_slot;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
-  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
-
-  // Is it marked_for_deoptimization? If yes, clear the slot.
-  {
-    ScratchRegisterScope temps(this);
-    __ JumpIfCodeIsMarkedForDeoptimization(scratch_and_result,
-                                           temps.AcquireScratch(), &clear_slot);
-    __ B(on_result);
-  }
-
-  __ bind(&clear_slot);
-  __ Mov(scratch_and_result, __ ClearedValue());
-  StoreTaggedFieldNoWriteBarrier(
-      feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
-      scratch_and_result);
-
-  __ bind(&fallthrough);
-  Move(scratch_and_result, 0);
+  __ TryLoadOptimizedOsrCode(scratch_and_result, feedback_vector, slot,
+                             on_result, Label::Distance::kFar);
 }
 
 void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
src/baseline/x64/baseline-assembler-x64-inl.h
@@ -377,23 +377,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 FeedbackSlot slot,
                                                 Label* on_result,
                                                 Label::Distance distance) {
-  Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
-  __ LoadWeakValue(scratch_and_result, &fallthrough);
-
-  // Is it marked_for_deoptimization? If yes, clear the slot.
-  {
-    __ TestCodeIsMarkedForDeoptimization(scratch_and_result);
-    __ j(equal, on_result, distance);
-    __ StoreTaggedField(
-        FieldOperand(feedback_vector,
-                     FeedbackVector::OffsetOfElementAt(slot.ToInt())),
-        __ ClearedValue());
-  }
-
-  __ bind(&fallthrough);
-  __ Move(scratch_and_result, 0);
+  __ MacroAssembler::TryLoadOptimizedOsrCode(
+      scratch_and_result, feedback_vector, slot, on_result, distance);
 }
 
 void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
src/codegen/arm64/macro-assembler-arm64.cc
@@ -3577,6 +3577,37 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
 }
 
+void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
+                                             Register feedback_vector,
+                                             FeedbackSlot slot,
+                                             Label* on_result,
+                                             Label::Distance) {
+  Label fallthrough, clear_slot;
+  LoadTaggedPointerField(
+      scratch_and_result,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::OffsetOfElementAt(slot.ToInt())));
+  LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
+
+  // Is it marked_for_deoptimization? If yes, clear the slot.
+  {
+    UseScratchRegisterScope temps(this);
+    JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temps.AcquireX(),
+                                        &clear_slot);
+    B(on_result);
+  }
+
+  bind(&clear_slot);
+  Mov(scratch_and_result, ClearedValue());
+  StoreTaggedField(
+      scratch_and_result,
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::OffsetOfElementAt(slot.ToInt())));
+
+  bind(&fallthrough);
+  Mov(scratch_and_result, 0);
+}
+
 // This is the main Printf implementation. All other Printf variants call
 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
 void TurboAssembler::PrintfNoPreserve(const char* format,
src/codegen/arm64/macro-assembler-arm64.h
@@ -2133,6 +2133,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 
   void LoadNativeContextSlot(Register dst, int index);
 
+  // Falls through and sets scratch_and_result to 0 on failure, jumps to
+  // on_result on success.
+  void TryLoadOptimizedOsrCode(Register scratch_and_result,
+                               Register feedback_vector, FeedbackSlot slot,
+                               Label* on_result, Label::Distance distance);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };
 
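The header comment above states the helper's contract: fall through with
scratch_and_result set to 0 on failure, jump to on_result on success. A
hedged call-site sketch follows; masm, code, vector, and slot are assumed
names for illustration, not identifiers from this change.

    // Sketch under assumed names: `masm` is a MacroAssembler*, `code` a
    // scratch register, `vector` the feedback vector, `slot` a FeedbackSlot.
    Label have_osr_code;
    masm->TryLoadOptimizedOsrCode(code, vector, slot, &have_osr_code,
                                  Label::Distance::kFar);
    // Fall-through path: no usable cached code; `code` now holds 0.
    masm->bind(&have_osr_code);
    // Jump target: `code` holds optimized code that is neither cleared nor
    // marked for deoptimization.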
src/codegen/x64/macro-assembler-x64.cc
@@ -3234,6 +3234,32 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
   LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
 }
 
+void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
+                                             Register feedback_vector,
+                                             FeedbackSlot slot,
+                                             Label* on_result,
+                                             Label::Distance distance) {
+  Label fallthrough;
+  LoadTaggedPointerField(
+      scratch_and_result,
+      FieldOperand(feedback_vector,
+                   FeedbackVector::OffsetOfElementAt(slot.ToInt())));
+  LoadWeakValue(scratch_and_result, &fallthrough);
+
+  // Is it marked_for_deoptimization? If yes, clear the slot.
+  {
+    TestCodeIsMarkedForDeoptimization(scratch_and_result);
+    j(equal, on_result, distance);
+    StoreTaggedField(
+        FieldOperand(feedback_vector,
+                     FeedbackVector::OffsetOfElementAt(slot.ToInt())),
+        ClearedValue());
+  }
+
+  bind(&fallthrough);
+  Move(scratch_and_result, 0);
+}
+
 int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
   // On Windows 64 stack slots are reserved by the caller for all arguments
   // including the ones passed in registers, and space is always allocated for
src/codegen/x64/macro-assembler-x64.h
@@ -891,6 +891,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // Load the native context slot with the current index.
   void LoadNativeContextSlot(Register dst, int index);
 
+  // Falls through and sets scratch_and_result to 0 on failure, jumps to
+  // on_result on success.
+  void TryLoadOptimizedOsrCode(Register scratch_and_result,
+                               Register feedback_vector, FeedbackSlot slot,
+                               Label* on_result, Label::Distance distance);
+
   // ---------------------------------------------------------------------------
   // Runtime calls
 
src/maglev/DEPS
@@ -8,10 +8,4 @@ specific_include_rules = {
   "maglev-graph-builder\.h": [
     "+src/interpreter/interpreter-intrinsics.h",
   ],
-  "maglev-ir\.cc": [
-    # Allow Maglev to reuse the baseline assembler.
-    # TODO(v8:7700): Clean up these dependencies by extracting common code to a
-    # separate directory.
-    "+src/baseline/baseline-assembler-inl.h",
-  ],
 }
src/maglev/arm64/maglev-assembler-arm64-inl.h
@@ -559,6 +559,11 @@ inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
   MacroAssembler::JumpIfSmi(src, on_smi);
 }
 
+void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+                                 Label* target, Label::Distance) {
+  CompareAndBranch(value, Immediate(byte), cc, target);
+}
+
 inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
                                                    Condition cond,
                                                    Label* target,
src/maglev/maglev-assembler.h
@@ -216,6 +216,8 @@ class MaglevAssembler : public MacroAssembler {
                             Label::Distance distance = Label::kFar);
   inline void JumpIfSmi(Register src, Label* on_smi,
                         Label::Distance near_jump = Label::kFar);
+  inline void JumpIfByte(Condition cc, Register value, int32_t byte,
+                         Label* target, Label::Distance distance = Label::kFar);
 
   inline void CompareInt32AndJumpIf(Register r1, Register r2, Condition cond,
                                     Label* target,
src/maglev/maglev-ir.cc
@@ -4,7 +4,6 @@
 
 #include "src/maglev/maglev-ir.h"
 
-#include "src/baseline/baseline-assembler-inl.h"
 #include "src/builtins/builtins-constructor.h"
 #include "src/codegen/interface-descriptors-inl.h"
 #include "src/execution/isolate-inl.h"
@@ -2733,24 +2732,21 @@ void AttemptOnStackReplacement(MaglevAssembler* masm,
   //
   // See also: InterpreterAssembler::OnStackReplacement.
 
-  baseline::BaselineAssembler basm(masm);
   __ AssertFeedbackVector(scratch0);
 
   // Case 1).
   Label deopt;
   Register maybe_target_code = scratch1;
-  {
-    basm.TryLoadOptimizedOsrCode(maybe_target_code, scratch0, feedback_slot,
-                                 &deopt, Label::kFar);
-  }
+  __ TryLoadOptimizedOsrCode(maybe_target_code, scratch0, feedback_slot, &deopt,
+                             Label::kFar);
 
   // Case 2).
   {
     __ LoadByte(scratch0,
                 FieldMemOperand(scratch0, FeedbackVector::kOsrStateOffset));
     __ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch0);
-    basm.JumpIfByte(kUnsignedLessThanEqual, scratch0, loop_depth,
-                    *no_code_for_osr, Label::Distance::kNear);
+    __ JumpIfByte(kUnsignedLessThanEqual, scratch0, loop_depth,
+                  *no_code_for_osr, Label::Distance::kNear);
 
     // The osr_urgency exceeds the current loop_depth, signaling an OSR
     // request. Call into runtime to compile.
src/maglev/x64/maglev-assembler-x64-inl.h
@@ -444,6 +444,12 @@ inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
   MacroAssembler::JumpIfSmi(src, on_smi, distance);
 }
 
+void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+                                 Label* target, Label::Distance distance) {
+  cmpb(value, Immediate(byte));
+  j(cc, target, distance);
+}
+
 void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
                                             Condition cond, Label* target,
                                             Label::Distance distance) {
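Note the asymmetry between the two JumpIfByte ports: the x64 version forwards
the Label::Distance hint to j(), where it can select a short jump encoding,
while the arm64 version leaves the parameter unnamed and ignores it, since
CompareAndBranch manages branch range itself. A hedged call sketch, with the
register and label names assumed for illustration:

    // Same call, different effect per port.
    __ JumpIfByte(kUnsignedLessThanEqual, scratch, loop_depth, &no_osr,
                  Label::Distance::kNear);  // near jcc on x64; cmp + b.ls on arm64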