[CSA] Update MachineType to TaggedSigned for Smi's load and stores
The important bit is using MachineType::TaggedSigned instead of AnyTagged in CSA. Everything else is just the result of adding types to variables.

SloppyTNode-ify LoadAndUntagToWord32ObjectField. Both LoadAndUntagSmi and StoreAndTagSmi were each used only once, and their names were not clear, so they are inlined at their call sites.

TNodify:
 * ReloadBytecodeOffset
 * LoadAndUntagRegister
 * GetInterpretedFramePointer
 * Advance (the three variants)
 * SaveBytecodeOffset
 * BytecodeOffset

Type variables:
 * interpreted_frame_pointer_
 * bytecode_offset_

Create macros:
 * TYPED_VARIABLE_CONSTRUCTOR
 * TVARIABLE_CONSTRUCTOR
which are similar to their non-typed counterparts.

Bug: v8:7703, v8:6949
Change-Id: I776e3fe16ca642f868bb635b8bcd5b8b78ca6fea
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1758308
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63522}
Commit: c04b27fb7c (parent: 369e0d587a)
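For orientation, a minimal sketch of the load pattern this change targets. The helper name LoadSmiFieldAsIntPtr is made up for illustration; the body mirrors the LoadAndUntagObjectField change below. Annotating the load as MachineType::TaggedSigned() tells the machine graph that the loaded word is always a Smi rather than an arbitrary tagged value, which is what AnyTagged would admit:

    // Hypothetical CodeStubAssembler helper (sketch, not part of the commit).
    TNode<IntPtrT> LoadSmiFieldAsIntPtr(TNode<HeapObject> object, int offset) {
      // The field is known to hold a Smi, so load it as TaggedSigned
      // instead of AnyTagged, then strip the Smi tag.
      return SmiToIntPtr(
          LoadObjectField(object, offset, MachineType::TaggedSigned()));
    }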
@@ -135,10 +135,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
   TVARIABLE(Int32T, var_length);
   BIND(&if_array);
   {
+    TNode<JSObject> js_object = CAST(arguments_list);
     // Try to extract the elements from a JSArray object.
-    var_elements = LoadElements(CAST(arguments_list));
+    var_elements = LoadElements(js_object);
     var_length =
-        LoadAndUntagToWord32ObjectField(arguments_list, JSArray::kLengthOffset);
+        LoadAndUntagToWord32ObjectField(js_object, JSArray::kLengthOffset);

     // Holey arrays and double backing stores need special treatment.
     STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
@@ -1465,12 +1465,12 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
         LoadObjectField(object, offset, MachineType::Int32()));
   } else {
     return SmiToIntPtr(
-        LoadObjectField(object, offset, MachineType::AnyTagged()));
+        LoadObjectField(object, offset, MachineType::TaggedSigned()));
   }
 }

-TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
-                                                                 int offset) {
+TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
+    SloppyTNode<HeapObject> object, int offset) {
   if (SmiValuesAre32Bits()) {
 #if V8_TARGET_LITTLE_ENDIAN
     offset += 4;
@@ -1479,38 +1479,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
         LoadObjectField(object, offset, MachineType::Int32()));
   } else {
     return SmiToInt32(
-        LoadObjectField(object, offset, MachineType::AnyTagged()));
-  }
-}
-
-TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
-  if (SmiValuesAre32Bits()) {
-#if V8_TARGET_LITTLE_ENDIAN
-    index += 4;
-#endif
-    return ChangeInt32ToIntPtr(
-        Load(MachineType::Int32(), base, IntPtrConstant(index)));
-  } else {
-    return SmiToIntPtr(
-        Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
-  }
-}
-
-void CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
-  if (SmiValuesAre32Bits()) {
-    int zero_offset = offset + 4;
-    int payload_offset = offset;
-#if V8_TARGET_LITTLE_ENDIAN
-    std::swap(zero_offset, payload_offset);
-#endif
-    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
-                        IntPtrConstant(zero_offset), Int32Constant(0));
-    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
-                        IntPtrConstant(payload_offset),
-                        TruncateInt64ToInt32(value));
-  } else {
-    StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
-                        IntPtrConstant(offset), SmiTag(value));
+        LoadObjectField(object, offset, MachineType::TaggedSigned()));
   }
 }

@@ -2519,7 +2488,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
   if (SmiValuesAre32Bits()) {
     return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset));
   } else {
-    return SmiToInt32(Load(MachineType::AnyTagged(), object, offset));
+    return SmiToInt32(Load(MachineType::TaggedSigned(), object, offset));
   }
 }

@@ -219,6 +219,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
   name(this, CSA_DEBUG_INFO(name), __VA_ARGS__)
 #define TYPED_VARIABLE_DEF(type, name, ...) \
   TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
+#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \
+  name(CSA_DEBUG_INFO(name), __VA_ARGS__)
 #else  // DEBUG
 #define CSA_ASSERT(csa, ...) ((void)0)
 #define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
@@ -227,9 +229,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 #define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
 #define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__)
 #define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
+#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__)
 #endif  // DEBUG

 #define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
+#define TVARIABLE_CONSTRUCTOR(...) \
+  EXPAND(TYPED_VARIABLE_CONSTRUCTOR(__VA_ARGS__, this))

 #ifdef ENABLE_SLOW_DCHECKS
 #define CSA_SLOW_ASSERT(csa, ...) \
@@ -970,9 +975,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
                                          int offset);
   // Load a SMI field, untag it, and convert to Word32.
-  TNode<Int32T> LoadAndUntagToWord32ObjectField(Node* object, int offset);
-  // Load a SMI and untag it.
-  TNode<IntPtrT> LoadAndUntagSmi(Node* base, int index);
+  TNode<Int32T> LoadAndUntagToWord32ObjectField(SloppyTNode<HeapObject> object,
+                                                int offset);

   TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
                                               int offset) {
@@ -1039,9 +1043,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                   value, StoreToObjectWriteBarrier::kNone);
   }

-  // Tag a smi and store it.
-  void StoreAndTagSmi(Node* base, int offset, Node* value);
-
   // Load the floating point value of a HeapNumber.
   TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapObject> object);
   // Load the Map of an HeapObject.
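A hedged usage sketch of the new constructor macros (the ExampleAssembler class and counter_ member are hypothetical; the pattern mirrors the InterpreterAssembler constructor in the next hunk). TVARIABLE_CONSTRUCTOR initializes a TVariable<T> member from a constructor initializer list, just as VARIABLE_CONSTRUCTOR does for untyped Variables:

    // Hypothetical assembler class; assumes the CSA headers patched above.
    class ExampleAssembler : public CodeStubAssembler {
     public:
      explicit ExampleAssembler(compiler::CodeAssemblerState* state)
          : CodeStubAssembler(state),
            // In release builds this expands to counter_(IntPtrConstant(0), this);
            // DEBUG builds additionally thread CSA_DEBUG_INFO(counter_).
            TVARIABLE_CONSTRUCTOR(counter_, IntPtrConstant(0)) {}

     private:
      TVariable<IntPtrT> counter_;
    };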
@@ -31,14 +31,14 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
     : CodeStubAssembler(state),
       bytecode_(bytecode),
       operand_scale_(operand_scale),
-      VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
-                           MachineType::PointerRepresentation()),
+      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
       VARIABLE_CONSTRUCTOR(
           bytecode_array_, MachineRepresentation::kTagged,
           Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
-      VARIABLE_CONSTRUCTOR(
-          bytecode_offset_, MachineType::PointerRepresentation(),
-          Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
+      TVARIABLE_CONSTRUCTOR(
+          bytecode_offset_,
+          UncheckedCast<IntPtrT>(
+              Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
       VARIABLE_CONSTRUCTOR(
           dispatch_table_, MachineType::PointerRepresentation(),
           Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
@@ -71,27 +71,27 @@ InterpreterAssembler::~InterpreterAssembler() {
   UnregisterCallGenerationCallbacks();
 }

-Node* InterpreterAssembler::GetInterpretedFramePointer() {
+TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
   if (!interpreted_frame_pointer_.IsBound()) {
-    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+    interpreted_frame_pointer_ = LoadParentFramePointer();
   } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
              !reloaded_frame_ptr_) {
-    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+    interpreted_frame_pointer_ = LoadParentFramePointer();
     reloaded_frame_ptr_ = true;
   }
   return interpreted_frame_pointer_.value();
 }

-Node* InterpreterAssembler::BytecodeOffset() {
+TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
   if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
       (bytecode_offset_.value() ==
        Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
-    bytecode_offset_.Bind(ReloadBytecodeOffset());
+    bytecode_offset_ = ReloadBytecodeOffset();
   }
   return bytecode_offset_.value();
 }

-Node* InterpreterAssembler::ReloadBytecodeOffset() {
+TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
   TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
   if (operand_scale() != OperandScale::kSingle) {
     // Add one to the offset such that it points to the actual bytecode rather
@@ -102,13 +102,31 @@ Node* InterpreterAssembler::ReloadBytecodeOffset() {
 }

 void InterpreterAssembler::SaveBytecodeOffset() {
-  Node* offset = BytecodeOffset();
+  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
   if (operand_scale() != OperandScale::kSingle) {
-    // Subtract one from the offset such that it points to the Wide / ExtraWide
-    // prefix bytecode.
-    offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
+    // Subtract one from the bytecode_offset such that it points to the Wide /
+    // ExtraWide prefix bytecode.
+    bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
   }
-  StoreAndTagRegister(offset, Register::bytecode_offset());
+  int store_offset =
+      Register::bytecode_offset().ToOperand() * kSystemPointerSize;
+  TNode<RawPtrT> base = GetInterpretedFramePointer();
+
+  if (SmiValuesAre32Bits()) {
+    int zero_offset = store_offset + 4;
+    int payload_offset = store_offset;
+#if V8_TARGET_LITTLE_ENDIAN
+    std::swap(zero_offset, payload_offset);
+#endif
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+                        IntPtrConstant(zero_offset), Int32Constant(0));
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+                        IntPtrConstant(payload_offset),
+                        TruncateIntPtrToInt32(bytecode_offset));
+  } else {
+    StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
+                        IntPtrConstant(store_offset), SmiTag(bytecode_offset));
+  }
 }

 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
@@ -239,8 +257,18 @@ TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
 }

 TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
-  return LoadAndUntagSmi(GetInterpretedFramePointer(),
-                         reg.ToOperand() * kSystemPointerSize);
+  TNode<RawPtrT> base = GetInterpretedFramePointer();
+  int index = reg.ToOperand() * kSystemPointerSize;
+  if (SmiValuesAre32Bits()) {
+#if V8_TARGET_LITTLE_ENDIAN
+    index += 4;
+#endif
+    return ChangeInt32ToIntPtr(
+        Load(MachineType::Int32(), base, IntPtrConstant(index)));
+  } else {
+    return SmiToIntPtr(
+        Load(MachineType::TaggedSigned(), base, IntPtrConstant(index)));
+  }
 }

 TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
@@ -300,11 +328,6 @@ void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
                       RegisterFrameOffset(reg_index), value);
 }

-void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
-  int offset = reg.ToOperand() * kSystemPointerSize;
-  StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
-}
-
 void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
                                                        int operand_index) {
   StoreRegister(value,
@@ -1292,19 +1315,22 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
   Comment("] UpdateInterruptBudget");
 }

-Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }
+TNode<IntPtrT> InterpreterAssembler::Advance() {
+  return Advance(CurrentBytecodeSize());
+}

-Node* InterpreterAssembler::Advance(int delta) {
+TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
   return Advance(IntPtrConstant(delta));
 }

-Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
+TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta,
+                                             bool backward) {
 #ifdef V8_TRACE_IGNITION
   TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
 #endif
-  TNode<WordT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
-                                      : IntPtrAdd(BytecodeOffset(), delta);
-  bytecode_offset_.Bind(next_offset);
+  TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
+                                        : IntPtrAdd(BytecodeOffset(), delta);
+  bytecode_offset_ = next_offset;
   return next_offset;
 }

@@ -119,7 +119,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   std::pair<compiler::TNode<Object>, compiler::TNode<Object>>
   LoadRegisterPairAtOperandIndex(int operand_index);
   void StoreRegister(compiler::Node* value, Register reg);
-  void StoreAndTagRegister(compiler::Node* value, Register reg);
   void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
   void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
                                        compiler::Node* value2,
@@ -269,7 +268,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   void MaybeDropFrames(compiler::Node* context);

   // Returns the offset from the BytecodeArrayPointer of the current bytecode.
-  compiler::Node* BytecodeOffset();
+  TNode<IntPtrT> BytecodeOffset();

  protected:
   Bytecode bytecode() const { return bytecode_; }
@@ -291,7 +290,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {

   // Returns the frame pointer for the interpreted frame of the function being
   // interpreted.
-  compiler::Node* GetInterpretedFramePointer();
+  TNode<RawPtrT> GetInterpretedFramePointer();

   // Operations on registers.
   compiler::TNode<IntPtrT> RegisterLocation(Register reg);
@@ -386,16 +385,16 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   // Save the bytecode offset to the interpreter frame.
   void SaveBytecodeOffset();
   // Reload the bytecode offset from the interpreter frame.
-  Node* ReloadBytecodeOffset();
+  TNode<IntPtrT> ReloadBytecodeOffset();

   // Updates and returns BytecodeOffset() advanced by the current bytecode's
   // size. Traces the exit of the current bytecode.
-  compiler::Node* Advance();
+  TNode<IntPtrT> Advance();

   // Updates and returns BytecodeOffset() advanced by delta bytecodes.
   // Traces the exit of the current bytecode.
-  compiler::Node* Advance(int delta);
-  compiler::Node* Advance(compiler::Node* delta, bool backward = false);
+  TNode<IntPtrT> Advance(int delta);
+  TNode<IntPtrT> Advance(SloppyTNode<IntPtrT> delta, bool backward = false);

   // Load the bytecode at |bytecode_offset|.
   compiler::TNode<WordT> LoadBytecode(compiler::Node* bytecode_offset);
@@ -425,9 +424,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {

   Bytecode bytecode_;
   OperandScale operand_scale_;
-  CodeStubAssembler::Variable interpreted_frame_pointer_;
+  TVariable<RawPtrT> interpreted_frame_pointer_;
   CodeStubAssembler::Variable bytecode_array_;
-  CodeStubAssembler::Variable bytecode_offset_;
+  TVariable<IntPtrT> bytecode_offset_;
   CodeStubAssembler::Variable dispatch_table_;
   CodeStubAssembler::Variable accumulator_;
   AccumulatorUse accumulator_use_;