diff --git a/src/builtins/builtins-regexp-gen.cc b/src/builtins/builtins-regexp-gen.cc
index 6a29033a1c..603f50d65a 100644
--- a/src/builtins/builtins-regexp-gen.cc
+++ b/src/builtins/builtins-regexp-gen.cc
@@ -1336,10 +1336,10 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
   // 9. Set iterator.[[Done]] to false.
   TNode<Int32T> global_flag =
       Word32Shl(ReinterpretCast<Int32T>(global),
-                Int32Constant(JSRegExpStringIterator::GlobalBit::kShift));
+                Int32Constant(JSRegExpStringIterator::kGlobalBit));
   TNode<Int32T> unicode_flag =
       Word32Shl(ReinterpretCast<Int32T>(full_unicode),
-                Int32Constant(JSRegExpStringIterator::UnicodeBit::kShift));
+                Int32Constant(JSRegExpStringIterator::kUnicodeBit));
   TNode<Int32T> iterator_flags = Word32Or(global_flag, unicode_flag);
   StoreObjectFieldNoWriteBarrier(iterator, JSRegExpStringIterator::kFlagsOffset,
                                  SmiFromInt32(iterator_flags));
diff --git a/src/builtins/internal-coverage.tq b/src/builtins/internal-coverage.tq
index 77a431aaa9..65cb207eaa 100644
--- a/src/builtins/internal-coverage.tq
+++ b/src/builtins/internal-coverage.tq
@@ -5,12 +5,15 @@
 
 namespace internal_coverage {
 
+  const kHasCoverageInfo:
+      constexpr int31 generates 'DebugInfo::kHasCoverageInfo';
+
   macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
       CoverageInfo labels IfNoCoverageInfo {
     const shared: SharedFunctionInfo = function.shared_function_info;
     const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
         otherwise goto IfNoCoverageInfo;
 
-    if (!debugInfo.flags.has_coverage_info) goto IfNoCoverageInfo;
+    if ((debugInfo.flags & kHasCoverageInfo) == 0) goto IfNoCoverageInfo;
     return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
   }
diff --git a/src/builtins/regexp-match-all.tq b/src/builtins/regexp-match-all.tq
index 25394b7f79..022f8bc53f 100644
--- a/src/builtins/regexp-match-all.tq
+++ b/src/builtins/regexp-match-all.tq
@@ -104,6 +104,32 @@
     return RegExpPrototypeMatchAllImpl(context, receiver, string);
   }
 
+  const kJSRegExpStringIteratorDone:
+      constexpr int31 generates '1 << JSRegExpStringIterator::kDoneBit';
+  const kJSRegExpStringIteratorGlobal: constexpr int31
+      generates '1 << JSRegExpStringIterator::kGlobalBit';
+  const kJSRegExpStringIteratorUnicode: constexpr int31
+      generates '1 << JSRegExpStringIterator::kUnicodeBit';
+
+  extern macro IsSetSmi(Smi, constexpr int31): bool;
+
+  macro HasDoneFlag(flags: Smi): bool {
+    return IsSetSmi(flags, kJSRegExpStringIteratorDone);
+  }
+
+  macro HasGlobalFlag(flags: Smi): bool {
+    return IsSetSmi(flags, kJSRegExpStringIteratorGlobal);
+  }
+
+  macro HasUnicodeFlag(flags: Smi): bool {
+    return IsSetSmi(flags, kJSRegExpStringIteratorUnicode);
+  }
+
+  macro SetDoneFlag(iterator: JSRegExpStringIterator, flags: Smi) {
+    const newFlags: Smi = flags | kJSRegExpStringIteratorDone;
+    iterator.flags = newFlags;
+  }
+
   // https://tc39.github.io/proposal-string-matchall/
   // %RegExpStringIteratorPrototype%.next ( )
   transitioning javascript builtin RegExpStringIteratorPrototypeNext(
@@ -121,8 +147,8 @@
     try {
       // 4. If O.[[Done]] is true, then
       //   a. Return ! CreateIterResultObject(undefined, true).
-      const flags: SmiTagged<JSRegExpStringIteratorFlags> = receiver.flags;
-      if (flags.done) goto ReturnEmptyDoneResult;
+      const flags: Smi = receiver.flags;
+      if (HasDoneFlag(flags)) goto ReturnEmptyDoneResult;
 
       // 5. Let R be O.[[iteratingRegExp]].
       const iteratingRegExp: JSReceiver = receiver.iterating_reg_exp;
@@ -154,15 +180,15 @@
       }
       // 11. Else,
       //   b. Else, handle non-global case first.
-      if (!flags.global) {
+      if (!HasGlobalFlag(flags)) {
         // i. Set O.[[Done]] to true.
-        receiver.flags.done = true;
+        SetDoneFlag(receiver, flags);
         // ii. Return ! CreateIterResultObject(match, false).
         return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False);
       }
 
       //   a. If global is true,
-      assert(flags.global);
+      assert(HasGlobalFlag(flags));
       if (isFastRegExp) {
         // i. Let matchStr be ? ToString(? Get(match, "0")).
         const match = UnsafeCast<JSRegExpResult>(match);
@@ -180,7 +206,7 @@
           // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
           // fullUnicode).
           const nextIndex: Smi = AdvanceStringIndexFast(
-              iteratingString, thisIndex, flags.unicode);
+              iteratingString, thisIndex, HasUnicodeFlag(flags));
 
           // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
           FastStoreLastIndex(iteratingRegExp, nextIndex);
@@ -201,8 +227,8 @@
 
           // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
           // fullUnicode).
-          const nextIndex: Number =
-              AdvanceStringIndexSlow(iteratingString, thisIndex, flags.unicode);
+          const nextIndex: Number = AdvanceStringIndexSlow(
+              iteratingString, thisIndex, HasUnicodeFlag(flags));
 
           // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
           SlowStoreLastIndex(iteratingRegExp, nextIndex);
@@ -213,7 +239,7 @@
       // 10. If match is null, then
       label IfNoMatch {
         // a. Set O.[[Done]] to true.
-        receiver.flags.done = true;
+        SetDoneFlag(receiver, flags);
 
         // b. Return ! CreateIterResultObject(undefined, true).
         goto ReturnEmptyDoneResult;
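Note on the restored flag scheme: with the Torque bitfield struct gone, the iterator's `flags` Smi is manipulated through hand-written masks (`1 << kDoneBit`, and so on) via `IsSetSmi`. Below is a minimal standalone sketch of that pattern, using a plain `int` in place of the tagged Smi payload; the bit constants mirror the ones re-added elsewhere in this CL, everything else is illustrative and not V8 code.

```cpp
// Standalone sketch (not V8 code) of the restored flag scheme: the iterator's
// flags live in a Smi and are tested with hand-written masks. A plain int
// stands in for the untagged flag word; real Smis also carry a tag bit (and,
// on some configurations, a shift), which the Torque Smi operators hide.
#include <cassert>

namespace {
constexpr int kDoneBit = 0;     // mirrors JSRegExpStringIterator::kDoneBit
constexpr int kGlobalBit = 1;   // mirrors JSRegExpStringIterator::kGlobalBit
constexpr int kUnicodeBit = 2;  // mirrors JSRegExpStringIterator::kUnicodeBit

// Mirrors the Torque IsSetSmi(flags, 1 << bit) pattern.
bool IsSet(int flags, int mask) { return (flags & mask) != 0; }
int SetDone(int flags) { return flags | (1 << kDoneBit); }
}  // namespace

int main() {
  int flags = (1 << kGlobalBit) | (1 << kUnicodeBit);  // a /./gu-style regexp
  assert(IsSet(flags, 1 << kGlobalBit));
  assert(!IsSet(flags, 1 << kDoneBit));
  flags = SetDone(flags);  // iterator exhausted, as in SetDoneFlag above
  assert(IsSet(flags, 1 << kDoneBit));
  return 0;
}
```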
diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc
index 493741a3eb..8b7fd54af0 100644
--- a/src/codegen/code-stub-assembler.cc
+++ b/src/codegen/code-stub-assembler.cc
@@ -7348,7 +7348,7 @@ TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
 }
 
 TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
-                                              uint32_t shift, uintptr_t mask) {
+                                              uint32_t shift, uint32_t mask) {
   DCHECK_EQ((mask >> shift) << shift, mask);
   return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
                           IntPtrConstant(mask >> shift)));
@@ -7367,7 +7367,7 @@ TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
 
 TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
                                            TNode<UintPtrT> value,
-                                           uint32_t shift, uintptr_t mask) {
+                                           uint32_t shift, uint32_t mask) {
   DCHECK_EQ((mask >> shift) << shift, mask);
   // Ensure the {value} fits fully in the mask.
   CSA_ASSERT(this,
diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h
index 12d6a4c0e5..778960d36c 100644
--- a/src/codegen/code-stub-assembler.h
+++ b/src/codegen/code-stub-assembler.h
@@ -2814,7 +2814,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // Decodes an unsigned (!) value from |word| to a word-size node.
   TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word, uint32_t shift,
-                             uintptr_t mask);
+                             uint32_t mask);
 
   // Returns a node that contains the updated values of a |BitField|.
   template <typename BitField>
@@ -2850,7 +2850,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Returns a node that contains the updated {value} inside {word} starting
   // at {shift} and fitting in {mask}.
   TNode<WordT> UpdateWord(TNode<WordT> word, TNode<UintPtrT> value,
-                          uint32_t shift, uintptr_t mask);
+                          uint32_t shift, uint32_t mask);
 
   // Returns true if any of the |T|'s bits in given |word32| are set.
   template <typename T>
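The two hunks above revert the `DecodeWord`/`UpdateWord` mask parameter to `uint32_t`. For reference, this is the shift-and-mask arithmetic those helpers emit, as a standalone sketch under the same aligned-mask invariant that the `DCHECK_EQ` enforces (assumed semantics, not V8 code):

```cpp
// Sketch of the decode/update arithmetic the CSA helpers generate, with the
// reverted uint32_t mask parameter. The invariant checked here is the same
// one the DCHECK enforces: |mask| is a contiguous bit run starting at |shift|.
#include <cassert>
#include <cstdint>

uint64_t DecodeWord(uint64_t word, uint32_t shift, uint32_t mask) {
  assert(((mask >> shift) << shift) == mask);  // mask aligned with shift
  return (word >> shift) & (mask >> shift);    // shift down, strip high bits
}

uint64_t UpdateWord(uint64_t word, uint64_t value, uint32_t shift,
                    uint32_t mask) {
  assert(((mask >> shift) << shift) == mask);
  return (word & ~uint64_t{mask}) | ((value << shift) & mask);
}

int main() {
  uint64_t w = 0;
  w = UpdateWord(w, /*value=*/5, /*shift=*/4, /*mask=*/0xF0);
  assert(DecodeWord(w, 4, 0xF0) == 5);
  return 0;
}
```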
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index f24581eec7..b04d6c3d75 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -43,14 +43,6 @@ class Word32Adapter {
     return x.IsWord32Shl();
   }
   template <typename T>
-  static bool IsWordNShr(const T& x) {
-    return x.IsWord32Shr();
-  }
-  template <typename T>
-  static bool IsWordNSar(const T& x) {
-    return x.IsWord32Sar();
-  }
-  template <typename T>
   static bool IsWordNXor(const T& x) {
     return x.IsWord32Xor();
   }
@@ -73,7 +65,6 @@ class Word32Adapter {
   Reduction TryMatchWordNRor(Node* node) { return r_->TryMatchWord32Ror(node); }
 
   Node* IntNConstant(int32_t value) { return r_->Int32Constant(value); }
-  Node* UintNConstant(uint32_t value) { return r_->Uint32Constant(value); }
   Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word32And(lhs, rhs); }
 
  private:
@@ -103,14 +94,6 @@ class Word64Adapter {
     return x.IsWord64Shl();
   }
   template <typename T>
-  static bool IsWordNShr(const T& x) {
-    return x.IsWord64Shr();
-  }
-  template <typename T>
-  static bool IsWordNSar(const T& x) {
-    return x.IsWord64Sar();
-  }
-  template <typename T>
   static bool IsWordNXor(const T& x) {
     return x.IsWord64Xor();
   }
@@ -136,7 +119,6 @@ class Word64Adapter {
   }
 
   Node* IntNConstant(int64_t value) { return r_->Int64Constant(value); }
-  Node* UintNConstant(uint64_t value) { return r_->Uint64Constant(value); }
   Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word64And(lhs, rhs); }
 
  private:
@@ -264,12 +246,6 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
   return quotient;
 }
 
-Node* MachineOperatorReducer::TruncateInt64ToInt32(Node* value) {
-  Node* const node = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
-  Reduction const reduction = ReduceTruncateInt64ToInt32(node);
-  return reduction.Changed() ? reduction.replacement() : node;
-}
-
 // Perform constant folding and strength reduction on machine operators.
 Reduction MachineOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
@@ -321,20 +297,25 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       }
       // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
-      if (m.right().HasValue()) {
-        base::Optional<std::pair<Node*, uint32_t>> replacements;
-        if (m.left().IsTruncateInt64ToInt32()) {
-          replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
-              NodeProperties::GetValueInput(m.left().node(), 0),
-              static_cast<uint32_t>(m.right().Value()));
-        } else {
-          replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
-              m.left().node(), static_cast<uint32_t>(m.right().Value()));
-        }
-        if (replacements) {
-          node->ReplaceInput(0, replacements->first);
-          node->ReplaceInput(1, Uint32Constant(replacements->second));
-          return Changed(node);
+      if (m.left().IsWord32And() && m.right().HasValue()) {
+        Uint32BinopMatcher mand(m.left().node());
+        if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
+            mand.right().HasValue()) {
+          Uint32BinopMatcher mshift(mand.left().node());
+          // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
+          if (mshift.right().HasValue()) {
+            auto shift_bits = mshift.right().Value();
+            auto mask = mand.right().Value();
+            auto rhs = static_cast<uint32_t>(m.right().Value());
+            // Make sure that we won't shift data off the end.
+            if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
+                shift_bits <= base::bits::CountLeadingZeros(rhs)) {
+              node->ReplaceInput(
+                  0, Word32And(mshift.left().node(), mask << shift_bits));
+              node->ReplaceInput(1, Int32Constant(rhs << shift_bits));
+              return Changed(node);
+            }
+          }
         }
       }
       break;
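The `Word32Equal` case above now applies the strength reduction `((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)` inline; the `CountLeadingZeros` guards ensure neither constant loses bits when shifted left. A brute-force check of that identity (standalone, not V8 code; the constants are arbitrary picks that satisfy the guard):

```cpp
// Brute-force sanity check of the Word32Equal strength reduction:
//   ((x >> K1) & K2) == K3   =>   (x & (K2 << K1)) == (K3 << K1)
// valid when neither K2 << K1 nor K3 << K1 shifts bits out of 32 bits, which
// is what the CountLeadingZeros guards above establish.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t k1 = 3, k2 = 0x1F, k3 = 0x11;  // guard holds: clz >= k1
  for (uint64_t i = 0; i <= 0xFFFF; ++i) {
    uint32_t x = static_cast<uint32_t>(i * 2654435761u);  // scatter inputs
    bool original = ((x >> k1) & k2) == k3;
    bool reduced = (x & (k2 << k1)) == (k3 << k1);
    assert(original == reduced);  // the shift moves onto the constants
  }
  return 0;
}
```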
@@ -822,8 +803,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
       return NoChange();
     }
-    case IrOpcode::kTruncateInt64ToInt32:
-      return ReduceTruncateInt64ToInt32(node);
+    case IrOpcode::kTruncateInt64ToInt32: {
+      Int64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+      if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
     case IrOpcode::kTruncateFloat64ToFloat32: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) {
@@ -879,13 +864,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
   return NoChange();
 }
 
-Reduction MachineOperatorReducer::ReduceTruncateInt64ToInt32(Node* node) {
-  Int64Matcher m(node->InputAt(0));
-  if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
-  if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
-  return NoChange();
-}
-
 Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
   DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
   Int32BinopMatcher m(node);
@@ -1551,20 +1529,6 @@ Reduction MachineOperatorReducer::ReduceWordNOr(Node* node) {
   }
   if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
 
-  // (x & K1) | K2 => x | K2 if K2 has ones for every zero bit in K1.
-  // This case can be constructed by UpdateWord and UpdateWord32 in CSA.
-  if (m.right().HasValue()) {
-    if (A::IsWordNAnd(m.left())) {
-      typename A::IntNBinopMatcher mand(m.left().node());
-      if (mand.right().HasValue()) {
-        if ((m.right().Value() | mand.right().Value()) == -1) {
-          node->ReplaceInput(0, mand.left().node());
-          return Changed(node);
-        }
-      }
-    }
-  }
-
   return a.TryMatchWordNRor(node);
 }
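The `ReduceWordNOr` hunk above drops the rule `(x & K1) | K2 => x | K2` when `K1 | K2 == -1`; the matching unit test is deleted at the end of this diff. The identity itself is easy to sanity-check, since `K2` re-sets every bit that `K1` masks off (standalone verification, not V8 code):

```cpp
// Brute-force check of the deleted Word32Or rule:
//   (x & K1) | K2 == x | K2   whenever   K1 | K2 == -1,
// i.e. K2 has a one in every bit position that K1 zeroes out.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t k1 = 0xFFFF00FF, k2 = 0x0000FF0F;
  assert((k1 | k2) == 0xFFFFFFFFu);  // precondition: K2 covers K1's zeros
  for (uint64_t i = 0; i <= 0xFFFF; ++i) {
    uint32_t x = static_cast<uint32_t>(i * 0x9E3779B9u);  // scatter inputs
    assert(((x & k1) | k2) == (x | k2));  // masked-off bits are re-set by K2
  }
  return 0;
}
```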
@@ -1718,64 +1682,25 @@ Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
   // Reductions involving control flow happen elsewhere. Non-zero inputs are
   // considered true in all conditional ops.
   NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
-  if (condition.IsTruncateInt64ToInt32()) {
-    if (auto replacement =
-            ReduceConditionalN<Word64Adapter>(condition.node())) {
-      NodeProperties::ReplaceValueInput(node, *replacement, 0);
-      return Changed(node);
-    }
-  } else if (auto replacement = ReduceConditionalN<Word32Adapter>(node)) {
-    NodeProperties::ReplaceValueInput(node, *replacement, 0);
-    return Changed(node);
-  }
-  return NoChange();
-}
-
-template <typename WordNAdapter>
-base::Optional<Node*> MachineOperatorReducer::ReduceConditionalN(Node* node) {
-  NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
-  // Branch conditions are 32-bit comparisons against zero, so they are the
-  // opposite of a 32-bit `x == 0` node. To avoid repetition, we can reuse logic
-  // for Word32Equal: if `x == 0` can reduce to `y == 0`, then branch(x) can
-  // reduce to branch(y).
-  auto replacements =
-      ReduceWord32EqualForConstantRhs<WordNAdapter>(condition.node(), 0);
-  if (replacements && replacements->second == 0) return replacements->first;
-  return {};
-}
-
-template <typename WordNAdapter>
-base::Optional<std::pair<Node*, uint32_t>>
-MachineOperatorReducer::ReduceWord32EqualForConstantRhs(Node* lhs,
-                                                        uint32_t rhs) {
-  if (WordNAdapter::IsWordNAnd(NodeMatcher(lhs))) {
-    typename WordNAdapter::UintNBinopMatcher mand(lhs);
-    if ((WordNAdapter::IsWordNShr(mand.left()) ||
-         WordNAdapter::IsWordNSar(mand.left())) &&
+  if (condition.IsWord32And()) {
+    Uint32BinopMatcher mand(condition.node());
+    if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
         mand.right().HasValue()) {
-      typename WordNAdapter::UintNBinopMatcher mshift(mand.left().node());
-      // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
+      Uint32BinopMatcher mshift(mand.left().node());
+      // Branch condition (x >> K1) & K2 => x & (K2 << K1)
       if (mshift.right().HasValue()) {
         auto shift_bits = mshift.right().Value();
         auto mask = mand.right().Value();
-        // Make sure that we won't shift data off the end, and that all of the
-        // data ends up in the lower 32 bits for 64-bit mode.
-        if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
-            shift_bits <= base::bits::CountLeadingZeros(rhs) &&
-            mask << shift_bits <= std::numeric_limits<uint32_t>::max()) {
-          Node* new_input = mshift.left().node();
-          uint32_t new_mask = static_cast<uint32_t>(mask << shift_bits);
-          uint32_t new_rhs = rhs << shift_bits;
-          if (WordNAdapter::WORD_SIZE == 64) {
-            // We can truncate before performing the And.
-            new_input = TruncateInt64ToInt32(new_input);
-          }
-          return std::make_pair(Word32And(new_input, new_mask), new_rhs);
+        // Make sure that we won't shift data off the end.
+        if (shift_bits <= base::bits::CountLeadingZeros(mask)) {
+          NodeProperties::ReplaceValueInput(
+              node, Word32And(mshift.left().node(), mask << shift_bits), 0);
+          return Changed(node);
         }
       }
     }
   }
-  return {};
+  return NoChange();
 }
 
 CommonOperatorBuilder* MachineOperatorReducer::common() const {
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 63a1f2f1ab..53c5d6fa68 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -62,7 +62,6 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Node* Int32Mul(Node* lhs, Node* rhs);
   Node* Int32Div(Node* dividend, int32_t divisor);
   Node* Uint32Div(Node* dividend, uint32_t divisor);
-  Node* TruncateInt64ToInt32(Node* value);
 
   Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
   Reduction ReplaceFloat32(volatile float value) {
@@ -110,7 +109,6 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Reduction ReduceFloat64InsertHighWord32(Node* node);
   Reduction ReduceFloat64Compare(Node* node);
   Reduction ReduceFloat64RoundDown(Node* node);
-  Reduction ReduceTruncateInt64ToInt32(Node* node);
   Reduction ReduceConditional(Node* node);
 
   Graph* graph() const;
@@ -127,18 +125,6 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   template <typename A>
   Reduction ReduceWordNXor(Node* node);
 
-  // Helper for ReduceConditional. Does not perform the actual reduction; just
-  // returns a new Node that could be used as the input to the condition.
-  template <typename WordNAdapter>
-  base::Optional<Node*> ReduceConditionalN(Node* node);
-
-  // Helper for finding a reduced equality condition. Does not perform the
-  // actual reduction; just returns a new pair that could be compared for the
-  // same outcome.
-  template <typename WordNAdapter>
-  base::Optional<std::pair<Node*, uint32_t>> ReduceWord32EqualForConstantRhs(
-      Node* lhs, uint32_t rhs);
-
   MachineGraph* mcgraph_;
   bool allow_signalling_nan_;
 };
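Background for the reducer interface the declarations above belong to: each `Reduce*` method returns a `Reduction`, where `Changed(node)`/`Replace(other)` signal a rewrite and `NoChange()` leaves the node alone. A toy mock of that protocol, using the truncation rule restored inline above as the example (all types and opcodes below are stand-ins, not the real compiler classes):

```cpp
// Toy mock (not V8 code) of the Reduction protocol used throughout
// MachineOperatorReducer: a reduction either carries a replacement node or
// reports no change.
#include <cassert>

struct Node { int opcode; Node* input; };

class Reduction {
 public:
  Reduction() : replacement_(nullptr) {}
  explicit Reduction(Node* r) : replacement_(r) {}
  bool Changed() const { return replacement_ != nullptr; }
  Node* replacement() const { return replacement_; }
 private:
  Node* replacement_;
};

Reduction NoChange() { return Reduction(); }
Reduction Replace(Node* n) { return Reduction(n); }

// Example rule: Truncate(ChangeInt32ToInt64(x)) => x, mirroring the restored
// kTruncateInt64ToInt32 case (opcodes are stand-ins).
enum { kTruncate = 1, kChangeInt32ToInt64 = 2, kOther = 3 };
Reduction ReduceTruncate(Node* node) {
  if (node->input->opcode == kChangeInt32ToInt64)
    return Replace(node->input->input);
  return NoChange();
}

int main() {
  Node x{kOther, nullptr};
  Node change{kChangeInt32ToInt64, &x};
  Node trunc{kTruncate, &change};
  Reduction r = ReduceTruncate(&trunc);
  assert(r.Changed() && r.replacement() == &x);
  return 0;
}
```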
diff --git a/src/objects/js-regexp-string-iterator-inl.h b/src/objects/js-regexp-string-iterator-inl.h
index b0d8e4c5ec..ea44aaae27 100644
--- a/src/objects/js-regexp-string-iterator-inl.h
+++ b/src/objects/js-regexp-string-iterator-inl.h
@@ -17,9 +17,9 @@ namespace internal {
 
 TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator)
 
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, DoneBit::kShift)
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, GlobalBit::kShift)
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, UnicodeBit::kShift)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, kDoneBit)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, kGlobalBit)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, kUnicodeBit)
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/objects/js-regexp-string-iterator.h b/src/objects/js-regexp-string-iterator.h
index 1fdd503072..e54aedbc2b 100644
--- a/src/objects/js-regexp-string-iterator.h
+++ b/src/objects/js-regexp-string-iterator.h
@@ -6,7 +6,6 @@
 #define V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_
 
 #include "src/objects/js-objects.h"
-#include "torque-generated/bit-fields-tq.h"
 
 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -29,7 +28,9 @@ class JSRegExpStringIterator
 
   DECL_PRINTER(JSRegExpStringIterator)
 
-  DEFINE_TORQUE_GENERATED_JS_REG_EXP_STRING_ITERATOR_FLAGS()
+  static const int kDoneBit = 0;
+  static const int kGlobalBit = 1;
+  static const int kUnicodeBit = 2;
 
   TQ_OBJECT_CONSTRUCTORS(JSRegExpStringIterator)
 };
diff --git a/src/objects/js-regexp-string-iterator.tq b/src/objects/js-regexp-string-iterator.tq
index 4daed7af2d..3ab1679699 100644
--- a/src/objects/js-regexp-string-iterator.tq
+++ b/src/objects/js-regexp-string-iterator.tq
@@ -2,17 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-bitfield struct JSRegExpStringIteratorFlags extends uint31 {
-  done: bool: 1 bit;
-  global: bool: 1 bit;
-  unicode: bool: 1 bit;
-}
-
 @generateCppClass
 extern class JSRegExpStringIterator extends JSObject {
   // The [[IteratingRegExp]] internal property.
   iterating_reg_exp: JSReceiver;
   // The [[IteratedString]] internal property.
   iterated_string: String;
-  flags: SmiTagged<JSRegExpStringIteratorFlags>;
+  flags: Smi;
 }
diff --git a/src/torque/class-debug-reader-generator.cc b/src/torque/class-debug-reader-generator.cc
index b89ec85d80..52646dff26 100644
--- a/src/torque/class-debug-reader-generator.cc
+++ b/src/torque/class-debug-reader-generator.cc
@@ -53,7 +53,7 @@ class ValueTypeFieldIterator {
     if (const auto type_wrapped_in_smi =
             Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
       type = *type_wrapped_in_smi;
-      bitfield_start_offset = TargetArchitecture::SmiTagAndShiftSize();
+      bitfield_start_offset = kSmiTagSize + kSmiShiftSize;
     }
     if (const BitFieldStructType* bit_field_struct_type =
             BitFieldStructType::DynamicCast(type)) {
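Context for removing `TargetArchitecture::SmiTagAndShiftSize()`: bitfields packed into a Smi sit above the tag (and shift) bits, so the debug reader offsets them by `kSmiTagSize + kSmiShiftSize`. A standalone illustration of the two common Smi layouts follows; the constants are this sketch's assumptions about the target configuration, not values read from any build:

```cpp
// Illustration (not V8 code) of why bitfield_start_offset exists: a Smi's
// payload starts above its tag bit, plus a 31-bit shift on the classic 64-bit
// layout. Constants below are assumed for the sketch.
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;       // low bit: 0 = Smi, 1 = heap pointer
constexpr int kSmiShiftSize32 = 0;   // 32-bit / pointer-compressed layout
constexpr int kSmiShiftSize64 = 31;  // classic 64-bit layout

int32_t SmiValue32(uint32_t raw) {
  return static_cast<int32_t>(raw) >> (kSmiTagSize + kSmiShiftSize32);
}
int32_t SmiValue64(uint64_t raw) {
  return static_cast<int32_t>(static_cast<int64_t>(raw) >>
                              (kSmiTagSize + kSmiShiftSize64));
}

int main() {
  // The value 5 tagged as a Smi in each layout.
  assert(SmiValue32(5u << 1) == 5);
  assert(SmiValue64(uint64_t{5} << 32) == 5);
  return 0;
}
```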
"DecodeWordFromWord32" : "DecodeWord32"); - decls() << " " << field_type->GetGeneratedTypeName() << " " << result_name + decls() << " " << result_type->GetGeneratedTypeName() << " " << result_name << ";\n"; - - if (smi_tagged_type) { - // If the container is a SMI, then UncheckedCast is insufficient and we must - // use a bit cast. - bit_field_struct = - "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")"; - } - out() << " " << result_name << " = ca_.UncheckedCast<" - << field_type->GetGeneratedTNodeTypeName() + << result_type->GetGeneratedTNodeTypeName() << ">(CodeStubAssembler(state_)." << decoder << "<" - << GetBitFieldSpecialization(struct_type, instruction.bit_field) - << ">(ca_.UncheckedCast<" << struct_word_type << ">(" + << GetBitFieldSpecialization(source_type, instruction.bit_field) + << ">(ca_.UncheckedCast<" << source_word_type << ">(" << bit_field_struct << ")));\n"; } @@ -927,46 +906,25 @@ void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction, std::string bit_field_struct = stack->Pop(); stack->Push(result_name); - const Type* struct_type = instruction.bit_field_struct_type; + const BitFieldStructType* struct_type = instruction.bit_field_struct_type; const Type* field_type = instruction.bit_field.name_and_type.type; - auto smi_tagged_type = - Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric()); - bool struct_is_pointer_size = - IsPointerSizeIntegralType(struct_type) || smi_tagged_type; - DCHECK_IMPLIES(!struct_is_pointer_size, Is32BitIntegralType(struct_type)); - bool field_is_pointer_size = IsPointerSizeIntegralType(field_type); - DCHECK_IMPLIES(!field_is_pointer_size, Is32BitIntegralType(field_type)); - std::string struct_word_type = struct_is_pointer_size ? "WordT" : "Word32T"; - std::string field_word_type = field_is_pointer_size ? "UintPtrT" : "Uint32T"; + bool struct_uintptr = struct_type->IsSubtypeOf(TypeOracle::GetUIntPtrType()); + bool field_uintptr = field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType()); + std::string struct_word_type = struct_uintptr ? "WordT" : "Word32T"; + std::string field_word_type = field_uintptr ? "UintPtrT" : "Uint32T"; std::string encoder = - struct_is_pointer_size - ? (field_is_pointer_size ? "UpdateWord" : "UpdateWord32InWord") - : (field_is_pointer_size ? "UpdateWordInWord32" : "UpdateWord32"); + struct_uintptr ? (field_uintptr ? "UpdateWord" : "UpdateWord32InWord") + : (field_uintptr ? "UpdateWordInWord32" : "UpdateWord32"); decls() << " " << struct_type->GetGeneratedTypeName() << " " << result_name << ";\n"; - - if (smi_tagged_type) { - // If the container is a SMI, then UncheckedCast is insufficient and we must - // use a bit cast. - bit_field_struct = - "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")"; - } - - std::string result_expression = - "CodeStubAssembler(state_)." + encoder + "<" + - GetBitFieldSpecialization(struct_type, instruction.bit_field) + - ">(ca_.UncheckedCast<" + struct_word_type + ">(" + bit_field_struct + - "), ca_.UncheckedCast<" + field_word_type + ">(" + value + "))"; - - if (smi_tagged_type) { - result_expression = - "ca_.BitcastWordToTaggedSigned(" + result_expression + ")"; - } - out() << " " << result_name << " = ca_.UncheckedCast<" - << struct_type->GetGeneratedTNodeTypeName() << ">(" << result_expression - << ");\n"; + << struct_type->GetGeneratedTNodeTypeName() + << ">(CodeStubAssembler(state_)." 
diff --git a/src/torque/global-context.cc b/src/torque/global-context.cc
index 3204a3c5cd..e236de5a93 100644
--- a/src/torque/global-context.cc
+++ b/src/torque/global-context.cc
@@ -24,10 +24,7 @@ GlobalContext::GlobalContext(Ast ast)
 
 TargetArchitecture::TargetArchitecture(bool force_32bit)
     : tagged_size_(force_32bit ? sizeof(int32_t) : kTaggedSize),
-      raw_ptr_size_(force_32bit ? sizeof(int32_t) : kSystemPointerSize),
-      smi_tag_and_shift_size_(
-          kSmiTagSize + (force_32bit ? SmiTagging<kApiInt32Size>::kSmiShiftSize
-                                     : kSmiShiftSize)) {}
+      raw_ptr_size_(force_32bit ? sizeof(int32_t) : kSystemPointerSize) {}
 
 }  // namespace torque
 }  // namespace internal
diff --git a/src/torque/global-context.h b/src/torque/global-context.h
index fbcfe7ca93..bbfbb686ef 100644
--- a/src/torque/global-context.h
+++ b/src/torque/global-context.h
@@ -93,12 +93,10 @@ class TargetArchitecture : public ContextualClass<TargetArchitecture> {
   static size_t RawPtrSize() { return Get().raw_ptr_size_; }
   static size_t MaxHeapAlignment() { return TaggedSize(); }
   static bool ArePointersCompressed() { return TaggedSize() < RawPtrSize(); }
-  static int SmiTagAndShiftSize() { return Get().smi_tag_and_shift_size_; }
 
  private:
   const size_t tagged_size_;
   const size_t raw_ptr_size_;
-  const int smi_tag_and_shift_size_;
 };
 
 }  // namespace torque
diff --git a/src/torque/implementation-visitor.cc b/src/torque/implementation-visitor.cc
index 60ba9cd0b3..7fcb6836f9 100644
--- a/src/torque/implementation-visitor.cc
+++ b/src/torque/implementation-visitor.cc
@@ -2002,20 +2002,6 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
     const BitField& field = bitfield_struct->LookupField(fieldname);
     return LocationReference::BitFieldAccess(reference, field);
   }
-  if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
-          reference.ReferencedType(), TypeOracle::GetSmiTaggedGeneric())) {
-    const BitFieldStructType* bitfield_struct =
-        BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
-    if (bitfield_struct == nullptr) {
-      ReportError(
-          "When a value of type SmiTagged<T> is used in a field access "
-          "expression, T is expected to be a bitfield struct type. Instead, T "
-          "is ",
-          **type_wrapped_in_smi);
-    }
-    const BitField& field = bitfield_struct->LookupField(fieldname);
-    return LocationReference::BitFieldAccess(reference, field);
-  }
   if (reference.IsHeapReference()) {
     VisitResult ref = reference.heap_reference();
     bool is_const;
@@ -2204,8 +2190,9 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
     // First fetch the bitfield struct, then get the bits out of it.
     VisitResult bit_field_struct =
         GenerateFetchFromLocation(reference.bit_field_struct_location());
-    assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
-                                             reference.bit_field()});
+    assembler().Emit(LoadBitFieldInstruction{
+        BitFieldStructType::cast(bit_field_struct.type()),
+        reference.bit_field()});
     return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
   } else {
     if (reference.IsHeapSlice()) {
@@ -2284,8 +2271,9 @@ void ImplementationVisitor::GenerateAssignToLocation(
         GenerateImplicitConvert(reference.ReferencedType(), assignment_value);
     GenerateCopy(bit_field_struct);
     GenerateCopy(converted_value);
-    assembler().Emit(StoreBitFieldInstruction{bit_field_struct.type(),
-                                              reference.bit_field()});
+    assembler().Emit(StoreBitFieldInstruction{
+        BitFieldStructType::cast(bit_field_struct.type()),
+        reference.bit_field()});
     GenerateAssignToLocation(
         reference.bit_field_struct_location(),
         VisitResult(bit_field_struct.type(), assembler().TopRange(1)));
diff --git a/src/torque/instructions.h b/src/torque/instructions.h
index d6526d0a7c..2d5eff71da 100644
--- a/src/torque/instructions.h
+++ b/src/torque/instructions.h
@@ -349,13 +349,14 @@ struct StoreReferenceInstruction : InstructionBase {
 // Pops a bitfield struct; pushes a bitfield value extracted from it.
 struct LoadBitFieldInstruction : InstructionBase {
   TORQUE_INSTRUCTION_BOILERPLATE()
-  LoadBitFieldInstruction(const Type* bit_field_struct_type, BitField bit_field)
+  LoadBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
+                          BitField bit_field)
       : bit_field_struct_type(bit_field_struct_type),
         bit_field(std::move(bit_field)) {}
 
   DefinitionLocation GetValueDefinition() const;
 
-  const Type* bit_field_struct_type;
+  const BitFieldStructType* bit_field_struct_type;
   BitField bit_field;
 };
 
@@ -363,14 +364,14 @@ struct LoadBitFieldInstruction : InstructionBase {
 // containing the updated value.
 struct StoreBitFieldInstruction : InstructionBase {
   TORQUE_INSTRUCTION_BOILERPLATE()
-  StoreBitFieldInstruction(const Type* bit_field_struct_type,
+  StoreBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
                            BitField bit_field)
       : bit_field_struct_type(bit_field_struct_type),
         bit_field(std::move(bit_field)) {}
 
   DefinitionLocation GetValueDefinition() const;
 
-  const Type* bit_field_struct_type;
+  const BitFieldStructType* bit_field_struct_type;
   BitField bit_field;
 };
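The stack effects documented in these instruction comments ("pops a bitfield struct; pushes a bitfield value extracted from it") are the contract the CSA generator code above implements. A toy interpreter step showing the same pop/push discipline (purely illustrative; `BitFieldSpec` and `ExecuteLoadBitField` are invented names, not Torque types):

```cpp
// Toy model (not the Torque CFG machinery) of LoadBitFieldInstruction's
// stack effect: pop the container word, push the decoded field.
#include <cassert>
#include <cstdint>
#include <vector>

struct BitFieldSpec { int shift; uint32_t mask; };

void ExecuteLoadBitField(std::vector<uint32_t>& stack, BitFieldSpec f) {
  uint32_t bit_field_struct = stack.back();  // pop the container word
  stack.pop_back();
  stack.push_back((bit_field_struct & f.mask) >> f.shift);  // push the field
}

int main() {
  std::vector<uint32_t> stack = {0b110u};  // flags word on top of the stack
  ExecuteLoadBitField(stack, {1, 0b10u});  // extract bit 1
  assert(stack.size() == 1 && stack.back() == 1u);
  return 0;
}
```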
diff --git a/src/torque/types.cc b/src/torque/types.cc
index 9800d0a941..90c7b7f5f3 100644
--- a/src/torque/types.cc
+++ b/src/torque/types.cc
@@ -928,17 +928,10 @@ bool IsAllowedAsBitField(const Type* type) {
   // Any integer-ish type, including bools and enums which inherit from integer
   // types, are allowed. Note, however, that we always zero-extend during
   // decoding regardless of signedness.
-  return IsPointerSizeIntegralType(type) || Is32BitIntegralType(type);
-}
-
-bool IsPointerSizeIntegralType(const Type* type) {
-  return type->IsSubtypeOf(TypeOracle::GetUIntPtrType()) ||
-         type->IsSubtypeOf(TypeOracle::GetIntPtrType());
-}
-
-bool Is32BitIntegralType(const Type* type) {
   return type->IsSubtypeOf(TypeOracle::GetUint32Type()) ||
+         type->IsSubtypeOf(TypeOracle::GetUIntPtrType()) ||
         type->IsSubtypeOf(TypeOracle::GetInt32Type()) ||
+         type->IsSubtypeOf(TypeOracle::GetIntPtrType()) ||
         type->IsSubtypeOf(TypeOracle::GetBoolType());
 }
 
diff --git a/src/torque/types.h b/src/torque/types.h
index 02c52edc8c..b3a3298379 100644
--- a/src/torque/types.h
+++ b/src/torque/types.h
@@ -816,8 +816,6 @@ TypeVector LowerParameterTypes(const ParameterTypes& parameter_types,
 base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type);
 bool IsAnyUnsignedInteger(const Type* type);
 bool IsAllowedAsBitField(const Type* type);
-bool IsPointerSizeIntegralType(const Type* type);
-bool Is32BitIntegralType(const Type* type);
 base::Optional<size_t> ExtractSimpleFieldArraySize(
     const ClassType& class_type, Expression* array_size);
 
diff --git a/test/mjsunit/harmony/string-matchAll.js b/test/mjsunit/harmony/string-matchAll.js
index e3b1d5c224..e9b39ba46c 100644
--- a/test/mjsunit/harmony/string-matchAll.js
+++ b/test/mjsunit/harmony/string-matchAll.js
@@ -36,7 +36,7 @@ TestNoMatch('a', 'b');
 
 
 function TestGlobalRegex(regex_or_string) {
-  const iter = 'ab'.matchAll(regex_or_string);
+  const iter = 'ab'.matchAll(/./g);
   let next_result = iter.next();
   assertEquals(['a'], next_result.value);
   assertFalse(next_result.done);
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index eb06d94d6d..c3659032cf 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -763,44 +763,6 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithComparisonAndConstantOne) {
   }
 }
 
-// -----------------------------------------------------------------------------
-// Word32Or
-
-TEST_F(MachineOperatorReducerTest, Word32OrWithWord32And) {
-  Node* const p0 = Parameter(0);
-  TRACED_FOREACH(int32_t, m, kUint32Values) {
-    TRACED_FOREACH(int32_t, rhs, kUint32Values) {
-      // To get better coverage of interesting cases, run this test twice:
-      // once with the mask from kUint32Values, and once with its inverse.
-      for (int32_t mask : {m, ~m}) {
-        Reduction const r = Reduce(graph()->NewNode(
-            machine()->Word32Or(),
-            graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
-            Int32Constant(rhs)));
-        switch (rhs) {
-          case 0:  // x | 0 => x
-            ASSERT_TRUE(r.Changed());
-            EXPECT_THAT(r.replacement(),
-                        IsWord32And(p0, IsInt32Constant(mask)));
-            break;
-          case -1:  // x | -1 => -1
-            ASSERT_TRUE(r.Changed());
-            EXPECT_THAT(r.replacement(), IsInt32Constant(-1));
-            break;
-          default:  // (x & K1) | K2 => x | K2, if K1 | K2 == -1
-            if ((mask | rhs) == -1) {
-              ASSERT_TRUE(r.Changed());
-              EXPECT_THAT(r.replacement(),
-                          IsWord32Or(p0, IsInt32Constant(rhs)));
-            } else {
-              ASSERT_TRUE(!r.Changed());
-            }
-            break;
-        }
-      }
-    }
-  }
-}
 
 // -----------------------------------------------------------------------------
 // Word32Xor