[maglev] Add internalized string compare fast-path
- Rename TryBuildCompareOperationBranch to TryBuildCompareOperation
- Add CheckedInternalizedString conversion Node that checks for string inputs and extracts internalized Strings from ThinStrings
- Add BranchIfReferenceCompare Node
- Add runtime functions to create internalized and thin Strings
- Add deopt check to test/mjsunit/maglev/int32-branch.js

Bug: v8:7700
Change-Id: I0073c24fad9e3231c985153cd27b0b8fe6ee56f0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3664498
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81361}
Commit c4301c0489 (parent d265b5f8b6)
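What the fast path buys: when feedback says both compare operands are internalized strings, equality can be decided by comparing the two tagged pointers directly, with no call into the generic equality builtin. A minimal sketch of the scenario this CL targets, using the natives-syntax helpers that also appear in the tests below (the function name isFoo is hypothetical):

// Run in d8 with:
// --allow-natives-syntax --maglev --no-stress-opt --no-always-turbofan
function isFoo(s) {
  return s === "foo";  // string literals are internalized
}
%PrepareFunctionForOptimization(isFoo);
isFoo("foo");                  // records internalized-string compare feedback
%OptimizeMaglevOnNextCall(isFoo);
isFoo("foo");                  // compiles to CheckedInternalizedString inputs
                               // feeding a BranchIfReferenceCompare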
@@ -416,9 +416,10 @@ void MaglevGraphBuilder::VisitBinarySmiOperation() {
   BuildGenericBinarySmiOperationNode<kOperation>();
 }
 
-bool MaglevGraphBuilder::TryBuildCompareOperationBranch(Operation operation,
-                                                        ValueNode* left,
-                                                        ValueNode* right) {
+template <typename CompareControlNode>
+bool MaglevGraphBuilder::TryBuildCompareOperation(Operation operation,
+                                                  ValueNode* left,
+                                                  ValueNode* right) {
   // Don't emit the shortcut branch if the next bytecode is a merge target.
   if (IsOffsetAMergePoint(next_offset())) return false;
 
@@ -456,7 +457,7 @@ bool MaglevGraphBuilder::TryBuildCompareOperationBranch(Operation operation,
     return false;
   }
 
-  BasicBlock* block = FinishBlock<BranchIfInt32Compare>(
+  BasicBlock* block = FinishBlock<CompareControlNode>(
       next_offset(), {left, right}, operation, &jump_targets_[true_offset],
       &jump_targets_[false_offset]);
   MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
@@ -476,7 +477,8 @@ void MaglevGraphBuilder::VisitCompareOperation() {
         left = LoadRegisterInt32(0);
         right = GetAccumulatorInt32();
       }
-      if (TryBuildCompareOperationBranch(kOperation, left, right)) {
+      if (TryBuildCompareOperation<BranchIfInt32Compare>(kOperation, left,
+                                                         right)) {
         return;
       }
       SetAccumulator(
@@ -496,6 +498,21 @@ void MaglevGraphBuilder::VisitCompareOperation() {
       // return;
       }
       break;
+    case CompareOperationHint::kInternalizedString:
+      DCHECK(kOperation == Operation::kEqual ||
+             kOperation == Operation::kStrictEqual);
+      ValueNode *left, *right;
+      if (IsRegisterEqualToAccumulator(0)) {
+        left = right = LoadRegister<CheckedInternalizedString>(0);
+      } else {
+        left = LoadRegister<CheckedInternalizedString>(0);
+        right = GetAccumulator<CheckedInternalizedString>();
+      }
+      if (TryBuildCompareOperation<BranchIfReferenceCompare>(kOperation, left,
+                                                             right)) {
+        return;
+      }
+      break;
     default:
       // Fallback to generic node.
       break;
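The new kInternalizedString case relies on the feedback hint staying precise: CheckedInternalizedString eagerly deopts on any input it cannot prove internalized (after unwrapping ThinStrings). A hedged sketch of the deopt path, modeled on the new string-compare.js test added at the end of this CL (the function name eq is hypothetical):

function eq(x, y) { return x === y ? 1 : 0; }
%PrepareFunctionForOptimization(eq);
eq("a", "b");                  // only internalized operands seen so far
%OptimizeMaglevOnNextCall(eq);
eq("a", "a");                  // stays on the reference-compare fast path
let cons = "1234" + "1234";    // runtime concatenation: not internalized
eq(cons, "12341234");          // CheckedInternalizedString deopts here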
@@ -401,6 +401,12 @@ class MaglevGraphBuilder {
     return GetTaggedValueHelper(reg, value);
   }
 
+  template <typename ConversionNodeT>
+  ValueNode* GetValue(interpreter::Register reg) {
+    ValueNode* value = current_interpreter_frame_.get(reg);
+    return AddNewConversionNode<ConversionNodeT>(reg, value);
+  }
+
   ValueNode* GetInt32(interpreter::Register reg) {
     ValueNode* value = current_interpreter_frame_.get(reg);
     switch (value->properties().value_representation()) {
@@ -447,6 +453,12 @@ class MaglevGraphBuilder {
     UNREACHABLE();
   }
 
+  template <typename ConversionNodeT>
+  ValueNode* GetAccumulator() {
+    return GetValue<ConversionNodeT>(
+        interpreter::Register::virtual_accumulator());
+  }
+
   ValueNode* GetAccumulatorTagged() {
     return GetTaggedValue(interpreter::Register::virtual_accumulator());
   }
@@ -465,6 +477,12 @@ class MaglevGraphBuilder {
         current_interpreter_frame_.accumulator();
   }
 
+  template <typename ConversionNodeT>
+  ValueNode* LoadRegister(int operand_index) {
+    return GetValue<ConversionNodeT>(
+        iterator_.GetRegisterOperand(operand_index));
+  }
+
   ValueNode* LoadRegisterTagged(int operand_index) {
     return GetTaggedValue(iterator_.GetRegisterOperand(operand_index));
   }
@@ -661,8 +679,9 @@ class MaglevGraphBuilder {
   template <Operation kOperation>
   void VisitBinarySmiOperation();
 
-  bool TryBuildCompareOperationBranch(Operation operation, ValueNode* left,
-                                      ValueNode* right);
+  template <typename CompareControlNode>
+  bool TryBuildCompareOperation(Operation operation, ValueNode* left,
+                                ValueNode* right);
   template <Operation kOperation>
   void VisitCompareOperation();
 
@@ -88,6 +88,7 @@ class MaglevGraphVerifier {
       case Opcode::kLoadTaggedField:
       // TODO(victorgomes): Can we check that the input is actually a map?
       case Opcode::kCheckMaps:
+      case Opcode::kCheckedInternalizedString:
       // TODO(victorgomes): Can we check that the input is Boolean?
       case Opcode::kBranchIfToBooleanTrue:
       case Opcode::kBranchIfTrue:
@@ -165,6 +166,11 @@ class MaglevGraphVerifier {
       CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
       CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
       break;
+    case Opcode::kBranchIfReferenceCompare:
+      DCHECK_EQ(node->input_count(), 2);
+      CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+      CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
+      break;
     case Opcode::kFloat64Add:
     case Opcode::kFloat64Subtract:
     case Opcode::kFloat64Multiply:
@@ -671,6 +671,51 @@ void CheckMaps::PrintParams(std::ostream& os,
   os << "(" << *map().object() << ")";
 }
 
+void CheckedInternalizedString::AllocateVreg(
+    MaglevVregAllocationState* vreg_state) {
+  UseRegister(object_input());
+  set_temporaries_needed(1);
+  DefineSameAsFirst(vreg_state, this);
+}
+void CheckedInternalizedString::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                             const ProcessingState& state) {
+  Register object = ToRegister(object_input());
+  RegList temps = temporaries();
+  Register map_tmp = temps.PopFirst();
+
+  Condition is_smi = __ CheckSmi(object);
+  EmitEagerDeoptIf(is_smi, code_gen_state, this);
+
+  __ LoadMap(map_tmp, object);
+  __ RecordComment("Test IsInternalizedString");
+  __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
+           Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+  static_assert((kStringTag | kInternalizedTag) == 0);
+  JumpToDeferredIf(
+      not_zero, code_gen_state,
+      [](MaglevCodeGenState* code_gen_state, Label* return_label,
+         Register object, CheckedInternalizedString* node,
+         EagerDeoptInfo* deopt_info, Register map_tmp) {
+        __ RecordComment("Deferred Test IsThinString");
+        __ movw(map_tmp, FieldOperand(map_tmp, Map::kInstanceTypeOffset));
+        static_assert(kThinStringTagBit > 0);
+        __ testb(map_tmp, Immediate(kThinStringTagBit));
+        __ j(zero, &deopt_info->deopt_entry_label);
+        __ LoadTaggedPointerField(
+            object, FieldOperand(object, ThinString::kActualOffset));
+        if (FLAG_debug_code) {
+          __ RecordComment("DCHECK IsInternalizedString");
+          __ LoadMap(map_tmp, object);
+          __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
+                   Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+          static_assert((kStringTag | kInternalizedTag) == 0);
+          __ Check(zero, AbortReason::kUnexpectedValue);
+        }
+        __ jmp(return_label);
+      },
+      object, this, eager_deopt_info(), map_tmp);
+}
+
 void LoadTaggedField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
   UseRegister(object_input());
   DefineAsRegister(vreg_state, this);
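Note that the deferred path above does not deopt for ThinStrings: it follows ThinString::kActualOffset to the underlying internalized string, so a thin input keeps the code optimized. A sketch mirroring the new test's thin-string cases, using the runtime helper added later in this CL (the function name eq16 is hypothetical):

let thin = %ConstructThinString("1234" + "1234" + "1234" + "1234");
function eq16(x, y) { return x === y ? 1 : 0; }
%PrepareFunctionForOptimization(eq16);
eq16("a", "b");                          // internalized-string feedback
%OptimizeMaglevOnNextCall(eq16);
assertEquals(1, eq16(thin, "1234123412341234"));  // thin input is unwrapped...
assertTrue(isMaglevved(eq16));                    // ...and does not deopt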
@@ -1762,6 +1807,37 @@ void BranchIfInt32Compare::PrintParams(
   os << "(" << operation_ << ")";
 }
 
+void BranchIfReferenceCompare::AllocateVreg(
+    MaglevVregAllocationState* vreg_state) {
+  UseRegister(left_input());
+  UseRegister(right_input());
+}
+void BranchIfReferenceCompare::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                            const ProcessingState& state) {
+  Register left = ToRegister(left_input());
+  Register right = ToRegister(right_input());
+
+  auto* next_block = state.next_block();
+  __ cmp_tagged(left, right);
+  // We don't have any branch probability information, so try to jump
+  // over whatever the next block emitted is.
+  if (if_false() == next_block) {
+    // Jump over the false block if true, otherwise fall through into it.
+    __ j(ConditionFor(operation_), if_true()->label());
+  } else {
+    // Jump to the false block if true.
+    __ j(NegateCondition(ConditionFor(operation_)), if_false()->label());
+    // Jump to the true block if it's not the next block.
+    if (if_true() != next_block) {
+      __ jmp(if_true()->label());
+    }
+  }
+}
+void BranchIfReferenceCompare::PrintParams(
+    std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+  os << "(" << operation_ << ")";
+}
+
 void BranchIfToBooleanTrue::AllocateVreg(
     MaglevVregAllocationState* vreg_state) {
   UseFixed(condition_input(),
@@ -131,6 +131,7 @@ class CompactInterpreterFrameState;
   V(RegisterInput)             \
   V(CheckedSmiTag)             \
   V(CheckedSmiUntag)           \
+  V(CheckedInternalizedString) \
   V(ChangeInt32ToFloat64)      \
   V(Float64Box)                \
   V(CheckedFloat64Unbox)       \
@@ -152,6 +153,7 @@ class CompactInterpreterFrameState;
 #define CONDITIONAL_CONTROL_NODE_LIST(V) \
   V(BranchIfTrue)                        \
   V(BranchIfToBooleanTrue)               \
+  V(BranchIfReferenceCompare)            \
   V(BranchIfInt32Compare)                \
   V(BranchIfFloat64Compare)
@@ -1653,9 +1655,6 @@ class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
   explicit CheckMaps(uint32_t bitfield, const compiler::MapRef& map)
       : Base(bitfield), map_(map) {}
 
-  // TODO(verwaest): This just calls in deferred code, so probably we'll need to
-  // mark that to generate stack maps. Mark as call so we at least clear the
-  // registers since we currently don't properly spill either.
   static constexpr OpProperties kProperties =
       OpProperties::EagerDeopt() | OpProperties::Call();
 
@@ -1672,6 +1671,27 @@ class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
   const compiler::MapRef map_;
 };
 
+class CheckedInternalizedString
+    : public FixedInputValueNodeT<1, CheckedInternalizedString> {
+  using Base = FixedInputValueNodeT<1, CheckedInternalizedString>;
+
+ public:
+  explicit CheckedInternalizedString(uint32_t bitfield) : Base(bitfield) {
+    CHECK_EQ(properties().value_representation(), ValueRepresentation::kTagged);
+  }
+
+  static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
+                                              OpProperties::TaggedValue() |
+                                              OpProperties::ConversionNode();
+
+  static constexpr int kObjectIndex = 0;
+  Input& object_input() { return Node::input(kObjectIndex); }
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
 class LoadTaggedField : public FixedInputValueNodeT<1, LoadTaggedField> {
   using Base = FixedInputValueNodeT<1, LoadTaggedField>;
 
@@ -2439,6 +2459,29 @@ class BranchIfFloat64Compare
   Operation operation_;
 };
 
+class BranchIfReferenceCompare
+    : public ConditionalControlNodeT<2, BranchIfReferenceCompare> {
+  using Base = ConditionalControlNodeT<2, BranchIfReferenceCompare>;
+
+ public:
+  static constexpr int kLeftIndex = 0;
+  static constexpr int kRightIndex = 1;
+  Input& left_input() { return NodeBase::input(kLeftIndex); }
+  Input& right_input() { return NodeBase::input(kRightIndex); }
+
+  explicit BranchIfReferenceCompare(uint32_t bitfield, Operation operation,
+                                    BasicBlockRef* if_true_refs,
+                                    BasicBlockRef* if_false_refs)
+      : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+  Operation operation_;
+};
+
 }  // namespace maglev
 }  // namespace internal
 }  // namespace v8
@@ -39,6 +39,13 @@ static_assert((kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
 static_assert((kSlicedStringTag & kIsIndirectStringMask) ==
               kIsIndirectStringTag);
 static_assert((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+const uint32_t kThinStringTagBit = 1 << 2;
+// Assert that the kThinStringTagBit is only used in kThinStringTag.
+static_assert((kSeqStringTag & kThinStringTagBit) == 0);
+static_assert((kConsStringTag & kThinStringTagBit) == 0);
+static_assert((kExternalStringTag & kThinStringTagBit) == 0);
+static_assert((kSlicedStringTag & kThinStringTagBit) == 0);
+static_assert((kThinStringTag & kThinStringTagBit) == kThinStringTagBit);
 
 // For strings, bit 3 indicates whether the string consists of two-byte
 // characters or one-byte characters.
@@ -143,6 +143,34 @@ RUNTIME_FUNCTION(Runtime_ConstructSlicedString) {
   return *sliced_string;
 }
 
+RUNTIME_FUNCTION(Runtime_ConstructInternalizedString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<String> string = args.at<String>(0);
+  CHECK(string->IsOneByteRepresentation());
+  Handle<String> internalized = isolate->factory()->InternalizeString(string);
+  CHECK(string->IsInternalizedString());
+  return *internalized;
+}
+
+RUNTIME_FUNCTION(Runtime_ConstructThinString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<String> string = args.at<String>(0);
+  CHECK(string->IsOneByteRepresentation());
+  if (!string->IsConsString()) {
+    const bool kIsOneByte = true;
+    string =
+        isolate->factory()->NewConsString(isolate->factory()->empty_string(),
+                                          string, string->length(), kIsOneByte);
+  }
+  CHECK(string->IsConsString());
+  Handle<String> internalized = isolate->factory()->InternalizeString(string);
+  CHECK_NE(*internalized, *string);
+  CHECK(string->IsThinString());
+  return *string;
+}
+
 RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
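These two helpers exist so the test can create each string shape deterministically: %ConstructInternalizedString internalizes a sequential string, while %ConstructThinString internalizes a cons string, which leaves behind a ThinString pointing at the internalized copy. A usage sketch (d8 with --allow-natives-syntax), following the new string-compare.js test:

let internalized = %ConstructInternalizedString("1234123412341234");
let thin = %ConstructThinString("1234" + "1234" + "1234" + "1234");
// Different heap objects, but the thin string forwards to the internalized
// copy, so they compare equal by value:
assertEquals(internalized, thin);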
@@ -1248,8 +1276,8 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
 RUNTIME_FUNCTION(Runtime_HaveSameMap) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(2, args.length());
-  auto obj1 = JSObject::cast(args[0]);
-  auto obj2 = JSObject::cast(args[1]);
+  auto obj1 = HeapObject::cast(args[0]);
+  auto obj2 = HeapObject::cast(args[1]);
   return isolate->heap()->ToBoolean(obj1.map() == obj2.map());
 }
 
@@ -1635,6 +1663,13 @@ RUNTIME_FUNCTION(Runtime_IsSharedString) {
                                     Handle<String>::cast(obj)->IsShared());
 }
 
+RUNTIME_FUNCTION(Runtime_IsInternalizedString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<HeapObject> obj = args.at<HeapObject>(0);
+  return isolate->heap()->ToBoolean(obj->IsInternalizedString());
+}
+
 RUNTIME_FUNCTION(Runtime_SharedGC) {
   SealHandleScope scope(isolate);
   isolate->heap()->CollectSharedGarbage(GarbageCollectionReason::kTesting);
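%IsInternalizedString checks the instance type of the object itself, so it distinguishes a ThinString from its internalized target; the new test relies on exactly that. Continuing the sketch above:

assertTrue(%IsInternalizedString(internalized));
assertFalse(%IsInternalizedString(thin));  // the thin wrapper is not internalized
assertFalse(%IsInternalizedString("1234" + "1234" + "1234" + "1234"));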
@@ -488,7 +488,9 @@ namespace internal {
   F(CompleteInobjectSlackTracking, 1, 1)      \
   F(ConstructConsString, 2, 1)                \
   F(ConstructDouble, 2, 1)                    \
+  F(ConstructInternalizedString, 1, 1)        \
   F(ConstructSlicedString, 2, 1)              \
+  F(ConstructThinString, 1, 1)                \
   F(DebugPrint, 1, 1)                         \
   F(DebugPrintPtr, 1, 1)                      \
   F(DebugTrace, 0, 1)                         \
@@ -539,6 +541,7 @@ namespace internal {
   F(IsConcatSpreadableProtector, 0, 1)        \
   F(IsConcurrentRecompilationSupported, 0, 1) \
   F(IsDictPropertyConstTrackingEnabled, 0, 1) \
+  F(IsInternalizedString, 1, 1)               \
   F(IsSameHeapObject, 2, 1)                   \
   F(IsSharedString, 1, 1)                     \
   F(MapIteratorProtector, 0, 1)               \
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --allow-natives-syntax --maglev --no-stress-opt
+// Flags: --allow-natives-syntax --maglev --no-stress-opt --no-always-turbofan
 
 function foo(x,y) {
   if (x < y) {
@@ -17,6 +17,10 @@ assertEquals(24, foo(6,2));
 %OptimizeMaglevOnNextCall(foo);
 assertEquals(42, foo(1,2));
 assertEquals(24, foo(6,2));
+assertTrue(isMaglevved(foo));
+assertEquals(42, foo(1.1, 2.2));
+assertEquals(24, foo(6.6, 2.2));
+assertUnoptimized(foo);
 
 
 function bar(x,y) {
@@ -32,3 +36,7 @@ assertEquals(24, bar(6,2));
 %OptimizeMaglevOnNextCall(bar);
 assertEquals(42, bar(1,2));
 assertEquals(24, bar(6,2));
+assertTrue(isMaglevved(bar));
+assertEquals(42, bar(1.1, 2.2));
+assertEquals(24, bar(6.6, 2.2));
+assertUnoptimized(bar);
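These added assertions are the deopt check named in the commit message: after isMaglevved confirms the Maglev code is installed, float arguments flow into branches compiled with Int32 feedback, and assertUnoptimized expects the resulting eager deopt to discard the code. The new --no-always-turbofan flag presumably keeps Turbofan from replacing the Maglev code first, so isMaglevved and assertUnoptimized observe the intended tier.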
test/mjsunit/maglev/string-compare.js (new file, 117 lines)
@@ -0,0 +1,117 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --allow-natives-syntax --maglev --no-stress-opt --no-always-turbofan
// Flags: --no-always-use-string-forwarding-table

let internalized1234 = %ConstructInternalizedString("1234123412341234");
let nonInternalized1234 = "1234" + "1234" + "1234" + "1234";
let thin1234 = %ConstructThinString("1234" + "1234" + "1234" + "1234");

let uniqueId = 0;

function warmUpMaglevTestFn(test_fn_src) {
  // Create a fresh and uncached function.
  test_fn_src = test_fn_src.toString();
  const pattern = '(x, y) {\n';
  assertTrue(test_fn_src.includes(pattern));
  let parts = test_fn_src.split(pattern);
  assertEquals(parts.length, 2);
  let test_fn = new Function('x', 'y', `{/*${uniqueId++}*/\n${parts[1]}`);

  assertUnoptimized(test_fn);
  %PrepareFunctionForOptimization(test_fn);

  // Warm up with internalized strings.
  assertEquals(0, test_fn("1", "2"));
  assertEquals(1, test_fn("2", "2"));
  assertEquals(1, test_fn(internalized1234, internalized1234));

  %OptimizeMaglevOnNextCall(test_fn);
  assertEquals(0, test_fn("1", "2"));
  assertTrue(isMaglevved(test_fn));
  return test_fn;
}

function test(test_fn_src, is_strict = false) {
  assertEquals(internalized1234, nonInternalized1234);
  assertTrue(1234123412341234 == nonInternalized1234);
  assertTrue(%IsInternalizedString(internalized1234));
  assertFalse(%IsInternalizedString(nonInternalized1234));
  assertTrue(%IsInternalizedString(internalized1234));
  assertFalse(%IsInternalizedString(thin1234));

  let test_fn = warmUpMaglevTestFn(test_fn_src);
  assertEquals(1, test_fn("1", "1"));
  assertTrue(isMaglevved(test_fn));

  assertEquals(0, test_fn(internalized1234, "1"));
  assertTrue(isMaglevved(test_fn));

  assertEquals(1, test_fn(thin1234, "1234123412341234"));
  assertTrue(isMaglevved(test_fn));

  assertEquals(1, test_fn(thin1234, thin1234));
  assertTrue(isMaglevved(test_fn));

  assertEquals(1, test_fn(internalized1234, "1234123412341234"));
  assertTrue(isMaglevved(test_fn));

  if (is_strict) {
    assertEquals(0, test_fn(internalized1234, 1234123412341234));
    assertUnoptimized(test_fn);
  } else {
    assertEquals(1, test_fn(internalized1234, 1234123412341234));
    assertUnoptimized(test_fn);
  }

  assertEquals(1, test_fn(internalized1234, "1234123412341234"));

  test_fn = warmUpMaglevTestFn(test_fn_src);
  assertEquals(1, test_fn(nonInternalized1234, "1234123412341234"));
  assertUnoptimized(test_fn);
  assertEquals(1, test_fn(nonInternalized1234, "1234123412341234"));

  test_fn = warmUpMaglevTestFn(test_fn_src);
  assertEquals(1, test_fn(nonInternalized1234, nonInternalized1234));
  assertUnoptimized(test_fn);
  assertEquals(1, test_fn(nonInternalized1234, nonInternalized1234));

  test_fn = warmUpMaglevTestFn(test_fn_src);
  assertEquals(0, test_fn(nonInternalized1234, "1"));
  assertUnoptimized(test_fn);
  assertEquals(0, test_fn(nonInternalized1234, "1"));
}

function equals_fn(x, y) {
  if (x == y) {
    return 1;
  }
  return 0;
}
test(equals_fn);

function not_equals_fn(x, y) {
  if (x != y) {
    return 0;
  }
  return 1;
}
test(not_equals_fn);

function strict_equals_fn(x, y) {
  if (x === y) {
    return 1;
  }
  return 0;
}
test(strict_equals_fn, true);

function strict_not_equals_fn(x, y) {
  if (x !== y) {
    return 0;
  }
  return 1;
}
test(strict_not_equals_fn, true);