[maglev] Fix NaN handling after Ucomisd

As a drive-by, this also fixes property loads from a Smi: we still need to check that we actually have a Smi before skipping the map check.

Bug: v8:7700, chromium:1403280, chromium:1403323
Change-Id: I3c4f050b94550b8d7e4e65f733f9c1dad47941d2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4120575
Auto-Submit: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Patrick Thier <pthier@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Patrick Thier <pthier@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85015}
Toon Verwaest 2022-12-23 15:57:56 +01:00 committed by V8 LUCI CQ
parent 1531fec7e6
commit 109c8efc55
5 changed files with 72 additions and 48 deletions
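
For context on the main fix: on x64, Ucomisd reports an unordered comparison (at least one operand is NaN) through the parity flag while also setting the zero flag, so a float64->int32->float64 round-trip check that only branches on equal/not_equal accepts NaN as if the conversion had succeeded. Below is a minimal plain-C++ sketch of the check that the deopt sequence implements; the helper name TryFloat64ToInt32 is invented for illustration and is not part of V8. The explicit NaN test plays the role of the parity_even deopt added in this CL, and the round-trip comparison plays the role of the not_equal deopt. (The real lowering also deopts on -0, which this sketch ignores, and the cast below is undefined for values outside int32 range, a case the real round trip rejects naturally.)

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Hypothetical stand-in for the Float64 -> Int32 conversion check.
// Rejecting NaN up front mirrors the new parity_even deopt; without it, the
// unordered Ucomisd result also sets ZF, and the equality branch would treat
// NaN as a valid int32.
bool TryFloat64ToInt32(double value, int32_t* out) {
  if (std::isnan(value)) return false;              // the parity_even case
  int32_t truncated = static_cast<int32_t>(value);  // Cvttsd2si
  if (static_cast<double>(truncated) != value) {    // Cvtlsi2sd + Ucomisd
    return false;                                   // the not_equal case
  }
  *out = truncated;
  return true;
}

int main() {
  int32_t i;
  assert(TryFloat64ToInt32(42.0, &i) && i == 42);
  assert(!TryFloat64ToInt32(42.5, &i));  // would truncate
  assert(!TryFloat64ToInt32(std::numeric_limits<double>::quiet_NaN(), &i));
  return 0;
}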

View File

@@ -75,9 +75,9 @@ void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
// Deopt when result would be -0.
static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt());
__ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kOverflow);
Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
__ RecordComment("-- Jump to eager deopt");
__ Cbz(value, eager_deopt_info()->deopt_entry_label());
__ Cbz(value, fail);
__ negs(out, value);
// Output register must not be a register input into the eager deopt info.
@@ -242,12 +242,10 @@ void CheckedUint32ToInt32::GenerateCode(MaglevAssembler* masm,
Register input_reg = ToRegister(input()).W();
// Check if the top bit is set -- if it is, then this is not a valid int32,
// otherwise it is.
// TODO(victorgomes): I am manually creating an eager deopt here, if this is a
// common pattern, maybe abstract to a helper function.
static_assert(CheckedUint32ToInt32::kProperties.can_eager_deopt());
__ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kNotInt32);
Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
__ RecordComment("-- Jump to eager deopt");
__ Tbnz(input_reg, 31, eager_deopt_info()->deopt_entry_label());
__ Tbnz(input_reg, 31, fail);
}
void ChangeInt32ToFloat64::SetValueLocationConstraints() {
@@ -372,8 +370,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm,
// TODO(victorgomes): This can happen, because we do not emit an unconditional
// deopt when we intersect the map sets.
if (maps().is_empty()) {
__ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap);
__ B(eager_deopt_info()->deopt_entry_label());
__ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
return;
}
@@ -386,7 +383,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm,
Condition is_smi = __ CheckSmi(object);
if (maps_include_heap_number) {
// Smis count as matching the HeapNumber map, so we're done.
__ B(&done);
__ B(&done, is_smi);
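// Note: the branch above is taken only when {object} actually is a Smi; a
// non-Smi object still falls through to the map checks below.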
} else {
__ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
}
@@ -420,12 +417,10 @@ void CheckMapsWithMigration::SetValueLocationConstraints() {
}
void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
__ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap);
// TODO(victorgomes): This can happen, because we do not emit an unconditional
// deopt when we intersect the map sets.
if (maps().is_empty()) {
__ B(eager_deopt_info()->deopt_entry_label());
__ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
return;
}
@@ -440,9 +435,9 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
Condition is_smi = __ CheckSmi(object);
if (maps_include_heap_number) {
// Smis count as matching the HeapNumber map, so we're done.
__ B(*done);
__ B(*done, is_smi);
} else {
__ B(eager_deopt_info()->deopt_entry_label(), is_smi);
__ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
}
}
@@ -518,14 +513,14 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
},
// If this is the last map to check, we should deopt if we fail.
// This is safe to do, since {eager_deopt_info} is ZoneAllocated.
(last_map ? ZoneLabelRef::UnsafeFromLabelPointer(
eager_deopt_info()->deopt_entry_label())
(last_map ? ZoneLabelRef::UnsafeFromLabelPointer(masm->GetDeoptLabel(
this, DeoptimizeReason::kWrongMap))
: continue_label),
done, object, object_map, i, this);
} else if (last_map) {
// If it is the last map and it is not a migration target, we should deopt
// if the check fails.
__ B(eager_deopt_info()->deopt_entry_label(), ne);
__ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, this);
}
if (!last_map) {
@@ -1277,10 +1272,10 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
static_assert(kThinStringTagBit > 0);
// Deopt if this isn't a string.
__ Tst(instance_type, Immediate(kIsNotStringMask));
__ JumpIf(ne, deopt_info->deopt_entry_label());
__ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, node);
// Deopt if this isn't a thin string.
__ Tst(instance_type, Immediate(kThinStringTagBit));
__ JumpIf(eq, deopt_info->deopt_entry_label());
__ EmitEagerDeoptIf(eq, DeoptimizeReason::kWrongMap, node);
__ LoadTaggedPointerField(
object, FieldMemOperand(object, ThinString::kActualOffset));
if (v8_flags.debug_code) {

View File

@@ -124,8 +124,8 @@ class MaglevAssembler : public MacroAssembler {
inline void JumpToDeferredIf(Condition cond, Function&& deferred_code_gen,
Args&&... args);
inline void RegisterEagerDeopt(EagerDeoptInfo* deopt_info,
DeoptimizeReason reason);
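// Registers the node's eager deopt info under {reason} (if not already
// registered) and returns the corresponding deopt entry label.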
template <typename NodeT>
inline Label* GetDeoptLabel(NodeT* node, DeoptimizeReason reason);
template <typename NodeT>
inline void EmitEagerDeopt(NodeT* node, DeoptimizeReason reason);
template <typename NodeT>
@@ -456,8 +456,11 @@ inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
// Deopt
// ---
inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info,
DeoptimizeReason reason) {
template <typename NodeT>
inline Label* MaglevAssembler::GetDeoptLabel(NodeT* node,
DeoptimizeReason reason) {
static_assert(NodeT::kProperties.can_eager_deopt());
EagerDeoptInfo* deopt_info = node->eager_deopt_info();
if (deopt_info->reason() != DeoptimizeReason::kUnknown) {
DCHECK_EQ(deopt_info->reason(), reason);
}
@@ -465,25 +468,22 @@ inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info,
code_gen_state()->PushEagerDeopt(deopt_info);
deopt_info->set_reason(reason);
}
return node->eager_deopt_info()->deopt_entry_label();
}
template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeopt(NodeT* node,
DeoptimizeReason reason) {
static_assert(NodeT::kProperties.can_eager_deopt());
RegisterEagerDeopt(node->eager_deopt_info(), reason);
RecordComment("-- Jump to eager deopt");
Jump(node->eager_deopt_info()->deopt_entry_label());
Jump(GetDeoptLabel(node, reason));
}
template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeoptIf(Condition cond,
DeoptimizeReason reason,
NodeT* node) {
static_assert(NodeT::kProperties.can_eager_deopt());
RegisterEagerDeopt(node->eager_deopt_info(), reason);
RecordComment("-- Jump to eager deopt");
JumpIf(cond, node->eager_deopt_info()->deopt_entry_label());
JumpIf(cond, GetDeoptLabel(node, reason));
}
inline void MaglevAssembler::DefineLazyDeoptPoint(LazyDeoptInfo* info) {

View File

@@ -230,8 +230,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm,
// TODO(victorgomes): This can happen, because we do not emit an unconditional
// deopt when we intersect the map sets.
if (maps().is_empty()) {
__ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap);
__ jmp(eager_deopt_info()->deopt_entry_label());
__ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
return;
}
@@ -244,7 +243,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm,
Condition is_smi = __ CheckSmi(object);
if (maps_include_heap_number) {
// Smis count as matching the HeapNumber map, so we're done.
__ jmp(&done);
__ j(is_smi, &done);
} else {
__ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
}
@@ -346,12 +345,10 @@ void CheckMapsWithMigration::SetValueLocationConstraints() {
}
void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
__ RegisterEagerDeopt(eager_deopt_info(), DeoptimizeReason::kWrongMap);
// TODO(victorgomes): This can happen, because we do not emit an unconditional
// deopt when we intersect the map sets.
if (maps().is_empty()) {
__ jmp(eager_deopt_info()->deopt_entry_label());
__ EmitEagerDeopt(this, DeoptimizeReason::kWrongMap);
return;
}
@@ -366,9 +363,9 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
Condition is_smi = __ CheckSmi(object);
if (maps_include_heap_number) {
// Smis count as matching the HeapNumber map, so we're done.
__ jmp(*done);
__ j(is_smi, *done);
} else {
__ j(is_smi, eager_deopt_info()->deopt_entry_label());
__ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this);
}
}
@@ -439,14 +436,14 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
},
// If this is the last map to check, we should deopt if we fail.
// This is safe to do, since {eager_deopt_info} is ZoneAllocated.
(last_map ? ZoneLabelRef::UnsafeFromLabelPointer(
eager_deopt_info()->deopt_entry_label())
(last_map ? ZoneLabelRef::UnsafeFromLabelPointer(masm->GetDeoptLabel(
this, DeoptimizeReason::kWrongMap))
: continue_label),
done, object, i, this);
} else if (last_map) {
// If it is the last map and it is not a migration target, we should deopt
// if the check fails.
__ j(not_equal, eager_deopt_info()->deopt_entry_label());
__ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongMap, this);
}
if (!last_map) {
@@ -623,10 +620,10 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
static_assert(kThinStringTagBit > 0);
// Deopt if this isn't a string.
__ testw(map_tmp, Immediate(kIsNotStringMask));
__ j(not_zero, deopt_info->deopt_entry_label());
__ EmitEagerDeoptIf(not_zero, DeoptimizeReason::kWrongMap, node);
// Deopt if this isn't a thin string.
__ testb(map_tmp, Immediate(kThinStringTagBit));
__ j(zero, deopt_info->deopt_entry_label());
__ EmitEagerDeoptIf(zero, DeoptimizeReason::kWrongMap, node);
__ LoadTaggedPointerField(
object, FieldOperand(object, ThinString::kActualOffset));
if (v8_flags.debug_code) {
@@ -668,7 +665,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
FIRST_STRING_TYPE, LAST_STRING_TYPE);
__ j(below_equal, &is_string);
__ cmpl(kScratchRegister, Immediate(HEAP_NUMBER_TYPE));
__ cmpw(kScratchRegister, Immediate(HEAP_NUMBER_TYPE));
// The IC will go generic if it encounters something other than a
// Number or String key.
__ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, node);
@@ -677,6 +674,9 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
{
DoubleRegister number_value = node->double_temporaries().first();
DoubleRegister converted_back = kScratchDoubleReg;
// Load the heap number value into a double register.
__ Movsd(number_value,
FieldOperand(object, HeapNumber::kValueOffset));
// Convert the input float64 value to int32.
__ Cvttsd2si(result_reg, number_value);
// Convert that int32 value back to float64.
@@ -684,6 +684,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
// Check that the result of the float64->int32->float64 is equal to
// the input (i.e. that the conversion didn't truncate).
__ Ucomisd(number_value, converted_back);
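// Ucomisd leaves the parity flag set (together with the zero flag) when
// either operand is NaN, so deopt on the unordered case before trusting the
// equality check below.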
__ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotInt32, node);
__ j(equal, *done);
__ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
}
@@ -2425,6 +2426,7 @@ void CheckedTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
// Check that the result of the float64->int32->float64 is equal to the input
// (i.e. that the conversion didn't truncate).
__ Ucomisd(input_reg, converted_back);
__ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotInt32, this);
__ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
// Check if {input} is -0.
@@ -2451,14 +2453,15 @@ void CheckedTruncateFloat64ToUint32::GenerateCode(
Register result_reg = ToRegister(result());
DoubleRegister converted_back = kScratchDoubleReg;
Label fail;
// Convert the input float64 value to uint32.
__ Cvttsd2ui(result_reg, input_reg, &fail);
Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotUint32);
__ Cvttsd2ui(result_reg, input_reg, deopt);
// Convert that uint32 value back to float64.
__ Cvtlui2sd(converted_back, result_reg);
// Check that the result of the float64->uint32->float64 is equal to the input
// (i.e. that the conversion didn't truncate).
__ Ucomisd(input_reg, converted_back);
__ EmitEagerDeoptIf(parity_even, DeoptimizeReason::kNotUint32, this);
__ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotUint32, this);
// Check if {input} is -0.
@@ -2472,9 +2475,6 @@ void CheckedTruncateFloat64ToUint32::GenerateCode(
__ cmpl(high_word32_of_input, Immediate(0));
__ EmitEagerDeoptIf(less, DeoptimizeReason::kNotUint32, this);
__ bind(&fail);
__ EmitEagerDeopt(this, DeoptimizeReason::kNotUint32);
__ bind(&check_done);
}

View File

@@ -0,0 +1,14 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --maglev --allow-natives-syntax
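//
// undefined/2 evaluates to NaN; charCodeAt treats a NaN index as 0, so the
// call below must keep returning 97 (the char code of "a") after Maglev
// optimization.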
function __f_41() {
return "abc".charCodeAt(undefined/2);
}
%PrepareFunctionForOptimization(__f_41);
assertEquals(97, __f_41());
%OptimizeMaglevOnNextCall(__f_41);
assertEquals(97, __f_41());

View File

@@ -0,0 +1,15 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --maglev --allow-natives-syntax
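//
// a.length is first loaded from booleans, then from a Smi (4); calling foo
// with no argument makes a undefined, so the length load throws.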
function foo(a) {
if (a.length > 0) {}
}
%PrepareFunctionForOptimization(foo);
foo(false);
foo(false);
foo(4);
%OptimizeMaglevOnNextCall(foo);
assertThrows(foo);