[ic] Refactor unary op assembler implementations

This CL brings the unary op assembler structure closer to that of the
binary op assemblers:

- Decrement, Increment, and Negate call into UnaryOpWithFeedback, which
  takes lambdas specifying the smi, float, and bigint logic (see the
  sketch after this list).
- BitwiseNot differs in that it still dispatches using
  TaggedToWord32OrBigIntWithFeedback.
- These methods are all implemented in the (hidden) UnaryOpAssemblerImpl
  class.
- The header only exposes UnaryOpAssembler with the bare minimum of API.
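
Concretely, the shared dispatch now looks as follows (the type aliases
and the UnaryOpWithFeedback signature are copied from the private
section of UnaryOpAssemblerImpl in the diff below, shown here as a
declaration only):

  using SmiOperation = std::function<TNode<Number>(
      TNode<Smi> /* smi_value */, TVariable<Smi>* /* var_feedback */,
      Label* /* do_float_op */, TVariable<Float64T>* /* var_float */)>;
  using FloatOperation =
      std::function<TNode<Float64T>(TNode<Float64T> /* float_value */)>;
  using BigIntOperation = std::function<TNode<HeapObject>(
      TNode<Context> /* context */, TNode<HeapObject> /* bigint_value */)>;

  // Each op builds its three lambdas and forwards them to this common
  // driver, which handles type dispatch and feedback collection.
  TNode<Object> UnaryOpWithFeedback(TNode<Context> context,
                                    TNode<Object> value, TNode<UintPtrT> slot,
                                    TNode<HeapObject> maybe_feedback_vector,
                                    const SmiOperation& smi_op,
                                    const FloatOperation& float_op,
                                    const BigIntOperation& bigint_op);

Decrement and Increment additionally share an IncrementOrDecrement<Operation>
helper, so only the added constant (+1/-1) and the Operation passed to the
BigInt runtime call differ between them.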

The header layout is the remaining major divergence from the binary op
assemblers. I just like how it keeps implementation details out of the
header.
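
For reference, the exposed API is roughly the following (a sketch
inferred from the Generate_* definitions in the diff below, not the
verbatim header):

  class UnaryOpAssembler final {
   public:
    explicit UnaryOpAssembler(compiler::CodeAssemblerState* state)
        : state_(state) {}

    // One entry point per unary op; each constructs a UnaryOpAssemblerImpl
    // internally, so no implementation classes leak into the header.
    TNode<Object> Generate_BitwiseNotWithFeedback(
        TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
        TNode<HeapObject> maybe_feedback_vector);
    TNode<Object> Generate_DecrementWithFeedback(
        TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
        TNode<HeapObject> maybe_feedback_vector);
    TNode<Object> Generate_IncrementWithFeedback(
        TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
        TNode<HeapObject> maybe_feedback_vector);
    TNode<Object> Generate_NegateWithFeedback(
        TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
        TNode<HeapObject> maybe_feedback_vector);

   private:
    // state_ as used in the .cc below; the const qualifier is assumed.
    compiler::CodeAssemblerState* const state_;
  };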

Bug: v8:8888
Change-Id: I0ac4695483950356885301234d58c1900904aa92
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2214830
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68004}
Author: Jakob Gruber <jgruber@chromium.org>, 2020-05-26 16:14:40 +02:00
Committer: Commit Bot
Parent: f427f4d2be
Commit: e1427aea0e

@@ -9,25 +9,123 @@
namespace v8 {
namespace internal {
// Unary op helper classes.
namespace {
class UnaryNumericOpAssembler : public CodeStubAssembler {
class UnaryOpAssemblerImpl final : public CodeStubAssembler {
public:
explicit UnaryNumericOpAssembler(compiler::CodeAssemblerState* state)
explicit UnaryOpAssemblerImpl(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
protected:
virtual TNode<Number> SmiOp(TNode<Smi> smi_value,
TNode<Object> BitwiseNot(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
// TODO(jgruber): Make this implementation more consistent with other unary
// ops (i.e. have them all use UnaryOpWithFeedback or some other common
// mechanism).
TVARIABLE(Word32T, var_word32);
TVARIABLE(Smi, var_feedback);
TVARIABLE(BigInt, var_bigint);
TVARIABLE(Object, var_result);
Label if_number(this), if_bigint(this, Label::kDeferred), out(this);
TaggedToWord32OrBigIntWithFeedback(context, value, &if_number, &var_word32,
&if_bigint, &var_bigint, &var_feedback);
// Number case.
BIND(&if_number);
var_result =
ChangeInt32ToTagged(Signed(Word32BitwiseNot(var_word32.value())));
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(var_result.value()), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
UpdateFeedback(SmiOr(result_type, var_feedback.value()),
maybe_feedback_vector, slot);
Goto(&out);
// BigInt case.
BIND(&if_bigint);
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
maybe_feedback_vector, slot);
var_result =
CallRuntime(Runtime::kBigIntUnaryOp, context, var_bigint.value(),
SmiConstant(Operation::kBitwiseNot));
Goto(&out);
BIND(&out);
return var_result.value();
}
TNode<Object> Decrement(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
return IncrementOrDecrement<Operation::kDecrement>(context, value, slot,
maybe_feedback_vector);
}
TNode<Object> Increment(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
return IncrementOrDecrement<Operation::kIncrement>(context, value, slot,
maybe_feedback_vector);
}
TNode<Object> Negate(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
SmiOperation smi_op = [=](TNode<Smi> smi_value,
TVariable<Smi>* var_feedback, Label* do_float_op,
TVariable<Float64T>* var_float) = 0;
virtual TNode<Float64T> FloatOp(TNode<Float64T> float_value) = 0;
virtual TNode<HeapObject> BigIntOp(TNode<Context> context,
TNode<HeapObject> bigint_value) = 0;
TVariable<Float64T>* var_float) {
TVARIABLE(Number, var_result);
Label if_zero(this), if_min_smi(this), end(this);
// Return -0 if operand is 0.
GotoIf(SmiEqual(smi_value, SmiConstant(0)), &if_zero);
// Special-case the minimum Smi to avoid overflow.
GotoIf(SmiEqual(smi_value, SmiConstant(Smi::kMinValue)), &if_min_smi);
// Else simply subtract operand from 0.
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
var_result = SmiSub(SmiConstant(0), smi_value);
Goto(&end);
BIND(&if_zero);
CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
var_result = MinusZeroConstant();
Goto(&end);
BIND(&if_min_smi);
*var_float = SmiToFloat64(smi_value);
Goto(do_float_op);
BIND(&end);
return var_result.value();
};
FloatOperation float_op = [=](TNode<Float64T> float_value) {
return Float64Neg(float_value);
};
BigIntOperation bigint_op = [=](TNode<Context> context,
TNode<HeapObject> bigint_value) {
return CAST(CallRuntime(Runtime::kBigIntUnaryOp, context, bigint_value,
SmiConstant(Operation::kNegate)));
};
return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
smi_op, float_op, bigint_op);
}
private:
using SmiOperation = std::function<TNode<Number>(
TNode<Smi> /* smi_value */, TVariable<Smi>* /* var_feedback */,
Label* /* do_float_op */, TVariable<Float64T>* /* var_float */)>;
using FloatOperation =
std::function<TNode<Float64T>(TNode<Float64T> /* float_value */)>;
using BigIntOperation = std::function<TNode<HeapObject>(
TNode<Context> /* context */, TNode<HeapObject> /* bigint_value */)>;
TNode<Object> UnaryOpWithFeedback(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TNode<HeapObject> maybe_feedback_vector,
const SmiOperation& smi_op,
const FloatOperation& float_op,
const BigIntOperation& bigint_op) {
TVARIABLE(Object, var_value, value);
TVARIABLE(Object, var_result);
TVARIABLE(Float64T, var_float_value);
@@ -55,7 +153,7 @@ class UnaryNumericOpAssembler : public CodeStubAssembler {
BIND(&if_smi);
{
var_result =
SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value);
smi_op(CAST(value), &var_feedback, &do_float_op, &var_float_value);
Goto(&end);
}
@@ -67,7 +165,7 @@ class UnaryNumericOpAssembler : public CodeStubAssembler {
BIND(&if_bigint);
{
var_result = BigIntOp(context, value_heap_object);
var_result = bigint_op(context, value_heap_object);
CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt);
Goto(&end);
}
@@ -104,7 +202,7 @@ class UnaryNumericOpAssembler : public CodeStubAssembler {
{
CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber);
var_result =
AllocateHeapNumberWithValue(FloatOp(var_float_value.value()));
AllocateHeapNumberWithValue(float_op(var_float_value.value()));
Goto(&end);
}
@@ -112,131 +210,42 @@ class UnaryNumericOpAssembler : public CodeStubAssembler {
UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot);
return var_result.value();
}
};
class NegateAssembler : public UnaryNumericOpAssembler {
public:
explicit NegateAssembler(compiler::CodeAssemblerState* state)
: UnaryNumericOpAssembler(state) {}
template <Operation kOperation>
TNode<Object> IncrementOrDecrement(TNode<Context> context,
TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
STATIC_ASSERT(kOperation == Operation::kIncrement ||
kOperation == Operation::kDecrement);
static constexpr int kAddValue =
(kOperation == Operation::kIncrement) ? 1 : -1;
using UnaryNumericOpAssembler::UnaryOpWithFeedback;
SmiOperation smi_op = [=](TNode<Smi> smi_value,
TVariable<Smi>* var_feedback, Label* do_float_op,
TVariable<Float64T>* var_float) {
Label if_overflow(this), out(this);
TNode<Smi> result =
TrySmiAdd(smi_value, SmiConstant(kAddValue), &if_overflow);
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(&out);
private:
TNode<Number> SmiOp(TNode<Smi> smi_value, TVariable<Smi>* var_feedback,
Label* do_float_op,
TVariable<Float64T>* var_float) override {
TVARIABLE(Number, var_result);
Label if_zero(this), if_min_smi(this), end(this);
// Return -0 if operand is 0.
GotoIf(SmiEqual(smi_value, SmiConstant(0)), &if_zero);
BIND(&if_overflow);
*var_float = SmiToFloat64(smi_value);
Goto(do_float_op);
// Special-case the minimum Smi to avoid overflow.
GotoIf(SmiEqual(smi_value, SmiConstant(Smi::kMinValue)), &if_min_smi);
// Else simply subtract operand from 0.
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
var_result = SmiSub(SmiConstant(0), smi_value);
Goto(&end);
BIND(&if_zero);
CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
var_result = MinusZeroConstant();
Goto(&end);
BIND(&if_min_smi);
*var_float = SmiToFloat64(smi_value);
Goto(do_float_op);
BIND(&end);
return var_result.value();
}
TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
return Float64Neg(float_value);
}
TNode<HeapObject> BigIntOp(TNode<Context> context,
TNode<HeapObject> bigint_value) override {
return CAST(CallRuntime(Runtime::kBigIntUnaryOp, context, bigint_value,
SmiConstant(Operation::kNegate)));
}
};
template <int kAddValue, Operation kOperation>
class IncDecAssembler : public UnaryNumericOpAssembler {
public:
explicit IncDecAssembler(compiler::CodeAssemblerState* state)
: UnaryNumericOpAssembler(state) {}
using UnaryNumericOpAssembler::UnaryOpWithFeedback;
private:
TNode<Number> SmiOp(TNode<Smi> value, TVariable<Smi>* var_feedback,
Label* do_float_op,
TVariable<Float64T>* var_float) override {
Label if_overflow(this), out(this);
TNode<Smi> result = TrySmiAdd(value, SmiConstant(kAddValue), &if_overflow);
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(&out);
BIND(&if_overflow);
*var_float = SmiToFloat64(value);
Goto(do_float_op);
BIND(&out);
return result;
}
TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
return Float64Add(float_value, Float64Constant(kAddValue));
}
TNode<HeapObject> BigIntOp(TNode<Context> context,
TNode<HeapObject> bigint_value) override {
return CAST(CallRuntime(Runtime::kBigIntUnaryOp, context, bigint_value,
SmiConstant(kOperation)));
}
};
using IncAssembler = IncDecAssembler<1, Operation::kIncrement>;
using DecAssembler = IncDecAssembler<-1, Operation::kDecrement>;
class BitwiseNotAssembler : public CodeStubAssembler {
public:
explicit BitwiseNotAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
TNode<Object> BitwiseNotWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
TVARIABLE(Word32T, var_word32);
TVARIABLE(Smi, var_feedback);
TVARIABLE(BigInt, var_bigint);
TVARIABLE(Object, var_result);
Label if_number(this), if_bigint(this, Label::kDeferred), out(this);
TaggedToWord32OrBigIntWithFeedback(context, value, &if_number, &var_word32,
&if_bigint, &var_bigint, &var_feedback);
// Number case.
BIND(&if_number);
var_result =
ChangeInt32ToTagged(Signed(Word32BitwiseNot(var_word32.value())));
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(var_result.value()), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
UpdateFeedback(SmiOr(result_type, var_feedback.value()),
maybe_feedback_vector, slot);
Goto(&out);
// BigInt case.
BIND(&if_bigint);
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
maybe_feedback_vector, slot);
var_result =
CallRuntime(Runtime::kBigIntUnaryOp, context, var_bigint.value(),
SmiConstant(Operation::kBitwiseNot));
Goto(&out);
BIND(&out);
return var_result.value();
BIND(&out);
return result;
};
FloatOperation float_op = [=](TNode<Float64T> float_value) {
return Float64Add(float_value, Float64Constant(kAddValue));
};
BigIntOperation bigint_op = [=](TNode<Context> context,
TNode<HeapObject> bigint_value) {
return CAST(CallRuntime(Runtime::kBigIntUnaryOp, context, bigint_value,
SmiConstant(kOperation)));
};
return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
smi_op, float_op, bigint_op);
}
};
@@ -245,31 +254,29 @@ class BitwiseNotAssembler : public CodeStubAssembler {
TNode<Object> UnaryOpAssembler::Generate_BitwiseNotWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
// TODO(jgruber): Make this implementation more consistent with other unary
// ops (i.e. have them all use UnaryOpWithFeedback or some other mechanism).
BitwiseNotAssembler a(state_);
return a.BitwiseNotWithFeedback(context, value, slot, maybe_feedback_vector);
UnaryOpAssemblerImpl a(state_);
return a.BitwiseNot(context, value, slot, maybe_feedback_vector);
}
TNode<Object> UnaryOpAssembler::Generate_DecrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
DecAssembler a(state_);
return a.UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector);
UnaryOpAssemblerImpl a(state_);
return a.Decrement(context, value, slot, maybe_feedback_vector);
}
TNode<Object> UnaryOpAssembler::Generate_IncrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
IncAssembler a(state_);
return a.UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector);
UnaryOpAssemblerImpl a(state_);
return a.Increment(context, value, slot, maybe_feedback_vector);
}
TNode<Object> UnaryOpAssembler::Generate_NegateWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector) {
NegateAssembler a(state_);
return a.UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector);
UnaryOpAssemblerImpl a(state_);
return a.Negate(context, value, slot, maybe_feedback_vector);
}
} // namespace internal