Revert "Hydrogenisation of binops"

This reverts r17052-17054 for various build breaks.

TBR=mstarzinger@chromium.org
BUG=

Review URL: https://codereview.chromium.org/25571002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17055 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
olivf@chromium.org 2013-10-01 18:00:02 +00:00
parent cee81cf0b7
commit 9459ed3ab4
22 changed files with 3768 additions and 824 deletions

File diff suppressed because it is too large.

View File

@@ -1968,7 +1968,7 @@ class CountOperation V8_FINAL : public Expression {
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
Handle<Type> type() const { return type_; }
TypeInfo type() const { return type_; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1997,7 +1997,7 @@ class CountOperation V8_FINAL : public Expression {
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
Handle<Type> type_;
TypeInfo type_;
Expression* expression_;
int pos_;

View File

@@ -841,101 +841,6 @@ Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
}
template <>
HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
BinaryOpStub* stub = casted_stub();
HValue* left = GetParameter(0);
HValue* right = GetParameter(1);
Handle<Type> left_type = stub->GetLeftType(isolate());
Handle<Type> right_type = stub->GetRightType(isolate());
Handle<Type> result_type = stub->GetResultType(isolate());
ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
(stub->HasSideEffects(isolate()) || !result_type->Is(Type::None())));
HValue* result = NULL;
if (stub->operation() == Token::ADD &&
(left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
!left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
// For the generic add stub, a fast case for String add is performance
// critical.
if (left_type->Maybe(Type::String())) {
IfBuilder left_string(this);
left_string.IfNot<HIsSmiAndBranch>(left);
left_string.AndIf<HIsStringAndBranch>(left);
left_string.Then();
Push(Add<HStringAdd>(left, right, STRING_ADD_CHECK_RIGHT));
left_string.Else();
Push(AddInstruction(BuildBinaryOperation(stub->operation(),
left, right, left_type, right_type, result_type,
stub->fixed_right_arg(), true)));
left_string.End();
result = Pop();
} else {
IfBuilder right_string(this);
right_string.IfNot<HIsSmiAndBranch>(right);
right_string.AndIf<HIsStringAndBranch>(right);
right_string.Then();
Push(Add<HStringAdd>(left, right, STRING_ADD_CHECK_LEFT));
right_string.Else();
Push(AddInstruction(BuildBinaryOperation(stub->operation(),
left, right, left_type, right_type, result_type,
stub->fixed_right_arg(), true)));
right_string.End();
result = Pop();
}
} else {
result = AddInstruction(BuildBinaryOperation(stub->operation(),
left, right, left_type, right_type, result_type,
stub->fixed_right_arg(), true));
}
// If we encounter a generic argument, the number conversion is
// observable, thus we cannot afford to bail out after the fact.
if (!stub->HasSideEffects(isolate())) {
if (result_type->Is(Type::Smi())) {
if (stub->operation() == Token::SHR) {
// TODO(olivf) Replace this by a SmiTagU Instruction.
// 0x40000000: this number would convert to a negative value when the
// register is interpreted as signed.
IfBuilder if_of(this);
if_of.IfNot<HCompareNumericAndBranch>(result,
Add<HConstant>(static_cast<int>(0x40000000)), Token::EQ_STRICT);
if_of.Then();
if_of.ElseDeopt("UInt->Smi overflow");
if_of.End();
}
}
result = EnforceNumberType(result, result_type);
}
// Reuse the double box if we are allowed to (i.e. chained binops).
if (stub->CanReuseDoubleBox()) {
HValue* reuse = (stub->mode() == OVERWRITE_LEFT) ? left : right;
IfBuilder if_heap_number(this);
if_heap_number.IfNot<HIsSmiAndBranch>(reuse);
if_heap_number.Then();
HValue* res_val = Add<HForceRepresentation>(result,
Representation::Double());
HObjectAccess access = HObjectAccess::ForHeapNumberValue();
Add<HStoreNamedField>(reuse, access, res_val);
Push(reuse);
if_heap_number.Else();
Push(result);
if_heap_number.End();
result = Pop();
}
return result;
}
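
The 0x40000000 check in the SHR path above guards the unsigned-to-Smi conversion: with 31-bit Smis the payload is a signed 31-bit integer, so any unsigned shift result of 0x40000000 or above cannot be tagged without flipping sign. A minimal standalone C++ sketch of that boundary (illustrative only, not V8 code; the helper name FitsInSmi31 is made up):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: a 31-bit Smi payload must fit in [-2^30, 2^30 - 1];
    // for unsigned shift results that means the value must be < 0x40000000.
    static bool FitsInSmi31(uint32_t value) {
      return value < 0x40000000u;
    }

    int main() {
      uint32_t ok = 0x3FFFFFFFu;       // largest unsigned value still taggable
      uint32_t too_big = 0x40000000u;  // the boundary the stub deopts on
      printf("0x%x fits: %d\n", ok, FitsInSmi31(ok));
      printf("0x%x fits: %d\n", too_big, FitsInSmi31(too_big));
      // Tagging shifts the payload left by one; for 0x40000000 the top bit of
      // the 32-bit word gets set, i.e. the value reads back as negative.
      int32_t tagged = static_cast<int32_t>(too_big << 1);
      printf("tagged: %d\n", tagged);
      return 0;
    }

The tagging step (value << 1) is what produces the sign flip the comment alludes to, which is why the stub deopts instead of tagging.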
Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();

View File

@@ -137,7 +137,6 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
@@ -204,307 +203,119 @@ void CodeStub::PrintName(StringStream* stream) {
}
void BinaryOpStub::PrintBaseName(StringStream* stream) {
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
AllowStubCallsScope allow_stub_calls(masm, true);
BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
// The OddballStub handles a number and an oddball, not two oddballs.
operands_type = BinaryOpIC::GENERIC;
}
switch (operands_type) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
case BinaryOpIC::INT32:
GenerateInt32Stub(masm);
break;
case BinaryOpIC::NUMBER:
GenerateNumberStub(masm);
break;
case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
UNREACHABLE();
}
}
#define __ ACCESS_MASM(masm)
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
break;
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
break;
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
break;
default:
UNREACHABLE();
}
}
#undef __
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* ovr = "";
if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft";
if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight";
stream->Add("BinaryOpStub_%s%s", op_name, ovr);
}
void BinaryOpStub::PrintState(StringStream* stream) {
stream->Add("(");
stream->Add(StateToName(left_state_));
if (left_bool_) {
stream->Add(",Boolean");
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
stream->Add("*");
if (fixed_right_arg_.has_value) {
stream->Add("%d", fixed_right_arg_.value);
} else {
stream->Add(StateToName(right_state_));
if (right_bool_) {
stream->Add(",Boolean");
}
}
stream->Add("->");
stream->Add(StateToName(result_state_));
stream->Add(")");
stream->Add("BinaryOpStub_%s_%s_%s+%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(left_type_),
BinaryOpIC::GetName(right_type_));
}
Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left,
Handle<Object> right,
Isolate* isolate) {
Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_);
Object* builtin = builtins->javascript_builtin(func);
Handle<JSFunction> builtin_function =
Handle<JSFunction>(JSFunction::cast(builtin), isolate);
bool caught_exception;
Handle<Object> result = Execution::Call(isolate, builtin_function, left,
1, &right, &caught_exception);
return Maybe<Handle<Object> >(!caught_exception, result);
}
void BinaryOpStub::Initialize() {
fixed_right_arg_.has_value = false;
left_state_ = right_state_ = result_state_ = NONE;
left_bool_ = right_bool_ = false;
}
void BinaryOpStub::Generate(Token::Value op,
State left,
State right,
State result,
Isolate* isolate) {
BinaryOpStub stub(INITIALIZED);
stub.op_ = op;
stub.left_state_ = left;
stub.right_state_ = right;
stub.result_state_ = result;
stub.mode_ = NO_OVERWRITE;
stub.GetCode(isolate);
stub.mode_ = OVERWRITE_LEFT;
stub.GetCode(isolate);
}
void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL,
Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND,
Token::BIT_XOR, Token::SHL, Token::SHR};
// TODO(olivf) NumberTagU is not snapshot safe yet so we have to skip SHR
// since that produces an unsigned int32.
Token::Value bitop[] = {Token::BIT_OR, Token::BIT_AND, Token::BIT_XOR,
Token::SAR, Token::SHL /* Token::SHR */};
Token::Value arithop[] = {Token::ADD, Token::SUB, Token::MOD,
Token::DIV, Token::MUL};
for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) {
BinaryOpStub stub(UNINITIALIZED);
stub.op_ = binop[i];
stub.GetCode(isolate);
}
for (unsigned i = 0; i < ARRAY_SIZE(arithop); i++) {
Generate(arithop[i], SMI, SMI, SMI, isolate);
Generate(arithop[i], SMI, SMI, INT32, isolate);
Generate(arithop[i], SMI, SMI, NUMBER, isolate);
Generate(arithop[i], INT32, INT32, INT32, isolate);
Generate(arithop[i], NUMBER, SMI, SMI, isolate);
Generate(arithop[i], NUMBER, SMI, NUMBER, isolate);
Generate(arithop[i], NUMBER, INT32, NUMBER, isolate);
Generate(arithop[i], NUMBER, NUMBER, NUMBER, isolate);
}
Generate(Token::SHR, SMI, SMI, SMI, isolate);
for (unsigned i = 0; i < ARRAY_SIZE(bitop); i++) {
Generate(bitop[i], SMI, SMI, SMI, isolate);
Generate(bitop[i], SMI, INT32, INT32, isolate);
Generate(bitop[i], INT32, INT32, INT32, isolate);
Generate(bitop[i], NUMBER, INT32, INT32, isolate);
Generate(bitop[i], NUMBER, NUMBER, INT32, isolate);
}
Generate(Token::ADD, STRING, STRING, STRING, isolate);
BinaryOpStub stub(INITIALIZED);
stub.op_ = Token::MOD;
stub.left_state_ = SMI;
stub.right_state_ = SMI;
stub.result_state_ = SMI;
stub.fixed_right_arg_.has_value = true;
stub.fixed_right_arg_.value = 4;
stub.mode_ = NO_OVERWRITE;
stub.GetCode(isolate);
stub.fixed_right_arg_.value = 8;
stub.GetCode(isolate);
}
bool BinaryOpStub::can_encode_arg_value(int32_t value) const {
return op_ == Token::MOD && value > 0 && IsPowerOf2(value) &&
FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
}
int BinaryOpStub::encode_arg_value(int32_t value) const {
ASSERT(can_encode_arg_value(value));
return WhichPowerOf2(value);
}
int32_t BinaryOpStub::decode_arg_value(int value) const {
return 1 << value;
}
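
The three helpers above implement a compact encoding for the fixed right argument: only positive powers of two are accepted, and only the exponent is stored in the minor key. A small standalone sketch of the round trip (illustrative C++ only; IsPowerOfTwo, Encode and Decode are made-up stand-ins for IsPowerOf2, WhichPowerOf2 and the shift back):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for IsPowerOf2(): true for 1, 2, 4, 8, ...
    static bool IsPowerOfTwo(int32_t value) {
      return value > 0 && (value & (value - 1)) == 0;
    }

    // Stand-in for WhichPowerOf2(): store only the exponent.
    static int Encode(int32_t value) {
      assert(IsPowerOfTwo(value));
      int exponent = 0;
      while ((1 << exponent) != value) ++exponent;
      return exponent;
    }

    // Mirrors decode_arg_value(): reconstruct the value from the exponent.
    static int32_t Decode(int exponent) { return 1 << exponent; }

    int main() {
      const int32_t values[] = {1, 4, 8, 1024};
      for (int i = 0; i < 4; ++i) {
        int key = Encode(values[i]);
        printf("%d -> exponent %d -> %d\n", values[i], key, Decode(key));
        assert(Decode(key) == values[i]);
      }
      return 0;
    }

Storing only the exponent is what lets the fixed right argument fit into a handful of bits of the stub key instead of a full 32-bit value.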
int BinaryOpStub::encode_token(Token::Value op) const {
ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN);
return op - FIRST_TOKEN;
}
Token::Value BinaryOpStub::decode_token(int op) const {
int res = op + FIRST_TOKEN;
ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN);
return static_cast<Token::Value>(res);
}
const char* BinaryOpStub::StateToName(State state) {
switch (state) {
case NONE:
return "None";
case SMI:
return "Smi";
case INT32:
return "Int32";
case NUMBER:
return "Number";
case STRING:
return "String";
case GENERIC:
return "Generic";
}
return "";
}
void BinaryOpStub::UpdateStatus(Handle<Object> left,
Handle<Object> right,
Maybe<Handle<Object> > result) {
int old_state = GetExtraICState();
UpdateStatus(left, &left_state_, &left_bool_);
UpdateStatus(right, &right_state_, &right_bool_);
int32_t value;
bool new_has_fixed_right_arg =
right->ToInt32(&value) && can_encode_arg_value(value) &&
(left_state_ == SMI || left_state_ == INT32) &&
(result_state_ == NONE || !fixed_right_arg_.has_value);
fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value);
if (result.has_value) UpdateStatus(result.value, &result_state_, NULL);
State max_result = has_int_result() ? INT32 : NUMBER;
State max_input = Max(left_state_, right_state_);
// Avoid unnecessary Representation changes.
if (left_state_ == STRING && right_state_ < STRING) {
right_state_ = GENERIC;
} else if (right_state_ == STRING && left_state_ < STRING) {
left_state_ = GENERIC;
} else if ((right_state_ == GENERIC && left_state_ != STRING) ||
(left_state_ == GENERIC && right_state_ != STRING)) {
left_state_ = right_state_ = GENERIC;
} else if (max_input <= NUMBER && max_input > result_state_) {
result_state_ = Min(max_result, max_input);
}
ASSERT(result_state_ <= max_result || op_ == Token::ADD);
if (old_state == GetExtraICState()) {
// Since the FPU is too precise, we might bail out on numbers which
// would actually truncate with 64-bit precision.
ASSERT(!CpuFeatures::IsSupported(SSE2) &&
result_state_ <= INT32);
result_state_ = NUMBER;
}
}
void BinaryOpStub::UpdateStatus(Handle<Object> object,
State* state,
bool* bool_state) {
if (object->IsBoolean() && bool_state != NULL) {
*bool_state = true;
void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
GenerateBothStringStub(masm);
return;
}
v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object);
if (object->IsUndefined()) {
// Undefined will be automatically truncated for us by HChange.
type = (op_ == Token::BIT_AND || op_ == Token::BIT_OR ||
op_ == Token::BIT_XOR || op_ == Token::SAR ||
op_ == Token::SHL || op_ == Token::SHR)
? TypeInfo::Integer32()
: TypeInfo::Double();
}
State int_state = SmiValuesAre32Bits() ? NUMBER : INT32;
State new_state = NONE;
if (type.IsSmi()) {
new_state = SMI;
} else if (type.IsInteger32()) {
new_state = int_state;
} else if (type.IsNumber()) {
new_state = NUMBER;
} else if (object->IsString() && operation() == Token::ADD) {
new_state = STRING;
} else {
new_state = GENERIC;
}
if ((new_state <= NUMBER && *state > NUMBER) ||
(new_state > NUMBER && *state <= NUMBER && *state != NONE)) {
new_state = GENERIC;
}
*state = Max(*state, new_state);
}
Handle<Type> BinaryOpStub::StateToType(State state,
bool seen_bool,
Isolate* isolate) {
Handle<Type> t = handle(Type::None(), isolate);
switch (state) {
case NUMBER:
t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate);
// Fall through.
case INT32:
t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate);
// Fall through.
case SMI:
t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate);
break;
case STRING:
t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate);
break;
case GENERIC:
return handle(Type::Any(), isolate);
break;
case NONE:
break;
}
if (seen_bool) {
t = handle(Type::Union(t, handle(Type::Boolean(), isolate)), isolate);
}
return t;
}
Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const {
return StateToType(left_state_, left_bool_, isolate);
}
Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const {
return StateToType(right_state_, right_bool_, isolate);
}
Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const {
if (HasSideEffects(isolate)) return StateToType(NONE, false, isolate);
if (result_state_ == GENERIC && op_ == Token::ADD) {
return handle(Type::Union(handle(Type::Number(), isolate),
handle(Type::String(), isolate)), isolate);
}
ASSERT(result_state_ != GENERIC);
if (result_state_ == NUMBER && op_ == Token::SHR) {
return handle(Type::Unsigned32(), isolate);
}
return StateToType(result_state_, false, isolate);
// Try to add arguments as strings, otherwise, transition to the generic
// BinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}

View File

@@ -200,9 +200,6 @@ class CodeStub BASE_EMBEDDED {
virtual void PrintName(StringStream* stream);
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
protected:
static bool CanUseFPRegisters();
@@ -214,6 +211,8 @@ class CodeStub BASE_EMBEDDED {
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintBaseName(StringStream* stream);
virtual void PrintState(StringStream* stream) { }
@@ -996,177 +995,156 @@ class KeyedLoadFieldStub: public LoadFieldStub {
};
class BinaryOpStub: public HydrogenCodeStub {
class BinaryOpStub: public PlatformCodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) {
ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN);
: op_(op),
mode_(mode),
platform_specific_bit_(false),
left_type_(BinaryOpIC::UNINITIALIZED),
right_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED),
encoded_right_arg_(false, encode_arg_value(1)) {
Initialize();
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
explicit BinaryOpStub(Code::ExtraICState state)
: op_(decode_token(OpBits::decode(state))),
mode_(OverwriteModeField::decode(state)),
fixed_right_arg_(
Maybe<int>(HasFixedRightArgBits::decode(state),
decode_arg_value(FixedRightArgValueBits::decode(state)))),
left_state_(LeftStateField::decode(state)),
left_bool_(LeftBoolField::decode(state)),
right_state_(fixed_right_arg_.has_value
? ((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32)
: RightStateField::decode(state)),
right_bool_(fixed_right_arg_.has_value
? false : RightBoolField::decode(state)),
result_state_(ResultStateField::decode(state)) {
// We don't deserialize the SSE2 Field, since this is only used to be able
// to include SSE2 as well as non-SSE2 versions in the snapshot. For code
// generation we always want it to reflect the current state.
ASSERT(!fixed_right_arg_.has_value ||
can_encode_arg_value(fixed_right_arg_.value));
BinaryOpStub(
int key,
BinaryOpIC::TypeInfo left_type,
BinaryOpIC::TypeInfo right_type,
BinaryOpIC::TypeInfo result_type,
Maybe<int32_t> fixed_right_arg)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
platform_specific_bit_(PlatformSpecificBits::decode(key)),
left_type_(left_type),
right_type_(right_type),
result_type_(result_type),
encoded_right_arg_(fixed_right_arg.has_value,
encode_arg_value(fixed_right_arg.value)) { }
static void decode_types_from_minor_key(int minor_key,
BinaryOpIC::TypeInfo* left_type,
BinaryOpIC::TypeInfo* right_type,
BinaryOpIC::TypeInfo* result_type) {
*left_type =
static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
*right_type =
static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
*result_type =
static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
}
static const int FIRST_TOKEN = Token::BIT_OR;
static const int LAST_TOKEN = Token::MOD;
static void GenerateAheadOfTime(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
static void InitializeForIsolate(Isolate* isolate) {
BinaryOpStub binopStub(UNINITIALIZED);
binopStub.InitializeInterfaceDescriptor(
isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp));
static Token::Value decode_op_from_minor_key(int minor_key) {
return static_cast<Token::Value>(OpBits::decode(minor_key));
}
virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
if (Max(left_state_, right_state_) == NONE && !left_bool_ && !right_bool_) {
return ::v8::internal::UNINITIALIZED;
}
if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC;
return MONOMORPHIC;
static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
return Maybe<int>(
HasFixedRightArgBits::decode(minor_key),
decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
}
virtual Code::ExtraICState GetExtraICState() {
bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI &&
CpuFeatures::IsSafeForSnapshot(SSE2);
return OpBits::encode(encode_token(op_))
| LeftStateField::encode(left_state_)
| LeftBoolField::encode(left_bool_)
| RightStateField::encode(fixed_right_arg_.has_value
? NONE : right_state_)
| RightBoolField::encode(fixed_right_arg_.has_value
? false
: right_bool_)
| ResultStateField::encode(result_state_)
| HasFixedRightArgBits::encode(fixed_right_arg_.has_value)
| FixedRightArgValueBits::encode(fixed_right_arg_.has_value
? encode_arg_value(
fixed_right_arg_.value)
: 0)
| SSE2Field::encode(sse_field)
| OverwriteModeField::encode(mode_);
int fixed_right_arg_value() const {
return decode_arg_value(encoded_right_arg_.value);
}
bool CanReuseDoubleBox() {
return result_state_ <= NUMBER && result_state_ > SMI &&
((left_state_ > SMI && left_state_ <= NUMBER &&
mode_ == OVERWRITE_LEFT) ||
(right_state_ > SMI && right_state_ <= NUMBER &&
mode_ == OVERWRITE_RIGHT));
static bool can_encode_arg_value(int32_t value) {
return value > 0 &&
IsPowerOf2(value) &&
FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
}
bool HasSideEffects(Isolate* isolate) const {
return GetLeftType(isolate)->Maybe(Type::Receiver()) ||
GetRightType(isolate)->Maybe(Type::Receiver());
}
virtual Handle<Code> GenerateCode(Isolate* isolate);
Maybe<Handle<Object> > Result(Handle<Object> left,
Handle<Object> right,
Isolate* isolate);
Token::Value operation() const { return op_; }
OverwriteMode mode() const { return mode_; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
Handle<Type> GetLeftType(Isolate* isolate) const;
Handle<Type> GetRightType(Isolate* isolate) const;
Handle<Type> GetResultType(Isolate* isolate) const;
void UpdateStatus(Handle<Object> left,
Handle<Object> right,
Maybe<Handle<Object> > result);
void PrintState(StringStream* stream);
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
private:
explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state),
op_(Token::ADD),
mode_(NO_OVERWRITE) {
Initialize();
}
void Initialize();
enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
// We truncate the last bit of the token.
STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5));
class LeftStateField: public BitField<State, 0, 3> {};
class LeftBoolField: public BitField<bool, 3, 1> {};
// When fixed right arg is set, we don't need to store the right state.
// Thus the two fields can overlap.
class HasFixedRightArgBits: public BitField<bool, 4, 1> {};
class FixedRightArgValueBits: public BitField<int, 5, 4> {};
class RightStateField: public BitField<State, 5, 3> {};
class RightBoolField: public BitField<bool, 8, 1> {};
class ResultStateField: public BitField<State, 9, 3> {};
class SSE2Field: public BitField<bool, 12, 1> {};
class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {};
class OpBits: public BitField<int, 15, 5> {};
virtual CodeStub::Major MajorKey() { return BinaryOp; }
virtual int NotMissMinorKey() { return GetExtraICState(); }
static Handle<Type> StateToType(State state,
bool seen_bool,
Isolate* isolate);
static void Generate(Token::Value op,
State left,
State right,
State result,
Isolate* isolate);
void UpdateStatus(Handle<Object> object,
State* state,
bool* bool_state);
bool can_encode_arg_value(int32_t value) const;
int encode_arg_value(int32_t value) const;
int32_t decode_arg_value(int value) const;
int encode_token(Token::Value op) const;
Token::Value decode_token(int op) const;
bool has_int_result() const {
return op_ == Token::BIT_XOR || op_ == Token::BIT_AND ||
op_ == Token::BIT_OR || op_ == Token::SAR;
}
const char* StateToName(State state);
void PrintBaseName(StringStream* stream);
Token::Value op_;
OverwriteMode mode_;
bool platform_specific_bit_; // Indicates SSE3 on IA32.
Maybe<int> fixed_right_arg_;
State left_state_;
bool left_bool_;
State right_state_;
bool right_bool_;
State result_state_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo left_type_;
BinaryOpIC::TypeInfo right_type_;
BinaryOpIC::TypeInfo result_type_;
Maybe<int> encoded_right_arg_;
static int encode_arg_value(int32_t value) {
ASSERT(can_encode_arg_value(value));
return WhichPowerOf2(value);
}
static int32_t decode_arg_value(int value) {
return 1 << value;
}
virtual void PrintName(StringStream* stream);
// Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
// Note: We actually do not need 7 bits for the operation, just 4 bits to
// encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class PlatformSpecificBits: public BitField<bool, 9, 1> {};
class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
class FixedRightArgValueBits: public BitField<int, 20, 5> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| PlatformSpecificBits::encode(platform_specific_bit_)
| LeftTypeBits::encode(left_type_)
| RightTypeBits::encode(right_type_)
| ResultTypeBits::encode(result_type_)
| HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
| FixedRightArgValueBits::encode(encoded_right_arg_.value);
}
// Platform-independent implementation.
void Generate(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
// Platform-independent signature, platform-specific implementation.
void Initialize();
void GenerateAddStrings(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateNumberStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
// Entirely platform-specific methods are defined as static helper
// functions in the <arch>/code-stubs-<arch>.cc files.
virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(Max(left_type_, right_type_));
}
virtual void FinishCode(Handle<Code> code) {
code->set_stub_info(MinorKey());
}
friend class CodeGenerator;
};
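
The MinorKey() packing above follows the layout comment FFFFFHTTTRRRLLLPOOOOOOOMM: each state component is masked into its own bit range of a single integer key. A self-contained sketch of how such BitField packing works (an illustrative re-implementation, not V8's BitField template; the field widths are copied from the declarations above, the packed values are arbitrary):

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for v8::internal::BitField: a |size|-bit field
    // starting at bit |shift| of an unsigned 32-bit key.
    template <typename T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1) << shift; }
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & mask();
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & mask()) >> shift);
      }
    };

    // Field positions mirror the layout comment FFFFFHTTTRRRLLLPOOOOOOOMM.
    typedef BitField<int, 0, 2>   ModeBits;                // MM
    typedef BitField<int, 2, 7>   OpBits;                  // OOOOOOO
    typedef BitField<bool, 9, 1>  PlatformSpecificBits;    // P
    typedef BitField<int, 10, 3>  LeftTypeBits;            // LLL
    typedef BitField<int, 13, 3>  RightTypeBits;           // RRR
    typedef BitField<int, 16, 3>  ResultTypeBits;          // TTT
    typedef BitField<bool, 19, 1> HasFixedRightArgBits;    // H
    typedef BitField<int, 20, 5>  FixedRightArgValueBits;  // FFFFF

    int main() {
      // Pack a few illustrative values (not real token/type constants).
      uint32_t key = ModeBits::encode(1)
                   | OpBits::encode(42)
                   | LeftTypeBits::encode(2)
                   | RightTypeBits::encode(1)
                   | ResultTypeBits::encode(3)
                   | HasFixedRightArgBits::encode(true)
                   | FixedRightArgValueBits::encode(3);
      printf("key = 0x%08x\n", key);
      printf("op = %d, left = %d, fixed arg exponent = %d\n",
             OpBits::decode(key), LeftTypeBits::decode(key),
             FixedRightArgValueBits::decode(key));
      return 0;
    }

Decoding is just the reverse mask-and-shift, which is why decode_types_from_minor_key and decode_fixed_right_arg_from_minor_key can recover the full stub state from the integer key alone.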
@@ -1739,9 +1717,7 @@ class DoubleToIStub : public PlatformCodeStub {
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
SkipFastPathBits::encode(skip_fastpath);
}
Register source() {
@@ -1784,8 +1760,6 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
class SSEBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }

View File

@@ -763,9 +763,6 @@ class HValue : public ZoneObject {
void SetFlag(Flag f) { flags_ |= (1 << f); }
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
void CopyFlag(Flag f, HValue* other) {
if (other->CheckFlag(f)) SetFlag(f);
}
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f) const;

View File

@@ -7501,7 +7501,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
Handle<Type> info = expr->type();
TypeInfo info = expr->type();
Representation rep = Representation::FromType(info);
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
@@ -7865,8 +7865,7 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
Handle<Type> left_type,
Handle<Type> right_type,
Handle<Type> result_type,
Maybe<int> fixed_right_arg,
bool binop_stub) {
Maybe<int> fixed_right_arg) {
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
@@ -7895,92 +7894,75 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
right_rep = Representation::FromType(right_type);
}
if (binop_stub) {
left = EnforceNumberType(left, left_type);
right = EnforceNumberType(right, right_type);
}
Representation result_rep = Representation::FromType(result_type);
bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
(right_rep.IsTagged() && !right_rep.IsSmi());
bool is_string_add = op == Token::ADD &&
(left_type->Is(Type::String()) ||
right_type->Is(Type::String()));
bool is_string_add = op == Token::ADD &&
(left_type->Is(Type::String()) ||
right_type->Is(Type::String()));
HInstruction* instr = NULL;
// Only the stub is allowed to call into the runtime, since otherwise we would
// inline several instructions (including the two pushes) for every tagged
// operation in optimized code, which is more expensive than a stub call.
if (binop_stub && is_non_primitive && !is_string_add) {
HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
Add<HPushArgument>(left);
Add<HPushArgument>(right);
instr = NewUncasted<HInvokeFunction>(function, 2);
} else {
switch (op) {
case Token::ADD:
if (is_string_add) {
StringAddFlags flags = STRING_ADD_CHECK_BOTH;
if (left_type->Is(Type::String())) {
BuildCheckHeapObject(left);
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
flags = STRING_ADD_CHECK_RIGHT;
}
if (right_type->Is(Type::String())) {
BuildCheckHeapObject(right);
AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
flags = (flags == STRING_ADD_CHECK_BOTH)
? STRING_ADD_CHECK_LEFT : STRING_ADD_CHECK_NONE;
}
instr = NewUncasted<HStringAdd>(left, right, flags);
} else {
instr = NewUncasted<HAdd>(left, right);
switch (op) {
case Token::ADD:
if (is_string_add) {
StringAddFlags flags = STRING_ADD_CHECK_BOTH;
if (left_type->Is(Type::String())) {
BuildCheckHeapObject(left);
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
flags = STRING_ADD_CHECK_RIGHT;
}
break;
case Token::SUB:
instr = NewUncasted<HSub>(left, right);
break;
case Token::MUL:
instr = NewUncasted<HMul>(left, right);
break;
case Token::MOD:
instr = NewUncasted<HMod>(left, right, fixed_right_arg);
break;
case Token::DIV:
instr = NewUncasted<HDiv>(left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
instr = NewUncasted<HBitwise>(op, left, right);
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = NewUncasted<HRor>(operand, shift_amount);
} else {
instr = NewUncasted<HBitwise>(op, left, right);
if (right_type->Is(Type::String())) {
BuildCheckHeapObject(right);
AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
flags = (flags == STRING_ADD_CHECK_BOTH)
? STRING_ADD_CHECK_LEFT : STRING_ADD_CHECK_NONE;
}
break;
instr = NewUncasted<HStringAdd>(left, right, flags);
} else {
instr = NewUncasted<HAdd>(left, right);
}
case Token::SAR:
instr = NewUncasted<HSar>(left, right);
break;
case Token::SHR:
instr = NewUncasted<HShr>(left, right);
if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
instr = NewUncasted<HShl>(left, right);
break;
default:
UNREACHABLE();
break;
case Token::SUB:
instr = NewUncasted<HSub>(left, right);
break;
case Token::MUL:
instr = NewUncasted<HMul>(left, right);
break;
case Token::MOD:
instr = NewUncasted<HMod>(left, right, fixed_right_arg);
break;
case Token::DIV:
instr = NewUncasted<HDiv>(left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
instr = NewUncasted<HBitwise>(op, left, right);
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = NewUncasted<HRor>(operand, shift_amount);
} else {
instr = NewUncasted<HBitwise>(op, left, right);
}
break;
}
case Token::SAR:
instr = NewUncasted<HSar>(left, right);
break;
case Token::SHR:
instr = NewUncasted<HShr>(left, right);
if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
instr = NewUncasted<HShl>(left, right);
break;
default:
UNREACHABLE();
}
if (instr->IsBinaryOperation()) {
@@ -7988,19 +7970,6 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(
binop->set_observed_input_representation(1, left_rep);
binop->set_observed_input_representation(2, right_rep);
binop->initialize_output_representation(result_rep);
if (binop_stub) {
// Stub should not call into stub.
instr->SetFlag(HValue::kCannotBeTagged);
// And should truncate on HForceRepresentation already.
if (left->IsForceRepresentation()) {
left->CopyFlag(HValue::kTruncatingToSmi, instr);
left->CopyFlag(HValue::kTruncatingToInt32, instr);
}
if (right->IsForceRepresentation()) {
right->CopyFlag(HValue::kTruncatingToSmi, instr);
right->CopyFlag(HValue::kTruncatingToInt32, instr);
}
}
}
return instr;
}

View File

@@ -1291,8 +1291,7 @@ class HGraphBuilder {
Handle<Type> left_type,
Handle<Type> right_type,
Handle<Type> result_type,
Maybe<int> fixed_right_arg,
bool binop_stub = false);
Maybe<int> fixed_right_arg);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);

File diff suppressed because it is too large.

View File

@@ -212,7 +212,6 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
if (!CpuFeatures::IsSupported(SSE2)) return;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);

View File

@@ -253,8 +253,8 @@ void MacroAssembler::X87TOSToI(Register result_reg,
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
fld(0);
fist_s(MemOperand(esp, 0));
fld(0);
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
@@ -453,7 +453,6 @@ static double kUint32Bias =
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
ASSERT(!Serializer::enabled());
Label done;
cmp(src, Immediate(0));
movdbl(scratch,

src/ic.cc (340 lines changed)
View File

@@ -2371,6 +2371,11 @@ RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
}
void BinaryOpIC::patch(Code* code) {
set_target(code);
}
const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
@@ -2385,64 +2390,256 @@ const char* BinaryOpIC::GetName(TypeInfo type_info) {
}
MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
BinaryOpStub stub(extra_ic_state);
bool smi_was_enabled = stub.GetLeftType(isolate())->Maybe(Type::Smi()) &&
stub.GetRightType(isolate())->Maybe(Type::Smi());
Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
#ifdef DEBUG
if (FLAG_trace_ic) {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
stream.Add("[");
stub.PrintName(&stream);
stub.UpdateStatus(left, right, result);
stream.Add(" => ");
stub.PrintState(&stream);
stream.Add(" ");
stream.OutputToStdOut();
PrintF(" @ %p <- ", static_cast<void*>(*stub.GetCode(isolate())));
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
PrintF("]\n");
} else {
stub.UpdateStatus(left, right, result);
BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED:
return ::v8::internal::UNINITIALIZED;
case SMI:
case INT32:
case NUMBER:
case ODDBALL:
case STRING:
return MONOMORPHIC;
case GENERIC:
return ::v8::internal::GENERIC;
}
#else
stub.UpdateStatus(left, right, result);
#endif
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
bool enable_smi = stub.GetLeftType(isolate())->Maybe(Type::Smi()) &&
stub.GetRightType(isolate())->Maybe(Type::Smi());
if (!smi_was_enabled && enable_smi) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
} else if (smi_was_enabled && !enable_smi) {
PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
}
return result.has_value
? static_cast<MaybeObject*>(*result.value)
: Failure::Exception();
UNREACHABLE();
return ::v8::internal::UNINITIALIZED;
}
RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
Handle<Type> BinaryOpIC::TypeInfoToType(BinaryOpIC::TypeInfo binary_type,
Isolate* isolate) {
switch (binary_type) {
case UNINITIALIZED:
return handle(Type::None(), isolate);
case SMI:
return handle(Type::Smi(), isolate);
case INT32:
return handle(Type::Signed32(), isolate);
case NUMBER:
return handle(Type::Number(), isolate);
case ODDBALL:
return handle(Type::Optional(
handle(Type::Union(
handle(Type::Number(), isolate),
handle(Type::String(), isolate)), isolate)), isolate);
case STRING:
return handle(Type::String(), isolate);
case GENERIC:
return handle(Type::Any(), isolate);
}
UNREACHABLE();
return handle(Type::Any(), isolate);
}
void BinaryOpIC::StubInfoToType(int minor_key,
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
Isolate* isolate) {
TypeInfo left_typeinfo, right_typeinfo, result_typeinfo;
BinaryOpStub::decode_types_from_minor_key(
minor_key, &left_typeinfo, &right_typeinfo, &result_typeinfo);
*left = TypeInfoToType(left_typeinfo, isolate);
*right = TypeInfoToType(right_typeinfo, isolate);
*result = TypeInfoToType(result_typeinfo, isolate);
}
static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
Token::Value op) {
v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
if (type.IsSmi()) return BinaryOpIC::SMI;
if (type.IsInteger32()) {
if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
return BinaryOpIC::INT32;
}
if (type.IsNumber()) return BinaryOpIC::NUMBER;
if (type.IsString()) return BinaryOpIC::STRING;
if (value->IsUndefined()) {
if (op == Token::BIT_AND ||
op == Token::BIT_OR ||
op == Token::BIT_XOR ||
op == Token::SAR ||
op == Token::SHL ||
op == Token::SHR) {
if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
return BinaryOpIC::INT32;
}
return BinaryOpIC::ODDBALL;
}
return BinaryOpIC::GENERIC;
}
static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
Handle<Object> value,
Token::Value op) {
BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
if (old_type == BinaryOpIC::STRING) {
if (new_type == BinaryOpIC::STRING) return new_type;
return BinaryOpIC::GENERIC;
}
return Max(old_type, new_type);
}
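
InputState() above is a small lattice join: a slot's recorded type only ever moves toward GENERIC, and once a side has been STRING it can only stay STRING or fall to GENERIC. A rough standalone sketch of that transition rule (illustrative C++; it takes the already-classified new type rather than a value and an operator, and copies the enum ordering used above):

    #include <algorithm>
    #include <cstdio>

    // Illustrative copy of the BinaryOpIC ordering; the values must stay in
    // this order for std::max() to behave like a lattice join.
    enum TypeInfo { UNINITIALIZED, SMI, INT32, NUMBER, ODDBALL, STRING, GENERIC };

    static TypeInfo NextInputState(TypeInfo old_type, TypeInfo new_type) {
      if (old_type == STRING) {
        // STRING can only stay STRING or degrade to GENERIC.
        return new_type == STRING ? STRING : GENERIC;
      }
      return std::max(old_type, new_type);
    }

    int main() {
      printf("%d\n", NextInputState(SMI, INT32));      // 2 (INT32)
      printf("%d\n", NextInputState(NUMBER, SMI));     // 3 (NUMBER)
      printf("%d\n", NextInputState(STRING, STRING));  // 5 (STRING)
      printf("%d\n", NextInputState(STRING, NUMBER));  // 6 (GENERIC)
      return 0;
    }

Monotone transitions like this are what guarantee the IC eventually stabilizes instead of flip-flopping between specialized stubs.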
#ifdef DEBUG
static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
BinaryOpIC::TypeInfo right,
Maybe<int32_t> fixed_right_arg,
BinaryOpIC::TypeInfo result) {
PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value);
PrintF("->%s", BinaryOpIC::GetName(result));
}
#endif
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
ASSERT(args.length() == 3);
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
BinaryOpIC ic(isolate);
return ic.Transition(left, right);
int key = args.smi_at(2);
Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
BinaryOpIC::TypeInfo previous_left, previous_right, previous_result;
BinaryOpStub::decode_types_from_minor_key(
key, &previous_left, &previous_right, &previous_result);
BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
// STRING is only used for ADD operations.
if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
op != Token::ADD) {
new_left = new_right = BinaryOpIC::GENERIC;
}
BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
Maybe<int> previous_fixed_right_arg =
BinaryOpStub::decode_fixed_right_arg_from_minor_key(key);
int32_t value;
bool new_has_fixed_right_arg =
op == Token::MOD &&
right->ToInt32(&value) &&
BinaryOpStub::can_encode_arg_value(value) &&
(previous_overall == BinaryOpIC::UNINITIALIZED ||
(previous_fixed_right_arg.has_value &&
previous_fixed_right_arg.value == value));
Maybe<int32_t> new_fixed_right_arg(
new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1);
if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) {
if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
if (op == Token::DIV ||
op == Token::MUL ||
op == Token::SHR ||
SmiValuesAre32Bits()) {
// Arithmetic on two Smi inputs has yielded a heap number.
// That is the only way to get here from the Smi stub.
// With 32-bit Smis, all overflows give heap numbers, but with
// 31-bit Smis, most operations overflow to int32 results.
result_type = BinaryOpIC::NUMBER;
} else {
// Other operations on SMIs that overflow yield int32s.
result_type = BinaryOpIC::INT32;
}
}
if (new_overall == BinaryOpIC::INT32 &&
previous_overall == BinaryOpIC::INT32) {
if (new_left == previous_left && new_right == previous_right) {
result_type = BinaryOpIC::NUMBER;
}
}
}
BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg);
Handle<Code> code = stub.GetCode(isolate);
if (!code.is_null()) {
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[BinaryOpIC in ");
JavaScriptFrame::PrintTop(isolate, stdout, false, true);
PrintF(" ");
TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg,
previous_result);
PrintF(" => ");
TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type);
PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
}
#endif
BinaryOpIC ic(isolate);
ic.patch(*code);
// Activate inlined smi code.
if (previous_overall == BinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
}
}
Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
Object* builtin = NULL; // Initialization calms down the compiler.
switch (op) {
case Token::ADD:
builtin = builtins->javascript_builtin(Builtins::ADD);
break;
case Token::SUB:
builtin = builtins->javascript_builtin(Builtins::SUB);
break;
case Token::MUL:
builtin = builtins->javascript_builtin(Builtins::MUL);
break;
case Token::DIV:
builtin = builtins->javascript_builtin(Builtins::DIV);
break;
case Token::MOD:
builtin = builtins->javascript_builtin(Builtins::MOD);
break;
case Token::BIT_AND:
builtin = builtins->javascript_builtin(Builtins::BIT_AND);
break;
case Token::BIT_OR:
builtin = builtins->javascript_builtin(Builtins::BIT_OR);
break;
case Token::BIT_XOR:
builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
break;
case Token::SHR:
builtin = builtins->javascript_builtin(Builtins::SHR);
break;
case Token::SAR:
builtin = builtins->javascript_builtin(Builtins::SAR);
break;
case Token::SHL:
builtin = builtins->javascript_builtin(Builtins::SHL);
break;
default:
UNREACHABLE();
}
Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
bool caught_exception;
Handle<Object> builtin_args[] = { right };
Handle<Object> result = Execution::Call(isolate,
builtin_function,
left,
ARRAY_SIZE(builtin_args),
builtin_args,
&caught_exception);
if (caught_exception) {
return Failure::Exception();
}
return *result;
}
@@ -2743,47 +2940,6 @@ RUNTIME_FUNCTION(MaybeObject*, Unreachable) {
}
Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
switch (op) {
default:
UNREACHABLE();
case Token::ADD:
return Builtins::ADD;
break;
case Token::SUB:
return Builtins::SUB;
break;
case Token::MUL:
return Builtins::MUL;
break;
case Token::DIV:
return Builtins::DIV;
break;
case Token::MOD:
return Builtins::MOD;
break;
case Token::BIT_OR:
return Builtins::BIT_OR;
break;
case Token::BIT_AND:
return Builtins::BIT_AND;
break;
case Token::BIT_XOR:
return Builtins::BIT_XOR;
break;
case Token::SAR:
return Builtins::SAR;
break;
case Token::SHR:
return Builtins::SHR;
break;
case Token::SHL:
return Builtins::SHL;
break;
}
}
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
Code::ExtraICState extra_ic_state) {
ToBooleanStub stub(extra_ic_state);

View File

@@ -57,8 +57,8 @@ namespace internal {
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
ICU(BinaryOp_Patch) \
ICU(CompareIC_Miss) \
ICU(BinaryOpIC_Miss) \
ICU(CompareNilIC_Miss) \
ICU(Unreachable) \
ICU(ToBooleanIC_Miss)
@@ -735,14 +735,22 @@ class BinaryOpIC: public IC {
GENERIC
};
explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
static void StubInfoToType(int minor_key,
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
Isolate* isolate);
static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
void patch(Code* code);
static const char* GetName(TypeInfo type_info);
MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
Handle<Object> right);
static State ToState(TypeInfo type_info);
private:
static Handle<Type> TypeInfoToType(TypeInfo binary_type, Isolate* isolate);
};
@@ -849,7 +857,6 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);

View File

@@ -2338,7 +2338,6 @@ bool Isolate::Init(Deserializer* des) {
DONT_TRACK_ALLOCATION_SITE, 0);
stub.InitializeInterfaceDescriptor(
this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
BinaryOpStub::InitializeForIsolate(this);
CompareNilICStub::InitializeForIsolate(this);
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);

View File

@@ -1610,12 +1610,7 @@ void Logger::LogCodeObject(Object* object) {
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
case Code::BINARY_OP_IC: {
BinaryOpStub stub(code_object->extended_extra_ic_state());
description = stub.GetName().Detach();
tag = Logger::STUB_TAG;
break;
}
case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::COMPARE_NIL_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through

View File

@@ -4883,8 +4883,7 @@ class Code: public HeapObject {
// TODO(danno): This is a bit of a hack right now since there are still
// clients of this API that pass "extra" values in for argc. These clients
// should be retrofitted to use ExtendedExtraICState.
return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
kind == BINARY_OP_IC;
return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
}
inline StubType type(); // Only valid for monomorphic IC stubs.

View File

@@ -1056,7 +1056,7 @@ intptr_t PagedSpace::SizeOfFirstPage() {
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
size = 72 * kPointerSize * KB;
size = 64 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;

View File

@@ -381,29 +381,20 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
Maybe<int>* fixed_right_arg,
Token::Value operation) {
Maybe<int>* fixed_right_arg) {
Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
// For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
// operations covered by the BinaryOpStub we should always have them.
ASSERT(!(operation >= BinaryOpStub::FIRST_TOKEN &&
operation <= BinaryOpStub::LAST_TOKEN));
// For some binary ops we don't have ICs, e.g. Token::COMMA.
*left = *right = *result = handle(Type::None(), isolate_);
return;
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT(code->is_binary_op_stub());
BinaryOpStub stub(code->extended_extra_ic_state());
// Sanity check.
ASSERT(stub.operation() == operation);
*left = stub.GetLeftType(isolate());
*right = stub.GetRightType(isolate());
*result = stub.GetResultType(isolate());
*fixed_right_arg = stub.fixed_right_arg();
int minor_key = code->stub_info();
BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate());
*fixed_right_arg =
BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key);
}
@@ -419,15 +410,36 @@ Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) {
}
Handle<Type> TypeFeedbackOracle::IncrementType(CountOperation* expr) {
TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
Handle<Type> unknown(Type::None(), isolate_);
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
BinaryOpStub stub(code->extended_extra_ic_state());
return stub.GetLeftType(isolate());
BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
&right_type, &unused_result_type);
// CountOperations should always have +1 or -1 as their right input.
ASSERT(right_type == BinaryOpIC::SMI ||
right_type == BinaryOpIC::UNINITIALIZED);
switch (left_type) {
case BinaryOpIC::UNINITIALIZED:
case BinaryOpIC::SMI:
return TypeInfo::Smi();
case BinaryOpIC::INT32:
return TypeInfo::Integer32();
case BinaryOpIC::NUMBER:
return TypeInfo::Double();
case BinaryOpIC::STRING:
case BinaryOpIC::GENERIC:
return unknown;
default:
return unknown;
}
UNREACHABLE();
return unknown;
}

View File

@@ -301,8 +301,7 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
Maybe<int>* fixed_right_arg,
Token::Value operation);
Maybe<int>* fixed_right_arg);
void CompareType(TypeFeedbackId id,
Handle<Type>* left,
@@ -311,7 +310,7 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Type> ClauseType(TypeFeedbackId id);
Handle<Type> IncrementType(CountOperation* expr);
TypeInfo IncrementType(CountOperation* expr);
Zone* zone() const { return zone_; }
Isolate* isolate() const { return isolate_; }

View File

@@ -543,7 +543,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
&left_type, &right_type, &type, &fixed_right_arg, expr->op());
&left_type, &right_type, &type, &fixed_right_arg);
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);

View File

@@ -204,6 +204,7 @@ namespace internal {
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \

View File

@@ -155,18 +155,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
}
void BinaryOpStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -459,8 +447,35 @@ class FloatingPointHelper : public AllStatic {
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
static void LoadSSE2SmiOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
// Takes the operands in rdx and rax and loads them as integers in rax
// and rcx.
static void LoadAsIntegers(MacroAssembler* masm,
Label* operand_conversion_failure,
Register heap_number_map);
// Tries to convert two values to smis losslessly.
// This fails if either argument is neither a Smi nor a HeapNumber,
// or if it's a HeapNumber with a value that can't be converted
// losslessly to a Smi. In that case, control transitions to the
// on_not_smis label.
// On success, either control goes to the on_success label (if one is
// provided), or it falls through at the end of the code (if on_success
// is NULL).
// On success, both first and second hold Smi tagged values.
// One of first or second must be non-Smi when entering.
static void NumbersToSmis(MacroAssembler* masm,
Register first,
Register second,
Register scratch1,
Register scratch2,
Register scratch3,
Label* on_success,
Label* on_not_smis,
ConvertUndefined convert_undefined);
};
@@ -548,6 +563,569 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
void BinaryOpStub::Initialize() {}
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx);
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
__ Push(Smi::FromInt(MinorKey()));
__ PushReturnAddressFrom(rcx);
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
3,
1);
}
static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
Token::Value op) {
// Arguments to BinaryOpStub are in rdx and rax.
const Register left = rdx;
const Register right = rax;
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
(op == Token::ADD || op == Token::SUB ||
op == Token::MUL || op == Token::DIV || op == Token::SHR);
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
Label not_smis;
Label use_fp_on_smis;
Label fail;
if (op != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}
Label smi_values;
__ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
switch (op) {
case Token::ADD:
ASSERT(right.is(rax));
__ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
break;
case Token::SUB:
__ SmiSub(left, left, right, &use_fp_on_smis);
__ movq(rax, left);
break;
case Token::MUL:
ASSERT(right.is(rax));
__ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
break;
case Token::DIV:
// SmiDiv will not accept left in rdx or right in rax.
__ movq(rbx, rax);
__ movq(rcx, rdx);
__ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
break;
case Token::MOD:
// SmiMod will not accept left in rdx or right in rax.
__ movq(rbx, rax);
__ movq(rcx, rdx);
__ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
break;
case Token::BIT_OR: {
ASSERT(right.is(rax));
__ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
break;
}
case Token::BIT_XOR:
ASSERT(right.is(rax));
__ SmiXor(right, right, left); // BIT_XOR is commutative.
break;
case Token::BIT_AND:
ASSERT(right.is(rax));
__ SmiAnd(right, right, left); // BIT_AND is commutative.
break;
case Token::SHL:
__ SmiShiftLeft(left, left, right);
__ movq(rax, left);
break;
case Token::SAR:
__ SmiShiftArithmeticRight(left, left, right);
__ movq(rax, left);
break;
case Token::SHR:
__ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
__ movq(rax, left);
break;
default:
UNREACHABLE();
}
// 5. Emit return of result in rax. Some operations have registers pushed.
__ ret(0);
if (use_fp_on_smis.is_linked()) {
// 6. For some operations emit inline code to perform floating point
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
__ bind(&use_fp_on_smis);
if (op == Token::DIV || op == Token::MOD) {
// Restore left and right to rdx and rax.
__ movq(rdx, rcx);
__ movq(rax, rbx);
}
if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
if (op == Token::SHR) {
__ SmiToInteger32(left, left);
__ cvtqsi2sd(xmm0, left);
} else {
FloatingPointHelper::LoadSSE2SmiOperands(masm);
switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
}
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
} else {
__ jmp(&fail);
}
}
// 7. Non-smi operands reach the end of the code generated by
// GenerateSmiCode, and fall through to subsequent code,
// with the operands in rdx and rax.
// But first we check whether the non-smi values are HeapNumbers holding
// values that can be represented as smis.
__ bind(&not_smis);
Comment done_comment(masm, "-- Enter non-smi code");
FloatingPointHelper::ConvertUndefined convert_undefined =
FloatingPointHelper::BAILOUT_ON_UNDEFINED;
// This list must be in sync with BinaryOpPatch() behavior in ic.cc.
if (op == Token::BIT_AND ||
op == Token::BIT_OR ||
op == Token::BIT_XOR ||
op == Token::SAR ||
op == Token::SHL ||
op == Token::SHR) {
convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
}
FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
&smi_values, &fail, convert_undefined);
__ jmp(&smi_values);
__ bind(&fail);
}
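// Illustrative sketch (not part of the stub): the overflow fallback that
// SmiAdd and the use_fp_on_smis label above implement in assembly. With
// kSmiValueSize == 32 on x64, a sum outside the int32 range cannot be
// re-tagged as a smi, so the stub produces a HeapNumber instead. The helper
// name below is hypothetical.
#include <cstdint>
static bool AddSmisOrOverflow(int32_t left, int32_t right, int32_t* result) {
  int64_t wide = static_cast<int64_t>(left) + static_cast<int64_t>(right);
  if (wide < INT32_MIN || wide > INT32_MAX) {
    return false;  // corresponds to jumping to use_fp_on_smis
  }
  *result = static_cast<int32_t>(wide);
  return true;
}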
static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
Label* alloc_failure,
OverwriteMode mode);
static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
Label* allocation_failure,
Label* non_numeric_failure,
Token::Value op,
OverwriteMode mode) {
switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
BinaryOpStub_GenerateHeapResultAllocation(
masm, allocation_failure, mode);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
break;
}
case Token::MOD: {
// For MOD we jump to the allocation_failure label, to call runtime.
__ jmp(allocation_failure);
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SAR:
case Token::SHL:
case Token::SHR: {
Label non_smi_shr_result;
Register heap_number_map = r9;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
switch (op) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl_cl(rax); break;
case Token::SHL: __ shll_cl(rax); break;
case Token::SHR: {
__ shrl_cl(rax);
// Check if the result is negative. This can only happen for a shift
// by zero, because any nonzero shift amount clears the sign bit.
__ testl(rax, rax);
__ j(negative, &non_smi_shr_result);
break;
}
default: UNREACHABLE();
}
STATIC_ASSERT(kSmiValueSize == 32);
// Tag smi result and return.
__ Integer32ToSmi(rax, rax);
__ Ret();
// Logical shift right can produce an unsigned 32-bit value that does not
// fit in a signed int32, and so is not in the smi range. Allocate a heap
// number in that case.
if (op == Token::SHR) {
__ bind(&non_smi_shr_result);
Label allocation_failed;
__ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
// Allocate heap number in new space.
// Not using AllocateHeapNumber macro in order to reuse
// already loaded heap_number_map.
__ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
TAG_OBJECT);
// Set the map.
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
kHeapNumberMapRegisterClobbered);
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ cvtqsi2sd(xmm0, rbx);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ Ret();
__ bind(&allocation_failed);
// We need tagged values in rdx and rax for the following code,
// not int32 in rax and rcx.
__ Integer32ToSmi(rax, rcx);
__ Integer32ToSmi(rdx, rbx);
__ jmp(allocation_failure);
}
break;
}
default: UNREACHABLE(); break;
}
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
}
}
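// Illustrative sketch (not part of the stub): why Token::SHR needs the
// non_smi_shr_result fallback above. JS >>> yields an unsigned 32-bit value,
// but with kSmiValueSize == 32 only values up to INT32_MAX can be smi
// tagged, so a zero shift of an operand with the sign bit set must be boxed
// as a HeapNumber. The helper name is hypothetical.
#include <cstdint>
static bool ShrResultFitsInSmi(uint32_t lhs, uint32_t shift_amount) {
  uint32_t result = lhs >> (shift_amount & 31);  // JS >>> semantics
  return result <= static_cast<uint32_t>(INT32_MAX);
  // e.g. ShrResultFitsInSmi(0x80000000u, 0) is false: allocate a heap number.
}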
static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
MacroAssembler* masm) {
// Push arguments, but ensure they are under the return address
// for a tail call.
__ PopReturnAddressTo(rcx);
__ push(rdx);
__ push(rax);
__ PushReturnAddressFrom(rcx);
}
void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register left = rdx;
Register right = rax;
// Test if left operand is a string.
__ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &left_not_string, Label::kNear);
StringAddStub string_add_left_stub(
(StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_left_stub);
// Left operand is not a string, test right.
__ bind(&left_not_string);
__ JumpIfSmi(right, &call_runtime, Label::kNear);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime, Label::kNear);
StringAddStub string_add_right_stub(
(StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_right_stub);
// Neither argument is a string.
__ bind(&call_runtime);
}
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label right_arg_changed, call_runtime;
if (op_ == Token::MOD && encoded_right_arg_.has_value) {
// It is guaranteed that the value will fit into a Smi, because if it
// didn't, we wouldn't be here, see BinaryOp_Patch.
__ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
__ j(not_equal, &right_arg_changed);
}
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
BinaryOpStub_GenerateSmiCode(
masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
// Code falls through if the result is not returned as either a smi or heap
// number.
__ bind(&right_arg_changed);
GenerateTypeTransition(masm);
if (call_runtime.is_linked()) {
__ bind(&call_runtime);
{
FrameScope scope(masm, StackFrame::INTERNAL);
GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
__ Ret();
}
}
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// The int32 case is identical to the Smi case. We avoid creating this
// ic state on x64.
UNREACHABLE();
}
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
// Registers containing left and right operands respectively.
Register left = rdx;
Register right = rax;
// Test if left operand is a string.
__ JumpIfSmi(left, &call_runtime);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime);
// Test if right operand is a string.
__ JumpIfSmi(right, &call_runtime);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime);
StringAddStub string_add_stub(
(StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
__ TailCallStub(&string_add_stub);
__ bind(&call_runtime);
GenerateTypeTransition(masm);
}
void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
Label call_runtime;
if (op_ == Token::ADD) {
// Handle string addition here, because it is the only operation
// that does not do a ToNumber conversion on the operands.
GenerateAddStrings(masm);
}
// Convert oddball arguments to numbers.
Label check, done;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(rdx, rdx);
} else {
__ LoadRoot(rdx, Heap::kNanValueRootIndex);
}
__ jmp(&done, Label::kNear);
__ bind(&check);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &done, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(rax, rax);
} else {
__ LoadRoot(rax, Heap::kNanValueRootIndex);
}
__ bind(&done);
GenerateNumberStub(masm);
}
static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
Register input,
Label* fail) {
Label ok;
__ JumpIfSmi(input, &ok, Label::kNear);
Register heap_number_map = r8;
Register scratch1 = r9;
Register scratch2 = r10;
// HeapNumbers containing 32-bit integer values are also allowed.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, fail);
__ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
// Convert, convert back, and compare the two doubles' bits.
__ cvttsd2siq(scratch2, xmm0);
__ Cvtlsi2sd(xmm1, scratch2);
__ movq(scratch1, xmm0);
__ movq(scratch2, xmm1);
__ cmpq(scratch1, scratch2);
__ j(not_equal, fail);
__ bind(&ok);
}
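// Illustrative sketch (not part of the stub): the lossless-conversion test
// that BinaryOpStub_CheckSmiInput emits above with cvttsd2siq/Cvtlsi2sd and a
// bit comparison. The helper name is hypothetical; the bit comparison also
// rejects NaN and -0.0, which cannot be smis. Note the C++ cast is undefined
// for NaN and out-of-range inputs, where the hardware instruction instead
// yields the integer indefinite value; the sketch ignores that difference.
#include <cstdint>
#include <cstring>
static bool HoldsInt32Value(double value) {
  int64_t truncated = static_cast<int64_t>(value);                  // cvttsd2siq
  double round_trip = static_cast<double>(static_cast<int32_t>(truncated));
  uint64_t original_bits, round_trip_bits;
  std::memcpy(&original_bits, &value, sizeof(original_bits));
  std::memcpy(&round_trip_bits, &round_trip, sizeof(round_trip_bits));
  return original_bits == round_trip_bits;
}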
void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
// It could be that only SMIs have been seen at either the left
// or the right operand. For precise type feedback, patch the IC
// again if this changes.
if (left_type_ == BinaryOpIC::SMI) {
BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
}
if (right_type_ == BinaryOpIC::SMI) {
BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
}
BinaryOpStub_GenerateFloatingPointCode(
masm, &gc_required, &not_number, op_, mode_);
__ bind(&not_number);
GenerateTypeTransition(masm);
__ bind(&gc_required);
{
FrameScope scope(masm, StackFrame::INTERNAL);
GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
__ Ret();
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
BinaryOpStub_GenerateSmiCode(
masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
BinaryOpStub_GenerateFloatingPointCode(
masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
GenerateAddStrings(masm);
}
__ bind(&call_runtime);
{
FrameScope scope(masm, StackFrame::INTERNAL);
GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
__ Ret();
}
static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
Label* alloc_failure,
OverwriteMode mode) {
Label skip_allocation;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in rdx is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(rdx, &skip_allocation);
// Allocate a heap number for the result. Keep rax and rdx intact
// for the possible runtime call.
__ AllocateHeapNumber(rbx, rcx, alloc_failure);
// Now rdx can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
__ movq(rdx, rbx);
__ bind(&skip_allocation);
// Use object in rdx as a result holder
__ movq(rax, rdx);
break;
}
case OVERWRITE_RIGHT:
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep rax and rdx intact
// for the possible runtime call.
__ AllocateHeapNumber(rbx, rcx, alloc_failure);
// Now rax can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
__ movq(rax, rbx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
}
void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
__ push(rdx);
__ push(rax);
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@ -854,6 +1432,67 @@ void TranscendentalCacheStub::GenerateOperation(
}
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
Register heap_number_map) {
// Check float operands.
Label arg1_is_object, check_undefined_arg1;
Label arg2_is_object, check_undefined_arg2;
Label load_arg2, done;
__ JumpIfNotSmi(rdx, &arg1_is_object);
__ SmiToInteger32(r8, rdx);
__ jmp(&load_arg2);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg1);
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
__ Set(r8, 0);
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the rdx heap number in r8.
__ TruncateHeapNumberToI(r8, rdx);
// Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
__ JumpIfNotSmi(rax, &arg2_is_object);
__ SmiToInteger32(rcx, rax);
__ jmp(&done);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg2);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
__ Set(rcx, 0);
__ jmp(&done);
__ bind(&arg2_is_object);
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the rax heap number in rcx.
__ TruncateHeapNumberToI(rcx, rax);
__ bind(&done);
__ movl(rax, r8);
}
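// Illustrative sketch (not part of the stub): the per-operand policy that
// LoadAsIntegers implements above. Smis untag directly, heap numbers are
// truncated (TruncateHeapNumberToI), undefined converts to zero per
// ECMA-262 section 9.5, and anything else bails out to conversion_failure.
// The enum and helper are hypothetical, and the sketch omits the modulo-2^32
// wrapping a real ToInt32 performs for doubles outside the int64 range.
#include <cstdint>
enum OperandKind { kSmiOperand, kHeapNumberOperand, kUndefinedOperand, kOtherOperand };

static bool LoadOperandAsInt32(OperandKind kind, int32_t smi_value,
                               double heap_number_value, int32_t* out) {
  switch (kind) {
    case kSmiOperand:
      *out = smi_value;
      return true;
    case kHeapNumberOperand:
      *out = static_cast<int32_t>(static_cast<int64_t>(heap_number_value));
      return true;
    case kUndefinedOperand:
      *out = 0;
      return true;
    default:
      return false;  // conversion_failure
  }
}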
void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
__ SmiToInteger32(kScratchRegister, rdx);
__ Cvtlsi2sd(xmm0, kScratchRegister);
__ SmiToInteger32(kScratchRegister, rax);
__ Cvtlsi2sd(xmm1, kScratchRegister);
}
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@ -884,6 +1523,83 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
}
void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
Register first,
Register second,
Register scratch1,
Register scratch2,
Register scratch3,
Label* on_success,
Label* on_not_smis,
ConvertUndefined convert_undefined) {
Register heap_number_map = scratch3;
Register smi_result = scratch1;
Label done, maybe_undefined_first, maybe_undefined_second, first_done;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Label first_smi;
__ JumpIfSmi(first, &first_smi, Label::kNear);
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal,
(convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
? &maybe_undefined_first
: on_not_smis);
// Convert HeapNumber to smi if possible.
__ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
__ cvttsd2siq(smi_result, xmm0);
// Check if conversion was successful by converting back and
// comparing to the original double's bits.
__ Cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
__ cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
__ Integer32ToSmi(first, smi_result);
__ bind(&first_done);
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
__ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal,
(convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
? &maybe_undefined_second
: on_not_smis);
// Convert second to smi, if possible.
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
__ cvttsd2siq(smi_result, xmm0);
__ Cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
__ cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
__ Integer32ToSmi(second, smi_result);
if (on_success != NULL) {
__ jmp(on_success);
} else {
__ jmp(&done);
}
__ bind(&maybe_undefined_first);
__ CompareRoot(first, Heap::kUndefinedValueRootIndex);
__ j(not_equal, on_not_smis);
__ xor_(first, first);
__ jmp(&first_done);
__ bind(&maybe_undefined_second);
__ CompareRoot(second, Heap::kUndefinedValueRootIndex);
__ j(not_equal, on_not_smis);
__ xor_(second, second);
if (on_success != NULL) {
__ jmp(on_success);
}
// Else: fall through.
__ bind(&done);
}
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = rdx;
const Register base = rax;
@ -2750,7 +3466,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
BinaryOpStub::GenerateAheadOfTime(isolate);
}