From b93a0f5d9abcbbb664db4e6166397841ca22c98f Mon Sep 17 00:00:00 2001
From: "fschneider@chromium.org"
Date: Wed, 15 Dec 2010 13:56:41 +0000
Subject: [PATCH] Second attempt: Improve our type feedback by recognizing
 never-executed IC calls for binary operations.

This is an improved version of my earlier change r5970. It avoids
degrading the non-optimized code.

Initially we emit a conditional branch that is either always or never
taken after a smi check (depending on whether we test for smi or for
non-smi). Since test eax always sets the carry flag to 0, we use
jump-if-carry and jump-if-not-carry for these branches. The first
invocation of the stub patches the jc into a jz and the jnc into a jnz
instruction, so that afterwards the code looks exactly as it would have
without patching. The only difference is the test or nop instruction
after the IC call.

Review URL: http://codereview.chromium.org/5763004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6030 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm/ic-arm.cc                |  10 ++-
 src/full-codegen.h               |   7 ++
 src/ia32/assembler-ia32.h        |   9 ++
 src/ia32/code-stubs-ia32.cc      |   4 +-
 src/ia32/code-stubs-ia32.h       |   6 +-
 src/ia32/full-codegen-ia32.cc    | 141 +++++++++++++++++++++++--------
 src/ia32/ic-ia32.cc              |  54 +++++++++++-
 src/ia32/lithium-codegen-ia32.cc |   7 ++
 src/ic.cc                        |  23 +++--
 src/ic.h                         |   5 +-
 src/type-info.cc                 |   9 ++
 src/x64/ic-x64.cc                |   8 +-
 12 files changed, 226 insertions(+), 57 deletions(-)
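To make the patching scheme concrete, here is a minimal standalone sketch
(illustrative C++ only, not V8 source; the buffer layout, the offsets, and
the helper name are invented for the example). It models the inlined smi
check as raw bytes and applies the same one-byte patch that the real
PatchInlinedSmiCode below performs on a code object:

#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

// Short Jcc opcodes on ia32 are 0x70 | condition code.
const uint8_t kJccShortPrefix = 0x70;
const uint8_t kJc  = kJccShortPrefix | 0x2;  // jump if carry
const uint8_t kJnc = kJccShortPrefix | 0x3;  // jump if not carry
const uint8_t kJz  = kJccShortPrefix | 0x4;  // jump if zero
const uint8_t kJnz = kJccShortPrefix | 0x5;  // jump if not zero
const uint8_t kTestAlByte = 0xA8;            // test al, imm8

// Rewrite the always/never-taken jc/jnc into the jz/jnz that the smi check
// really needs. test_pos is the offset of the test al marker that follows
// the IC call; its immediate byte is the delta back to the jump.
void PatchInlinedSmiCheck(uint8_t* code, int test_pos) {
  if (code[test_pos] != kTestAlByte) return;  // nop marker: nothing inlined.
  int8_t delta = static_cast<int8_t>(code[test_pos + 1]);
  uint8_t* jmp = &code[test_pos - delta];
  assert(*jmp == kJc || *jmp == kJnc);
  *jmp = (*jmp == kJnc) ? kJnz : kJz;  // Same length, new condition.
}

}  // namespace

int main() {
  uint8_t code[] = {
      0xA8, 0x01,         // test al, 1   (smi check; always clears CF)
      kJnc, 0x05,         // jnc +5       (always taken until patched)
      0xE8, 0, 0, 0, 0,   // call <ic>    (dummy displacement)
      kTestAlByte, 0x07,  // test al, 7   (marker; 7 bytes back to the jnc)
  };
  PatchInlinedSmiCheck(code, 9);
  assert(code[2] == kJnz);  // Now only non-smis take the slow path.
  printf("patched jnc -> jnz\n");
  return 0;
}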
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index a75d96bfdd..e5a1bae968 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -2360,10 +2360,8 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   HandleScope scope;
   Handle<Code> rewritten;
-#ifdef DEBUG
   State previous_state = GetState();
-#endif
-  State state = TargetState(x, y);
+  State state = TargetState(previous_state, false, x, y);
   if (state == GENERIC) {
     CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
     rewritten = stub.GetCode();
@@ -2383,6 +2381,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 #endif
 }
 
+
+void PatchInlinedSmiCode(Address address) {
+  UNIMPLEMENTED();
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/full-codegen.h b/src/full-codegen.h
index e0fd192a21..af58941d3a 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -38,6 +38,9 @@ namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class JumpPatchSite;
+
 // AST node visitor which can tell whether a given statement will be breakable
 // when the code is compiled by the full compiler in the debugger. This means
 // that there will be an IC (load/store/call) in the code generated for the
@@ -533,6 +536,10 @@ class FullCodeGenerator: public AstVisitor {
   // Helper for calling an IC stub.
   void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
 
+  // Calling an IC stub with a patch site. Passing NULL for patch_site
+  // indicates no inlined smi code and emits a nop after the IC call.
+  void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
+
   // Set fields in the stack frame. Offsets are the frame pointer relative
   // offsets defined in, e.g., StandardFrameConstants.
   void StoreToFrameField(int frame_offset, Register value);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 737b57f395..11acb56110 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -571,6 +571,15 @@ class Assembler : public Malloced {
   static const byte kTestEaxByte = 0xA9;
   // One byte opcode for test al, 0xXX.
   static const byte kTestAlByte = 0xA8;
+  // One byte opcode for nop.
+  static const byte kNopByte = 0x90;
+
+  // One byte opcode for a short unconditional jump.
+  static const byte kJmpShortOpcode = 0xEB;
+  // One byte prefix for a short conditional jump.
+  static const byte kJccShortPrefix = 0x70;
+  static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+  static const byte kJcShortOpcode = kJccShortPrefix | carry;
 
   // ---------------------------------------------------------------------------
   // Code generation
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 78d00706e2..1f810a00f9 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1366,8 +1366,8 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
   if (op_ == Token::DIV || op_ == Token::MOD) {
     left = eax;
     right = ebx;
-    __ mov(ebx, eax); 
-    __ mov(eax, edx); 
+    __ mov(ebx, eax);
+    __ mov(eax, edx);
   }
 
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 10fee97a9c..04f23ace9b 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -250,7 +250,8 @@ class TypeRecordingBinaryOpStub: public CodeStub {
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(int key,
+  TypeRecordingBinaryOpStub(
+      int key,
       TRBinaryOpIC::TypeInfo operands_type,
       TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
@@ -258,8 +259,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
         use_sse3_(SSE3Bits::decode(key)),
         operands_type_(operands_type),
         result_type_(result_type),
-        name_(NULL) {
-  }
+        name_(NULL) { }
 
   // Generate code to call the stub with the supplied arguments. This will add
   // code at the call site to prepare arguments either in registers or on the
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 4c4a1208f1..65f74e3560 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -41,8 +41,61 @@ namespace v8 {
 namespace internal {
 
+
 #define __ ACCESS_MASM(masm_)
 
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm)
+      : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+    __ test(reg, Immediate(kSmiTagMask));
+    EmitJump(not_carry, target);  // Always taken before patched.
+  }
+
+  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+    __ test(reg, Immediate(kSmiTagMask));
+    EmitJump(carry, target);  // Never taken before patched.
+  }
+
+  void EmitPatchInfo() {
+    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+    ASSERT(is_int8(delta_to_patch_site));
+    __ test(eax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+  bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+  // jc will be patched with jz, jnc will become jnz.
+  void EmitJump(Condition cc, NearLabel* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    ASSERT(cc == carry || cc == not_carry);
+    __ bind(&patch_site_);
+    __ j(cc, target);
+  }
+
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
@@ -715,12 +768,13 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
     // Perform the comparison as if via '==='.
     __ mov(edx, Operand(esp, 0));  // Switch value.
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
       NearLabel slow_case;
       __ mov(ecx, edx);
       __ or_(ecx, Operand(eax));
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &slow_case, not_taken);
+      patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+
       __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
@@ -730,9 +784,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
 
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
-
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    __ call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, &patch_site);
 
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
@@ -1556,12 +1609,11 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
-  NearLabel call_stub;
-  Label done;
+  NearLabel call_stub, done;
   __ add(Operand(eax), Immediate(value));
   __ j(overflow, &call_stub);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &done);
 
   // Undo the optimistic add operation and call the shared stub.
   __ bind(&call_stub);
@@ -1574,7 +1626,8 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
     __ mov(edx, eax);
     __ mov(eax, Immediate(value));
   }
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
   __ bind(&done);
   context()->Plug(eax);
 }
@@ -1584,7 +1637,7 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
-  Label call_stub, done;
+  NearLabel call_stub, done;
   if (left_is_constant_smi) {
     __ mov(ecx, eax);
     __ mov(eax, Immediate(value));
@@ -1593,8 +1646,8 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
     __ sub(Operand(eax), Immediate(value));
   }
   __ j(overflow, &call_stub);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &done);
 
   __ bind(&call_stub);
   if (left_is_constant_smi) {
@@ -1607,7 +1660,8 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
   }
   Token::Value op = Token::SUB;
   TypeRecordingBinaryOpStub stub(op, mode);
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
   __ bind(&done);
   context()->Plug(eax);
 }
@@ -1617,19 +1671,21 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
                                                Token::Value op,
                                                OverwriteMode mode,
                                                Smi* value) {
-  Label call_stub, smi_case, done;
+  NearLabel call_stub, smi_case, done;
   int shift_value = value->value() & 0x1f;
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
 
+  // Call stub.
   __ bind(&call_stub);
   __ mov(edx, eax);
   __ mov(eax, Immediate(value));
   TypeRecordingBinaryOpStub stub(op, mode);
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   switch (op) {
     case Token::SHL:
@@ -1679,17 +1735,19 @@ void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
                                              Token::Value op,
                                              OverwriteMode mode,
                                              Smi* value) {
-  Label smi_case, done;
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  NearLabel smi_case, done;
+
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
 
   // The order of the arguments does not matter for bit-ops with a
   // constant operand.
   __ mov(edx, Immediate(value));
   TypeRecordingBinaryOpStub stub(op, mode);
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   switch (op) {
     case Token::BIT_OR:
@@ -1757,19 +1815,20 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
-  Label done, stub_call, smi_case;
+  NearLabel done, smi_case, stub_call;
   __ pop(edx);
   __ mov(ecx, eax);
   __ or_(eax, Operand(edx));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
   TypeRecordingBinaryOpStub stub(op, mode);
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   __ mov(eax, edx);  // Copy left operand in case of a stub call.
@@ -1848,7 +1907,7 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
   TypeRecordingBinaryOpStub stub(op, mode);
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), NULL);  // NULL signals no inlined smi code.
   context()->Plug(eax);
 }
@@ -3720,8 +3779,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   }
 
   // Inline smi case if we are in a loop.
-  NearLabel stub_call;
-  Label done;
+  NearLabel stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
       __ add(Operand(eax), Immediate(Smi::FromInt(1)));
@@ -3731,8 +3791,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     __ j(overflow, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &done);
+    patch_site.EmitJumpIfSmi(eax, &done);
+
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
@@ -3750,7 +3810,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   __ mov(eax, Immediate(Smi::FromInt(1)));
   TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);
 
   // Store the value returned in eax.
@@ -4023,21 +4083,22 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
       }
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         NearLabel slow_case;
         __ mov(ecx, Operand(edx));
         __ or_(ecx, Operand(eax));
-        __ test(ecx, Immediate(kSmiTagMask));
-        __ j(not_zero, &slow_case, not_taken);
+        patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
         __ cmp(edx, Operand(eax));
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
 
       // Record position and call the compare IC.
-      Handle<Code> ic = CompareIC::GetUninitialized(op);
       SetSourcePosition(expr->position());
-      __ call(ic, RelocInfo::CODE_TARGET);
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site);
+
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       Split(cc, if_true, if_false, fall_through);
@@ -4141,6 +4202,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
 }
 
 
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  __ call(ic, RelocInfo::CODE_TARGET);
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined smi code.
+  }
+}
+
+
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ mov(Operand(ebp, frame_offset), value);
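The control-flow effect of the patch can be checked in isolation. The
following sketch (plain C++, not V8 source; the Flags model covers only
what the test instruction leaves behind) shows why the unpatched jnc from
EmitJumpIfNotSmi always reaches the IC, while the patched jnz takes the
slow path only for non-smis:

#include <cassert>

struct Flags { bool carry; bool zero; };

// test reg, kSmiTagMask: ZF = ((reg & 1) == 0), CF = 0.  On ia32 a smi
// is tagged with a 0 low bit, a heap object pointer with a 1.
Flags SmiCheck(unsigned value) { return Flags{false, (value & 1u) == 0}; }

bool TakenJnc(Flags f) { return !f.carry; }  // Before patching: always.
bool TakenJnz(Flags f) { return !f.zero; }   // After patching: non-smi only.

int main() {
  unsigned smi = 42u << 1;   // 42 as a tagged smi
  unsigned heap_object = 1;  // any tagged heap object pointer
  // The unpatched branch is always taken, so the IC runs at least once
  // and gets a chance to record type feedback and patch the site.
  assert(TakenJnc(SmiCheck(smi)) && TakenJnc(SmiCheck(heap_object)));
  // The patched branch only sends non-smis to the slow case.
  assert(!TakenJnz(SmiCheck(smi)) && TakenJnz(SmiCheck(heap_object)));
  return 0;
}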
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index b34179a41c..9c9304d5a4 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -2049,13 +2049,23 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
 }
 
 
+static bool HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   HandleScope scope;
   Handle<Code> rewritten;
-#ifdef DEBUG
   State previous_state = GetState();
-#endif
-  State state = TargetState(x, y);
+
+  State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
   if (state == GENERIC) {
     CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
     rewritten = stub.GetCode();
@@ -2073,6 +2083,44 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
            Token::Name(op_));
   }
 #endif
+
+  // Activate inlined smi code.
+  if (previous_state == UNINITIALIZED) {
+    PatchInlinedSmiCode(address());
+  }
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  if (*test_instruction_address != Assembler::kTestAlByte) {
+    ASSERT(*test_instruction_address == Assembler::kNopByte);
+    return;
+  }
+
+  Address delta_address = test_instruction_address + 1;
+  // The immediate byte of the test instruction is the delta back to the
+  // short conditional jump that is to be patched.
+  int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+  if (FLAG_trace_ic) {
+    PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+           address, test_instruction_address, delta);
+  }
+
+  // Patch with a short conditional jump. There must be a
+  // short jump-if-carry/not-carry at this position.
+  Address jmp_address = test_instruction_address - delta;
+  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+         *jmp_address == Assembler::kJcShortOpcode);
+  Condition cc = *jmp_address == Assembler::kJncShortOpcode
+      ? not_zero
+      : zero;
+  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
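EmitPatchInfo above relies on the ia32 assembler narrowing
test(eax, Immediate(imm)) to the two-byte form test al, imm8 when the
immediate fits in a byte; that is what produces the 0xA8 marker byte that
HasInlinedSmiCode and PatchInlinedSmiCode look for. A hypothetical encoder
sketch of just that narrowing (not V8's assembler, which handles more
cases):

#include <cassert>
#include <cstdint>
#include <vector>

// Encode 'test eax, imm': the one-byte immediate form is chosen whenever
// possible, like the marker emitted by EmitPatchInfo.
std::vector<uint8_t> EncodeTestEax(uint32_t imm) {
  if (imm <= 0xFF) {
    return {0xA8, static_cast<uint8_t>(imm)};  // test al, imm8
  }
  return {0xA9,                                // test eax, imm32
          static_cast<uint8_t>(imm), static_cast<uint8_t>(imm >> 8),
          static_cast<uint8_t>(imm >> 16), static_cast<uint8_t>(imm >> 24)};
}

int main() {
  assert(EncodeTestEax(7)[0] == 0xA8);       // small delta -> marker form
  assert(EncodeTestEax(0x1234)[0] == 0xA9);  // wide form, not a marker
  return 0;
}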
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 2abc7f0171..1b7c52b2aa 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -315,6 +315,13 @@ void LCodeGen::CallCode(Handle<Code> code,
     __ call(code, mode);
     RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
   }
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
 }
 
diff --git a/src/ic.cc b/src/ic.cc
index cda0b1504a..645c6fdcf6 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1951,7 +1951,7 @@ TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
 
 
 TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
-                                              TRBinaryOpIC::TypeInfo y) {
+                                               TRBinaryOpIC::TypeInfo y) {
   if (x == UNINITIALIZED) return y;
   if (y == UNINITIALIZED) return x;
   if (x == STRING && y == STRING) return STRING;
@@ -2041,6 +2041,11 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
              TRBinaryOpIC::GetName(result_type),
              Token::Name(op));
     }
+
+    // Activate inlined smi code.
+    if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
+      PatchInlinedSmiCode(ic.address());
+    }
   }
 
   Handle<JSBuiltinsObject> builtins = Top::builtins();
@@ -2127,13 +2132,17 @@ const char* CompareIC::GetStateName(State state) {
 }
 
 
-CompareIC::State CompareIC::TargetState(Handle<Object> x, Handle<Object> y) {
-  State state = GetState();
-  if (state != UNINITIALIZED) return GENERIC;
-  if (x->IsSmi() && y->IsSmi()) return SMIS;
-  if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+CompareIC::State CompareIC::TargetState(State state,
+                                        bool has_inlined_smi_code,
+                                        Handle<Object> x,
+                                        Handle<Object> y) {
+  if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
+  if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
+  if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
+      x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
   if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
-  if (x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+  if (state == UNINITIALIZED &&
+      x->IsJSObject() && y->IsJSObject()) return OBJECTS;
   return GENERIC;
 }
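The rewritten TargetState above is the heart of the feedback change: a
site that still has inlined smi code may widen SMIS -> HEAP_NUMBERS
instead of going straight to GENERIC on its second update. A standalone
model of those transitions (not V8 code; the operand predicates are
passed as booleans for brevity):

#include <cassert>

enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, OBJECTS, GENERIC };

State TargetState(State state, bool has_inlined_smi_code,
                  bool smis, bool numbers, bool objects, bool equality) {
  if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
  if (state == UNINITIALIZED && smis) return SMIS;
  if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
      numbers) return HEAP_NUMBERS;
  if (!equality) return GENERIC;  // Only ==/=== track object compares.
  if (state == UNINITIALIZED && objects) return OBJECTS;
  return GENERIC;
}

int main() {
  // First update on smi operands records SMIS.
  assert(TargetState(UNINITIALIZED, true, true, true, false, true) == SMIS);
  // Without inlined smi code, any second update degrades to GENERIC.
  assert(TargetState(SMIS, false, false, true, false, true) == GENERIC);
  // With inlined smi code, a SMIS site can still widen to HEAP_NUMBERS.
  assert(TargetState(SMIS, true, false, true, false, true) == HEAP_NUMBERS);
  return 0;
}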
diff --git a/src/ic.h b/src/ic.h
index 434c5024eb..8562bcc24a 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -582,7 +582,8 @@ class CompareIC: public IC {
   static const char* GetStateName(State state);
 
  private:
-  State TargetState(Handle<Object> x, Handle<Object> y);
+  State TargetState(State state, bool has_inlined_smi_code,
+                    Handle<Object> x, Handle<Object> y);
 
   bool strict() const { return op_ == Token::EQ_STRICT; }
   Condition GetCondition() const { return ComputeCondition(op_); }
@@ -591,6 +592,8 @@ class CompareIC: public IC {
   Token::Value op_;
 };
 
+// Helper for TRBinaryOpIC and CompareIC.
+void PatchInlinedSmiCode(Address address);
 
 } }  // namespace v8::internal
diff --git a/src/type-info.cc b/src/type-info.cc
index 5f6022b6f1..8719439ad4 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -142,6 +142,9 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
   CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
   switch (state) {
     case CompareIC::UNINITIALIZED:
+      // Uninitialized means never executed.
+      // TODO(fschneider): Introduce a separate value for never-executed ICs.
+      return unknown;
     case CompareIC::SMIS:
       return TypeInfo::Smi();
     case CompareIC::HEAP_NUMBERS:
@@ -184,6 +187,9 @@ TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
 
   switch (type) {
     case TRBinaryOpIC::UNINITIALIZED:
+      // Uninitialized means never executed.
+      // TODO(fschneider): Introduce a separate value for never-executed ICs.
+      return unknown;
     case TRBinaryOpIC::SMI:
       switch (result_type) {
         case TRBinaryOpIC::UNINITIALIZED:
@@ -224,6 +230,9 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
   CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
   switch (state) {
     case CompareIC::UNINITIALIZED:
+      // Uninitialized means never executed.
+      // TODO(fschneider): Introduce a separate value for never-executed ICs.
+      return unknown;
     case CompareIC::SMIS:
       return TypeInfo::Smi();
     case CompareIC::HEAP_NUMBERS:
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 200209906b..aff778a5de 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1951,10 +1951,8 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   HandleScope scope;
   Handle<Code> rewritten;
-#ifdef DEBUG
   State previous_state = GetState();
-#endif
-  State state = TargetState(x, y);
+  State state = TargetState(previous_state, false, x, y);
   if (state == GENERIC) {
     CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
     rewritten = stub.GetCode();
@@ -1974,6 +1972,10 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 #endif
 }
 
+void PatchInlinedSmiCode(Address address) {
+  UNIMPLEMENTED();
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64