Avoid patching code after the call to the binary operation stub in optimized code

This patch adds a nop after the call to the binary operation stub in optimized code, to prevent the patching of the inlined smi case (used by the full code generator) from kicking in if the next instruction emitted by the lithium code generator should accidentally enable it. For calls generated by CallCodeGeneric this was already handled on Intel platforms, but it was missing on ARM.
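
To make the hazard concrete, here is a minimal standalone sketch of the patching guard. The byte values and the simplified function below are made up for illustration; the real logic lives in the platform-specific ic-*.cc files and inspects actual instruction encodings:

#include <cstdint>
#include <cstdio>

// Illustrative opcodes only; the real encodings are platform-specific.
const uint8_t kSmiCheckMarker = 0xA9;  // marker the full codegen leaves after IC calls
const uint8_t kNop = 0x90;             // sentinel meaning "no inlined smi code"

// Sketch of the patcher's guard: only rewrite the inlined smi check when
// the instruction following the call site is the expected marker. If
// optimized code happened to place a matching byte there, the patcher
// would corrupt it, hence the explicit nop after stub calls.
void PatchInlinedSmiCodeSketch(const uint8_t* after_call) {
  if (*after_call != kSmiCheckMarker) {
    std::printf("no inlined smi code here, nothing to patch\n");
    return;
  }
  std::printf("patching inlined smi code\n");
}

int main() {
  uint8_t full_codegen_site[] = {kSmiCheckMarker};  // full codegen: patchable
  uint8_t optimized_site[] = {kNop};                // lithium: explicit nop
  PatchInlinedSmiCodeSketch(full_codegen_site);
  PatchInlinedSmiCodeSketch(optimized_site);
  return 0;
}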

On IA-32 I also tried checking whether the code containing the call was optimized (patch below), but that caused regressions on some benchmarks.

diff --git src/ia32/ic-ia32.cc src/ia32/ic-ia32.cc
index 5f143b1..f70e208 100644
--- src/ia32/ic-ia32.cc
+++ src/ia32/ic-ia32.cc
@@ -1603,12 +1603,18 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {

   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), isolate());
   }
 }

-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, Isolate* isolate) {
+  // Never patch in optimized code.
+  Code* code = isolate->pc_to_code_cache()->GetCacheEntry(address)->code;
+  if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+    return;
+  }
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
diff --git src/ic.cc src/ic.cc
index f70f75a..62e79da 100644
--- src/ic.cc
+++ src/ic.cc
@@ -2384,7 +2384,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {

     // Activate inlined smi code.
     if (previous_type == BinaryOpIC::UNINITIALIZED) {
-      PatchInlinedSmiCode(ic.address());
+      PatchInlinedSmiCode(ic.address(), isolate);
     }
   }

diff --git src/ic.h src/ic.h
index 11c2e3a..9ef4b20 100644
--- src/ic.h
+++ src/ic.h
@@ -721,7 +721,7 @@ class CompareIC: public IC {
 };

 // Helper for BinaryOpIC and CompareIC.
-void PatchInlinedSmiCode(Address address);
+void PatchInlinedSmiCode(Address address, Isolate* isolate);

 } }  // namespace v8::internal
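
The guard above hinges on mapping a return address back to the Code object that contains it. As a rough sketch of that kind of pc-to-code lookup (hypothetical types, not V8's actual PcToCodeCache interface), one can binary-search a list of code ranges sorted by start address:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for V8's pc-to-code cache: each entry records the
// address range a generated Code object occupies, plus whether it is
// optimized code.
struct CodeRange {
  uintptr_t start;
  size_t size;
  bool optimized;  // stand-in for kind() == Code::OPTIMIZED_FUNCTION
};

// Find the code range containing pc, if any, in a list sorted by start.
const CodeRange* LookupCode(const std::vector<CodeRange>& sorted, uintptr_t pc) {
  auto it = std::upper_bound(
      sorted.begin(), sorted.end(), pc,
      [](uintptr_t p, const CodeRange& r) { return p < r.start; });
  if (it == sorted.begin()) return nullptr;
  --it;
  return pc < it->start + it->size ? &*it : nullptr;
}

int main() {
  std::vector<CodeRange> code = {{0x1000, 0x100, false}, {0x2000, 0x80, true}};
  const CodeRange* c = LookupCode(code, 0x2010);
  // The experimental guard would bail out here instead of patching.
  return (c != nullptr && c->optimized) ? 0 : 1;
}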

R=danno@chromium.org

BUG=none
TEST=none

Review URL: http://codereview.chromium.org//7350015

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8623 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: sgjesse@chromium.org
Date:   2011-07-13 09:31:17 +00:00
Parent: 8a6108de95
Commit: 620d50af55
3 changed files with 10 additions and 0 deletions

src/arm/lithium-codegen-arm.cc

@@ -551,6 +551,13 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
   RecordPosition(pointers->position());
   __ Call(code, mode);
   RegisterLazyDeoptimization(instr, safepoint_mode);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
 }
@@ -1506,6 +1513,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ nop();  // Signals no inlined code.
 }

src/ia32/lithium-codegen-ia32.cc

@@ -1345,6 +1345,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ nop();  // Signals no inlined code.
 }

src/x64/lithium-codegen-x64.cc

@@ -1341,6 +1341,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ nop();  // Signals no inlined code.
 }
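
In other words, after this change every binary-op or compare stub call in optimized code is followed by an instruction that cannot match the full code generator's marker. A minimal emitter-side sketch, with made-up byte values rather than real encodings:

#include <cstdint>
#include <vector>

// Hypothetical sketch of the emitter-side decision, not V8 code: after
// calling a binary-op or compare IC stub from optimized code, append a
// nop so the next instruction is never mistaken for the smi-check marker.
enum CodeKind { BINARY_OP_IC, COMPARE_IC, OTHER };

void EmitStubCall(CodeKind kind, std::vector<uint8_t>* code) {
  code->push_back(0xE8);  // stand-in for the call to the stub
  if (kind == BINARY_OP_IC || kind == COMPARE_IC) {
    code->push_back(0x90);  // nop: "no inlined smi code follows"
  }
}

int main() {
  std::vector<uint8_t> code;
  EmitStubCall(BINARY_OP_IC, &code);  // call + nop
  EmitStubCall(OTHER, &code);         // call only
  return code.size() == 3 ? 0 : 1;
}

The cost is one extra instruction per such call site, paid once at code-generation time; the guard tried in the IA-32 experiment above instead performs a code lookup every time the IC asks to patch.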