Port count-based profiler to x64

Review URL: https://chromiumcodereview.appspot.com/9845019

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11159 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
jkummerow@chromium.org 2012-03-27 11:21:27 +00:00
parent 9b9e458a43
commit d71c60e086
13 changed files with 162 additions and 143 deletions
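
A sketch for orientation (not part of the commit itself): under the relevant flags, full-codegen keeps a per-function profiling counter, a Smi stored in a JSGlobalPropertyCell, and decrements it at every back edge and every return. When the counter goes negative, the generated code calls the InterruptStub, which lets the runtime profiler consider the function for optimization. Before this commit, x64 still used the older stack-guard scheme. At a back edge, the two schemes emit roughly the following (byte values taken from the deoptimizer constants later in this diff):

    // Old scheme (FLAG_count_based_interrupts off): poll the stack limit.
    //   cmp rsp, <stack limit>
    //   jae ok                    ; 0x73 0x07 (kJaeInstruction, kJaeOffset)
    //   call <StackCheckStub>     ; 0xe8 <rel32>
    //   ok:
    //
    // New scheme (FLAG_count_based_interrupts on): drain the counter.
    //   <decrement profiling_counter_ by a distance-based weight>
    //   jns ok                    ; 0x79 0x1f (kJnsInstruction, kJnsOffset)
    //   call <InterruptStub>      ; 0xe8 <rel32>
    //   <reset profiling_counter_>
    //   ok:

The deoptimizer can later patch either form by overwriting the conditional branch with a two-byte nop, forcing the call to be taken.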

src/arm/full-codegen-arm.cc

@@ -110,13 +110,6 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 24;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the

src/debug.cc

@@ -1847,13 +1847,6 @@ static void RedirectActivationsToRecompiledCodeOnThread(
// break slots.
debug_break_slot_count++;
}
if (frame_code->has_self_optimization_header() &&
!new_code->has_self_optimization_header()) {
delta -= FullCodeGenerator::self_optimization_header_size();
} else {
ASSERT(frame_code->has_self_optimization_header() ==
new_code->has_self_optimization_header());
}
int debug_break_slot_bytes =
debug_break_slot_count * Assembler::kDebugBreakSlotLength;
if (FLAG_trace_deopt) {

src/full-codegen.cc

@@ -315,7 +315,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
!info->function()->flags()->Contains(kDontOptimize));
code->set_self_optimization_header(cgen.has_self_optimization_header_);
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);

src/full-codegen.h

@@ -90,15 +90,10 @@ class FullCodeGenerator: public AstVisitor {
stack_checks_(2), // There's always at least one.
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0),
ic_total_count_(0),
has_self_optimization_header_(false) { }
ic_total_count_(0) { }
static bool MakeCode(CompilationInfo* info);
// Returns the platform-specific size in bytes of the self-optimization
// header.
static int self_optimization_header_size();
// Encode state and pc-offset as a BitField<type, start, size>.
// Only use 30 bits because we encode the result as a smi.
class StateField : public BitField<State, 0, 1> { };
@@ -796,7 +791,6 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
bool has_self_optimization_header_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;

src/ia32/deoptimizer-ia32.cc

@@ -239,13 +239,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// ok:
if (FLAG_count_based_interrupts) {
ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
} else {
ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
}
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -266,9 +266,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (FLAG_count_based_interrupts) {
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
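
A note on the reordering above: V8's ASSERT_EQ(expected, actual) treats its first argument as the expected value in the failure message, so putting the constant first yields a readable report. A minimal sketch, assuming a corrupted patch site:

    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
    // On failure this reports the constant (0x79) as the expected value and
    // the byte actually found at the patch site as the observed one, rather
    // than the reversed message the old argument order produced.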

src/ia32/full-codegen-ia32.cc

@@ -101,13 +101,6 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 13;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the

src/mips/full-codegen-mips.cc

@@ -120,13 +120,6 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 10 * Instruction::kInstrSize;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the

src/objects-inl.h

@@ -3060,21 +3060,6 @@ void Code::set_compiled_optimizable(bool value) {
}
bool Code::has_self_optimization_header() {
ASSERT(kind() == FUNCTION);
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasSelfOptimizationHeader::decode(flags);
}
void Code::set_self_optimization_header(bool value) {
ASSERT(kind() == FUNCTION);
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasSelfOptimizationHeader::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
}
int Code::allow_osr_at_loop_nesting_level() {
ASSERT(kind() == FUNCTION);
return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
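
The deleted accessors pack the flag into the kFullCodeFlags byte via V8's BitField template. As a simplified sketch of its semantics (an approximation, not V8's actual header):

    template <class T, int shift, int size>
    class BitField {
     public:
      static const uint32_t kMask = ((1U << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      // Replace just this field inside a previously encoded word.
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

So FullCodeFlagsHasSelfOptimizationHeader::decode(flags) read bit 3 of the flag byte, and update() rewrote only that bit.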

src/objects.h

@@ -4243,11 +4243,6 @@ class Code: public HeapObject {
inline bool is_compiled_optimizable();
inline void set_compiled_optimizable(bool value);
// [has_self_optimization_header]: For FUNCTION kind, tells if it has
// a self-optimization header.
inline bool has_self_optimization_header();
inline void set_self_optimization_header(bool value);
// [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
// how long the function has been marked for OSR and therefore which
// level of loop nesting we are willing to do on-stack replacement
@@ -4469,7 +4464,6 @@ class Code: public HeapObject {
public BitField<bool, 0, 1> {}; // NOLINT
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
class FullCodeFlagsHasSelfOptimizationHeader: public BitField<bool, 3, 1> {};
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;

src/runtime-profiler.cc

@@ -175,14 +175,10 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// prepared to generate it, but we don't expect to have to.
bool found_code = false;
Code* stack_check_code = NULL;
#if defined(V8_TARGET_ARCH_IA32) || \
defined(V8_TARGET_ARCH_ARM) || \
defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) {
InterruptStub interrupt_stub;
found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
} else // NOLINT
#endif
{ // NOLINT
StackCheckStub check_stub;
found_code = check_stub.FindCodeInCache(&stack_check_code);
@@ -353,11 +349,7 @@ void RuntimeProfiler::OptimizeNow() {
void RuntimeProfiler::NotifyTick() {
#if defined(V8_TARGET_ARCH_IA32) || \
defined(V8_TARGET_ARCH_ARM) || \
defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) return;
#endif
isolate_->stack_guard()->RequestRuntimeProfilerTick();
}
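
Reconstructing from the hunk above (the diff's add/remove markers were lost): the line counts say exactly four lines are deleted per hunk, i.e. only the architecture #if/#endif guards; the FLAG_count_based_interrupts branches survive, including the NOLINT brace style, and now apply on every architecture, x64 included. The surviving code should read:

    if (FLAG_count_based_interrupts) {
      InterruptStub interrupt_stub;
      found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
    } else  // NOLINT
    {  // NOLINT
      StackCheckStub check_stub;
      found_code = check_stub.FindCodeInCache(&stack_check_code);
    }

NotifyTick() likewise keeps its early return under count-based interrupts, and the same guard removal repeats in the next file.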

src/runtime.cc

@@ -8358,14 +8358,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
PrintF("]\n");
}
Handle<Code> check_code;
#if defined(V8_TARGET_ARCH_IA32) || \
defined(V8_TARGET_ARCH_ARM) || \
defined(V8_TARGET_ARCH_MIPS)
if (FLAG_count_based_interrupts) {
InterruptStub interrupt_stub;
check_code = interrupt_stub.GetCode();
} else // NOLINT
#endif
{ // NOLINT
StackCheckStub check_stub;
check_code = check_stub.GetCode();

src/x64/deoptimizer-x64.cc

@@ -111,13 +111,22 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1f;
static const byte kJnsOffsetDebugCode = 0x53;
static const byte kJaeInstruction = 0x73;
static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
ASSERT(check_code->entry() ==
Assembler::target_address_at(call_target_address));
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
// The stack check code matches the pattern:
//
// cmp rsp, <limit>
@@ -135,11 +144,24 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// test rax, <loop nesting depth>
// ok:
//
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x66; // 2 byte nop part 1
*(call_target_address - 2) = 0x90; // 2 byte nop part 2
if (FLAG_count_based_interrupts) {
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
if (FLAG_debug_code) {
// FullCodeGenerator::EmitProfilingCounterReset() makes use of
// masm->Move(Operand&, Smi*), which generates additional code
// when FLAG_debug_code is set, so the jump offset is larger
// in that case.
ASSERT_EQ(kJnsOffsetDebugCode, *(call_target_address - 2));
} else {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
}
} else {
ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
}
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -157,11 +179,21 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
*(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (FLAG_count_based_interrupts) {
*(call_target_address - 3) = kJnsInstruction;
if (FLAG_debug_code) {
// See comment above: larger jump offset if debug code is generated.
*(call_target_address - 2) = kJnsOffsetDebugCode;
} else {
*(call_target_address - 2) = kJnsOffset;
}
} else {
*(call_target_address - 3) = kJaeInstruction;
*(call_target_address - 2) = kJaeOffset;
}
Assembler::set_target_address_at(call_target_address,
check_code->entry());
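
A sketch of the patch site, derived from the constants above: call_target_address points at the call's 32-bit displacement (pc_after - kIntSize), so the bytes immediately before it are the conditional branch and the call opcode.

    // offset from call_target_address:  -3    -2            -1    0..3
    // count-based, unpatched:           0x79  0x1f (0x53*)  0xe8  <rel32>  ; jns ok
    // stack-guard, unpatched:           0x73  0x07          0xe8  <rel32>  ; jae ok
    // patched:                          0x66  0x90          0xe8  <rel32>  ; 2-byte nop
    // (*) 0x53 when FLAG_debug_code enlarges the skipped counter-reset code.

PatchStackCheckCodeAt overwrites the branch with the two-byte nop (0x66 0x90) so the call is taken unconditionally and retargets it at replacement_code; RevertStackCheckCodeAt restores the branch and points the call back at check_code.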

src/x64/full-codegen-x64.cc

@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -100,11 +101,6 @@ class JumpPatchSite BASE_EMBEDDED {
};
int FullCodeGenerator::self_optimization_header_size() {
return 20;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -122,32 +118,11 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
if (FLAG_trace_opt_verbose) {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
if (maybe_cell->To(&cell)) {
__ movq(rax, Handle<JSGlobalPropertyCell>(cell),
RelocInfo::EMBEDDED_OBJECT);
__ SmiAddConstant(FieldOperand(rax, JSGlobalPropertyCell::kValueOffset),
Smi::FromInt(-1));
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
__ j(zero, compile_stub, RelocInfo::CODE_TARGET);
ASSERT(masm_->pc_offset() == self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
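
The block removed above was the x64 "self-optimization header": a counted prologue that decremented a dedicated cell and jumped to the LazyRecompile builtin when it reached zero. Its job moves to the profiling_counter_ cell allocated just above. Roughly (a sketch, not code from this commit):

    // Before: per-function prologue of known size, tracked in Code's flags.
    //   movq rax, <cell>
    //   SmiAddConstant([rax + kValueOffset], -1)
    //   j(zero, LazyRecompile)
    // After: no prologue; the shared counter is drained at returns and back
    // edges via EmitProfilingCounterDecrement() (added below), so the
    // has_self_optimization_header bookkeeping can be deleted everywhere.
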
@@ -322,14 +297,57 @@ void FullCodeGenerator::ClearAccumulator() {
}
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ SmiAddConstant(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
Smi::FromInt(-delta));
}
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
// Self-optimization is a one-off thing; if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
reset_value = 10;
}
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
Smi::FromInt(reset_value));
}
static const int kMaxBackEdgeWeight = 127;
static const int kBackEdgeDistanceDivisor = 162;
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
if (FLAG_count_based_interrupts) {
int weight = 1;
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
InterruptStub stub;
__ CallStub(&stub);
} else {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
}
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
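
A worked example of the back-edge weighting above, using integer division with the constants as given (kMaxBackEdgeWeight = 127, kBackEdgeDistanceDivisor = 162):

    // distance =    80  ->  Max(1, 80 / 162)      = 1    (floor weight)
    // distance =   500  ->  Max(1, 500 / 162)     = 3
    // distance = 50000  ->  Min(127, 50000 / 162) = 127  (clamped)

Larger loop bodies therefore drain the interrupt budget faster per back edge, compensating for executing fewer back edges per unit of work.
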
@@ -342,6 +360,10 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(loop_depth() > 0);
__ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
if (FLAG_count_based_interrupts) {
EmitProfilingCounterReset();
}
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -361,6 +383,31 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
if (info_->ShouldSelfOptimize()) {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
}
EmitProfilingCounterDecrement(weight);
Label ok;
__ j(positive, &ok, Label::kNear);
__ push(rax);
if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
InterruptStub stub;
__ CallStub(&stub);
}
__ pop(rax);
EmitProfilingCounterReset();
__ bind(&ok);
}
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
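
A worked example for the return sequence above, with hypothetical flag values rather than the real defaults: if FLAG_interrupt_budget were 10000 and FLAG_self_opt_count were 100, then for a ShouldSelfOptimize() function

    weight = 10000 / 100 = 100

and the counter would underflow after roughly 100 returns, at which point the function is either optimized directly via Runtime::kOptimizeFunctionOnNextCall (when FLAG_direct_self_opt is set) or handed to the InterruptStub.
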
@@ -856,7 +903,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ testq(rax, rax);
@@ -1155,7 +1202,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
__ call(ic, mode);
CallIC(ic, mode);
}
@@ -1236,7 +1283,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ Move(rcx, var->name());
__ movq(rax, GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(rax);
break;
}
@@ -1446,7 +1493,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, key->id());
CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1716,14 +1763,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ call(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
__ call(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1745,7 +1792,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1794,7 +1841,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(rdx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -1835,7 +1882,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic);
CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1848,7 +1895,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic);
CallIC(ic);
break;
}
}
@@ -1865,7 +1912,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
@@ -1973,7 +2020,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2013,7 +2060,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2047,6 +2094,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id) {
ic_total_count_++;
__ call(code, rmode, ast_id);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
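
The remaining hunks in this file mechanically replace direct __ call(ic, ...) sites with the CallIC wrapper above, so ic_total_count_ ends up counting every inline cache the function uses. A plausible consumer of that count, sketched here as an assumption rather than something shown in this diff, is a type-feedback coverage check in the optimization heuristics:

    // Hypothetical sketch; names are illustrative, not V8 API.
    int total = feedback_info->ic_total_count();
    int typed = feedback_info->ic_with_type_info_count();
    int percent = (total == 0) ? 100 : (typed * 100) / total;
    if (percent >= kTypeInfoThreshold) {
      // Enough ICs have observed concrete types to make optimizing
      // worthwhile.
    }
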
@@ -2064,7 +2119,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
__ call(ic, mode, expr->id());
CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2097,7 +2152,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -3737,7 +3792,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
__ call(ic, mode, expr->id());
CallIC(ic, mode, expr->id());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3895,7 +3950,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(rax);
}
@@ -4016,7 +4071,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
__ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4050,7 +4105,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4067,7 +4122,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4094,7 +4149,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ call(ic);
CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4274,7 +4329,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ call(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);