MIPS: Skip write barriers in the fast case when setting up local context.

Port r21481 (5973b48)

Original commit message:
The FastNewContextStub always allocates in new space, so we don't
need to update the write barrier when copying the parameters to
the newly allocated context.

BUG=
R=plind44@gmail.com

Review URL: https://codereview.chromium.org/302633002

Patch from Balazs Kilvady <kilvadyb@homejinni.com>.

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21500 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
plind44@gmail.com 2014-05-26 14:40:55 +00:00
parent d755611e93
commit 4fd78904be
2 changed files with 24 additions and 4 deletions

View File

@@ -219,6 +219,7 @@ void FullCodeGenerator::Generate() {
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate context");
     // Argument to NewContext is the function, which is still in a1.
+    bool need_write_barrier = true;
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ push(a1);
       __ Push(info->scope()->GetScopeInfo());
@@ -226,6 +227,8 @@ void FullCodeGenerator::Generate() {
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
     } else {
       __ push(a1);
       __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -249,8 +252,15 @@ void FullCodeGenerator::Generate() {
        __ sw(a0, target);
        // Update the write barrier.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, a0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
      }
    }
  }

View File

@@ -198,10 +198,13 @@ bool LCodeGen::GeneratePrologue() {
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
@@ -224,8 +227,15 @@ bool LCodeGen::GeneratePrologue() {
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, a0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
      }
    }
    Comment(";;; End allocate local context");