Restore saved caller FP registers on stub failure and preserve FP registers
on NotifyStubFailure.

In debug mode, clobber FP registers on each runtime call to increase the
chances of catching such bugs.

R=danno@chromium.org

Review URL: https://chromiumcodereview.appspot.com/78283002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18000 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit 21fb1401bd (parent 988eb40d70)
src/arguments.cc:
@@ -117,4 +117,11 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
 #undef WRITE_CALL_2_VOID
 
 
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+  // TODO(ulan): This clobbers only subset of registers depending on compiler,
+  // Rewrite this in assembly to really clobber all registers.
+  return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
+
 } }  // namespace v8::internal
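A note on the clobber function above: multiplying each argument by a distinct
constant keeps all four values live at once, so under typical calling
conventions the computation flows through several FP registers. As the TODO
says, how many registers are actually overwritten depends on the compiler; an
assembly implementation would be needed to clobber all of them deterministically.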
src/arguments.h:
@@ -289,12 +289,23 @@ class FunctionCallbackArguments
 };
 
 
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
 #define DECLARE_RUNTIME_FUNCTION(Type, Name)    \
 Type Name(int args_length, Object** args_object, Isolate* isolate)
 
 #define RUNTIME_FUNCTION(Type, Name)                                  \
 static Type __RT_impl_##Name(Arguments args, Isolate* isolate);       \
 Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+  CLOBBER_DOUBLE_REGISTERS();                                         \
   Arguments args(args_length, args_object);                           \
   return __RT_impl_##Name(args, isolate);                             \
 }                                                                     \
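For illustration, a sketch (not part of the commit) of what RUNTIME_FUNCTION
now produces in a debug build, using a hypothetical runtime function
Runtime_Foo:

    // Debug-build expansion sketch of RUNTIME_FUNCTION(MaybeObject*, Runtime_Foo).
    static MaybeObject* __RT_impl_Runtime_Foo(Arguments args, Isolate* isolate);
    MaybeObject* Runtime_Foo(int args_length, Object** args_object,
                             Isolate* isolate) {
      ClobberDoubleRegisters(1, 2, 3, 4);  // from CLOBBER_DOUBLE_REGISTERS()
      Arguments args(args_length, args_object);
      return __RT_impl_Runtime_Foo(args, isolate);
    }

Every runtime entry point thus scrambles the FP registers before doing any
real work, so generated code that wrongly assumes doubles survive a runtime
call fails fast in debug builds.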
src/arm/builtins-arm.cc:
@@ -857,7 +857,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -866,7 +867,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
   }
 
@@ -875,6 +876,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
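The same helper-plus-two-wrappers refactoring is applied to the ia32, MIPS,
and x64 builtins further down; only the register save/restore instructions
differ per architecture.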
src/arm/deoptimizer-arm.cc:
@@ -127,6 +127,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->
 
 // This code tries to be close to ia32 code so that any changes can be
src/arm/lithium-codegen-arm.cc:
@@ -98,6 +98,38 @@ void LCodeGen::Abort(BailoutReason reason) {
 }
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -158,16 +190,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 
   // Possibly allocate a local context.
@@ -313,6 +336,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ b(&needs_frame);
@@ -330,6 +354,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
        __ mov(pc, ip);
      }
    } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
      __ mov(lr, Operand(pc), LeaveCC, al);
      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
    }
@@ -828,7 +856,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2929,16 +2960,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
src/arm/lithium-codegen-arm.h:
@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();
src/arm/macro-assembler-arm.h:
@@ -1054,8 +1054,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
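Since save_doubles defaults to kDontSaveFPRegs, every existing
CallRuntime(fid, num_arguments) call site stays source-compatible; only
callers that must preserve doubles opt in. A usage sketch:

    __ CallRuntime(Runtime::kTraceExit, 1);                      // unchanged
    __ CallRuntime(Runtime::kNotifyStubFailure, 0, kSaveFPRegs); // preserves doubles

The equivalent overload is added to the ia32, MIPS, and x64 macro assemblers
below.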
src/builtins.h:
@@ -113,6 +113,8 @@ enum BuiltinExtraArguments {
   V(NotifyLazyDeoptimized,        BUILTIN, UNINITIALIZED,          \
                                   Code::kNoExtraICState)           \
   V(NotifyStubFailure,            BUILTIN, UNINITIALIZED,          \
+                                  Code::kNoExtraICState)           \
+  V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED,          \
                                   Code::kNoExtraICState)           \
                                                                    \
   V(LoadIC_Miss,                  BUILTIN, UNINITIALIZED,          \
@@ -403,6 +405,7 @@ class Builtins {
   static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyStubFailure(MacroAssembler* masm);
+  static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
 
   static void Generate_FunctionCall(MacroAssembler* masm);
src/deoptimizer.cc:
@@ -1649,8 +1649,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  Code* notify_failure =
-      isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+  Code* notify_failure = NotifyStubFailureBuiltin();
   output_frame->SetContinuation(
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 }
src/deoptimizer.h:
@@ -406,6 +406,10 @@ class Deoptimizer : public Malloced {
   // at the dynamic alignment state slot inside the frame.
   bool HasAlignmentPadding(JSFunction* function);
 
+  // Select the version of NotifyStubFailure builtin that either saves or
+  // doesn't save the double registers depending on CPU features.
+  Code* NotifyStubFailureBuiltin();
+
   Isolate* isolate_;
   JSFunction* function_;
   Code* compiled_code_;
src/ia32/builtins-ia32.cc:
@@ -601,7 +601,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -610,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
@@ -620,6 +621,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
src/ia32/deoptimizer-ia32.cc:
@@ -231,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+      Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+  return isolate_->builtins()->builtin(name);
+}
+
+
 #define __ masm()->
 
 void Deoptimizer::EntryGenerator::Generate() {
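Only the ia32 port makes this selection CPU-feature dependent: without SSE2
there are no XMM double registers to preserve, so the plain NotifyStubFailure
builtin suffices; the ARM, MIPS, and x64 ports unconditionally return the
SaveDoubles variant.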
src/ia32/lithium-codegen-ia32.cc:
@@ -130,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(esp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(esp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -244,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
    }
 
    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-     Comment(";;; Save clobbered callee double registers");
-     CpuFeatureScope scope(masm(), SSE2);
-     int count = 0;
-     BitVector* doubles = chunk()->allocated_double_registers();
-     BitVector::Iterator save_iterator(doubles);
-     while (!save_iterator.Done()) {
-       __ movsd(MemOperand(esp, count * kDoubleSize),
-                XMMRegister::FromAllocationIndex(save_iterator.Current()));
-       save_iterator.Advance();
-       count++;
-     }
+     SaveCallerDoubles();
    }
  }
 
@@ -399,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -425,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
         __ ret(0);  // Call the continuation without clobbering registers.
       }
     } else {
+      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+        RestoreCallerDoubles();
+      }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
@@ -3129,17 +3157,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-    ASSERT(NeedsEagerFrame());
-    CpuFeatureScope scope(masm(), SSE2);
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(esp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.
src/ia32/lithium-codegen-ia32.h:
@@ -198,6 +198,9 @@ class LCodeGen: public LCodeGenBase {
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
src/ia32/macro-assembler-ia32.h:
@@ -777,8 +777,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
src/mips/builtins-mips.cc:
@@ -892,7 +892,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -901,7 +902,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ MultiPush(kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ MultiPop(kJSCallerSaved | kCalleeSaved);
   }
 
@@ -910,6 +911,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
src/mips/deoptimizer-mips.cc:
@@ -125,6 +125,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->
 
src/mips/lithium-codegen-mips.cc:
@@ -98,6 +98,38 @@ void LChunkBuilder::Abort(BailoutReason reason) {
 }
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -160,16 +192,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 
   // Possibly allocate a local context.
@@ -298,6 +321,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     }
     __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
@@ -313,6 +337,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
        __ Call(t9);
      }
    } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
      __ Call(t9);
    }
  }
@@ -786,7 +814,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2777,16 +2808,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
src/mips/macro-assembler-mips.h:
@@ -1205,8 +1205,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
src/x64/builtins-x64.cc:
@@ -662,7 +662,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -671,7 +672,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
    // registers.
     __ Pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ Popad();
     // Tear down internal frame.
   }
@@ -681,6 +682,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
src/x64/deoptimizer-x64.cc:
@@ -126,6 +126,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->
 
 void Deoptimizer::EntryGenerator::Generate() {
src/x64/lithium-codegen-x64.cc:
@@ -111,6 +111,38 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(rsp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(rsp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -173,16 +205,7 @@ bool LCodeGen::GeneratePrologue() {
    }
 
    if (info()->saves_caller_doubles()) {
-     Comment(";;; Save clobbered callee double registers");
-     int count = 0;
-     BitVector* doubles = chunk()->allocated_double_registers();
-     BitVector::Iterator save_iterator(doubles);
-     while (!save_iterator.Done()) {
-       __ movsd(MemOperand(rsp, count * kDoubleSize),
-                XMMRegister::FromAllocationIndex(save_iterator.Current()));
-       save_iterator.Advance();
-       count++;
-     }
+     SaveCallerDoubles();
    }
  }
 
@@ -261,6 +284,7 @@ bool LCodeGen::GenerateJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -280,6 +304,10 @@ bool LCodeGen::GenerateJumpTable() {
         __ call(kScratchRegister);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
@@ -714,7 +742,10 @@ void LCodeGen::DeoptimizeIf(Condition cc,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == no_condition && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (cc == no_condition && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2657,16 +2688,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(rsp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
src/x64/lithium-codegen-x64.h:
@@ -153,6 +153,10 @@ class LCodeGen: public LCodeGenBase {
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();
src/x64/macro-assembler-x64.h:
@@ -1266,8 +1266,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
test/mjsunit/regress/regress-clobbered-fp-regs.js (new file, 54 lines):
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax
+
+function store(a, x, y) {
+  var f1 = 0.1 * y;
+  var f2 = 0.2 * y;
+  var f3 = 0.3 * y;
+  var f4 = 0.4 * y;
+  var f5 = 0.5 * y;
+  var f6 = 0.6 * y;
+  var f7 = 0.7 * y;
+  var f8 = 0.8 * y;
+  a[0] = x;
+  var sum = (f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8);
+  assertEquals(1, y);
+  var expected = 3.6;
+  if (Math.abs(expected - sum) > 0.01) {
+    assertEquals(expected, sum);
+  }
+}
+
+// Generate TransitionElementsKindStub.
+store([1], 1, 1);
+store([1], 1.1, 1);
+store([1], 1.1, 1);
+%OptimizeFunctionOnNextCall(store);
+// This will trap on allocation site in TransitionElementsKindStub.
+store([1], 1, 1)
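The regression test keeps eight live double values across an array store that
bails out inside TransitionElementsKindStub; if the deoptimizer fails to
restore the caller's FP registers, the sum deviates from the expected 3.6 and
the assertion fires. It can be run directly with
d8 --allow-natives-syntax test/mjsunit/regress/regress-clobbered-fp-regs.js.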