[turbofan] Advance bytecode offset after lazy deopt.
This changes {FrameState} nodes modeling "after" states to use bytecode
offsets pointing to the deoptimizing bytecode. This is in sync with normal
execution, as the bytecode offset is advanced after operations complete in
regular bytecode handlers. The change is necessary to ensure lazily
deoptimized frames contain an accurate bytecode offset while they are on
the stack; such frames can be inspected by various stack walks. The
continuation builtin will advance the bytecode offset upon return.

R=jarin@chromium.org
TEST=mjsunit/regress/regress-crbug-660379
BUG=chromium:660379

Review-Url: https://codereview.chromium.org/2487173002
Cr-Commit-Position: refs/heads/master@{#40887}
Parent: 98e06c342e
Commit: 93c6595200
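Before the per-architecture diffs below, here is a minimal, self-contained sketch of the idea the patch implements. It uses toy types, not V8's API: after a lazy deopt, the materialized interpreter frame still records the offset of the deoptimizing bytecode (so stack walks see an accurate position), and only when the frame resumes does the continuation advance the offset past the current bytecode, exactly as a regular bytecode handler would on completion.

```cpp
#include <cstddef>
#include <vector>

// Toy model for illustration only (not V8 code): a "bytecode array" is just a
// list of bytecode sizes in bytes, and an interpreter frame records the offset
// of the bytecode it is currently executing.
struct ToyBytecodeArray {
  std::vector<int> bytecode_sizes;
};

// Advance a frame's bytecode offset past the bytecode it currently points at.
// This mirrors what the new InterpreterEnterBytecodeAdvance continuation does
// for lazily deoptimized frames before re-entering the dispatch loop.
int AdvancePastCurrentBytecode(const ToyBytecodeArray& array, int offset) {
  int current = 0;
  size_t index = 0;
  while (current < offset) current += array.bytecode_sizes[index++];
  // A valid offset always lands on a bytecode boundary.
  return current + array.bytecode_sizes[index];
}
```

In the real patch the heavy lifting is done by Runtime_InterpreterAdvanceBytecodeOffset (see the runtime-interpreter hunk below), which walks a BytecodeArrayIterator to the current offset and advances it by one bytecode; the per-architecture builtins merely load the frame slots, call that runtime function, and store the result back before entering the shared dispatch path.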
@@ -1296,7 +1296,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
@@ -1337,6 +1337,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   __ mov(pc, ip);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ ldr(r1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister, r1, r2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ mov(r2, r0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0 : argument count (preserved for callee)
@@ -1306,7 +1306,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
@@ -1347,6 +1347,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   __ Jump(ip0);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ Ldr(x1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Ldr(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister, x1, x2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Mov(x2, x0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- x0 : argument count (preserved for callee)
@@ -116,6 +116,7 @@ namespace internal {
   ASM(InterpreterPushArgsAndConstruct) \
   ASM(InterpreterPushArgsAndConstructFunction) \
   ASM(InterpreterPushArgsAndConstructArray) \
+  ASM(InterpreterEnterBytecodeAdvance) \
   ASM(InterpreterEnterBytecodeDispatch) \
   ASM(InterpreterOnStackReplacement) \
   \
@@ -980,7 +980,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
@@ -1022,6 +1022,31 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   __ jmp(ebx);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
+    __ Push(ebx);  // First argument is the bytecode array.
+    __ Push(edx);  // Second argument is the bytecode offset.
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Move(edx, eax);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argument count (preserved for callee)
@@ -1296,7 +1296,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
@@ -1339,6 +1339,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   __ Jump(a1);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ lw(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister, a1, a2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ mov(a2, v0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : argument count (preserved for callee)
@@ -1288,7 +1288,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
@@ -1331,6 +1331,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   __ Jump(a1);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister, a1, a2);
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ mov(a2, v0);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : argument count (preserved for callee)
@@ -956,7 +956,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
   }
 }
 
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // Set the return address to the correct point in the interpreter entry
   // trampoline.
   Smi* interpreter_entry_return_pc_offset(
@@ -998,6 +998,31 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
   __ jmp(rbx);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+  // Advance the current bytecode offset stored within the given interpreter
+  // stack frame. This simulates what all bytecode handlers do upon completion
+  // of the underlying operation.
+  __ movp(rbx, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ movp(rdx, Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
+    __ Push(rbx);  // First argument is the bytecode array.
+    __ Push(rdx);  // Second argument is the bytecode offset.
+    __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+    __ Move(rdx, rax);  // Result is the new bytecode offset.
+    __ Pop(kInterpreterAccumulatorRegister);
+  }
+  __ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rdx);
+
+  Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  Generate_InterpreterEnterBytecode(masm);
+}
+
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax : argument count (preserved for callee)
@@ -603,9 +603,9 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
     DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
     DCHECK_EQ(IrOpcode::kDead,
               NodeProperties::GetFrameStateInput(node)->opcode());
-    BailoutId bailout_id_before(bytecode_iterator().current_offset());
+    BailoutId bailout_id(bytecode_iterator().current_offset());
     Node* frame_state_before = environment()->Checkpoint(
-        bailout_id_before, OutputFrameStateCombine::Ignore(), false);
+        bailout_id, OutputFrameStateCombine::Ignore(), false);
     NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
   }
 }
@@ -618,11 +618,10 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
     DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
     DCHECK_EQ(IrOpcode::kDead,
               NodeProperties::GetFrameStateInput(node)->opcode());
-    BailoutId bailout_id_after(bytecode_iterator().current_offset() +
-                               bytecode_iterator().current_bytecode_size());
+    BailoutId bailout_id(bytecode_iterator().current_offset());
    bool has_exception = NodeProperties::IsExceptionalCall(node);
     Node* frame_state_after =
-        environment()->Checkpoint(bailout_id_after, combine, has_exception);
+        environment()->Checkpoint(bailout_id, combine, has_exception);
     NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
   }
 }
@@ -1001,7 +1001,7 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
     }
   }
 
-  // Compute this frame's PC, state, and continuation.
+  // Compute this frame's PC and state.
   FixedArray* raw_data = non_optimized_code->deoptimization_data();
   DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
   Address start = non_optimized_code->instruction_start();
@@ -1243,7 +1243,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
 
   // Translate the accumulator register (depending on frame position).
   if (is_topmost) {
-    // For topmost frmae, p ut the accumulator on the stack. The bailout state
+    // For topmost frame, put the accumulator on the stack. The bailout state
     // for interpreted frames is always set to {BailoutState::TOS_REGISTER} and
     // the {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
     // after materialization).
@@ -1268,9 +1268,15 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
   }
   CHECK_EQ(0u, output_offset);
 
+  // Compute this frame's PC and state. The PC will be a special builtin that
+  // continues the bytecode dispatch. Note that non-topmost and lazy-style
+  // bailout handlers also advance the bytecode offset before dispatch, hence
+  // simulating what normal handlers do upon completion of the operation.
   Builtins* builtins = isolate_->builtins();
   Code* dispatch_builtin =
-      builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+      (!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
+          ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
+          : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
   output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
   // Restore accumulator (TOS) register.
   output_frame->SetState(
@@ -2758,11 +2764,8 @@ int Deoptimizer::ComputeSourcePositionFromBaselineCode(
 int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
     SharedFunctionInfo* shared, BailoutId node_id) {
   DCHECK(shared->HasBytecodeArray());
-  // BailoutId points to the next bytecode in the bytecode aray. Subtract
-  // 1 to get the end of current bytecode.
-  int code_offset = node_id.ToInt() - 1;
   return AbstractCode::cast(shared->bytecode_array())
-      ->SourcePosition(code_offset);
+      ->SourcePosition(node_id.ToInt());
 }
 
 // static
@@ -404,11 +404,15 @@ void StackFrame::SetReturnAddressLocationResolver(
 static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
   Code* interpreter_entry_trampoline =
       isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+  Code* interpreter_bytecode_advance =
+      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
   Code* interpreter_bytecode_dispatch =
       isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
 
   return (pc >= interpreter_entry_trampoline->instruction_start() &&
           pc < interpreter_entry_trampoline->instruction_end()) ||
+         (pc >= interpreter_bytecode_advance->instruction_start() &&
+          pc < interpreter_bytecode_advance->instruction_end()) ||
          (pc >= interpreter_bytecode_dispatch->instruction_start() &&
           pc < interpreter_bytecode_dispatch->instruction_end());
 }
@@ -1219,9 +1223,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames,
       abstract_code = AbstractCode::cast(code);
     } else {
       DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
-      // BailoutId points to the next bytecode in the bytecode aray. Subtract
-      // 1 to get the end of current bytecode.
-      code_offset = bailout_id.ToInt() - 1;
+      code_offset = bailout_id.ToInt();  // Points to current bytecode.
       abstract_code = AbstractCode::cast(shared_info->bytecode_array());
     }
     FrameSummary summary(receiver, function, abstract_code, code_offset,
@@ -5055,6 +5055,7 @@ inline bool Code::is_hydrogen_stub() {
 inline bool Code::is_interpreter_trampoline_builtin() {
   Builtins* builtins = GetIsolate()->builtins();
   return this == *builtins->InterpreterEntryTrampoline() ||
+         this == *builtins->InterpreterEnterBytecodeAdvance() ||
          this == *builtins->InterpreterEnterBytecodeDispatch();
 }
 
@@ -171,5 +171,19 @@ RUNTIME_FUNCTION(Runtime_InterpreterSetPendingMessage) {
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_InterpreterAdvanceBytecodeOffset) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
+  CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
+  interpreter::BytecodeArrayIterator it(bytecode_array);
+  int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+  while (it.current_offset() < offset) it.Advance();
+  DCHECK_EQ(offset, it.current_offset());
+  it.Advance();  // Advance by one bytecode.
+  offset = it.current_offset() + BytecodeArray::kHeaderSize - kHeapObjectTag;
+  return Smi::FromInt(offset);
+}
+
 }  // namespace internal
 }  // namespace v8
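A note on the arithmetic in Runtime_InterpreterAdvanceBytecodeOffset above: the interpreter frame stores the bytecode offset relative to the start of the tagged BytecodeArray object (header included), while BytecodeArrayIterator works with offsets relative to the first bytecode, so the runtime function strips the header bias on entry and adds it back on exit. The sketch below is plain C++ for illustration only; the constant values are made up, the real ones come from V8's BytecodeArray layout.

```cpp
#include <cassert>

// Hypothetical stand-ins for V8's layout constants (illustration only).
constexpr int kHeaderSizeGuess = 16;    // plays the role of BytecodeArray::kHeaderSize
constexpr int kHeapObjectTagGuess = 1;  // plays the role of kHeapObjectTag

// Convert a frame-stored (header-biased) offset to an iterator offset.
int FrameOffsetToArrayOffset(int frame_offset) {
  return frame_offset - kHeaderSizeGuess + kHeapObjectTagGuess;
}

// Convert an iterator offset back to the header-biased form stored in the frame.
int ArrayOffsetToFrameOffset(int array_offset) {
  return array_offset + kHeaderSizeGuess - kHeapObjectTagGuess;
}

int main() {
  int frame_offset = ArrayOffsetToFrameOffset(7);
  assert(FrameOffsetToArrayOffset(frame_offset) == 7);  // the bias round-trips
  return 0;
}
```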
@@ -215,7 +215,8 @@ namespace internal {
   F(InterpreterTraceBytecodeEntry, 3, 1) \
   F(InterpreterTraceBytecodeExit, 3, 1) \
   F(InterpreterClearPendingMessage, 0, 1) \
-  F(InterpreterSetPendingMessage, 1, 1)
+  F(InterpreterSetPendingMessage, 1, 1) \
+  F(InterpreterAdvanceBytecodeOffset, 2, 1)
 
 #define FOR_EACH_INTRINSIC_FUNCTION(F) \
   F(FunctionGetName, 1, 1) \
@@ -22,11 +22,4 @@
   'debug/debug-stepout-scope-part7': [SKIP],
   'debug/debug-stepout-scope-part8': [SKIP],
 }],  # 'gc_stress == True'
-
-##############################################################################
-['variant == turbofan_opt', {
-
-  # TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
-  'debug/ignition/debug-break-on-stack': [FAIL],
-}],  # variant == turbofan_opt
 ]
test/mjsunit/regress/regress-crbug-660379.js (new file, 42 lines)
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function InlinedThrowAtEndOfTry() {
+  function g() {
+    %DeoptimizeFunction(f);
+    throw "boom";
+  }
+  function f() {
+    try {
+      g();  // Right at the end of try.
+    } catch (e) {
+      assertEquals("boom", e)
+    }
+  }
+  assertDoesNotThrow(f);
+  assertDoesNotThrow(f);
+  %OptimizeFunctionOnNextCall(f);
+  assertDoesNotThrow(f);
+})();
+
+(function InlinedThrowInFrontOfTry() {
+  function g() {
+    %DeoptimizeFunction(f);
+    throw "boom";
+  }
+  function f() {
+    g();  // Right in front of try.
+    try {
+      Math.random();
+    } catch (e) {
+      assertUnreachable();
+    }
+  }
+  assertThrows(f);
+  assertThrows(f);
+  %OptimizeFunctionOnNextCall(f);
+  assertThrows(f);
+})();