[assembler][x64] Add scoped CodeComment helper for nested comments

CodeComment nicely indents nested comments, making disassembled
code easier to read.

In addition, there are two helper macros:
- ASM_CODE_COMMENT adds the current function name as a comment
- ASM_CODE_COMMENT_STRING can be used with a custom comment string
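
Illustrative usage (hypothetical function and comment names, not
part of this change):

  void TurboAssembler::Foo() {
    ASM_CODE_COMMENT(this);  // opens "[ Foo", closes "]" at scope end
    {
      ASM_CODE_COMMENT_STRING(this, "fast path");  // nested one level
      // ... emitted instructions ...
    }
  }

With --code-comments enabled, the disassembled output then contains:

  [ Foo
    [ fast path
    ]
  ]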

Bug: v8:11879
Change-Id: If5ff7e315f5acebe613f24b20d34694155f928d3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2960888
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75152}
Camillo Bruni 2021-06-15 14:20:09 +02:00 committed by V8 LUCI CQ
parent 8ceaec1750
commit c9249db663
8 changed files with 272 additions and 150 deletions

src/baseline/baseline-compiler.cc

@@ -386,9 +386,8 @@ MemOperand BaselineCompiler::FeedbackVector() {
}
void BaselineCompiler::LoadFeedbackVector(Register output) {
__ RecordComment("[ LoadFeedbackVector");
ASM_CODE_COMMENT(&masm_);
__ Move(output, __ FeedbackVectorOperand());
__ RecordComment("]");
}
void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
@@ -463,12 +462,13 @@ void BaselineCompiler::VisitSingleBytecode() {
// and exception handling, when CFI is enabled.
__ JumpTarget();
#ifdef V8_CODE_COMMENTS
std::ostringstream str;
if (FLAG_code_comments) {
std::ostringstream str;
str << "[ ";
iterator().PrintTo(str);
__ RecordComment(str.str().c_str());
}
ASM_CODE_COMMENT_STRING(&masm_, str.str());
#endif
VerifyFrame();
@@ -484,7 +484,6 @@ void BaselineCompiler::VisitSingleBytecode() {
BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
}
__ RecordComment("]");
#ifdef V8_TRACE_UNOPTIMIZED
TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
@@ -493,7 +492,7 @@ void BaselineCompiler::VisitSingleBytecode() {
void BaselineCompiler::VerifyFrame() {
if (FLAG_debug_code) {
__ RecordComment("[ Verify frame");
ASM_CODE_COMMENT(&masm_);
__ RecordComment(" -- Verify frame size");
VerifyFrameSize();
@@ -512,8 +511,6 @@ void BaselineCompiler::VerifyFrame() {
}
// TODO(leszeks): More verification.
__ RecordComment("]");
}
}
@@ -545,7 +542,7 @@ INTRINSICS_LIST(DECLARE_VISITOR)
void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
int weight, Label* label, Label* skip_interrupt_label) {
if (weight != 0) {
__ RecordComment("[ Update Interrupt Budget");
ASM_CODE_COMMENT(&masm_);
__ AddToInterruptBudgetAndJumpIfNotExceeded(weight, skip_interrupt_label);
if (weight < 0) {
@@ -555,7 +552,6 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
}
}
if (label) __ Jump(label);
if (weight != 0) __ RecordComment("]");
}
void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJump() {
@@ -591,10 +587,9 @@ Label* BaselineCompiler::BuildForwardJumpLabel() {
template <Builtin kBuiltin, typename... Args>
void BaselineCompiler::CallBuiltin(Args... args) {
__ RecordComment("[ CallBuiltin");
ASM_CODE_COMMENT(&masm_);
detail::MoveArgumentsForBuiltin<kBuiltin>(&basm_, args...);
__ CallBuiltin(kBuiltin);
__ RecordComment("]");
}
template <Builtin kBuiltin, typename... Args>
@@ -1940,15 +1935,17 @@ void BaselineCompiler::VisitJumpLoop() {
BaselineAssembler::ScratchRegisterScope scope(&basm_);
Register scratch = scope.AcquireScratch();
Label osr_not_armed;
__ RecordComment("[ OSR Check Armed");
Register osr_level = scratch;
__ LoadRegister(osr_level, interpreter::Register::bytecode_array());
__ LoadByteField(osr_level, osr_level, BytecodeArray::kOsrNestingLevelOffset);
int loop_depth = iterator().GetImmediateOperand(1);
__ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth,
&osr_not_armed);
CallBuiltin<Builtin::kBaselineOnStackReplacement>();
__ RecordComment("]");
{
ASM_CODE_COMMENT_STRING(&masm_, "OSR Check Armed");
Register osr_level = scratch;
__ LoadRegister(osr_level, interpreter::Register::bytecode_array());
__ LoadByteField(osr_level, osr_level,
BytecodeArray::kOsrNestingLevelOffset);
int loop_depth = iterator().GetImmediateOperand(1);
__ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth,
&osr_not_armed);
CallBuiltin<Builtin::kBaselineOnStackReplacement>();
}
__ Bind(&osr_not_armed);
Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked;
@@ -2147,7 +2144,7 @@ void BaselineCompiler::VisitReThrow() {
}
void BaselineCompiler::VisitReturn() {
__ RecordComment("[ Return");
ASM_CODE_COMMENT_STRING(&masm_, "Return");
int profiling_weight = iterator().current_offset() +
iterator().current_bytecode_size_without_prefix();
int parameter_count = bytecode_->parameter_count();
@@ -2159,7 +2156,6 @@ void BaselineCompiler::VisitReturn() {
// computation. We'll account for it at the end.
TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
parameter_count_without_receiver, -profiling_weight);
__ RecordComment("]");
}
void BaselineCompiler::VisitThrowReferenceErrorIfHole() {

src/baseline/x64/baseline-assembler-x64-inl.h

@@ -122,9 +122,9 @@ void BaselineAssembler::CallBuiltin(Builtin builtin) {
// Generate pc-relative call.
__ CallBuiltin(builtin);
} else {
__ RecordCommentForOffHeapTrampoline(builtin);
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("call", builtin));
__ Call(__ EntryFromBuiltinAsOperand(builtin));
__ RecordComment("]");
}
}
@@ -133,9 +133,9 @@ void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
// Generate pc-relative jump.
__ TailCallBuiltin(builtin);
} else {
__ RecordCommentForOffHeapTrampoline(builtin);
ASM_CODE_COMMENT_STRING(
masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
__ Jump(__ EntryFromBuiltinAsOperand(builtin));
__ RecordComment("]");
}
}

src/baseline/x64/baseline-compiler-x64-inl.h

@@ -16,6 +16,7 @@ namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
ASM_CODE_COMMENT(&masm_);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
@@ -26,7 +27,7 @@ void BaselineCompiler::Prologue() {
}
void BaselineCompiler::PrologueFillFrame() {
__ RecordComment("[ Fill frame");
ASM_CODE_COMMENT(&masm_);
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
@@ -74,10 +75,10 @@ void BaselineCompiler::PrologueFillFrame() {
__ masm()->decl(scratch);
__ masm()->j(greater, &loop);
}
__ RecordComment("]");
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ Move(kScratchRegister, rsp);
__ masm()->addq(kScratchRegister,
Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +

src/builtins/x64/builtins-x64.cc

@@ -56,6 +56,7 @@ static void GenerateTailCallToReturnedCode(
// -- rdx : new target (preserved for callee)
// -- rdi : target function (preserved for callee)
// -----------------------------------
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the target function, the new target and the actual
@@ -658,8 +659,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
__ LoadMap(scratch1, sfi_data);
__ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
@@ -833,6 +834,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register closure,
Register scratch1,
Register slot_address) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
DCHECK_EQ(closure, kJSFunctionRegister);
// Store the optimized code in the closure.
@@ -849,6 +851,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
ASM_CODE_COMMENT(masm);
Register params_size = scratch1;
// Get the size of the formal parameters + receiver (in bytes).
__ movq(params_size,
@@ -887,6 +890,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register actual_marker,
OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
__ Cmp(actual_marker, expected_marker);
__ j(not_equal, &no_match);
@@ -903,7 +907,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker.
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if
@@ -937,6 +941,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// rsi : current context, used for the runtime call
// rdi : target function (preserved for callee if needed, and caller)
// -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK_EQ(closure, kJSFunctionRegister);
DCHECK(!AreAliased(rax, rdx, closure, rsi, optimized_code_entry, scratch1,
scratch2));
@@ -982,6 +987,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_offset,
Register bytecode, Register scratch1,
Register scratch2, Label* if_return) {
ASM_CODE_COMMENT(masm);
Register bytecode_size_table = scratch1;
// The bytecode offset value will be increased by one in wide and extra wide
@@ -1059,21 +1065,19 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static void LoadOptimizationStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_marker) {
__ RecordComment("[ Check optimization state");
ASM_CODE_COMMENT(masm);
__ movl(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ testl(
optimization_state,
Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ j(not_zero, has_optimized_code_or_marker);
__ RecordComment("]");
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Register closure, JumpMode jump_mode = JumpMode::kJump) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
Label maybe_has_optimized_code;
__ testl(
@@ -1350,6 +1354,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register num_args,
Register start_address,
Register scratch) {
ASM_CODE_COMMENT(masm);
// Find the argument with lowest address.
__ movq(scratch, num_args);
__ negq(scratch);
@@ -1661,55 +1666,55 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ incl(
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
__ RecordComment("[ Frame Setup");
// Save the return address, so that we can push it to the end of the newly
// set-up frame once we're done setting it up.
__ PopReturnAddressTo(return_address);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::BASELINE);
{
ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
// Save the return address, so that we can push it to the end of the newly
// set-up frame once we're done setting it up.
__ PopReturnAddressTo(return_address);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::BASELINE);
__ Push(descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kCalleeContext)); // Callee's
// context.
Register callee_js_function = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
DCHECK_EQ(callee_js_function, kJSFunctionRegister);
__ Push(callee_js_function); // Callee's JS function.
__ Push(descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::
kJavaScriptCallArgCount)); // Actual argument
// count.
__ Push(descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kCalleeContext)); // Callee's
// context.
Register callee_js_function = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
DCHECK_EQ(callee_js_function, kJSFunctionRegister);
__ Push(callee_js_function); // Callee's JS function.
__ Push(descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::
kJavaScriptCallArgCount)); // Actual argument
// count.
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
Immediate(0));
__ Push(bytecode_array);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
Immediate(0));
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
__ Push(feedback_vector);
__ RecordComment("]");
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
__ Push(feedback_vector);
}
Register new_target = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard;
Register frame_size = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{
ASM_CODE_COMMENT_STRING(masm, " Stack/interrupt check");
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real stack
@@ -1735,7 +1740,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&has_optimized_code_or_marker);
{
__ RecordComment("[ Optimized marker check");
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the return address, rebalancing the return stack buffer by using
// JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on
// return since we may do a runtime call along the way that requires the
@@ -1745,12 +1750,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
feedback_vector, closure,
JumpMode::kPushAndReturn);
__ Trap();
__ RecordComment("]");
}
__ bind(&call_stack_guard);
{
__ RecordComment("[ Stack/interrupt call");
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
{
// Push the baseline code return address now, as if it had been pushed by
// the call to this builtin.
@@ -1767,7 +1771,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Return to caller pushed pc, without any frame teardown.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret();
__ RecordComment("]");
}
}
@@ -1775,6 +1778,7 @@ namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
bool with_result) {
ASM_CODE_COMMENT(masm);
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {

src/codegen/assembler.cc

@@ -34,6 +34,9 @@
#include "src/codegen/assembler.h"
#ifdef V8_CODE_COMMENTS
#include <iomanip>
#endif
#include "src/codegen/assembler-inl.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -308,5 +311,24 @@ int Assembler::WriteCodeComments() {
return size;
}
#ifdef V8_CODE_COMMENTS
int Assembler::CodeComment::depth() const { return assembler_->comment_depth_; }
void Assembler::CodeComment::Open(const std::string& comment) {
std::stringstream sstream;
sstream << std::setfill(' ') << std::setw(depth() * kIndentWidth + 2);
sstream << "[ " << comment;
assembler_->comment_depth_++;
assembler_->RecordComment(sstream.str());
}
void Assembler::CodeComment::Close() {
assembler_->comment_depth_--;
std::string comment = "]";
comment.insert(0, depth() * kIndentWidth, ' ');
DCHECK_LE(0, depth());
assembler_->RecordComment(comment);
}
#endif
} // namespace internal
} // namespace v8
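
A minimal standalone sketch of the indentation logic in
CodeComment::Open() above (assuming the same kIndentWidth of 2 as in
assembler.h): std::setw pads the "[ " marker by two spaces per
nesting level.

  #include <iomanip>
  #include <iostream>
  #include <sstream>

  int main() {
    const int kIndentWidth = 2;
    for (int depth = 0; depth < 3; depth++) {
      std::stringstream sstream;
      // setw() pads the next insertion ("[ ") to depth * kIndentWidth
      // + 2 characters, right-justified with spaces.
      sstream << std::setfill(' ') << std::setw(depth * kIndentWidth + 2);
      sstream << "[ " << "nested comment";
      std::cout << sstream.str() << "\n";
    }
    // Prints:
    // [ nested comment
    //   [ nested comment
    //     [ nested comment
    return 0;
  }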

src/codegen/assembler.h

@@ -288,15 +288,48 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Record an inline code comment that can be used by a disassembler.
// Use --code-comments to enable.
V8_INLINE void RecordComment(const char* msg) {
V8_INLINE void RecordComment(const char* comment) {
// Set explicit dependency on --code-comments for dead-code elimination in
// release builds.
if (!FLAG_code_comments) return;
if (options().emit_code_comments) {
code_comments_writer_.Add(pc_offset(), std::string(msg));
code_comments_writer_.Add(pc_offset(), std::string(comment));
}
}
V8_INLINE void RecordComment(std::string comment) {
// Set explicit dependency on --code-comments for dead-code elimination in
// release builds.
if (!FLAG_code_comments) return;
if (options().emit_code_comments) {
code_comments_writer_.Add(pc_offset(), std::move(comment));
}
}
#ifdef V8_CODE_COMMENTS
class CodeComment {
public:
explicit CodeComment(Assembler* assembler, const std::string& comment)
: assembler_(assembler) {
if (FLAG_code_comments) Open(comment);
}
~CodeComment() {
if (FLAG_code_comments) Close();
}
static const int kIndentWidth = 2;
private:
int depth() const;
void Open(const std::string& comment);
void Close();
Assembler* assembler_;
};
#else // V8_CODE_COMMENTS
class CodeComment {
explicit CodeComment(Assembler* assembler, std::string comment) {}
};
#endif
// The minimum buffer size. Should be at least two times the platform-specific
// {Assembler::kGap}.
static constexpr int kMinimalBufferSize = 128;
@@ -386,6 +419,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
JumpOptimizationInfo* jump_optimization_info_;
#ifdef V8_CODE_COMMENTS
int comment_depth_ = 0;
#endif
// Constant pool.
friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope;
@@ -416,6 +453,15 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
#endif
};
#ifdef V8_CODE_COMMENTS
#define ASM_CODE_COMMENT(asm) ASM_CODE_COMMENT_STRING(asm, __func__)
#define ASM_CODE_COMMENT_STRING(asm, comment) \
AssemblerBase::CodeComment asm_code_comment(asm, comment)
#else
#define ASM_CODE_COMMENT(asm)
#define ASM_CODE_COMMENT_STRING(asm, ...)
#endif
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_ASSEMBLER_H_
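
One caveat on the macros above: ASM_CODE_COMMENT_STRING declares a
local variable named asm_code_comment, so two uses in the same scope
would redeclare it; nested comments therefore need nested scopes.
A hypothetical sketch:

  {
    ASM_CODE_COMMENT_STRING(masm, "outer");
    {
      ASM_CODE_COMMENT_STRING(masm, "inner");  // ok: its own scope
    }
    // A second ASM_CODE_COMMENT_STRING here would redeclare
    // asm_code_comment and fail to compile.
  }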

src/codegen/turbo-assembler.h

@@ -99,6 +99,15 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static constexpr int kStackPageSize = 4 * KB;
#endif
V8_INLINE std::string CommentForOffHeapTrampoline(const char* prefix,
Builtin builtin) {
if (!FLAG_code_comments) return "";
std::ostringstream str;
str << "Inlined Trampoline for " << prefix << " to "
<< Builtins::name(builtin);
return str.str();
}
V8_INLINE void RecordCommentForOffHeapTrampoline(Builtin builtin) {
if (!FLAG_code_comments) return;
std::ostringstream str;
@@ -128,6 +137,8 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
bool has_frame_ = false;
int comment_depth_ = 0;
DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
};

src/codegen/x64/macro-assembler-x64.cc

@@ -293,33 +293,29 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedSigned");
ASM_CODE_COMMENT(this);
movl(destination, field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
ASM_CODE_COMMENT(this);
movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
ASM_CODE_COMMENT(this);
movl(destination, source);
addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand) {
RecordComment("[ DecompressAnyTagged");
ASM_CODE_COMMENT(this);
movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -327,6 +323,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, value, slot_address));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -343,6 +340,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
leaq(slot_address, FieldOperand(object, offset));
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
Label ok;
testb(slot_address, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
@@ -358,6 +356,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
Move(value, kZapValue, RelocInfo::NONE);
Move(slot_address, kZapValue, RelocInfo::NONE);
}
@@ -414,6 +413,7 @@ void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
void TurboAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address,
SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address));
RegList registers =
WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
@@ -434,6 +434,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(
Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
StubCallMode mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address));
RegList registers =
WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
@@ -452,6 +453,7 @@ void TurboAssembler::CallRecordWriteStub(
Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
StubCallMode mode) {
ASM_CODE_COMMENT(this);
// Use CallRecordWriteStubSaveRegisters if the object and slot registers
// need to be caller saved.
DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
@@ -481,6 +483,7 @@ void TurboAssembler::CallRecordWriteStub(
void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(address, value));
TSANRelaxedStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
@@ -568,6 +571,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address, value));
AssertNotSmi(object);
@@ -578,6 +582,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
}
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
Label ok;
cmp_tagged(value, Operand(slot_address, 0));
j(equal, &ok, Label::kNear);
@@ -611,6 +616,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
Move(slot_address, kZapValue, RelocInfo::NONE);
Move(value, kZapValue, RelocInfo::NONE);
}
@@ -636,6 +642,7 @@ void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kSystemPointerSize) {
ASM_CODE_COMMENT(this);
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
@@ -647,6 +654,7 @@ void TurboAssembler::CheckStackAlignment() {
}
void TurboAssembler::Abort(AbortReason reason) {
ASM_CODE_COMMENT(this);
if (FLAG_code_comments) {
const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
@ -685,6 +693,7 @@ void TurboAssembler::Abort(AbortReason reason) {
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
ASM_CODE_COMMENT(this);
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -711,7 +720,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// For runtime functions with variable arguments:
// -- rax : number of arguments
// -----------------------------------
ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -722,6 +731,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
@@ -756,6 +766,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
@@ -788,6 +799,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
@@ -1772,8 +1784,8 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
}
void TurboAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
DCHECK(Builtins::IsBuiltinId(builtin));
RecordCommentForOffHeapTrampoline(builtin);
CHECK_NE(builtin, Builtin::kNoBuiltinId);
if (options().short_builtin_calls) {
EmbeddedData d = EmbeddedData::FromBlob(isolate());
@@ -1786,12 +1798,12 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
}
RecordComment("]");
}
void TurboAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
DCHECK(Builtins::IsBuiltinId(builtin));
RecordCommentForOffHeapTrampoline(builtin);
CHECK_NE(builtin, Builtin::kNoBuiltinId);
if (options().short_builtin_calls) {
EmbeddedData d = EmbeddedData::FromBlob(isolate());
@@ -1803,11 +1815,11 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
Address entry = d.InstructionStartOfBuiltin(builtin);
Jump(entry, RelocInfo::OFF_HEAP_TARGET);
}
RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -1868,6 +1880,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
}
void TurboAssembler::RetpolineCall(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below.
@@ -1893,6 +1906,7 @@ void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
}
void TurboAssembler::RetpolineJump(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_target, capture_spec;
call(&setup_target);
@@ -2630,73 +2644,74 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
}
void MacroAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
void MacroAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
void MacroAssembler::AssertSmi(Operand object) {
if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (FLAG_debug_code) {
DCHECK_NE(int32_register, kScratchRegister);
movq(kScratchRegister, int64_t{0x0000000100000000});
cmpq(kScratchRegister, int32_register);
Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
DCHECK_NE(int32_register, kScratchRegister);
movq(kScratchRegister, int64_t{0x0000000100000000});
cmpq(kScratchRegister, int32_register);
Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
}
void MacroAssembler::AssertConstructor(Register object) {
if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
LoadMap(object, object);
testb(FieldOperand(object, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
Pop(object);
Check(not_zero, AbortReason::kOperandIsNotAConstructor);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
LoadMap(object, object);
testb(FieldOperand(object, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
Pop(object);
Check(not_zero, AbortReason::kOperandIsNotAConstructor);
}
void MacroAssembler::AssertFunction(Register object) {
if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
LoadMap(object, object);
CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
Pop(object);
Check(below_equal, AbortReason::kOperandIsNotAFunction);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
LoadMap(object, object);
CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
Pop(object);
Check(below_equal, AbortReason::kOperandIsNotAFunction);
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Pop(object);
Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Pop(object);
Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2724,19 +2739,19 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
Cmp(object, isolate()->factory()->undefined_value());
j(equal, &done_checking);
Register map = object;
Push(object);
LoadMap(map, object);
Cmp(map, isolate()->factory()->allocation_site_map());
Pop(object);
Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
Label done_checking;
AssertNotSmi(object);
Cmp(object, isolate()->factory()->undefined_value());
j(equal, &done_checking);
Register map = object;
Push(object);
LoadMap(map, object);
Cmp(map, isolate()->factory()->allocation_site_map());
Pop(object);
Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
@@ -2749,6 +2764,7 @@ void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
ASM_CODE_COMMENT(this);
Operand counter_operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter));
// This operation has to be exactly 32-bit wide in case the external
@@ -2765,6 +2781,7 @@ void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
ASM_CODE_COMMENT(this);
Operand counter_operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter));
// This operation has to be exactly 32-bit wide in case the external
@@ -2781,6 +2798,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count,
Register scratch0, Register scratch1) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the destination address where we will put the return address
@@ -2829,6 +2847,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
ASM_CODE_COMMENT(this);
LoadTaggedPointerField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
@@ -2852,6 +2871,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, rdi);
@@ -2918,6 +2938,7 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(
Register num_args, Register scratch, Label* stack_overflow,
Label::Distance stack_overflow_distance) {
ASM_CODE_COMMENT(this);
DCHECK_NE(num_args, scratch);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -2942,6 +2963,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeType type) {
if (expected_parameter_count != actual_parameter_count) {
ASM_CODE_COMMENT(this);
Label regular_invoke;
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
@@ -3009,6 +3031,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
ASM_CODE_COMMENT(this);
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
@@ -3038,12 +3061,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::Prologue() {
ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp);
Push(kContextRegister); // Callee's context.
@@ -3052,6 +3077,7 @@ void TurboAssembler::Prologue() {
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
pushq(rbp);
movq(rbp, rsp);
if (!StackFrame::IsJavaScript(type)) {
@@ -3060,6 +3086,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
// TODO(v8:11429): Consider passing BASELINE instead, and checking for
// IsJSFrame or similar. Could then unify with manual frame leaves in the
// interpreter too.
@@ -3074,6 +3101,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
ASM_CODE_COMMENT(this);
// On Windows and on macOS, we cannot increment the stack size by more than
// one page (minimum page size is 4KB) without accessing at least one byte on
// the page. Check this:
@@ -3095,6 +3123,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
}
void TurboAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
while (bytes > kStackPageSize) {
subq(rsp, Immediate(kStackPageSize));
@@ -3108,6 +3137,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -3142,6 +3172,7 @@ void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
bool save_doubles) {
ASM_CODE_COMMENT(this);
#ifdef V8_TARGET_OS_WIN
const int kShadowSpace = 4;
arg_stack_space += kShadowSpace;
@@ -3176,6 +3207,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
Register saved_rax_reg = r12;
EnterExitFramePrologue(saved_rax_reg, frame_type);
@@ -3188,11 +3220,13 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
}
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
ASM_CODE_COMMENT(this);
EnterExitFramePrologue(no_reg, StackFrame::EXIT);
EnterExitFrameEpilogue(arg_stack_space, false);
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
ASM_CODE_COMMENT(this);
// Registers:
// r15 : argv
if (save_doubles) {
@@ -3224,6 +3258,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
}
void MacroAssembler::LeaveApiExitFrame() {
ASM_CODE_COMMENT(this);
movq(rsp, rbp);
popq(rbp);
@@ -3231,6 +3266,7 @@ void MacroAssembler::LeaveApiExitFrame() {
}
void MacroAssembler::LeaveExitFrameEpilogue() {
ASM_CODE_COMMENT(this);
// Restore current context from top and clear it in debug mode.
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
@@ -3254,6 +3290,7 @@ static const int kRegisterPassedArguments = 6;
#endif
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
// Load native context.
LoadMap(dst, rsi);
LoadTaggedPointerField(
@@ -3282,6 +3319,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
}
void TurboAssembler::PrepareCallCFunction(int num_arguments) {
ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
DCHECK_NE(frame_alignment, 0);
DCHECK_GE(num_arguments, 0);
@@ -3299,11 +3337,13 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
ASM_CODE_COMMENT(this);
LoadAddress(rax, function);
CallCFunction(rax, num_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
ASM_CODE_COMMENT(this);
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
@@ -3379,6 +3419,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
ASM_CODE_COMMENT(this);
DCHECK(cc == zero || cc == not_zero);
if (scratch == object) {
andq(scratch, Immediate(~kPageAlignmentMask));
@@ -3411,6 +3452,7 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
ASM_CODE_COMMENT(this);
// Note: Assembler::call is used here on purpose to guarantee fixed-size
// exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
// performance tuning which emits a different instruction sequence.