Enable cpplint 'runtime/references' warning
Cpplint usually checks for non-const reference arguments. They are
forbidden in the style guide, and v8 does not explicitly make an
exception here. This CL re-enables that warning and fixes all current
violations by adding an explicit "NOLINT(runtime/references)" comment.
In follow-up CLs, we should aim to remove as many of them as possible.

TBR=mlippautz@chromium.org

Bug: v8:9429
Change-Id: If7054d0b366138b731972ed5d4e304b5ac8423bb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1687891
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62551}
parent 46c3ea3154
commit 8e11cc395a
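For readers unfamiliar with this check, a minimal sketch of what cpplint flags and how this CL suppresses it; the Accumulate function below is hypothetical, not from this CL:

    // cpplint 'runtime/references' flags mutable (non-const) reference
    // parameters, because the call site gives no hint the argument may change:
    void Accumulate(int& total, int value);  // flagged by cpplint

    // This CL keeps such signatures but silences the warning explicitly:
    void Accumulate(int& total,  // NOLINT(runtime/references)
                    int value);

    // The style-guide-preferred fix, intended for follow-up CLs, is an output
    // pointer, which is visible at the call site: Accumulate(&total, value);
    void Accumulate(int* total, int value);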
@@ -3364,7 +3364,8 @@ class V8_EXPORT Object : public Value {
   //
   // Returns true on success.
   V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
-      Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
+      Local<Context> context, Local<Name> key,
+      PropertyDescriptor& descriptor);  // NOLINT(runtime/references)
 
   V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
                                               Local<Value> key);
@@ -154,7 +154,8 @@ class AsmJsParser {
   template <typename T>
   class CachedVector final : public ZoneVector<T> {
    public:
-    explicit CachedVector(CachedVectors<T>& cache)
+    explicit CachedVector(
+        CachedVectors<T>& cache)  // NOLINT(runtime/references)
         : ZoneVector<T>(cache.zone()), cache_(&cache) {
       cache.fill(this);
     }
@@ -45,7 +45,7 @@ class ReversedAdapter {
 // //   iterates through v from back to front
 // }
 template <typename T>
-ReversedAdapter<T> Reversed(T& t) {
+ReversedAdapter<T> Reversed(T& t) {  // NOLINT(runtime/references)
   return ReversedAdapter<T>(t);
 }
 
@@ -39,7 +39,8 @@ namespace internal {
 
 namespace {
 void ConsoleCall(
-    Isolate* isolate, internal::BuiltinArguments& args,
+    Isolate* isolate,
+    internal::BuiltinArguments& args,  // NOLINT(runtime/references)
     void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&,
                                          const v8::debug::ConsoleContext&)) {
   CHECK(!isolate->has_pending_exception());
@@ -36,9 +36,10 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
     kProxyContextLength,
   };
 
-  Node* AllocateJSArrayForCodeStubArguments(Node* context,
-                                            CodeStubArguments& args, Node* argc,
-                                            ParameterMode mode);
+  Node* AllocateJSArrayForCodeStubArguments(
+      Node* context,
+      CodeStubArguments& args,  // NOLINT(runtime/references)
+      Node* argc, ParameterMode mode);
 
  private:
   Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context);
@@ -2135,9 +2135,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object);
 
   // Conversions.
-  void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>& output,
+  void TryHeapNumberToSmi(TNode<HeapNumber> number,
+                          TVariable<Smi>& output,  // NOLINT(runtime/references)
                           Label* if_smi);
-  void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>& output,
+  void TryFloat64ToSmi(TNode<Float64T> number,
+                       TVariable<Smi>& output,  // NOLINT(runtime/references)
                        Label* if_smi);
   TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
   TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
@@ -138,8 +138,9 @@ class ConstantPoolBuilder {
   inline Label* EmittedPosition() { return &emitted_label_; }
 
  private:
-  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
-                                     ConstantPoolEntry::Type type);
+  ConstantPoolEntry::Access AddEntry(
+      ConstantPoolEntry& entry,  // NOLINT(runtime/references)
+      ConstantPoolEntry::Type type);
   void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
   void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                  ConstantPoolEntry::Type type);
@@ -1478,11 +1478,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static bool IsAddImmediate(Instr instr);
   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
   static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
-  static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
-                                  int16_t& jic_offset);
-  static void UnpackTargetAddressUnsigned(uint32_t address,
-                                          uint32_t& lui_offset,
-                                          uint32_t& jic_offset);
+  static void UnpackTargetAddress(
+      uint32_t address, int16_t& lui_offset,  // NOLINT(runtime/references)
+      int16_t& jic_offset);  // NOLINT(runtime/references)
+  static void UnpackTargetAddressUnsigned(
+      uint32_t address,
+      uint32_t& lui_offset,  // NOLINT(runtime/references)
+      uint32_t& jic_offset);  // NOLINT(runtime/references)
 
   static bool IsAndImmediate(Instr instr);
   static bool IsEmittedConstant(Instr instr);
@@ -1513,7 +1515,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 
   // Helper function for memory load/store using base register and offset.
   void AdjustBaseAndOffset(
-      MemOperand& src,
+      MemOperand& src,  // NOLINT(runtime/references)
      OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
      int second_access_add_to_offset = 4);
 
@@ -845,9 +845,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
                       MSARegister wt, BranchDelaySlot bd = PROTECT);
 
-  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
-  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
-                       Register& scratch, const Operand& rt);
+  bool CalculateOffset(Label* L, int32_t& offset,  // NOLINT(runtime/references)
+                       OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t& offset,  // NOLINT(runtime/references)
+                       OffsetSize bits,
+                       Register& scratch,  // NOLINT(runtime/references)
+                       const Operand& rt);
 
   void BranchShortHelperR6(int32_t offset, Label* L);
   void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
@@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 
   // Helper function for memory load/store using base register and offset.
   void AdjustBaseAndOffset(
-      MemOperand& src,
+      MemOperand& src,  // NOLINT(runtime/references)
      OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
      int second_access_add_to_offset = 4);
 
@@ -849,9 +849,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CallCFunctionHelper(Register function, int num_reg_arguments,
                            int num_double_arguments);
 
-  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
-  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
-                       Register& scratch, const Operand& rt);
+  bool CalculateOffset(Label* L, int32_t& offset,  // NOLINT(runtime/references)
+                       OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t& offset,  // NOLINT(runtime/references)
+                       OffsetSize bits,
+                       Register& scratch,  // NOLINT(runtime/references)
+                       const Operand& rt);
 
   void BranchShortHelperR6(int32_t offset, Label* L);
   void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
@@ -31,7 +31,7 @@ class MoreBit : public BitField8<bool, 7, 1> {};
 class ValueBits : public BitField8<unsigned, 0, 7> {};
 
 // Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
-void AddAndSetEntry(PositionTableEntry& value,
+void AddAndSetEntry(PositionTableEntry& value,  // NOLINT(runtime/references)
                     const PositionTableEntry& other) {
   value.code_offset += other.code_offset;
   value.source_position += other.source_position;
@@ -39,7 +39,7 @@ void AddAndSetEntry(PositionTableEntry& value,
 }
 
 // Helper: Subtract the offsets from 'other' from 'value'.
-void SubtractFromEntry(PositionTableEntry& value,
+void SubtractFromEntry(PositionTableEntry& value,  // NOLINT(runtime/references)
                        const PositionTableEntry& other) {
   value.code_offset -= other.code_offset;
   value.source_position -= other.source_position;
@@ -47,7 +47,8 @@ void SubtractFromEntry(PositionTableEntry& value,
 
 // Helper: Encode an integer.
 template <typename T>
-void EncodeInt(std::vector<byte>& bytes, T value) {
+void EncodeInt(std::vector<byte>& bytes,  // NOLINT(runtime/references)
+               T value) {
   using unsigned_type = typename std::make_unsigned<T>::type;
   // Zig-zag encoding.
   static const int kShift = sizeof(T) * kBitsPerByte - 1;
@@ -65,7 +66,8 @@ void EncodeInt(std::vector<byte>& bytes, T value) {
 }
 
 // Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>& bytes, const PositionTableEntry& entry) {
+void EncodeEntry(std::vector<byte>& bytes,  // NOLINT(runtime/references)
+                 const PositionTableEntry& entry) {
   // We only accept ascending code offsets.
   DCHECK_GE(entry.code_offset, 0);
   // Since code_offset is not negative, we use sign to encode is_statement.
@@ -113,8 +115,9 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
 }
 
 #ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(std::vector<PositionTableEntry>& raw_entries,
-                      SourcePositionTableIterator& encoded) {
+void CheckTableEquals(
+    std::vector<PositionTableEntry>& raw_entries,  // NOLINT(runtime/references)
+    SourcePositionTableIterator& encoded) {  // NOLINT(runtime/references)
   // Brute force testing: Record all positions and decode
   // the entire table to verify they are identical.
   auto raw = raw_entries.begin();
@@ -309,9 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   UNREACHABLE();
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode,
-                                   ArmOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode,
+    ArmOperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -320,9 +320,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
   }
 }
 
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
-                                   InstructionCode opcode,
-                                   ArmOperandConverter& i, Register address) {
+void ComputePoisonedAddressForLoad(
+    CodeGenerator* codegen, InstructionCode opcode,
+    ArmOperandConverter& i,  // NOLINT(runtime/references)
+    Register address) {
   DCHECK_EQ(kMemoryAccessPoisoned,
             static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
   switch (AddressingModeField::decode(opcode)) {
@@ -376,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   UNREACHABLE();
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode, Instruction* instr,
-                                   Arm64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+    Arm64OperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
 // Helper struct containing data about a table or lookup switch.
 class SwitchInfo {
  public:
-  SwitchInfo(ZoneVector<CaseInfo>& cases, int32_t min_value, int32_t max_value,
-             BasicBlock* default_branch)
+  SwitchInfo(ZoneVector<CaseInfo>& cases,  // NOLINT(runtime/references)
+             int32_t min_value, int32_t max_value, BasicBlock* default_branch)
       : cases_(cases),
         min_value_(min_value),
         max_value_(max_value),
@@ -496,11 +496,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
                        VectorSlotPair const& feedback,
                        Node* frame_state);
 
-  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
-  void EmitLookupSwitch(const SwitchInfo& sw,
-                        InstructionOperand& value_operand);
-  void EmitBinarySearchSwitch(const SwitchInfo& sw,
-                              InstructionOperand& value_operand);
+  void EmitTableSwitch(
+      const SwitchInfo& sw,
+      InstructionOperand& index_operand);  // NOLINT(runtime/references)
+  void EmitLookupSwitch(
+      const SwitchInfo& sw,
+      InstructionOperand& value_operand);  // NOLINT(runtime/references)
+  void EmitBinarySearchSwitch(
+      const SwitchInfo& sw,
+      InstructionOperand& value_operand);  // NOLINT(runtime/references)
 
   void TryRename(InstructionOperand* op);
   int GetRename(int virtual_register);
@@ -17,14 +17,17 @@ class V8_EXPORT_PRIVATE JumpThreading {
  public:
   // Compute the forwarding map of basic blocks to their ultimate destination.
   // Returns {true} if there is at least one block that is forwarded.
-  static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
-                                InstructionSequence* code, bool frame_at_start);
+  static bool ComputeForwarding(
+      Zone* local_zone,
+      ZoneVector<RpoNumber>& result,  // NOLINT(runtime/references)
+      InstructionSequence* code, bool frame_at_start);
 
   // Rewrite the instructions to forward jumps and branches.
   // May also negate some branches.
-  static void ApplyForwarding(Zone* local_zone,
-                              ZoneVector<RpoNumber>& forwarding,
-                              InstructionSequence* code);
+  static void ApplyForwarding(
+      Zone* local_zone,
+      ZoneVector<RpoNumber>& forwarding,  // NOLINT(runtime/references)
+      InstructionSequence* code);
 };
 
 }  // namespace compiler
@@ -265,8 +265,9 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
   UNREACHABLE();
 }
 
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
-                                             FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+    bool& predicate,  // NOLINT(runtime/references)
+    FlagsCondition condition) {
   switch (condition) {
     case kEqual:
       predicate = true;
@@ -302,9 +303,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
       << "\""; \
   UNIMPLEMENTED();
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode, Instruction* instr,
-                                   MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+    MipsOperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -278,8 +278,9 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
   UNREACHABLE();
 }
 
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
-                                             FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+    bool& predicate,  // NOLINT(runtime/references)
+    FlagsCondition condition) {
   switch (condition) {
     case kEqual:
       predicate = true;
@@ -310,9 +311,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
   UNREACHABLE();
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode, Instruction* instr,
-                                   MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+    MipsOperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -263,8 +263,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
   UNREACHABLE();
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
-                                   PPCOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, Instruction* instr,
+    PPCOperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -1286,11 +1286,13 @@ class LinearScanAllocator final : public RegisterAllocator {
                                    RangeWithRegister::Equals>;
 
   void MaybeUndoPreviousSplit(LiveRange* range);
-  void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
-                          LifetimePosition position, SpillMode spill_mode);
+  void SpillNotLiveRanges(
+      RangeWithRegisterSet& to_be_live,  // NOLINT(runtime/references)
+      LifetimePosition position, SpillMode spill_mode);
   LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
-  void ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
-                        LifetimePosition position);
+  void ReloadLiveRanges(
+      RangeWithRegisterSet& to_be_live,  // NOLINT(runtime/references)
+      LifetimePosition position);
 
   void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
   bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
@@ -1246,8 +1246,9 @@ void AdjustStackPointerForTailCall(
   }
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
-                                   S390OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, Instruction* instr,
+    S390OperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -447,11 +447,13 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
 #endif
 
 template <class CanCombineWithLoad>
-void GenerateRightOperands(InstructionSelector* selector, Node* node,
-                           Node* right, InstructionCode& opcode,
-                           OperandModes& operand_mode,
-                           InstructionOperand* inputs, size_t& input_count,
-                           CanCombineWithLoad canCombineWithLoad) {
+void GenerateRightOperands(
+    InstructionSelector* selector, Node* node, Node* right,
+    InstructionCode& opcode,  // NOLINT(runtime/references)
+    OperandModes& operand_mode,  // NOLINT(runtime/references)
+    InstructionOperand* inputs,
+    size_t& input_count,  // NOLINT(runtime/references)
+    CanCombineWithLoad canCombineWithLoad) {
   S390OperandGenerator g(selector);
 
   if ((operand_mode & OperandMode::kAllowImmediate) &&
@@ -491,11 +493,13 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
 }
 
 template <class CanCombineWithLoad>
-void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
-                           Node* left, Node* right, InstructionCode& opcode,
-                           OperandModes& operand_mode,
-                           InstructionOperand* inputs, size_t& input_count,
-                           CanCombineWithLoad canCombineWithLoad) {
+void GenerateBinOpOperands(
+    InstructionSelector* selector, Node* node, Node* left, Node* right,
+    InstructionCode& opcode,  // NOLINT(runtime/references)
+    OperandModes& operand_mode,  // NOLINT(runtime/references)
+    InstructionOperand* inputs,
+    size_t& input_count,  // NOLINT(runtime/references)
+    CanCombineWithLoad canCombineWithLoad) {
   S390OperandGenerator g(selector);
   // left is always register
   InstructionOperand const left_input = g.UseRegister(left);
@@ -349,7 +349,8 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
 
 void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
                          InstructionCode opcode, Instruction* instr,
-                         X64OperandConverter& i, int pc) {
+                         X64OperandConverter& i,  // NOLINT(runtime/references)
+                         int pc) {
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessProtected) {
@@ -357,9 +358,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
   }
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode, Instruction* instr,
-                                   X64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+    X64OperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -93,8 +93,10 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
 
 namespace {
 
-void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
-                      const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateInLiveness(
+    Bytecode bytecode,
+    BytecodeLivenessState& in_liveness,  // NOLINT(runtime/references)
+    const interpreter::BytecodeArrayAccessor& accessor) {
   int num_operands = Bytecodes::NumberOfOperands(bytecode);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
 
@@ -201,11 +203,13 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
   }
 }
 
-void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
-                       BytecodeLivenessState* next_bytecode_in_liveness,
-                       const interpreter::BytecodeArrayAccessor& accessor,
-                       Handle<BytecodeArray> bytecode_array,
-                       const BytecodeLivenessMap& liveness_map) {
+void UpdateOutLiveness(
+    Bytecode bytecode,
+    BytecodeLivenessState& out_liveness,  // NOLINT(runtime/references)
+    BytecodeLivenessState* next_bytecode_in_liveness,
+    const interpreter::BytecodeArrayAccessor& accessor,
+    Handle<BytecodeArray> bytecode_array,
+    const BytecodeLivenessMap& liveness_map) {
   int current_offset = accessor.current_offset();
 
   // Special case Suspend and Resume to just pass through liveness.
@@ -261,7 +265,8 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
   }
 }
 
-void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+void UpdateLiveness(Bytecode bytecode,
+                    BytecodeLiveness& liveness,  // NOLINT(runtime/references)
                     BytecodeLivenessState** next_bytecode_in_liveness,
                     const interpreter::BytecodeArrayAccessor& accessor,
                     Handle<BytecodeArray> bytecode_array,
@@ -274,8 +279,10 @@ void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
   *next_bytecode_in_liveness = liveness.in;
 }
 
-void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
-                       const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateAssignments(
+    Bytecode bytecode,
+    BytecodeLoopAssignments& assignments,  // NOLINT(runtime/references)
+    const interpreter::BytecodeArrayAccessor& accessor) {
   int num_operands = Bytecodes::NumberOfOperands(bytecode);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
 
@@ -858,10 +858,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
 
   // TODO(jkummerow): The style guide wants pointers for output parameters.
   // https://google.github.io/styleguide/cppguide.html#Output_Parameters
-  bool ToInt32Constant(Node* node, int32_t& out_value);
-  bool ToInt64Constant(Node* node, int64_t& out_value);
+  bool ToInt32Constant(Node* node,
+                       int32_t& out_value);  // NOLINT(runtime/references)
+  bool ToInt64Constant(Node* node,
+                       int64_t& out_value);  // NOLINT(runtime/references)
   bool ToSmiConstant(Node* node, Smi* out_value);
-  bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+  bool ToIntPtrConstant(Node* node,
+                        intptr_t& out_value);  // NOLINT(runtime/references)
 
   bool IsUndefinedConstant(TNode<Object> node);
   bool IsNullConstant(TNode<Object> node);
@@ -33,13 +33,16 @@ struct Diamond {
   }
 
   // Place {this} after {that} in control flow order.
-  void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); }
+  void Chain(Diamond& that) {  // NOLINT(runtime/references)
+    branch->ReplaceInput(1, that.merge);
+  }
 
   // Place {this} after {that} in control flow order.
   void Chain(Node* that) { branch->ReplaceInput(1, that); }
 
   // Nest {this} into either the if_true or if_false branch of {that}.
-  void Nest(Diamond& that, bool if_true) {
+  void Nest(Diamond& that,  // NOLINT(runtime/references)
+            bool if_true) {
     if (if_true) {
       branch->ReplaceInput(1, that.if_true);
       that.merge->ReplaceInput(0, merge);
@@ -59,7 +59,9 @@ class V8_EXPORT_PRIVATE Int64Lowering {
   bool HasReplacementHigh(Node* node);
   Node* GetReplacementHigh(Node* node);
   void PreparePhiReplacement(Node* phi);
-  void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
+  void GetIndexNodes(Node* index,
+                     Node*& index_low,  // NOLINT(runtime/references)
+                     Node*& index_high);  // NOLINT(runtime/references)
   void ReplaceNodeWithProjections(Node* node);
   void LowerMemoryBaseAndIndex(Node* node);
 
@@ -1095,10 +1095,10 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
   return true;
 }
 
-bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
-                                   MapHandles const& receiver_maps,
-                                   std::vector<ElementsKind>& kinds,
-                                   bool builtin_is_push = false) {
+bool CanInlineArrayResizingBuiltin(
+    JSHeapBroker* broker, MapHandles const& receiver_maps,
+    std::vector<ElementsKind>& kinds,  // NOLINT(runtime/references)
+    bool builtin_is_push = false) {
   DCHECK_NE(0, receiver_maps.size());
   for (auto receiver_map : receiver_maps) {
     MapRef map(broker, receiver_map);
@@ -59,7 +59,8 @@ class JSInliner final : public AdvancedReducer {
   SourcePositionTable* const source_positions_;
 
   base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
-  FeedbackVectorRef DetermineCallContext(Node* node, Node*& context_out);
+  FeedbackVectorRef DetermineCallContext(
+      Node* node, Node*& context_out);  // NOLINT(runtime/references)
 
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                    int parameter_count, BailoutId bailout_id,
@@ -153,7 +153,8 @@ class JSTypeHintLowering {
 
  private:
   friend class JSSpeculativeBinopBuilder;
-  Node* TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, Node* control,
+  Node* TryBuildSoftDeopt(FeedbackNexus& nexus,  // NOLINT(runtime/references)
+                          Node* effect, Node* control,
                           DeoptimizeReason reson) const;
 
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -233,7 +233,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
 
   bool FieldsEquals(AbstractFields const& this_fields,
                     AbstractFields const& that_fields) const;
-  void FieldsMerge(AbstractFields& this_fields,
+  void FieldsMerge(AbstractFields& this_fields,  // NOLINT(runtime/references)
                    AbstractFields const& that_fields, Zone* zone);
 
   AbstractElements const* elements_ = nullptr;
@@ -728,8 +728,9 @@ class SpecialRPONumberer : public ZoneObject {
     }
   };
 
-  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
-           BasicBlock* child, int unvisited) {
+  int Push(
+      ZoneVector<SpecialRPOStackFrame>& stack,  // NOLINT(runtime/references)
+      int depth, BasicBlock* child, int unvisited) {
     if (child->rpo_number() == unvisited) {
       stack[depth].block = child;
       stack[depth].index = 0;
@@ -958,8 +959,9 @@ class SpecialRPONumberer : public ZoneObject {
   }
 
   // Computes loop membership from the backedges of the control flow graph.
-  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
-                       size_t num_loops, ZoneVector<Backedge>* backedges) {
+  void ComputeLoopInfo(
+      ZoneVector<SpecialRPOStackFrame>& queue,  // NOLINT(runtime/references)
+      size_t num_loops, ZoneVector<Backedge>* backedges) {
     // Extend existing loop membership vectors.
     for (LoopInfo& loop : loops_) {
       loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
@@ -200,7 +200,7 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
 
   // Appends the hints for the given register range to {dst} (in order).
   void ExportRegisterHints(interpreter::Register first, size_t count,
-                           HintsVector& dst);
+                           HintsVector& dst);  // NOLINT(runtime/references)
 
  private:
  friend std::ostream& operator<<(std::ostream& out, const Environment& env);
@@ -370,8 +370,9 @@ class SerializerForBackgroundCompilation {
                    bool with_spread = false);
   void ProcessApiCall(Handle<SharedFunctionInfo> target,
                       const HintsVector& arguments);
-  void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef& target,
-                                    Handle<Map> receiver);
+  void ProcessReceiverMapForApiCall(
+      FunctionTemplateInfoRef& target,  // NOLINT(runtime/references)
+      Handle<Map> receiver);
   void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
                           const HintsVector& arguments);
 
@@ -405,8 +406,8 @@ class SerializerForBackgroundCompilation {
 
   void ProcessContextAccess(const Hints& context_hints, int slot, int depth,
                             ContextProcessingMode mode);
-  void ProcessImmutableLoad(ContextRef& context, int slot,
-                            ContextProcessingMode mode);
+  void ProcessImmutableLoad(ContextRef& context,  // NOLINT(runtime/references)
+                            int slot, ContextProcessingMode mode);
   void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator);
   void ProcessLdaLookupContextSlot(
       interpreter::BytecodeArrayIterator* iterator);
@@ -392,14 +392,13 @@ MachineType StateValuesAccess::iterator::type() {
   }
 }
 
-
-bool StateValuesAccess::iterator::operator!=(iterator& other) {
+bool StateValuesAccess::iterator::operator!=(
+    iterator& other) {  // NOLINT(runtime/references)
   // We only allow comparison with end().
   CHECK(other.done());
   return !done();
 }
 
-
 StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() {
   Advance();
   return *this;
@@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
   class V8_EXPORT_PRIVATE iterator {
    public:
     // Bare minimum of operators needed for range iteration.
-    bool operator!=(iterator& other);
+    bool operator!=(iterator& other);  // NOLINT(runtime/references)
     iterator& operator++();
     TypedNode operator*();
 
src/d8/d8.cc
@@ -2001,8 +2001,9 @@ int LineFromOffset(Local<debug::Script> script, int offset) {
   return location.GetLineNumber();
 }
 
-void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
-                           int end_line, uint32_t count) {
+void WriteLcovDataForRange(
+    std::vector<uint32_t>& lines,  // NOLINT(runtime/references)
+    int start_line, int end_line, uint32_t count) {
   // Ensure space in the array.
   lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()), 0);
   // Boundary lines could be shared between two functions with different
@@ -2013,10 +2014,10 @@ void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
   for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
 }
 
-void WriteLcovDataForNamedRange(std::ostream& sink,
-                                std::vector<uint32_t>& lines,
-                                const std::string& name, int start_line,
-                                int end_line, uint32_t count) {
+void WriteLcovDataForNamedRange(
+    std::ostream& sink,
+    std::vector<uint32_t>& lines,  // NOLINT(runtime/references)
+    const std::string& name, int start_line, int end_line, uint32_t count) {
   WriteLcovDataForRange(lines, start_line, end_line, count);
   sink << "FN:" << start_line + 1 << "," << name << std::endl;
   sink << "FNDA:" << count << "," << name << std::endl;
@@ -68,7 +68,8 @@ bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
   return a.start < b.start;
 }
 
-void SortBlockData(std::vector<CoverageBlock>& v) {
+void SortBlockData(
+    std::vector<CoverageBlock>& v) {  // NOLINT(runtime/references)
   // Sort according to the block nesting structure.
   std::sort(v.begin(), v.end(), CompareCoverageBlock);
 }
@@ -164,8 +164,9 @@ class WasmScript : public Script {
   uint32_t GetFunctionHash(int function_index);
 };
 
-V8_EXPORT_PRIVATE void GetLoadedScripts(Isolate* isolate,
-                                        PersistentValueVector<Script>& scripts);
+V8_EXPORT_PRIVATE void GetLoadedScripts(
+    Isolate* isolate,
+    PersistentValueVector<Script>& scripts);  // NOLINT(runtime/references)
 
 MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
                                                  Local<String> source);
@@ -937,10 +937,10 @@ class FunctionDataMap : public ThreadVisitor {
   std::map<FuncId, FunctionData> map_;
 };
 
-bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
-                    Handle<Script> new_script,
-                    FunctionDataMap& function_data_map,
-                    debug::LiveEditResult* result) {
+bool CanPatchScript(
+    const LiteralMap& changed, Handle<Script> script, Handle<Script> new_script,
+    FunctionDataMap& function_data_map,  // NOLINT(runtime/references)
+    debug::LiveEditResult* result) {
   debug::LiveEditResult::Status status = debug::LiveEditResult::OK;
   for (const auto& mapping : changed) {
     FunctionData* data = nullptr;
@@ -971,9 +971,10 @@ bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
   return true;
 }
 
-bool CanRestartFrame(Isolate* isolate, Address fp,
-                     FunctionDataMap& function_data_map,
-                     const LiteralMap& changed, debug::LiveEditResult* result) {
+bool CanRestartFrame(
+    Isolate* isolate, Address fp,
+    FunctionDataMap& function_data_map,  // NOLINT(runtime/references)
+    const LiteralMap& changed, debug::LiveEditResult* result) {
   DCHECK_GT(fp, 0);
   StackFrame* restart_frame = nullptr;
   StackFrameIterator it(isolate);
@@ -112,7 +112,9 @@ class StackFrame {
     INNER_JSENTRY_FRAME = (0 << kSmiTagSize) | kSmiTag,
     OUTERMOST_JSENTRY_FRAME = (1 << kSmiTagSize) | kSmiTag
   };
+  // NOLINTNEXTLINE(runtime/references) (false positive)
   STATIC_ASSERT((INNER_JSENTRY_FRAME & kHeapObjectTagMask) != kHeapObjectTag);
+  // NOLINTNEXTLINE(runtime/references) (false positive)
   STATIC_ASSERT((OUTERMOST_JSENTRY_FRAME & kHeapObjectTagMask) !=
                 kHeapObjectTag);
 
@@ -2512,7 +2512,8 @@ float FPAbs<float>(float a) {
 }
 
 template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind,
+                                   T& result) {  // NOLINT(runtime/references)
   if (std::isnan(a) && std::isnan(b)) {
     result = a;
   } else if (std::isnan(a)) {
@@ -5271,7 +5272,8 @@ void Simulator::DecodeTypeMsa3R() {
 }
 
 template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt,
+                       T_reg& wd) {  // NOLINT(runtime/references)
   const T_int all_ones = static_cast<T_int>(-1);
   const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
   const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
@@ -5414,7 +5416,8 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
 }
 
 template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt,
+                        T_reg& wd) {  // NOLINT(runtime/references)
   // using T_uint = typename std::make_unsigned<T_int>::type;
   using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
   const T_int max_int = std::numeric_limits<T_int>::max();
@@ -5859,7 +5862,8 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
 #undef QUIET_BIT_D
 
 template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src,
+                        T_dst& dst,  // NOLINT(runtime/references)
                         Simulator* sim) {
   using T_uint = typename std::make_unsigned<T_int>::type;
   switch (opcode) {
@@ -258,16 +258,26 @@ class Simulator : public SimulatorBase {
   bool set_fcsr_round_error(float original, float rounded);
   bool set_fcsr_round64_error(double original, double rounded);
   bool set_fcsr_round64_error(float original, float rounded);
-  void round_according_to_fcsr(double toRound, double& rounded,
-                               int32_t& rounded_int, double fs);
-  void round_according_to_fcsr(float toRound, float& rounded,
-                               int32_t& rounded_int, float fs);
+  void round_according_to_fcsr(
+      double toRound, double& rounded,  // NOLINT(runtime/references)
+      int32_t& rounded_int,  // NOLINT(runtime/references)
+      double fs);
+  void round_according_to_fcsr(
+      float toRound, float& rounded,  // NOLINT(runtime/references)
+      int32_t& rounded_int,  // NOLINT(runtime/references)
+      float fs);
   template <typename Tfp, typename Tint>
-  void round_according_to_msacsr(Tfp toRound, Tfp& rounded, Tint& rounded_int);
-  void round64_according_to_fcsr(double toRound, double& rounded,
-                                 int64_t& rounded_int, double fs);
-  void round64_according_to_fcsr(float toRound, float& rounded,
-                                 int64_t& rounded_int, float fs);
+  void round_according_to_msacsr(
+      Tfp toRound, Tfp& rounded,  // NOLINT(runtime/references)
+      Tint& rounded_int);  // NOLINT(runtime/references)
+  void round64_according_to_fcsr(
+      double toRound, double& rounded,  // NOLINT(runtime/references)
+      int64_t& rounded_int,  // NOLINT(runtime/references)
+      double fs);
+  void round64_according_to_fcsr(
+      float toRound, float& rounded,  // NOLINT(runtime/references)
+      int64_t& rounded_int,  // NOLINT(runtime/references)
+      float fs);
   // Special case of set_register and get_register to access the raw PC value.
   void set_pc(int32_t value);
   int32_t get_pc() const;
@@ -2507,7 +2507,8 @@ float FPAbs<float>(float a) {
 }
 
 template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind,
+                                   T& result) {  // NOLINT(runtime/references)
   if (std::isnan(a) && std::isnan(b)) {
     result = a;
   } else if (std::isnan(a)) {
@@ -5544,7 +5545,8 @@ void Simulator::DecodeTypeMsa3R() {
 }
 
 template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt,
+                       T_reg& wd) {  // NOLINT(runtime/references)
   const T_int all_ones = static_cast<T_int>(-1);
   const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
   const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
@@ -5687,7 +5689,8 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
 }
 
 template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt,
+                        T_reg& wd) {  // NOLINT(runtime/references)
   // using T_uint = typename std::make_unsigned<T_int>::type;
   using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
   const T_int max_int = std::numeric_limits<T_int>::max();
@@ -6139,7 +6142,8 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
 #undef QUIET_BIT_D
 
 template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src,
+                        T_dst& dst,  // NOLINT(runtime/references)
                         Simulator* sim) {
   using T_uint = typename std::make_unsigned<T_int>::type;
   switch (opcode) {
@@ -255,17 +255,26 @@ class Simulator : public SimulatorBase {
   bool set_fcsr_round64_error(double original, double rounded);
   bool set_fcsr_round_error(float original, float rounded);
   bool set_fcsr_round64_error(float original, float rounded);
-  void round_according_to_fcsr(double toRound, double& rounded,
-                               int32_t& rounded_int, double fs);
-  void round64_according_to_fcsr(double toRound, double& rounded,
-                                 int64_t& rounded_int, double fs);
-  void round_according_to_fcsr(float toRound, float& rounded,
-                               int32_t& rounded_int, float fs);
-  void round64_according_to_fcsr(float toRound, float& rounded,
-                                 int64_t& rounded_int, float fs);
+  void round_according_to_fcsr(
+      double toRound, double& rounded,  // NOLINT(runtime/references)
+      int32_t& rounded_int,  // NOLINT(runtime/references)
+      double fs);
+  void round64_according_to_fcsr(
+      double toRound, double& rounded,  // NOLINT(runtime/references)
+      int64_t& rounded_int,  // NOLINT(runtime/references)
+      double fs);
+  void round_according_to_fcsr(
+      float toRound, float& rounded,  // NOLINT(runtime/references)
+      int32_t& rounded_int,  // NOLINT(runtime/references)
+      float fs);
+  void round64_according_to_fcsr(
+      float toRound, float& rounded,  // NOLINT(runtime/references)
+      int64_t& rounded_int,  // NOLINT(runtime/references)
+      float fs);
   template <typename T_fp, typename T_int>
-  void round_according_to_msacsr(T_fp toRound, T_fp& rounded,
-                                 T_int& rounded_int);
+  void round_according_to_msacsr(
+      T_fp toRound, T_fp& rounded,  // NOLINT(runtime/references)
+      T_int& rounded_int);  // NOLINT(runtime/references)
   void set_fcsr_rounding_mode(FPURoundingMode mode);
   void set_msacsr_rounding_mode(FPURoundingMode mode);
   unsigned int get_fcsr_rounding_mode();
@@ -171,7 +171,8 @@ struct Heap::StrongRootsList {
 
 class IdleScavengeObserver : public AllocationObserver {
  public:
-  IdleScavengeObserver(Heap& heap, intptr_t step_size)
+  IdleScavengeObserver(Heap& heap,  // NOLINT(runtime/references)
+                       intptr_t step_size)
       : AllocationObserver(step_size), heap_(heap) {}
 
   void Step(int bytes_allocated, Address, size_t) override {
@@ -1335,7 +1336,8 @@ intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
   return 0;
 }
 
-void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
+void ReportDuplicates(
+    int size, std::vector<HeapObject>& objects) {  // NOLINT(runtime/references)
   if (objects.size() == 0) return;
 
   sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
@@ -2742,6 +2744,7 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
 #endif
 
 #ifdef V8_HOST_ARCH_32_BIT
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
 #endif
 
@@ -255,7 +255,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
  private:
   class Observer : public AllocationObserver {
    public:
-    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+    Observer(
+        IncrementalMarking& incremental_marking,  // NOLINT(runtime/references)
+        intptr_t step_size)
         : AllocationObserver(step_size),
           incremental_marking_(incremental_marking) {}
 
@@ -12,7 +12,7 @@ namespace internal {
 
 class StressMarkingObserver : public AllocationObserver {
  public:
-  explicit StressMarkingObserver(Heap& heap);
+  explicit StressMarkingObserver(Heap& heap);  // NOLINT(runtime/references)
 
   void Step(int bytes_allocated, Address soon_object, size_t size) override;
 
@@ -12,7 +12,7 @@ namespace internal {
 
 class StressScavengeObserver : public AllocationObserver {
  public:
-  explicit StressScavengeObserver(Heap& heap);
+  explicit StressScavengeObserver(Heap& heap);  // NOLINT(runtime/references)
 
   void Step(int bytes_allocated, Address soon_object, size_t size) override;
 
@@ -45,20 +45,29 @@ class StringUtil {
     return s.find(needle);
   }
   static const size_t kNotFound = String::kNotFound;
-  static void builderAppend(StringBuilder& builder, const String& s) {
+  static void builderAppend(
+      StringBuilder& builder,  // NOLINT(runtime/references)
+      const String& s) {
     builder.append(s);
   }
-  static void builderAppend(StringBuilder& builder, UChar c) {
+  static void builderAppend(
+      StringBuilder& builder,  // NOLINT(runtime/references)
+      UChar c) {
     builder.append(c);
   }
-  static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
+  static void builderAppend(
+      StringBuilder& builder,  // NOLINT(runtime/references)
+      const char* s, size_t len) {
     builder.append(s, len);
   }
   static void builderAppendQuotedString(StringBuilder&, const String&);
-  static void builderReserve(StringBuilder& builder, size_t capacity) {
+  static void builderReserve(
+      StringBuilder& builder,  // NOLINT(runtime/references)
+      size_t capacity) {
     builder.reserveCapacity(capacity);
   }
-  static String builderToString(StringBuilder& builder) {
+  static String builderToString(
+      StringBuilder& builder) {  // NOLINT(runtime/references)
     return builder.toString();
   }
   static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
@@ -496,11 +496,11 @@ void V8Console::valuesCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
   info.GetReturnValue().Set(values);
 }
 
-static void setFunctionBreakpoint(ConsoleHelper& helper, int sessionId,
-                                  v8::Local<v8::Function> function,
-                                  V8DebuggerAgentImpl::BreakpointSource source,
-                                  v8::Local<v8::String> condition,
-                                  bool enable) {
+static void setFunctionBreakpoint(
+    ConsoleHelper& helper,  // NOLINT(runtime/references)
+    int sessionId, v8::Local<v8::Function> function,
+    V8DebuggerAgentImpl::BreakpointSource source,
+    v8::Local<v8::String> condition, bool enable) {
   V8InspectorSessionImpl* session = helper.session(sessionId);
   if (session == nullptr) return;
   if (!session->debuggerAgent()->enabled()) return;
@@ -24,7 +24,7 @@ static const int kMaxAsyncTaskStacks = 128 * 1024;
 static const int kNoBreakpointId = 0;
 
 template <typename Map>
-void cleanupExpiredWeakPointers(Map& map) {
+void cleanupExpiredWeakPointers(Map& map) {  // NOLINT(runtime/references)
   for (auto it = map.begin(); it != map.end();) {
     if (it->second.expired()) {
       it = map.erase(it);
@@ -107,7 +107,8 @@ bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
 }
 
 void innerCallFunctionOn(
-    V8InspectorSessionImpl* session, InjectedScript::Scope& scope,
+    V8InspectorSessionImpl* session,
+    InjectedScript::Scope& scope,  // NOLINT(runtime/references)
     v8::Local<v8::Value> recv, const String16& expression,
     Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
     bool silent, WrapMode wrapMode, bool userGesture, bool awaitPromise,
@@ -228,7 +228,9 @@ static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
                                            static_cast<UChar32>(0xFA082080UL),
                                            static_cast<UChar32>(0x82082080UL)};
 
-static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
+static inline UChar32 readUTF8Sequence(
+    const char*& sequence,  // NOLINT(runtime/references)
+    size_t length) {
   UChar32 character = 0;
 
   // The cases all fall through.
@@ -334,7 +336,8 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
 
 // Helper to write a three-byte UTF-8 code point to the buffer, caller must
 // check room is available.
-static inline void putUTF8Triple(char*& buffer, UChar ch) {
+static inline void putUTF8Triple(char*& buffer,  // NOLINT(runtime/references)
+                                 UChar ch) {
   *buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
   *buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
   *buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
@@ -241,8 +241,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
 
   // Build jump to targets[value], where
   // start_index <= value < start_index + size.
-  void BuildIndexedJump(Register value, size_t start_index, size_t size,
-                        ZoneVector<BytecodeLabel>& targets);
+  void BuildIndexedJump(
+      Register value, size_t start_index, size_t size,
+      ZoneVector<BytecodeLabel>& targets);  // NOLINT(runtime/references)
 
   void BuildNewLocalActivationContext();
   void BuildLocalActivationContextInitialization();
@@ -1123,10 +1123,10 @@ void Logger::SetterCallbackEvent(Name name, Address entry_point) {
 
 namespace {
 
-void AppendCodeCreateHeader(Log::MessageBuilder& msg,
-                            CodeEventListener::LogEventsAndTags tag,
-                            AbstractCode::Kind kind, uint8_t* address, int size,
-                            base::ElapsedTimer* timer) {
+void AppendCodeCreateHeader(
+    Log::MessageBuilder& msg,  // NOLINT(runtime/references)
+    CodeEventListener::LogEventsAndTags tag, AbstractCode::Kind kind,
+    uint8_t* address, int size, base::ElapsedTimer* timer) {
   msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
       << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << kind
       << Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
@@ -1134,9 +1134,10 @@ void AppendCodeCreateHeader(Log::MessageBuilder& msg,
       << Logger::kNext;
 }
 
-void AppendCodeCreateHeader(Log::MessageBuilder& msg,
-                            CodeEventListener::LogEventsAndTags tag,
-                            AbstractCode code, base::ElapsedTimer* timer) {
+void AppendCodeCreateHeader(
+    Log::MessageBuilder& msg,  // NOLINT(runtime/references)
+    CodeEventListener::LogEventsAndTags tag, AbstractCode code,
+    base::ElapsedTimer* timer) {
   AppendCodeCreateHeader(msg, tag, code.kind(),
                          reinterpret_cast<uint8_t*>(code.InstructionStart()),
                          code.InstructionSize(), timer);
@@ -1336,8 +1337,9 @@ void Logger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
 
 namespace {
 
-void CodeLinePosEvent(JitLogger* jit_logger, Address code_start,
-                      SourcePositionTableIterator& iter) {
+void CodeLinePosEvent(
+    JitLogger* jit_logger, Address code_start,
+    SourcePositionTableIterator& iter) {  // NOLINT(runtime/references)
   if (jit_logger) {
     void* jit_handler_data = jit_logger->StartCodePosInfoEvent();
     for (; !iter.done(); iter.Advance()) {
@@ -1415,9 +1417,10 @@ void Logger::SuspectReadEvent(Name name, Object obj) {
 }
 
 namespace {
-void AppendFunctionMessage(Log::MessageBuilder& msg, const char* reason,
-                           int script_id, double time_delta, int start_position,
-                           int end_position, base::ElapsedTimer* timer) {
+void AppendFunctionMessage(
+    Log::MessageBuilder& msg,  // NOLINT(runtime/references)
+    const char* reason, int script_id, double time_delta, int start_position,
+    int end_position, base::ElapsedTimer* timer) {
   msg << "function" << Logger::kNext << reason << Logger::kNext << script_id
       << Logger::kNext << start_position << Logger::kNext << end_position
       << Logger::kNext << time_delta << Logger::kNext
@@ -565,6 +565,7 @@ class FeedbackMetadata : public HeapObject {
 
 // Verify that an empty hash field looks like a tagged object, but can't
 // possibly be confused with a pointer.
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
 STATIC_ASSERT(Name::kEmptyHashField == 0x3);
 // Verify that a set hash field will not look like a tagged object.
@@ -34,11 +34,16 @@ enum StringRepresentationTag {
 };
 const uint32_t kIsIndirectStringMask = 1 << 0;
 const uint32_t kIsIndirectStringTag = 1 << 0;
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((kSlicedStringTag & kIsIndirectStringMask) ==
               kIsIndirectStringTag);
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
 
 // For strings, bit 3 indicates whether the string consists of two-byte
@@ -377,6 +382,7 @@ enum InstanceType : uint16_t {
 constexpr InstanceType LAST_STRING_TYPE =
     static_cast<InstanceType>(FIRST_NONSTRING_TYPE - 1);
 
+// NOLINTNEXTLINE(runtime/references) (false positive)
 STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
 STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
 STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
@@ -938,7 +938,7 @@ icu::Calendar* CreateCalendar(Isolate* isolate, const icu::Locale& icu_locale,
 
 std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
     const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
-    icu::DateTimePatternGenerator& generator) {
+    icu::DateTimePatternGenerator& generator) {  // NOLINT(runtime/references)
   // See https://github.com/tc39/ecma402/issues/225 . The best pattern
   // generation needs to be done in the base locale according to the
   // current spec however odd it may be. See also crbug.com/826549 .
@@ -968,9 +968,9 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
 
 class DateFormatCache {
  public:
-  icu::SimpleDateFormat* Create(const icu::Locale& icu_locale,
-                                const icu::UnicodeString& skeleton,
-                                icu::DateTimePatternGenerator& generator) {
+  icu::SimpleDateFormat* Create(
+      const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
+      icu::DateTimePatternGenerator& generator) {  // NOLINT(runtime/references)
     std::string key;
     skeleton.toUTF8String<std::string>(key);
     key += ":";
@@ -999,7 +999,7 @@ class DateFormatCache {
 
 std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
     const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
-    icu::DateTimePatternGenerator& generator) {
+    icu::DateTimePatternGenerator& generator) {  // NOLINT(runtime/references)
   static base::LazyInstance<DateFormatCache>::type cache =
       LAZY_INSTANCE_INITIALIZER;
   return std::unique_ptr<icu::SimpleDateFormat>(
@@ -1135,7 +1135,8 @@ icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
 std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
     JSDateTimeFormat::DateTimeStyle date_style,
     JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale,
-    Intl::HourCycle hc, icu::DateTimePatternGenerator& generator) {
+    Intl::HourCycle hc,
+    icu::DateTimePatternGenerator& generator) {  // NOLINT(runtime/references)
   std::unique_ptr<icu::SimpleDateFormat> result;
   if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
     if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {

@ -1093,7 +1093,8 @@ Maybe<bool> SetPropertyWithInterceptorInternal(

Maybe<bool> DefinePropertyWithInterceptorInternal(
LookupIterator* it, Handle<InterceptorInfo> interceptor,
Maybe<ShouldThrow> should_throw, PropertyDescriptor& desc) {
Maybe<ShouldThrow> should_throw,
PropertyDescriptor& desc) { // NOLINT(runtime/references)
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.

@ -1644,11 +1644,11 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Jump();
}

V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
PreParserExpression& pattern,
const PreParserExpression& initializer,
int initializer_end_position,
bool is_rest) {
V8_INLINE void AddFormalParameter(
PreParserFormalParameters* parameters,
PreParserExpression& pattern, // NOLINT(runtime/references)
const PreParserExpression& initializer, int initializer_end_position,
bool is_rest) {
DeclarationScope* scope = parameters->scope;
scope->RecordParameter(is_rest);
parameters->UpdateArityAndFunctionLength(!initializer.IsNull(), is_rest);

@ -31,7 +31,8 @@ V8_INLINE constexpr bool operator<(RootIndex lhs, RootIndex rhs) {
return static_cast<type>(lhs) < static_cast<type>(rhs);
}

V8_INLINE RootIndex operator++(RootIndex& index) {
V8_INLINE RootIndex
operator++(RootIndex& index) { // NOLINT(runtime/references)
using type = typename std::underlying_type<RootIndex>::type;
index = static_cast<RootIndex>(static_cast<type>(index) + 1);
return index;

@ -150,8 +150,9 @@ inline void SetHomeObject(Isolate* isolate, JSFunction method,
// shared name.
template <typename Dictionary>
MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
Isolate* isolate, Arguments& args, Smi index, Handle<JSObject> home_object,
Handle<String> name_prefix, Handle<Object> key) {
Isolate* isolate, Arguments& args, // NOLINT(runtime/references)
Smi index, Handle<JSObject> home_object, Handle<String> name_prefix,
Handle<Object> key) {
int int_index = index.value();

// Class constructor and prototype values do not require post processing.

@ -185,9 +186,10 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
// This is a simplified version of GetMethodWithSharedNameAndSetHomeObject()
// function above that is used when it's guaranteed that the method has
// shared name.
Object GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
Arguments& args, Object index,
JSObject home_object) {
Object GetMethodWithSharedNameAndSetHomeObject(
Isolate* isolate,
Arguments& args, // NOLINT(runtime/references)
Object index, JSObject home_object) {
DisallowHeapAllocation no_gc;
int int_index = Smi::ToInt(index);

@ -226,7 +228,8 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(

template <typename Dictionary>
bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
Handle<JSObject> receiver, Arguments& args,
Handle<JSObject> receiver,
Arguments& args, // NOLINT(runtime/references)
bool* install_name_accessor = nullptr) {
Handle<Name> name_string = isolate->factory()->name_string();

@ -284,7 +287,7 @@ bool AddDescriptorsByTemplate(
Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors_template,
Handle<NumberDictionary> elements_dictionary_template,
Handle<JSObject> receiver, Arguments& args) {
Handle<JSObject> receiver, Arguments& args) { // NOLINT(runtime/references)
int nof_descriptors = descriptors_template->number_of_descriptors();

Handle<DescriptorArray> descriptors =

@ -391,7 +394,8 @@ bool AddDescriptorsByTemplate(
Handle<NameDictionary> properties_dictionary_template,
Handle<NumberDictionary> elements_dictionary_template,
Handle<FixedArray> computed_properties, Handle<JSObject> receiver,
bool install_name_accessor, Arguments& args) {
bool install_name_accessor,
Arguments& args) { // NOLINT(runtime/references)
int computed_properties_length = computed_properties->length();

// Shallow-copy properties template.

@ -476,7 +480,8 @@ bool InitClassPrototype(Isolate* isolate,
Handle<ClassBoilerplate> class_boilerplate,
Handle<JSObject> prototype,
Handle<HeapObject> prototype_parent,
Handle<JSFunction> constructor, Arguments& args) {
Handle<JSFunction> constructor,
Arguments& args) { // NOLINT(runtime/references)
Handle<Map> map(prototype->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
map->set_is_prototype_map(true);

@ -523,7 +528,8 @@ bool InitClassPrototype(Isolate* isolate,
bool InitClassConstructor(Isolate* isolate,
Handle<ClassBoilerplate> class_boilerplate,
Handle<HeapObject> constructor_parent,
Handle<JSFunction> constructor, Arguments& args) {
Handle<JSFunction> constructor,
Arguments& args) { // NOLINT(runtime/references)
Handle<Map> map(constructor->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
DCHECK(map->is_prototype_map());

@ -572,11 +578,10 @@ bool InitClassConstructor(Isolate* isolate,
}
}

MaybeHandle<Object> DefineClass(Isolate* isolate,
Handle<ClassBoilerplate> class_boilerplate,
Handle<Object> super_class,
Handle<JSFunction> constructor,
Arguments& args) {
MaybeHandle<Object> DefineClass(
Isolate* isolate, Handle<ClassBoilerplate> class_boilerplate,
Handle<Object> super_class, Handle<JSFunction> constructor,
Arguments& args) { // NOLINT(runtime/references)
Handle<Object> prototype_parent;
Handle<HeapObject> constructor_parent;

@ -26,7 +26,9 @@ namespace internal {
namespace {

void AdvanceToOffsetForTracing(
interpreter::BytecodeArrayIterator& bytecode_iterator, int offset) {
interpreter::BytecodeArrayIterator&
bytecode_iterator, // NOLINT(runtime/references)
int offset) {
while (bytecode_iterator.current_offset() +
bytecode_iterator.current_bytecode_size() <=
offset) {

@ -39,7 +41,8 @@ void AdvanceToOffsetForTracing(
}

void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
interpreter::BytecodeArrayIterator& bytecode_iterator,
interpreter::BytecodeArrayIterator&
bytecode_iterator, // NOLINT(runtime/references)
Handle<Object> accumulator) {
static const char kAccumulator[] = "accumulator";
static const int kRegFieldWidth = static_cast<int>(sizeof(kAccumulator) - 1);

@ -53,7 +53,7 @@ JsonValue ReadMessage() {
return ParseJson(content).value;
}

void WriteMessage(JsonValue& message) {
void WriteMessage(JsonValue& message) { // NOLINT(runtime/references)
std::string content = SerializeToString(message);

Logger::Log("[outgoing] ", content, "\n\n");

@ -330,7 +330,8 @@ void HandleDocumentSymbolRequest(DocumentSymbolRequest request,

} // namespace

void HandleMessage(JsonValue& raw_message, MessageWriter writer) {
void HandleMessage(JsonValue& raw_message, // NOLINT(runtime/references)
MessageWriter writer) {
Request<bool> request(raw_message);

// We ignore responses for now. They are matched to requests

@ -26,7 +26,9 @@ namespace ls {
// To allow unit testing, the "sending" function is configurable.
using MessageWriter = std::function<void(JsonValue&)>;

V8_EXPORT_PRIVATE void HandleMessage(JsonValue& raw_message, MessageWriter);
V8_EXPORT_PRIVATE void HandleMessage(
JsonValue& raw_message, // NOLINT(runtime/references)
MessageWriter);

// Called when a compilation run finishes. Exposed for testability.
V8_EXPORT_PRIVATE void CompilationFinished(TorqueCompilerResult result,

@ -14,7 +14,7 @@ namespace torque {
namespace ls {

JsonValue ReadMessage();
void WriteMessage(JsonValue& message);
void WriteMessage(JsonValue& message); // NOLINT(runtime/references)

} // namespace ls
} // namespace torque

@ -391,7 +391,8 @@ base::Optional<ParseResult> MakeImplicitParameterList(
return ParseResult{ImplicitParameters{kind, parameters}};
}

void AddParameter(ParameterList* parameter_list, NameAndTypeExpression& param) {
void AddParameter(ParameterList* parameter_list,
NameAndTypeExpression& param) { // NOLINT(runtime/references)
if (!IsLowerCamelCase(param.name->value)) {
NamingConventionError("Parameter", param.name, "lowerCamelCase");
}

@ -416,7 +416,8 @@ class LiftoffCompiler {
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
}

void GenerateOutOfLineCode(OutOfLineCode& ool) {
void GenerateOutOfLineCode(
OutOfLineCode& ool) { // NOLINT(runtime/references)
__ bind(ool.label.get());
const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
const bool is_mem_out_of_bounds =

@ -1245,8 +1246,9 @@ class LiftoffCompiler {
}
}

void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
uint32_t local_index) {
void SetLocalFromStackSlot(
LiftoffAssembler::VarState& dst_slot, // NOLINT(runtime/references)
uint32_t local_index) {
auto& state = *__ cache_state();
ValueType type = dst_slot.type();
if (dst_slot.is_reg()) {

@ -1297,8 +1299,10 @@ class LiftoffCompiler {
SetLocal(imm.index, true);
}

Register GetGlobalBaseAndOffset(const WasmGlobal* global,
LiftoffRegList& pinned, uint32_t* offset) {
Register GetGlobalBaseAndOffset(
const WasmGlobal* global,
LiftoffRegList& pinned, // NOLINT(runtime/references)
uint32_t* offset) {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);

@ -1340,13 +1344,15 @@ class LiftoffCompiler {
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}

void GetTable(FullDecoder* decoder, const Value& index, Value* result,
TableIndexImmediate<validate>& imm) {
void GetTable(
FullDecoder* decoder, const Value& index, Value* result,
TableIndexImmediate<validate>& imm) { // NOLINT(runtime/references)
unsupported(decoder, kAnyRef, "table_get");
}

void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
TableIndexImmediate<validate>& imm) {
void SetTable(
FullDecoder* decoder, const Value& index, const Value& value,
TableIndexImmediate<validate>& imm) { // NOLINT(runtime/references)
unsupported(decoder, kAnyRef, "table_set");
}

@ -1414,7 +1420,8 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
std::map<uint32_t, MovableLabel>& br_targets) {
std::map<uint32_t, MovableLabel>&
br_targets) { // NOLINT(runtime/references)
MovableLabel& label = br_targets[br_depth];
if (label.get()->is_bound()) {
__ jmp(label.get());

@ -1428,8 +1435,10 @@ class LiftoffCompiler {
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
BranchTableIterator<validate>& table_iterator,
std::map<uint32_t, MovableLabel>& br_targets) {
BranchTableIterator<validate>&
table_iterator, // NOLINT(runtime/references)
std::map<uint32_t, MovableLabel>&
br_targets) { // NOLINT(runtime/references)
DCHECK_LT(min, max);
// Check base case.
if (max == min + 1) {

@ -1636,8 +1645,9 @@ class LiftoffCompiler {
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
}

Register AddMemoryMasking(Register index, uint32_t* offset,
LiftoffRegList& pinned) {
Register AddMemoryMasking(
Register index, uint32_t* offset,
LiftoffRegList& pinned) { // NOLINT(runtime/references)
if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
return index;
}

@ -2037,7 +2047,9 @@ class LiftoffCompiler {
unsupported(decoder, kBulkMemory, "table.copy");
}
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value& value, Value& delta, Value* result) {
Value& value, // NOLINT(runtime/references)
Value& delta, // NOLINT(runtime/references)
Value* result) {
unsupported(decoder, kAnyRef, "table.grow");
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,

@ -2045,7 +2057,8 @@ class LiftoffCompiler {
unsupported(decoder, kAnyRef, "table.size");
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value& start, Value& value, Value& count) {
Value& start, Value& value, // NOLINT(runtime/references)
Value& count) { // NOLINT(runtime/references)
unsupported(decoder, kAnyRef, "table.fill");
}

@ -1235,8 +1235,9 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,

namespace liftoff {

inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
Condition condition) {
inline FPUCondition ConditionToConditionCmpFPU(
bool& predicate, // NOLINT(runtime/references)
Condition condition) {
switch (condition) {
case kEqual:
predicate = true;

@ -1094,8 +1094,9 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,

namespace liftoff {

inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
Condition condition) {
inline FPUCondition ConditionToConditionCmpFPU(
bool& predicate, // NOLINT(runtime/references)
Condition condition) {
switch (condition) {
case kEqual:
predicate = true;

@ -45,7 +45,7 @@ namespace bin {
////////////////////////////////////////////////////////////////////////////////
// Encoding

void encode_header(char*& ptr) {
void encode_header(char*& ptr) { // NOLINT(runtime/references)
std::memcpy(ptr,
"\x00"
"asm\x01\x00\x00\x00",

@ -53,7 +53,8 @@ void encode_header(char*& ptr) {
ptr += 8;
}

void encode_size32(char*& ptr, size_t n) {
void encode_size32(char*& ptr, // NOLINT(runtime/references)
size_t n) {
assert(n <= 0xffffffff);
for (int i = 0; i < 5; ++i) {
*ptr++ = (n & 0x7f) | (i == 4 ? 0x00 : 0x80);

@ -61,7 +62,8 @@ void encode_size32(char*& ptr, size_t n) {
}
}

void encode_valtype(char*& ptr, const ValType* type) {
void encode_valtype(char*& ptr, // NOLINT(runtime/references)
const ValType* type) {
switch (type->kind()) {
case I32:
*ptr++ = 0x7f;

@ -105,7 +107,8 @@ auto zero_size(const ValType* type) -> size_t {
}
}

void encode_const_zero(char*& ptr, const ValType* type) {
void encode_const_zero(char*& ptr, // NOLINT(runtime/references)
const ValType* type) {
switch (type->kind()) {
case I32:
*ptr++ = 0x41;

@ -171,7 +174,7 @@ auto wrapper(const FuncType* type) -> vec<byte_t> {

// Numbers

auto u32(const byte_t*& pos) -> uint32_t {
auto u32(const byte_t*& pos) -> uint32_t { // NOLINT(runtime/references)
uint32_t n = 0;
uint32_t shift = 0;
byte_t b;

@ -183,7 +186,7 @@ auto u32(const byte_t*& pos) -> uint32_t {
return n;
}

auto u64(const byte_t*& pos) -> uint64_t {
auto u64(const byte_t*& pos) -> uint64_t { // NOLINT(runtime/references)
uint64_t n = 0;
uint64_t shift = 0;
byte_t b;

@ -195,11 +198,13 @@ auto u64(const byte_t*& pos) -> uint64_t {
return n;
}

void u32_skip(const byte_t*& pos) { bin::u32(pos); }
void u32_skip(const byte_t*& pos) { // NOLINT(runtime/references)
bin::u32(pos);
}

// Names

auto name(const byte_t*& pos) -> Name {
auto name(const byte_t*& pos) -> Name { // NOLINT(runtime/references)
auto size = bin::u32(pos);
auto start = pos;
auto name = Name::make_uninitialized(size);

@ -210,7 +215,8 @@ auto name(const byte_t*& pos) -> Name {

// Types

auto valtype(const byte_t*& pos) -> own<wasm::ValType*> {
auto valtype(const byte_t*& pos) // NOLINT(runtime/references)
-> own<wasm::ValType*> {
switch (*pos++) {
case i::wasm::kLocalI32:
return ValType::make(I32);

@ -231,11 +237,12 @@ auto valtype(const byte_t*& pos) -> own<wasm::ValType*> {
return {};
}

auto mutability(const byte_t*& pos) -> Mutability {
auto mutability(const byte_t*& pos) // NOLINT(runtime/references)
-> Mutability {
return *pos++ ? VAR : CONST;
}

auto limits(const byte_t*& pos) -> Limits {
auto limits(const byte_t*& pos) -> Limits { // NOLINT(runtime/references)
auto tag = *pos++;
auto min = bin::u32(pos);
if ((tag & 0x01) == 0) {

@ -246,14 +253,16 @@ auto limits(const byte_t*& pos) -> Limits {
}
}

auto stacktype(const byte_t*& pos) -> vec<ValType*> {
auto stacktype(const byte_t*& pos) // NOLINT(runtime/references)
-> vec<ValType*> {
size_t size = bin::u32(pos);
auto v = vec<ValType*>::make_uninitialized(size);
for (uint32_t i = 0; i < size; ++i) v[i] = bin::valtype(pos);
return v;
}

auto functype(const byte_t*& pos) -> own<FuncType*> {
auto functype(const byte_t*& pos) // NOLINT(runtime/references)
-> own<FuncType*> {
assert(*pos == i::wasm::kWasmFunctionTypeCode);
++pos;
auto params = bin::stacktype(pos);

@ -261,26 +270,29 @@ auto functype(const byte_t*& pos) -> own<FuncType*> {
return FuncType::make(std::move(params), std::move(results));
}

auto globaltype(const byte_t*& pos) -> own<GlobalType*> {
auto globaltype(const byte_t*& pos) // NOLINT(runtime/references)
-> own<GlobalType*> {
auto content = bin::valtype(pos);
auto mutability = bin::mutability(pos);
return GlobalType::make(std::move(content), mutability);
}

auto tabletype(const byte_t*& pos) -> own<TableType*> {
auto tabletype(const byte_t*& pos) // NOLINT(runtime/references)
-> own<TableType*> {
auto elem = bin::valtype(pos);
auto limits = bin::limits(pos);
return TableType::make(std::move(elem), limits);
}

auto memorytype(const byte_t*& pos) -> own<MemoryType*> {
auto memorytype(const byte_t*& pos) // NOLINT(runtime/references)
-> own<MemoryType*> {
auto limits = bin::limits(pos);
return MemoryType::make(limits);
}

// Expressions

void expr_skip(const byte_t*& pos) {
void expr_skip(const byte_t*& pos) { // NOLINT(runtime/references)
switch (*pos++) {
case i::wasm::kExprI32Const:
case i::wasm::kExprI64Const:

@ -842,7 +854,8 @@ struct FuncTypeImpl : ExternTypeImpl {
vec<ValType*> params;
vec<ValType*> results;

FuncTypeImpl(vec<ValType*>& params, vec<ValType*>& results)
FuncTypeImpl(vec<ValType*>& params, // NOLINT(runtime/references)
vec<ValType*>& results) // NOLINT(runtime/references)
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}

@ -895,7 +908,8 @@ struct GlobalTypeImpl : ExternTypeImpl {
own<ValType*> content;
Mutability mutability;

GlobalTypeImpl(own<ValType*>& content, Mutability mutability)
GlobalTypeImpl(own<ValType*>& content, // NOLINT(runtime/references)
Mutability mutability)
: ExternTypeImpl(EXTERN_GLOBAL),
content(std::move(content)),
mutability(mutability) {}

@ -947,7 +961,8 @@ struct TableTypeImpl : ExternTypeImpl {
own<ValType*> element;
Limits limits;

TableTypeImpl(own<ValType*>& element, Limits limits)
TableTypeImpl(own<ValType*>& element, // NOLINT(runtime/references)
Limits limits)
: ExternTypeImpl(EXTERN_TABLE),
element(std::move(element)),
limits(limits) {}

@ -1039,7 +1054,9 @@ struct ImportTypeImpl {
Name name;
own<ExternType*> type;

ImportTypeImpl(Name& module, Name& name, own<ExternType*>& type)
ImportTypeImpl(Name& module, // NOLINT(runtime/references)
Name& name, // NOLINT(runtime/references)
own<ExternType*>& type) // NOLINT(runtime/references)
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}

@ -1082,7 +1099,8 @@ struct ExportTypeImpl {
Name name;
own<ExternType*> type;

ExportTypeImpl(Name& name, own<ExternType*>& type)
ExportTypeImpl(Name& name, // NOLINT(runtime/references)
own<ExternType*>& type) // NOLINT(runtime/references)
: name(std::move(name)), type(std::move(type)) {}

~ExportTypeImpl() {}

@ -45,7 +45,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module, WasmFeatures* detected,
FunctionBody& body) {
FunctionBody& body) { // NOLINT(runtime/references)
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
&zone, module, enabled, detected, body);

@ -34,11 +34,10 @@ struct FunctionBody {
: sig(sig), offset(offset), start(start), end(end) {}
};

V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module,
WasmFeatures* detected,
FunctionBody& body);
V8_EXPORT_PRIVATE DecodeResult
VerifyWasmCode(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, WasmFeatures* detected,
FunctionBody& body); // NOLINT(runtime/references)

enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE

@ -131,8 +131,8 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {

// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
WireBytesRef consume_string(Decoder& decoder, bool validate_utf8,
const char* name) {
WireBytesRef consume_string(Decoder& decoder, // NOLINT(runtime/references)
bool validate_utf8, const char* name) {
uint32_t length = decoder.consume_u32v("string length");
uint32_t offset = decoder.pc_offset();
const byte* string_start = decoder.pc();

@ -151,7 +151,7 @@ WireBytesRef consume_string(Decoder& decoder, bool validate_utf8,
// Automatically skips all unknown sections.
class WasmSectionIterator {
public:
explicit WasmSectionIterator(Decoder& decoder)
explicit WasmSectionIterator(Decoder& decoder) // NOLINT(runtime/references)
: decoder_(decoder),
section_code_(kUnknownSectionCode),
section_start_(decoder.pc()),

@ -1389,7 +1389,9 @@ class ModuleDecoderImpl : public Decoder {
}

template <typename T>
uint32_t consume_index(const char* name, std::vector<T>& vector, T** ptr) {
uint32_t consume_index(const char* name,
std::vector<T>& vector, // NOLINT(runtime/references)
T** ptr) {
const byte* pos = pc_;
uint32_t index = consume_u32v(name);
if (index >= vector.size()) {

@ -1985,7 +1987,7 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,

namespace {

bool FindNameSection(Decoder& decoder) {
bool FindNameSection(Decoder& decoder) { // NOLINT(runtime/references)
static constexpr int kModuleHeaderSize = 8;
decoder.consume_bytes(kModuleHeaderSize, "module header");

@ -144,7 +144,9 @@ class ModuleDecoder {
// If a SectionCode other than kUnknownSectionCode is returned, the decoder
// will point right after the identifier string. Otherwise, the position is
// undefined.
static SectionCode IdentifyUnknownSection(Decoder& decoder, const byte* end);
static SectionCode IdentifyUnknownSection(
Decoder& decoder, // NOLINT(runtime/references)
const byte* end);

private:
const WasmFeatures enabled_features_;

@ -1663,7 +1663,8 @@ class ThreadImpl {
}

template <typename ctype, typename mtype>
bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
int& len, // NOLINT(runtime/references)
MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));

@ -1690,7 +1691,8 @@ class ThreadImpl {
}

template <typename ctype, typename mtype>
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
int& len, // NOLINT(runtime/references)
MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));

@ -1735,7 +1737,8 @@ class ThreadImpl {

template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
Address& address, pc_t pc, int& len,
Address& address, // NOLINT(runtime/references)
pc_t pc, int& len, // NOLINT(runtime/references)
type* val = nullptr, type* val2 = nullptr) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
sizeof(type));

@ -1752,7 +1755,8 @@ class ThreadImpl {
}

bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
InterpreterCode* code, pc_t pc, int& len) {
InterpreterCode* code, pc_t pc,
int& len) { // NOLINT(runtime/references)
switch (opcode) {
case kExprI32SConvertSatF32:
Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));

@ -1916,7 +1920,8 @@ class ThreadImpl {
}

bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
InterpreterCode* code, pc_t pc, int& len) {
InterpreterCode* code, pc_t pc,
int& len) { // NOLINT(runtime/references)
#if V8_TARGET_BIG_ENDIAN
constexpr bool kBigEndian = true;
#else

@ -2123,7 +2128,7 @@ class ThreadImpl {
}

bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
pc_t pc, int& len) {
pc_t pc, int& len) { // NOLINT(runtime/references)
switch (opcode) {
#define SPLAT_CASE(format, sType, valType, num) \
case kExpr##format##Splat: { \

@ -26,7 +26,8 @@ namespace {

// Emit a section code and the size as a padded varint that can be patched
// later.
size_t EmitSection(SectionCode code, ZoneBuffer& buffer) {
size_t EmitSection(SectionCode code,
ZoneBuffer& buffer) { // NOLINT(runtime/references)
// Emit the section code.
buffer.write_u8(code);

@ -35,7 +36,8 @@ size_t EmitSection(SectionCode code, ZoneBuffer& buffer) {
}

// Patch the size of a section after it's finished.
void FixupSection(ZoneBuffer& buffer, size_t start) {
void FixupSection(ZoneBuffer& buffer, // NOLINT(runtime/references)
size_t start) {
buffer.patch_u32v(start, static_cast<uint32_t>(buffer.offset() - start -
kPaddedVarInt32Size));
}

@ -187,9 +187,10 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
}
void DeleteCodeAfter(size_t position);

void WriteSignature(ZoneBuffer& buffer) const;
void WriteBody(ZoneBuffer& buffer) const;
void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
void WriteSignature(ZoneBuffer& buffer) const; // NOLINT(runtime/references)
void WriteBody(ZoneBuffer& buffer) const; // NOLINT(runtime/references)
void WriteAsmWasmOffsetTable(
ZoneBuffer& buffer) const; // NOLINT(runtime/references)

WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }

@ -246,8 +247,9 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
void SetHasSharedMemory();

// Writing methods.
void WriteTo(ZoneBuffer& buffer) const;
void WriteAsmJsOffsetTable(ZoneBuffer& buffer) const;
void WriteTo(ZoneBuffer& buffer) const; // NOLINT(runtime/references)
void WriteAsmJsOffsetTable(
ZoneBuffer& buffer) const; // NOLINT(runtime/references)

Zone* zone() { return zone_; }

@ -18,8 +18,8 @@ namespace wasm {
namespace {

PRINTF_FORMAT(3, 0)
void VPrintFToString(std::string& str, size_t str_offset, const char* format,
va_list args) {
void VPrintFToString(std::string& str, // NOLINT(runtime/references)
size_t str_offset, const char* format, va_list args) {
DCHECK_LE(str_offset, str.size());
size_t len = str_offset + strlen(format);
// Allocate increasingly large buffers until the message fits.

@ -39,8 +39,8 @@ void VPrintFToString(std::string& str, size_t str_offset, const char* format,
}

PRINTF_FORMAT(3, 4)
void PrintFToString(std::string& str, size_t str_offset, const char* format,
...) {
void PrintFToString(std::string& str, // NOLINT(runtime/references)
size_t str_offset, const char* format, ...) {
va_list args;
va_start(args, format);
VPrintFToString(str, str_offset, format, args);

@ -47,7 +47,7 @@ class ZoneAllocator {
template <typename U>
friend class ZoneAllocator;

T* address(T& x) const { return &x; }
T* address(T& x) const { return &x; } // NOLINT(runtime/references)
const T* address(const T& x) const { return &x; }

T* allocate(size_t n, const void* hint = nullptr) {

@ -21,7 +21,8 @@ namespace {
using Label = CodeAssemblerLabel;
using Variable = CodeAssemblerVariable;

Node* SmiTag(CodeAssembler& m, Node* value) {
Node* SmiTag(CodeAssembler& m, // NOLINT(runtime/references)
Node* value) {
int32_t constant_value;
if (m.ToInt32Constant(value, constant_value) &&
Smi::IsValid(constant_value)) {

@ -30,22 +31,25 @@ Node* SmiTag(CodeAssembler& m, Node* value) {
return m.WordShl(value, m.IntPtrConstant(kSmiShiftSize + kSmiTagSize));
}

Node* UndefinedConstant(CodeAssembler& m) {
Node* UndefinedConstant(CodeAssembler& m) { // NOLINT(runtime/references)
return m.LoadRoot(RootIndex::kUndefinedValue);
}

Node* SmiFromInt32(CodeAssembler& m, Node* value) {
Node* SmiFromInt32(CodeAssembler& m, // NOLINT(runtime/references)
Node* value) {
value = m.ChangeInt32ToIntPtr(value);
return m.BitcastWordToTaggedSigned(
m.WordShl(value, kSmiShiftSize + kSmiTagSize));
}

Node* LoadObjectField(CodeAssembler& m, Node* object, int offset,
Node* LoadObjectField(CodeAssembler& m, // NOLINT(runtime/references)
Node* object, int offset,
MachineType type = MachineType::AnyTagged()) {
return m.Load(type, object, m.IntPtrConstant(offset - kHeapObjectTag));
}

Node* LoadMap(CodeAssembler& m, Node* object) {
Node* LoadMap(CodeAssembler& m, // NOLINT(runtime/references)
Node* object) {
return LoadObjectField(m, object, JSObject::kMapOffset);
}

@ -131,7 +135,8 @@ TEST(SimpleTailCallRuntime2Arg) {

namespace {

Handle<JSFunction> CreateSumAllArgumentsFunction(FunctionTester& ft) {
Handle<JSFunction> CreateSumAllArgumentsFunction(
FunctionTester& ft) { // NOLINT(runtime/references)
const char* source =
"(function() {\n"
" var sum = 0 + this;\n"

@ -109,8 +109,8 @@ class TestCode : public HandleAndZoneScope {
}
};

void VerifyForwarding(TestCode& code, int count, int* expected) {
void VerifyForwarding(TestCode& code, // NOLINT(runtime/references)
int count, int* expected) {
v8::internal::AccountingAllocator allocator;
Zone local_zone(&allocator, ZONE_NAME);
ZoneVector<RpoNumber> result(&local_zone);

@ -122,7 +122,6 @@ void VerifyForwarding(TestCode& code, int count, int* expected) {
}
}

TEST(FwEmpty1) {
TestCode code;

@ -611,8 +610,8 @@ void RunPermutedDiamond(int* permutation, int size) {

TEST(FwPermuted_diamond) { RunAllPermutations<4>(RunPermutedDiamond); }

void ApplyForwarding(TestCode& code, int size, int* forward) {
void ApplyForwarding(TestCode& code, // NOLINT(runtime/references)
int size, int* forward) {
code.sequence_.RecomputeAssemblyOrderForTesting();
ZoneVector<RpoNumber> vector(code.main_zone());
for (int i = 0; i < size; i++) {

@ -621,8 +620,8 @@ void ApplyForwarding(TestCode& code, int size, int* forward) {
JumpThreading::ApplyForwarding(code.main_zone(), vector, &code.sequence_);
}

void CheckJump(TestCode& code, int pos, int target) {
void CheckJump(TestCode& code, // NOLINT(runtime/references)
int pos, int target) {
Instruction* instr = code.sequence_.InstructionAt(pos);
CHECK_EQ(kArchJmp, instr->arch_opcode());
CHECK_EQ(1, static_cast<int>(instr->InputCount()));

@ -631,8 +630,8 @@ void CheckJump(TestCode& code, int pos, int target) {
CHECK_EQ(target, code.sequence_.InputRpo(instr, 0).ToInt());
}

void CheckNop(TestCode& code, int pos) {
void CheckNop(TestCode& code, // NOLINT(runtime/references)
int pos) {
Instruction* instr = code.sequence_.InstructionAt(pos);
CHECK_EQ(kArchNop, instr->arch_opcode());
CHECK_EQ(0, static_cast<int>(instr->InputCount()));

@ -640,8 +639,8 @@ void CheckNop(TestCode& code, int pos) {
CHECK_EQ(0, static_cast<int>(instr->TempCount()));
}

void CheckBranch(TestCode& code, int pos, int t1, int t2) {
void CheckBranch(TestCode& code, // NOLINT(runtime/references)
int pos, int t1, int t2) {
Instruction* instr = code.sequence_.InstructionAt(pos);
CHECK_EQ(2, static_cast<int>(instr->InputCount()));
CHECK_EQ(0, static_cast<int>(instr->OutputCount()));

@ -650,15 +649,14 @@ void CheckBranch(TestCode& code, int pos, int t1, int t2) {
CHECK_EQ(t2, code.sequence_.InputRpo(instr, 1).ToInt());
}

void CheckAssemblyOrder(TestCode& code, int size, int* expected) {
void CheckAssemblyOrder(TestCode& code, // NOLINT(runtime/references)
int size, int* expected) {
int i = 0;
for (auto const block : code.sequence_.instruction_blocks()) {
CHECK_EQ(expected[i++], block->ao_number().ToInt());
}
}

TEST(Rewire1) {
TestCode code;

@ -199,7 +199,7 @@ struct While {
}

void chain(Node* control) { loop->ReplaceInput(0, control); }
void nest(While& that) {
void nest(While& that) { // NOLINT(runtime/references)
that.loop->ReplaceInput(1, exit);
this->loop->ReplaceInput(0, that.if_true);
}

@ -212,7 +212,8 @@ struct Counter {
Node* phi;
Node* add;

Counter(While& w, int32_t b, int32_t k)
Counter(While& w, // NOLINT(runtime/references)
int32_t b, int32_t k)
: base(w.t.jsgraph.Int32Constant(b)), inc(w.t.jsgraph.Int32Constant(k)) {
Build(w);
}

@ -233,7 +234,7 @@ struct StoreLoop {
Node* phi;
Node* store;

explicit StoreLoop(While& w)
explicit StoreLoop(While& w) // NOLINT(runtime/references)
: base(w.t.graph.start()), val(w.t.jsgraph.Int32Constant(13)) {
Build(w);
}

@ -43,7 +43,8 @@ CallDescriptor* CreateCallDescriptor(Zone* zone, int return_count,
return compiler::GetWasmCallDescriptor(zone, builder.Build());
}

Node* MakeConstant(RawMachineAssembler& m, MachineType type, int value) {
Node* MakeConstant(RawMachineAssembler& m, // NOLINT(runtime/references)
MachineType type, int value) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Constant(static_cast<int32_t>(value));

@ -58,7 +59,8 @@ Node* MakeConstant(RawMachineAssembler& m, MachineType type, int value) {
}
}

Node* Add(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
Node* Add(RawMachineAssembler& m, // NOLINT(runtime/references)
MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Add(a, b);

@ -73,7 +75,8 @@ Node* Add(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
}
}

Node* Sub(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
Node* Sub(RawMachineAssembler& m, // NOLINT(runtime/references)
MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Sub(a, b);

@ -88,7 +91,8 @@ Node* Sub(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
}
}

Node* Mul(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
Node* Mul(RawMachineAssembler& m, // NOLINT(runtime/references)
MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Mul(a, b);

@ -103,7 +107,8 @@ Node* Mul(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
}
}

Node* ToInt32(RawMachineAssembler& m, MachineType type, Node* a) {
Node* ToInt32(RawMachineAssembler& m, // NOLINT(runtime/references)
MachineType type, Node* a) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return a;

@ -327,28 +327,34 @@ class ArgsBuffer {
return kTypes;
}

Node* MakeConstant(RawMachineAssembler& raw, int32_t value) {
Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
int32_t value) {
return raw.Int32Constant(value);
}

Node* MakeConstant(RawMachineAssembler& raw, int64_t value) {
Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
int64_t value) {
return raw.Int64Constant(value);
}

Node* MakeConstant(RawMachineAssembler& raw, float32 value) {
Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
float32 value) {
return raw.Float32Constant(value);
}

Node* MakeConstant(RawMachineAssembler& raw, float64 value) {
Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
float64 value) {
return raw.Float64Constant(value);
}

Node* LoadInput(RawMachineAssembler& raw, Node* base, int index) {
Node* LoadInput(RawMachineAssembler& raw, // NOLINT(runtime/references)
Node* base, int index) {
Node* offset = raw.Int32Constant(index * sizeof(CType));
return raw.Load(MachineTypeForC<CType>(), base, offset);
}

Node* StoreOutput(RawMachineAssembler& raw, Node* value) {
Node* StoreOutput(RawMachineAssembler& raw, // NOLINT(runtime/references)
Node* value) {
Node* base = raw.PointerConstant(&output);
Node* offset = raw.Int32Constant(0);
return raw.Store(MachineTypeForC<CType>().representation(), base, offset,

@ -710,9 +716,9 @@ static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
79, 83, 89, 97, 101, 103, 107, 109, 113};

static void Build_Int32_WeightedSum(CallDescriptor* desc,
RawMachineAssembler& raw) {
static void Build_Int32_WeightedSum(
CallDescriptor* desc,
RawMachineAssembler& raw) { // NOLINT(runtime/references)
Node* result = raw.Int32Constant(0);
for (int i = 0; i < ParamCount(desc); i++) {
Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i]));

@ -721,7 +727,6 @@ static void Build_Int32_WeightedSum(CallDescriptor* desc,
raw.Return(result);
}

static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
uint32_t result = 0;
for (int i = 0; i < ParamCount(desc); i++) {

@ -767,13 +772,13 @@ TEST_INT32_WEIGHTEDSUM(11)
TEST_INT32_WEIGHTEDSUM(17)
TEST_INT32_WEIGHTEDSUM(19)

template <int which>
static void Build_Select(CallDescriptor* desc, RawMachineAssembler& raw) {
static void Build_Select(
CallDescriptor* desc,
RawMachineAssembler& raw) { // NOLINT(runtime/references)
raw.Return(raw.Parameter(which));
}

template <typename CType, int which>
static CType Compute_Select(CallDescriptor* desc, CType* inputs) {
return inputs[which];

@ -943,10 +948,10 @@ TEST(Float64Select_stack_params_return_reg) {
}
}

template <typename CType, int which>
static void Build_Select_With_Call(CallDescriptor* desc,
RawMachineAssembler& raw) {
static void Build_Select_With_Call(
CallDescriptor* desc,
RawMachineAssembler& raw) { // NOLINT(runtime/references)
Handle<Code> inner = Handle<Code>::null();
int num_params = ParamCount(desc);
CHECK_LE(num_params, kMaxParamCount);

@ -977,7 +982,6 @@ static void Build_Select_With_Call(CallDescriptor* desc,
}
}

TEST(Float64StackParamsToStackParams) {
int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
Allocator params(nullptr, 0, nullptr, 0);

@ -31,8 +31,9 @@ void CheckInvariantsOfAbortedPage(Page* page) {
CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}

void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
Page* page) {
void CheckAllObjectsOnPage(
std::vector<Handle<FixedArray>>& handles, // NOLINT(runtime/references)
Page* page) {
for (Handle<FixedArray> fixed_array : handles) {
CHECK(Page::FromHeapObject(*fixed_array) == page);
}

@ -43,7 +43,8 @@ v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
return isolate;
}

Page* FindLastPageInNewSpace(std::vector<Handle<FixedArray>>& handles) {
Page* FindLastPageInNewSpace(
std::vector<Handle<FixedArray>>& handles) { // NOLINT(runtime/references)
for (auto rit = handles.rbegin(); rit != handles.rend(); ++rit) {
// One deref gets the Handle, the second deref gets the FixedArray.
Page* candidate = Page::FromHeapObject(**rit);

@ -70,7 +70,8 @@ class BytecodeExpectationsPrinter final {
const BytecodeArrayIterator& bytecode_iterator,
int parameter_count) const;
void PrintSourcePosition(std::ostream& stream, // NOLINT
SourcePositionTableIterator& source_iterator,
SourcePositionTableIterator&
source_iterator, // NOLINT(runtime/references)
int bytecode_offset) const;
void PrintV8String(std::ostream& stream, // NOLINT
i::String string) const;

@ -131,21 +131,22 @@ std::string BuildActual(const BytecodeExpectationsPrinter& printer,
}

// inplace left trim
static inline void ltrim(std::string& str) {
static inline void ltrim(std::string& str) { // NOLINT(runtime/references)
str.erase(str.begin(),
std::find_if(str.begin(), str.end(),
[](unsigned char ch) { return !std::isspace(ch); }));
}

// inplace right trim
static inline void rtrim(std::string& str) {
static inline void rtrim(std::string& str) { // NOLINT(runtime/references)
str.erase(std::find_if(str.rbegin(), str.rend(),
[](unsigned char ch) { return !std::isspace(ch); })
.base(),
str.end());
}

static inline std::string trim(std::string& str) {
static inline std::string trim(
std::string& str) { // NOLINT(runtime/references)
ltrim(str);
rtrim(str);
return str;

@ -1485,19 +1485,18 @@ TEST(InterpreterCall) {
}
}

static BytecodeArrayBuilder& SetRegister(BytecodeArrayBuilder& builder,
Register reg, int value,
Register scratch) {
static BytecodeArrayBuilder& SetRegister(
BytecodeArrayBuilder& builder, // NOLINT(runtime/references)
Register reg, int value, Register scratch) {
return builder.StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
.StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(scratch);
}

static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
Register reg, int value,
Register scratch,
int slot_index) {
static BytecodeArrayBuilder& IncrementRegister(
BytecodeArrayBuilder& builder, // NOLINT(runtime/references)
Register reg, int value, Register scratch, int slot_index) {
return builder.StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
.BinaryOperation(Token::Value::ADD, reg, slot_index)

@ -242,9 +242,10 @@ TEST(TestTracingController) {
i::V8::SetPlatformForTesting(old_platform);
}

void GetJSONStrings(std::vector<std::string>& ret, std::string str,
std::string param, std::string start_delim,
std::string end_delim) {
void GetJSONStrings(
std::vector<std::string>& ret, // NOLINT(runtime/references)
std::string str, std::string param, std::string start_delim,
std::string end_delim) {
size_t pos = str.find(param);
while (pos != std::string::npos) {
size_t start_pos = str.find(start_delim, pos + param.length());

@ -3390,7 +3390,8 @@ TEST(ARMv8_vminmax_f32) {
}

template <typename T, typename Inputs, typename Results>
static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(MacroAssembler& assm) {
static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(
MacroAssembler& assm) { // NOLINT(runtime/references)
T a = T::from_code(0); // d0/s0
T b = T::from_code(1); // d1/s1
T c = T::from_code(2); // d2/s2

@ -4825,8 +4825,9 @@ TEST(r6_beqzc) {
}
}

void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
MSARegister w, Register t0, Register t1) {
void load_elements_of_vector(
MacroAssembler& assm, // NOLINT(runtime/references)
const uint64_t elements[], MSARegister w, Register t0, Register t1) {
__ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
__ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);

@ -4837,8 +4838,9 @@ void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
__ insert_w(w, 3, t1);
}

inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
Register a) {
inline void store_elements_of_vector(
MacroAssembler& assm, // NOLINT(runtime/references)
MSARegister w, Register a) {
__ st_d(w, MemOperand(a, 0));
}

@ -5430,8 +5430,9 @@ TEST(r6_beqzc) {
}
}

void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
MSARegister w, Register t0, Register t1) {
void load_elements_of_vector(
MacroAssembler& assm, // NOLINT(runtime/references)
const uint64_t elements[], MSARegister w, Register t0, Register t1) {
__ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
__ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);

@ -5442,8 +5443,9 @@ void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
__ insert_w(w, 3, t1);
}

inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
Register a) {
inline void store_elements_of_vector(
MacroAssembler& assm, // NOLINT(runtime/references)
MSARegister w, Register a) {
__ st_d(w, MemOperand(a, 0));
}
@ -1436,7 +1436,8 @@ struct CheckNormalize {
|
||||
//
|
||||
template <typename TestConfig, typename Checker>
|
||||
static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
|
||||
TestConfig& config, Checker& checker) {
|
||||
TestConfig& config, // NOLINT(runtime/references)
|
||||
Checker& checker) { // NOLINT(runtime/references)
|
||||
Isolate* isolate = CcTest::i_isolate();
|
||||
Handle<FieldType> any_type = FieldType::Any(isolate);
|
||||
|
||||
@ -1513,7 +1514,6 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
|
||||
checker.Check(isolate, map1, new_map, expectations1);
|
||||
}
|
||||
|
||||
|
||||
TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
|
||||
CcTest::InitializeVM();
|
||||
v8::HandleScope scope(CcTest::isolate());
|
||||
@ -1526,14 +1526,18 @@ TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
|
||||
js_func_ = factory->NewFunctionForTest(factory->empty_string());
|
||||
}
|
||||
|
||||
Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
|
||||
Handle<Map> map) {
|
||||
Handle<Map> AddPropertyAtBranch(
|
||||
int branch_id,
|
||||
Expectations& expectations, // NOLINT(runtime/references)
|
||||
Handle<Map> map) {
|
||||
CHECK(branch_id == 1 || branch_id == 2);
|
||||
// Add the same data constant property at both transition tree branches.
|
||||
return expectations.AddDataConstant(map, NONE, js_func_);
|
||||
}
|
||||
|
||||
void UpdateExpectations(int property_index, Expectations& expectations) {
|
||||
void UpdateExpectations(
|
||||
int property_index,
|
||||
Expectations& expectations) { // NOLINT(runtime/references)
|
||||
// Expectations stay the same.
|
||||
}
|
||||
};
|
||||
@ -1571,14 +1575,18 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
|
||||
factory->NewFunction(sloppy_map, info, isolate->native_context());
|
||||
}
|
||||
|
||||
Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
|
||||
Handle<Map> map) {
|
||||
Handle<Map> AddPropertyAtBranch(
|
||||
int branch_id,
|
||||
Expectations& expectations, // NOLINT(runtime/references)
|
||||
Handle<Map> map) {
|
||||
CHECK(branch_id == 1 || branch_id == 2);
|
||||
Handle<JSFunction> js_func = branch_id == 1 ? js_func1_ : js_func2_;
|
||||
return expectations.AddDataConstant(map, NONE, js_func);
|
||||
}
|
||||
|
||||
void UpdateExpectations(int property_index, Expectations& expectations) {
|
||||
void UpdateExpectations(
|
||||
int property_index,
|
||||
Expectations& expectations) { // NOLINT(runtime/references)
|
||||
expectations.SetDataField(property_index, PropertyConstness::kConst,
|
||||
Representation::HeapObject(), function_type_);
|
||||
}
|
||||
@ -1604,8 +1612,10 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToAccConstantAfterTargetMap) {
|
||||
pair_ = CreateAccessorPair(true, true);
|
||||
}
|
||||
|
||||
Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
|
||||
Handle<Map> map) {
|
||||
Handle<Map> AddPropertyAtBranch(
|
||||
int branch_id,
|
||||
Expectations& expectations, // NOLINT(runtime/references)
|
||||
Handle<Map> map) {
|
||||
CHECK(branch_id == 1 || branch_id == 2);
|
||||
if (branch_id == 1) {
|
||||
return expectations.AddDataConstant(map, NONE, js_func_);
|
||||
@ -1614,7 +1624,10 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToAccConstantAfterTargetMap) {
|
||||
}
|
||||
}
|
||||
|
||||
void UpdateExpectations(int property_index, Expectations& expectations) {}
|
||||
void UpdateExpectations(
|
||||
int property_index,
|
||||
Expectations& expectations // NOLINT(runtime/references)
|
||||
) {}
|
||||
};
|
||||

TestConfig config;
@ -1632,15 +1645,19 @@ TEST(ReconfigureDataFieldAttribute_SameAccessorConstantAfterTargetMap) {
Handle<AccessorPair> pair_;
TestConfig() { pair_ = CreateAccessorPair(true, true); }

Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
Handle<Map> map) {
Handle<Map> AddPropertyAtBranch(
int branch_id,
Expectations& expectations, // NOLINT(runtime/references)
Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
// Add the same accessor constant property at both transition tree
// branches.
return expectations.AddAccessorConstant(map, NONE, pair_);
}

void UpdateExpectations(int property_index, Expectations& expectations) {
void UpdateExpectations(
int property_index,
Expectations& expectations) { // NOLINT(runtime/references)
// Two branches are "compatible" so the |map1| should NOT be deprecated.
}
};
@ -1663,14 +1680,18 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToAccFieldAfterTargetMap) {
pair2_ = CreateAccessorPair(true, true);
}

Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
Handle<Map> map) {
Handle<Map> AddPropertyAtBranch(
int branch_id,
Expectations& expectations, // NOLINT(runtime/references)
Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
Handle<AccessorPair> pair = branch_id == 1 ? pair1_ : pair2_;
return expectations.AddAccessorConstant(map, NONE, pair);
}

void UpdateExpectations(int property_index, Expectations& expectations) {
void UpdateExpectations(
int property_index,
Expectations& expectations) { // NOLINT(runtime/references)
if (IS_ACCESSOR_FIELD_SUPPORTED) {
expectations.SetAccessorField(property_index);
} else {
@ -1701,8 +1722,10 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
Handle<AccessorPair> pair_;
TestConfig() { pair_ = CreateAccessorPair(true, true); }

Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
Handle<Map> map) {
Handle<Map> AddPropertyAtBranch(
int branch_id,
Expectations& expectations, // NOLINT(runtime/references)
Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
if (branch_id == 1) {
return expectations.AddAccessorConstant(map, NONE, pair_);
@ -1714,7 +1737,10 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
}
}

void UpdateExpectations(int property_index, Expectations& expectations) {}
void UpdateExpectations(
int property_index,
Expectations& expectations // NOLINT(runtime/references)
) {}
};

TestConfig config;

@ -2115,8 +2141,9 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
// fixed.
template <typename TestConfig>
static void TestGeneralizeFieldWithSpecialTransition(
TestConfig& config, const CRFTData& from, const CRFTData& to,
const CRFTData& expected, bool expected_deprecation) {
TestConfig& config, // NOLINT(runtime/references)
const CRFTData& from, const CRFTData& to, const CRFTData& expected,
bool expected_deprecation) {
Isolate* isolate = CcTest::i_isolate();

Expectations expectations(isolate);
@ -2357,7 +2384,9 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}

Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
Handle<Map> Transition(
Handle<Map> map,
Expectations& expectations) { // NOLINT(runtime/references)
return Map::TransitionToPrototype(CcTest::i_isolate(), map, prototype_);
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
@ -2397,7 +2426,9 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}

Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
Handle<Map> Transition(
Handle<Map> map,
Expectations& expectations) { // NOLINT(runtime/references)
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);

@ -2453,7 +2484,9 @@ struct TransitionToDataFieldOperator {
heap_type_(heap_type),
value_(value) {}

Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
Handle<Map> DoTransition(
Expectations& expectations, // NOLINT(runtime/references)
Handle<Map> map) {
return expectations.TransitionToDataField(
map, attributes_, constness_, representation_, heap_type_, value_);
}
@ -2503,8 +2536,10 @@ struct ReconfigureAsDataPropertyOperator {
attributes_(attributes),
heap_type_(heap_type) {}

Handle<Map> DoTransition(Isolate* isolate, Expectations& expectations,
Handle<Map> map) {
Handle<Map> DoTransition(
Isolate* isolate,
Expectations& expectations, // NOLINT(runtime/references)
Handle<Map> map) {
expectations.SetDataField(descriptor_, PropertyConstness::kMutable,
representation_, heap_type_);
return Map::ReconfigureExistingProperty(isolate, map, descriptor_, kData,
@ -2549,8 +2584,9 @@ struct FieldGeneralizationChecker {
attributes_(attributes),
heap_type_(heap_type) {}

void Check(Isolate* isolate, Expectations& expectations2, Handle<Map> map1,
Handle<Map> map2) {
void Check(Isolate* isolate,
Expectations& expectations2, // NOLINT(runtime/references)
Handle<Map> map1, Handle<Map> map2) {
CHECK(!map2->is_deprecated());

CHECK(map1->is_deprecated());
@ -2568,8 +2604,9 @@ struct FieldGeneralizationChecker {

// Checks that existing transition was taken as is.
struct SameMapChecker {
void Check(Isolate* isolate, Expectations& expectations, Handle<Map> map1,
Handle<Map> map2) {
void Check(Isolate* isolate,
Expectations& expectations, // NOLINT(runtime/references)
Handle<Map> map1, Handle<Map> map2) {
CHECK(!map2->is_deprecated());
CHECK_EQ(*map1, *map2);
CHECK(expectations.Check(*map2));
@ -2580,7 +2617,8 @@ struct SameMapChecker {
// Checks that both |map1| and |map2| stay non-deprecated; this is the
// case when the property kind changes.
struct PropertyKindReconfigurationChecker {
void Check(Expectations& expectations, Handle<Map> map1, Handle<Map> map2) {
void Check(Expectations& expectations, // NOLINT(runtime/references)
Handle<Map> map1, Handle<Map> map2) {
CHECK(!map1->is_deprecated());
CHECK(!map2->is_deprecated());
CHECK_NE(*map1, *map2);
@ -2605,8 +2643,10 @@ struct PropertyKindReconfigurationChecker {
// where "p4A" and "p4B" differ only in the attributes.
//
template <typename TransitionOp1, typename TransitionOp2, typename Checker>
static void TestTransitionTo(TransitionOp1& transition_op1,
TransitionOp2& transition_op2, Checker& checker) {
static void TestTransitionTo(
TransitionOp1& transition_op1, // NOLINT(runtime/references)
TransitionOp2& transition_op2, // NOLINT(runtime/references)
Checker& checker) { // NOLINT(runtime/references)
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);

@ -2632,7 +2672,6 @@ static void TestTransitionTo(TransitionOp1& transition_op1,
checker.Check(isolate, expectations2, map1, map2);
}

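TestTransitionTo above concentrates three suppressions in one signature, one per physical line, since each NOLINT covers only its own line. To reproduce what the re-enabled category reports, cpplint can be invoked with its filter syntax, e.g. cpplint --filter=-,+runtime/references <file> (illustrative; the project's actual lint run is driven by its presubmit tooling). A compilable sketch of the multi-suppression shape, with hypothetical operator types:

    struct OpA { void Run() {} };
    struct OpB { void Run() {} };
    struct Verify { bool Check() const { return true; } };

    // One suppression per line, because each parameter is a mutable
    // reference and each NOLINT is line-scoped:
    template <typename Op1, typename Op2, typename Checker>
    bool RunTransitionPair(Op1& op1,            // NOLINT(runtime/references)
                           Op2& op2,            // NOLINT(runtime/references)
                           Checker& checker) {  // NOLINT(runtime/references)
      op1.Run();
      op2.Run();
      return checker.Check();
    }
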
TEST(TransitionDataFieldToDataField) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());

@ -3428,7 +3428,8 @@ TEST(AddressToTraceMap) {
}

static const v8::AllocationProfile::Node* FindAllocationProfileNode(
v8::Isolate* isolate, v8::AllocationProfile& profile,
v8::Isolate* isolate,
v8::AllocationProfile& profile, // NOLINT(runtime/references)
const Vector<const char*>& names) {
v8::AllocationProfile::Node* node = profile.GetRootNode();
for (int i = 0; node != nullptr && i < names.length(); ++i) {

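Note what is not annotated in FindAllocationProfileNode: the const Vector<const char*>& names parameter needs no NOLINT, because runtime/references fires only on mutable references; const references are exactly what the style rule prescribes. A small sketch of the distinction (hypothetical types, not V8's):

    #include <string>
    #include <vector>

    struct Profile { int root = 0; };

    // Mutable reference: flagged, needs the suppression.
    int RootOf(Profile& profile) {  // NOLINT(runtime/references)
      return profile.root;
    }

    // Const reference: lint-clean as is.
    size_t CountNames(const std::vector<std::string>& names) {
      return names.size();
    }
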
@ -669,7 +669,8 @@ static const char* line_number_test_source_profile_time_functions =
"bar_at_the_second_line();\n"
"function lazy_func_at_6th_line() {}";

int GetFunctionLineNumber(CpuProfiler& profiler, LocalContext& env,
int GetFunctionLineNumber(CpuProfiler& profiler, // NOLINT(runtime/references)
LocalContext& env, // NOLINT(runtime/references)
const char* name) {
CodeMap* code_map = profiler.generator()->code_map();
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(

@ -98,7 +98,8 @@ class TestSerializer {
return v8_isolate;
}

static v8::Isolate* NewIsolateFromBlob(StartupBlobs& blobs) {
static v8::Isolate* NewIsolateFromBlob(
StartupBlobs& blobs) { // NOLINT(runtime/references)
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot);
@ -203,14 +204,13 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
source_length);
}

static v8::Isolate* Deserialize(StartupBlobs& blobs) {
static v8::Isolate* Deserialize(
StartupBlobs& blobs) { // NOLINT(runtime/references)
v8::Isolate* isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(isolate);
return isolate;
}

static void SanityCheck(v8::Isolate* v8_isolate) {
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
v8::HandleScope scope(v8_isolate);

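For read-only helpers like Deserialize and NewIsolateFromBlob, the lightest-weight follow-up fix is often just const: a const reference satisfies the check with no call-site churn. A sketch under that assumption (stand-in type; whether V8's StartupBlobs is really never mutated here is not established by this diff):

    struct Blobs {  // stand-in for a blob bundle
      int startup = 0;
      int read_only = 0;
    };

    // Flagged: mutable reference, suppressed for now.
    int StartupSize(Blobs& blobs) {  // NOLINT(runtime/references)
      return blobs.startup;
    }

    // Lint-clean without changing any call site, if no mutation is needed.
    int StartupSizeConst(const Blobs& blobs) { return blobs.startup; }
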
Some files were not shown because too many files have changed in this diff.