[cleanup] Eliminate non-const reference parameters

Bug: v8:9429

Change-Id: I13780eab38230ea62334485e10a5fa4dbb432e90
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1789395
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63622}
Author: Bill Budge, 2019-09-09 06:21:50 -07:00 (committed by Commit Bot)
Parent: f87505ca3e
Commit: af063685fe
13 changed files with 140 additions and 152 deletions
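The pattern applied throughout this commit is the style-guide rule that output parameters are passed by pointer rather than by non-const reference, so every mutation is visible as an explicit '&' or '*' at the call site. Below is a minimal standalone sketch of the before/after shape, not taken from the commit itself; the name UnpackHalves is hypothetical.

#include <cstdint>
#include <iostream>

// Before: out-parameters were non-const references.
//   void UnpackHalves(uint32_t value, uint16_t& hi, uint16_t& lo);

// After: out-parameters are pointers; callers must pass an address.
void UnpackHalves(uint32_t value, uint16_t* hi, uint16_t* lo) {
  *hi = static_cast<uint16_t>(value >> 16);     // upper 16 bits
  *lo = static_cast<uint16_t>(value & 0xFFFF);  // lower 16 bits
}

int main() {
  uint16_t hi = 0, lo = 0;
  UnpackHalves(0xDEADBEEF, &hi, &lo);  // the '&' marks the writes at the call site
  std::cout << std::hex << hi << " " << lo << "\n";  // prints: dead beef
}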


@@ -5788,14 +5788,14 @@ TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32(
}
void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number,
- TVariable<Smi>& var_result_smi,
+ TVariable<Smi>* var_result_smi,
Label* if_smi) {
TNode<Float64T> value = LoadHeapNumberValue(number);
TryFloat64ToSmi(value, var_result_smi, if_smi);
}
void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
- TVariable<Smi>& var_result_smi,
+ TVariable<Smi>* var_result_smi,
Label* if_smi) {
TNode<Int32T> value32 = RoundFloat64ToInt32(value);
TNode<Float64T> value64 = ChangeInt32ToFloat64(value32);
@@ -5812,13 +5812,13 @@ void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
BIND(&if_int32);
{
if (SmiValuesAre32Bits()) {
- var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32));
+ *var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32));
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value32, value32);
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, &if_heap_number);
- var_result_smi =
+ *var_result_smi =
BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
}
Goto(if_smi);
@@ -5831,7 +5831,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
Label if_smi(this), done(this);
TVARIABLE(Smi, var_smi_result);
TVARIABLE(Number, var_result);
- TryFloat64ToSmi(value, var_smi_result, &if_smi);
+ TryFloat64ToSmi(value, &var_smi_result, &if_smi);
var_result = AllocateHeapNumberWithValue(value);
Goto(&done);
@@ -7749,7 +7749,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
Comment("NumberToString - HeapNumber");
TNode<HeapNumber> heap_number_input = CAST(input);
// Try normalizing the HeapNumber.
- TryHeapNumberToSmi(heap_number_input, smi_input, &if_smi);
+ TryHeapNumberToSmi(heap_number_input, &smi_input, &if_smi);
// Make a hash from the two 32-bit values of the double.
TNode<Int32T> low =


@@ -2256,11 +2256,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object);
// Conversions.
- void TryHeapNumberToSmi(TNode<HeapNumber> number,
- TVariable<Smi>& output, // NOLINT(runtime/references)
+ void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>* output,
Label* if_smi);
- void TryFloat64ToSmi(TNode<Float64T> number,
- TVariable<Smi>& output, // NOLINT(runtime/references)
+ void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>* output,
Label* if_smi);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);


@@ -49,22 +49,22 @@ ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
}
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
- ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
+ ConstantPoolEntry* entry, ConstantPoolEntry::Type type) {
DCHECK(!emitted_label_.is_bound());
PerTypeEntryInfo& info = info_[type];
const int entry_size = ConstantPoolEntry::size(type);
bool merged = false;
- if (entry.sharing_ok()) {
+ if (entry->sharing_ok()) {
// Try to merge entries
std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
int end = static_cast<int>(info.shared_entries.size());
for (int i = 0; i < end; i++, it++) {
if ((entry_size == kSystemPointerSize)
- ? entry.value() == it->value()
- : entry.value64() == it->value64()) {
+ ? entry->value() == it->value()
+ : entry->value64() == it->value64()) {
// Merge with found entry.
- entry.set_merged_index(i);
+ entry->set_merged_index(i);
merged = true;
break;
}
@@ -72,16 +72,16 @@ ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
}
// By definition, merged entries have regular access.
- DCHECK(!merged || entry.merged_index() < info.regular_count);
+ DCHECK(!merged || entry->merged_index() < info.regular_count);
ConstantPoolEntry::Access access =
(merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
// Enforce an upper bound on search time by limiting the search to
// unique sharable entries which fit in the regular section.
- if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
- info.shared_entries.push_back(entry);
+ if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
+ info.shared_entries.push_back(*entry);
} else {
- info.entries.push_back(entry);
+ info.entries.push_back(*entry);
}
// We're done if we found a match or have already triggered the


@@ -138,9 +138,8 @@ class ConstantPoolBuilder {
inline Label* EmittedPosition() { return &emitted_label_; }
private:
- ConstantPoolEntry::Access AddEntry(
- ConstantPoolEntry& entry, // NOLINT(runtime/references)
- ConstantPoolEntry::Type type);
+ ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry,
+ ConstantPoolEntry::Type type);
void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type);


@@ -742,27 +742,27 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
// before that addition, difference between upper part of the target address and
// upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted
// in jic register with lui instruction.
- void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
- int16_t& jic_offset) {
- lui_offset = (address & kHiMask) >> kLuiShift;
- jic_offset = address & kLoMask;
+ void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset) {
+ *lui_offset = (address & kHiMask) >> kLuiShift;
+ *jic_offset = address & kLoMask;
- if (jic_offset < 0) {
- lui_offset -= kImm16Mask;
+ if (*jic_offset < 0) {
+ *lui_offset -= kImm16Mask;
}
}
void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
- uint32_t& lui_offset,
- uint32_t& jic_offset) {
+ uint32_t* lui_offset,
+ uint32_t* jic_offset) {
int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
int16_t jic_offset16 = address & kLoMask;
if (jic_offset16 < 0) {
lui_offset16 -= kImm16Mask;
}
- lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
- jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
+ *lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
+ *jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
}
void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
@@ -1928,7 +1928,7 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
- void Assembler::AdjustBaseAndOffset(MemOperand& src,
+ void Assembler::AdjustBaseAndOffset(MemOperand* src,
OffsetAccessType access_type,
int second_access_add_to_offset) {
// This method is used to adjust the base register and offset pair
@@ -1941,26 +1941,26 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// pointer register).
// We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
- bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src.offset()) &&
+ if (is_int16(src->offset()) &&
(!two_accesses || is_int16(static_cast<int32_t>(
- src.offset() + second_access_add_to_offset)))) {
+ src->offset() + second_access_add_to_offset)))) {
// Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
// value) fits into int16_t.
return;
}
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- DCHECK(src.rm() != scratch); // Must not overwrite the register 'base'
- // while loading 'offset'.
+ DCHECK(src->rm() != scratch); // Must not overwrite the register 'base'
+ // while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+ uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif
// Do not load the whole 32-bit 'offset' if it can be represented as
@@ -1972,13 +1972,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
0x7FF8; // Max int16_t that's a multiple of 8.
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
- addiu(at, src.rm(), kMinOffsetForSimpleAdjustment);
- src.offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(at, src.rm(), -kMinOffsetForSimpleAdjustment);
- src.offset_ += kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ addiu(at, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(at, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (IsMipsArchVariant(kMips32r6)) {
// On r6 take advantage of the aui instruction, e.g.:
// aui at, base, offset_high
@@ -1989,12 +1989,12 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// addiu at, at, 8
// lw reg_lo, (offset_low-8)(at)
// lw reg_hi, (offset_low-4)(at)
- int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
- int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
+ int16_t offset_low = static_cast<uint16_t>(src->offset());
offset_high += (offset_low < 0)
? 1
: 0; // Account for offset sign extension in load/store.
- aui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
+ aui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
if (two_accesses && !is_int16(static_cast<int32_t>(
offset_low + second_access_add_to_offset))) {
// Avoid overflow in the 16-bit offset of the load/store instruction when
@@ -2002,7 +2002,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
addiu(scratch, scratch, kDoubleSize);
offset_low -= kDoubleSize;
}
- src.offset_ = offset_low;
+ src->offset_ = offset_low;
} else {
// Do not load the whole 32-bit 'offset' if it can be represented as
// a sum of three 16-bit signed offsets. This can save an instruction.
@@ -2013,33 +2013,33 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
2 * kMinOffsetForSimpleAdjustment;
constexpr int32_t kMaxOffsetForMediumAdjustment =
3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
- addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
+ addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src.offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
- src.offset() < 0) {
- addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ src->offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src.offset_ += kMinOffsetForMediumAdjustment;
+ src->offset_ += kMinOffsetForMediumAdjustment;
} else {
// Now that all shorter options have been exhausted, load the full 32-bit
// offset.
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- addu(scratch, scratch, src.rm());
- src.offset_ -= loaded_offset;
+ addu(scratch, scratch, src->rm());
+ src->offset_ -= loaded_offset;
}
}
- src.rm_ = scratch;
+ src->rm_ = scratch;
- DCHECK(is_int16(src.offset()));
+ DCHECK(is_int16(src->offset()));
if (two_accesses) {
DCHECK(is_int16(
- static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
}
- DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
void Assembler::lb(Register rd, const MemOperand& rs) {


@@ -1478,13 +1478,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
- static void UnpackTargetAddress(
- uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references)
- int16_t& jic_offset); // NOLINT(runtime/references)
- static void UnpackTargetAddressUnsigned(
- uint32_t address,
- uint32_t& lui_offset, // NOLINT(runtime/references)
- uint32_t& jic_offset); // NOLINT(runtime/references)
+ static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
+ int16_t* jic_offset);
+ static void UnpackTargetAddressUnsigned(uint32_t address,
+ uint32_t* lui_offset,
+ uint32_t* jic_offset);
static bool IsAndImmediate(Instr instr);
static bool IsEmittedConstant(Instr instr);
@@ -1515,7 +1513,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src, // NOLINT(runtime/references)
+ MemOperand* src,
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);


@@ -2926,18 +2926,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
- bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
+ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
- offset = GetOffset(offset, L, bits);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
- bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt) {
+ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
- scratch = GetRtAsRegisterHelper(rt, scratch);
- offset = GetOffset(offset, L, bits);
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
return true;
}


@@ -849,12 +849,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits,
- Register& scratch, // NOLINT(runtime/references)
- const Operand& rt);
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);


@@ -1996,7 +1996,7 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
- void Assembler::AdjustBaseAndOffset(MemOperand& src,
+ void Assembler::AdjustBaseAndOffset(MemOperand* src,
OffsetAccessType access_type,
int second_access_add_to_offset) {
// This method is used to adjust the base register and offset pair
@@ -2009,25 +2009,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// pointer register).
// We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
- bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
// is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src.offset()) &&
+ if (is_int16(src->offset()) &&
(!two_accesses || is_int16(static_cast<int32_t>(
- src.offset() + second_access_add_to_offset)))) {
+ src->offset() + second_access_add_to_offset)))) {
// Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
// value) fits into int16_t.
return;
}
- DCHECK(src.rm() !=
+ DCHECK(src->rm() !=
at); // Must not overwrite the register 'base' while loading 'offset'.
#ifdef DEBUG
// Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+ uint32_t misalignment = src->offset() & (kDoubleSize - 1);
#endif
// Do not load the whole 32-bit 'offset' if it can be represented as
@@ -2042,13 +2042,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
- daddiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment);
- src.offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
- src.offset() < 0) {
- daddiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment);
- src.offset_ += kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
} else if (kArchVariant == kMips64r6) {
// On r6 take advantage of the daui instruction, e.g.:
// daui at, base, offset_high
@@ -2060,9 +2060,9 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
// daddiu at, at, 8
// lw reg_lo, (offset_low-8)(at)
// lw reg_hi, (offset_low-4)(at)
- int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int16_t offset_low = static_cast<uint16_t>(src->offset());
int32_t offset_low32 = offset_low;
- int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
+ int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
bool increment_hi16 = offset_low < 0;
bool overflow_hi16 = false;
@@ -2070,7 +2070,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
offset_high++;
overflow_hi16 = (offset_high == -32768);
}
- daui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
+ daui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
if (overflow_hi16) {
dahi(scratch, 1);
@@ -2084,7 +2084,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
offset_low32 -= kDoubleSize;
}
- src.offset_ = offset_low32;
+ src->offset_ = offset_low32;
} else {
// Do not load the whole 32-bit 'offset' if it can be represented as
// a sum of three 16-bit signed offsets. This can save an instruction.
@@ -2095,33 +2095,33 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src,
2 * kMinOffsetForSimpleAdjustment;
constexpr int32_t kMaxOffsetForMediumAdjustment =
3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
- daddiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
+ daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src.offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
- src.offset() < 0) {
- daddiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ src->offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src.offset_ += kMinOffsetForMediumAdjustment;
+ src->offset_ += kMinOffsetForMediumAdjustment;
} else {
// Now that all shorter options have been exhausted, load the full 32-bit
// offset.
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- daddu(scratch, scratch, src.rm());
- src.offset_ -= loaded_offset;
+ daddu(scratch, scratch, src->rm());
+ src->offset_ -= loaded_offset;
}
}
- src.rm_ = scratch;
+ src->rm_ = scratch;
- DCHECK(is_int16(src.offset()));
+ DCHECK(is_int16(src->offset()));
if (two_accesses) {
DCHECK(is_int16(
- static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
}
- DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
}
void Assembler::lb(Register rd, const MemOperand& rs) {


@@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src, // NOLINT(runtime/references)
+ MemOperand* src,
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);


@@ -3362,18 +3362,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
- bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
+ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
- offset = GetOffset(offset, L, bits);
+ *offset = GetOffset(*offset, L, bits);
return true;
}
- bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt) {
+ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
- scratch = GetRtAsRegisterHelper(rt, scratch);
- offset = GetOffset(offset, L, bits);
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
return true;
}


@@ -850,12 +850,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
- OffsetSize bits,
- Register& scratch, // NOLINT(runtime/references)
- const Operand& rt);
+ // TODO(mips) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);


@@ -31,24 +31,23 @@ using MoreBit = BitField8<bool, 7, 1>;
using ValueBits = BitField8<unsigned, 0, 7>;
// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
- void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references)
+ void AddAndSetEntry(PositionTableEntry* value,
const PositionTableEntry& other) {
- value.code_offset += other.code_offset;
- value.source_position += other.source_position;
- value.is_statement = other.is_statement;
+ value->code_offset += other.code_offset;
+ value->source_position += other.source_position;
+ value->is_statement = other.is_statement;
}
// Helper: Subtract the offsets from 'other' from 'value'.
- void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references)
+ void SubtractFromEntry(PositionTableEntry* value,
const PositionTableEntry& other) {
- value.code_offset -= other.code_offset;
- value.source_position -= other.source_position;
+ value->code_offset -= other.code_offset;
+ value->source_position -= other.source_position;
}
// Helper: Encode an integer.
template <typename T>
- void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
- T value) {
+ void EncodeInt(std::vector<byte>* bytes, T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
@@ -60,14 +59,13 @@ void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
more = encoded > ValueBits::kMax;
byte current =
MoreBit::encode(more) | ValueBits::encode(encoded & ValueBits::kMask);
- bytes.push_back(current);
+ bytes->push_back(current);
encoded >>= ValueBits::kSize;
} while (more);
}
// Encode a PositionTableEntry.
- void EncodeEntry(std::vector<byte>& bytes, // NOLINT(runtime/references)
- const PositionTableEntry& entry) {
+ void EncodeEntry(std::vector<byte>* bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
@@ -115,17 +113,16 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
- void CheckTableEquals(
- std::vector<PositionTableEntry>& raw_entries, // NOLINT(runtime/references)
- SourcePositionTableIterator& encoded) { // NOLINT(runtime/references)
+ void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
+ SourcePositionTableIterator* encoded) {
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
auto raw = raw_entries.begin();
- for (; !encoded.done(); encoded.Advance(), raw++) {
+ for (; !encoded->done(); encoded->Advance(), raw++) {
DCHECK(raw != raw_entries.end());
- DCHECK_EQ(encoded.code_offset(), raw->code_offset);
- DCHECK_EQ(encoded.source_position().raw(), raw->source_position);
- DCHECK_EQ(encoded.is_statement(), raw->is_statement);
+ DCHECK_EQ(encoded->code_offset(), raw->code_offset);
+ DCHECK_EQ(encoded->source_position().raw(), raw->source_position);
+ DCHECK_EQ(encoded->is_statement(), raw->is_statement);
}
DCHECK(raw == raw_entries.end());
}
@@ -148,8 +145,8 @@ void SourcePositionTableBuilder::AddPosition(size_t code_offset,
void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
PositionTableEntry tmp(entry);
- SubtractFromEntry(tmp, previous_);
- EncodeEntry(bytes_, tmp);
+ SubtractFromEntry(&tmp, previous_);
+ EncodeEntry(&bytes_, tmp);
previous_ = entry;
#ifdef ENABLE_SLOW_DCHECKS
raw_entries_.push_back(entry);
@@ -169,7 +166,7 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
SourcePositionTableIterator it(*table, SourcePositionTableIterator::kAll);
- CheckTableEquals(raw_entries_, it);
+ CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
#endif
@@ -187,7 +184,7 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
// the entire table to verify they are identical.
SourcePositionTableIterator it(table.as_vector(),
SourcePositionTableIterator::kAll);
- CheckTableEquals(raw_entries_, it);
+ CheckTableEquals(raw_entries_, &it);
// No additional source positions after creating the table.
mode_ = OMIT_SOURCE_POSITIONS;
#endif
@@ -232,7 +229,7 @@ void SourcePositionTableIterator::Advance() {
} else {
PositionTableEntry tmp;
DecodeEntry(bytes, &index_, &tmp);
- AddAndSetEntry(current_, tmp);
+ AddAndSetEntry(&current_, tmp);
SourcePosition p = source_position();
filter_satisfied = (filter_ == kAll) ||
(filter_ == kJavaScriptOnly && p.IsJavaScript()) ||