[cleanup] Unify naming of instruction size constant

The instruction size in bytes is now kInstrSize on all platforms.

Bug: v8:6666
Change-Id: I2f9880a6a74199a439c8327a4117efb74240aa22
Reviewed-on: https://chromium-review.googlesource.com/1164955
Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54944}
This commit is contained in:
Sigurd Schneider 2018-08-07 13:31:32 +02:00 committed by Commit Bot
parent ff7915421d
commit 7f99d292e2
60 changed files with 371 additions and 422 deletions

View File

@ -181,7 +181,7 @@ void RelocInfo::WipeOut() {
Handle<Code> Assembler::relative_code_target_object_handle_at(
Address pc) const {
Instruction* branch = Instruction::At(pc);
int code_target_index = branch->GetBranchOffset() / Instruction::kInstrSize;
int code_target_index = branch->GetBranchOffset() / kInstrSize;
return GetCodeTarget(code_target_index);
}
@ -255,22 +255,21 @@ Address Assembler::target_address_from_return_address(Address pc) {
// ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Address candidate = pc - 2 * kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
if (IsLdrPcImmediateOffset(candidate_instr)) {
return candidate;
} else {
if (CpuFeatures::IsSupported(ARMv7)) {
candidate -= 1 * Assembler::kInstrSize;
candidate -= 1 * kInstrSize;
DCHECK(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
IsMovT(Memory::int32_at(candidate + kInstrSize)));
} else {
candidate -= 3 * Assembler::kInstrSize;
DCHECK(
IsMovImmed(Memory::int32_at(candidate)) &&
IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
candidate -= 3 * kInstrSize;
DCHECK(IsMovImmed(Memory::int32_at(candidate)) &&
IsOrrImmed(Memory::int32_at(candidate + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 3 * kInstrSize)));
}
return candidate;
}

View File

@ -694,9 +694,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// pointer.
static constexpr int kSpecialTargetSize = kPointerSize;
// Size of an instruction.
static constexpr int kInstrSize = sizeof(Instr);
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
VfpRegList* GetScratchVfpRegisterList() {
return &scratch_vfp_register_list_;

View File

@ -227,7 +227,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(
tasm, TurboAssembler::kCallStubSize + 2 * Assembler::kInstrSize);
tasm, TurboAssembler::kCallStubSize + 2 * kInstrSize);
tasm->push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(lr);
@ -239,7 +239,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(
masm, TurboAssembler::kCallStubSize + 2 * Assembler::kInstrSize);
masm, TurboAssembler::kCallStubSize + 2 * kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@ -249,8 +249,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction, followed by a call.
const int32_t kReturnAddressDistanceFromFunctionStart =
3 * Assembler::kInstrSize;
const int32_t kReturnAddressDistanceFromFunctionStart = 3 * kInstrSize;
// This should contain all kCallerSaved registers.
const RegList kSavedRegs =

View File

@ -461,10 +461,12 @@ inline Hint NegateHint(Hint ignored) { return no_hint; }
// return ((type == 0) || (type == 1)) && instr->HasS();
// }
//
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
class Instruction {
public:
static constexpr int kInstrSize = 4;
static constexpr int kInstrSizeLog2 = 2;
// Difference between address of current opcode and value read from pc
// register.
static constexpr int kPcLoadDelta = 8;

View File

@ -1416,7 +1416,7 @@ int Decoder::DecodeType7(Instruction* instr) {
break;
}
}
return Instruction::kInstrSize;
return kInstrSize;
}
@ -2599,14 +2599,14 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);
return Instruction::kInstrSize;
return kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
return kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
@ -2643,7 +2643,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
break;
}
}
return Instruction::kInstrSize;
return kInstrSize;
}

View File

@ -194,8 +194,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
b(code_target_index * Instruction::kInstrSize, cond,
RelocInfo::RELATIVE_CODE_TARGET);
b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
UseScratchRegisterScope temps(this);
@ -274,8 +273,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
bl(code_target_index * Instruction::kInstrSize, cond,
RelocInfo::RELATIVE_CODE_TARGET);
bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not

View File

@ -359,7 +359,7 @@ void ArmDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
@ -368,7 +368,7 @@ void ArmDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@ -376,7 +376,7 @@ void ArmDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
end = cur + (value * Instruction::kInstrSize);
end = cur + (value * kInstrSize);
}
}
} else {
@ -384,7 +384,7 @@ void ArmDebugger::Debug() {
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
end = cur + (value2 * kInstrSize);
}
}
@ -427,7 +427,7 @@ void ArmDebugger::Debug() {
PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "stop") == 0) {
int32_t value;
intptr_t stop_pc = sim_->get_pc() - Instruction::kInstrSize;
intptr_t stop_pc = sim_->get_pc() - kInstrSize;
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
@ -632,9 +632,8 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK_EQ(0,
memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset), Instruction::kInstrSize));
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@ -2272,7 +2271,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case BLX: {
uint32_t old_pc = get_pc();
set_pc(get_register(rm));
set_register(lr, old_pc + Instruction::kInstrSize);
set_register(lr, old_pc + kInstrSize);
break;
}
case BKPT: {
@ -3046,7 +3045,7 @@ void Simulator::DecodeType5(Instruction* instr) {
int off = (instr->SImmed24Value() << 2);
intptr_t pc_address = get_pc();
if (instr->HasLink()) {
set_register(lr, pc_address + Instruction::kInstrSize);
set_register(lr, pc_address + kInstrSize);
}
int pc_reg = get_register(pc);
set_pc(pc_reg + off);
@ -5697,12 +5696,10 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr)
+ Instruction::kInstrSize);
set_register(pc, reinterpret_cast<int32_t>(instr) + kInstrSize);
}
}
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.

View File

@ -549,8 +549,8 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
return GetCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2);
DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
return GetCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2);
}
}
@ -570,7 +570,7 @@ Address Assembler::target_address_from_return_address(Address pc) {
// Call sequence on ARM64 is:
// ldr ip0, #... @ load from literal pool
// blr ip0
Address candidate = pc - 2 * kInstructionSize;
Address candidate = pc - 2 * kInstrSize;
Instruction* instr = reinterpret_cast<Instruction*>(candidate);
USE(instr);
DCHECK(instr->IsLdrLiteralX());
@ -598,7 +598,7 @@ void Assembler::deserialization_set_special_target_at(Address location,
target = location;
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
Assembler::FlushICache(location, kInstructionSize);
Assembler::FlushICache(location, kInstrSize);
} else {
DCHECK_EQ(instr->InstructionBits(), 0);
Memory::Address_at(location) = target;
@ -635,7 +635,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, kInstructionSize);
Assembler::FlushICache(pc, kInstrSize);
}
}
}
@ -874,8 +874,8 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
DCHECK_EQ(kStartOfLabelLinkChain, 0);
int offset = LinkAndGetByteOffsetTo(label);
DCHECK(IsAligned(offset, kInstructionSize));
return offset >> kInstructionSizeLog2;
DCHECK(IsAligned(offset, kInstrSize));
return offset >> kInstrSizeLog2;
}
@ -1092,7 +1092,7 @@ Instr Assembler::ImmBarrierType(int imm2) {
}
unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
DCHECK((LSSize_offset + LSSize_width) == (kInstrSize * 8));
unsigned size = static_cast<Instr>(op >> LSSize_offset);
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"

View File

@ -211,7 +211,7 @@ uint32_t RelocInfo::wasm_call_tag() const {
Memory::Address_at(Assembler::target_pointer_address_at(pc_)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
}
}
@ -391,7 +391,7 @@ int ConstPool::WorstCaseSize() {
// blr xzr
// nop
// All entries are 64-bit for now.
return 4 * kInstructionSize + EntryCount() * kPointerSize;
return 4 * kInstrSize + EntryCount() * kPointerSize;
}
@ -403,10 +403,10 @@ int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
// ldr xzr, #pool_size
// blr xzr
// nop ;; if not 64-bit aligned
int prologue_size = require_jump ? kInstructionSize : 0;
prologue_size += 2 * kInstructionSize;
prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
0 : kInstructionSize;
int prologue_size = require_jump ? kInstrSize : 0;
prologue_size += 2 * kInstrSize;
prologue_size +=
IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
// All entries are 64-bit for now.
return prologue_size + EntryCount() * kPointerSize;
@ -596,8 +596,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
Instruction* instr = reinterpret_cast<Instruction*>(pc);
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
UpdateCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2,
DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
UpdateCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2,
request.code_stub()->GetCode());
break;
}
@ -954,12 +954,12 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstructionSize) / kInstructionSize;
return RoundUp(size, kInstrSize) / kInstrSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
if ((instr->Mask(ExceptionMask) == HLT) &&
(instr->ImmException() == kImmExceptionIsPrintf)) {
return kPrintfLength / kInstructionSize;
return kPrintfLength / kInstrSize;
}
#endif
if (IsConstantPoolAt(instr)) {
@ -3933,7 +3933,7 @@ void Assembler::dcptr(Label* label) {
// references are not instructions so while unbound they are encoded as
// two consecutive brk instructions. The two 16-bit immediates are used
// to encode the offset.
offset >>= kInstructionSizeLog2;
offset >>= kInstrSizeLog2;
DCHECK(is_int32(offset));
uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
@ -4064,13 +4064,13 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK_LE(RoundUp(len, kInstructionSize), static_cast<size_t>(kGap));
DCHECK_LE(RoundUp(len, kInstrSize), static_cast<size_t>(kGap));
EmitData(string, static_cast<int>(len));
// Pad with nullptr characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
static_assert(sizeof(pad) == kInstructionSize,
static_assert(sizeof(pad) == kInstrSize,
"Size of padding must match instruction size.");
EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
EmitData(pad, RoundUp(pc_offset(), kInstrSize) - pc_offset());
}
@ -4417,7 +4417,7 @@ bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstructionSizeLog2);
int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
DCHECK_GT(offset, 0);
@ -4804,7 +4804,7 @@ void Assembler::near_call(HeapObjectRequest request) {
}
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstructionSize;
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
no_const_pool_before_ = pc_limit;
// Make sure the pool won't be blocked for too long.
@ -4856,7 +4856,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Check that the code buffer is large enough before emitting the constant
// pool (this includes the gap to the relocation information).
int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
int needed_space = worst_case_size + kGap + 1 * kInstrSize;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
@ -4875,7 +4875,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstructionSize;
int protection_offset = 2 * kInstrSize;
return pc_offset() > max_reachable_pc - margin - protection_offset -
static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
@ -5012,10 +5012,10 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
int rd_code = expected_adr->Rd();
for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
CHECK(InstructionAt((i + 1) * kInstrSize)->IsNop(ADR_FAR_NOP));
}
Instruction* expected_movz =
InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstrSize);
CHECK(expected_movz->IsMovz() &&
(expected_movz->ImmMoveWide() == 0) &&
(expected_movz->ShiftMoveWide() == 0));

View File

@ -1014,8 +1014,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// whereas a "far" call will be encoded like this:
// ldr temp, =target
// blr temp
static constexpr int kNearCallSize = 1 * kInstructionSize;
static constexpr int kFarCallSize = 2 * kInstructionSize;
static constexpr int kNearCallSize = 1 * kInstrSize;
static constexpr int kFarCallSize = 2 * kInstrSize;
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
@ -1034,7 +1034,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Return the number of instructions generated from label to the
// current position.
uint64_t InstructionsGeneratedSince(const Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Prevent constant pool emission until EndBlockConstPool is called.
@ -3185,7 +3185,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
static constexpr int kMaxVeneerCodeSize = 1 * kInstructionSize;
static constexpr int kMaxVeneerCodeSize = 1 * kInstrSize;
void RecordVeneerPool(int location_offset, int size);
// Emits veneers for branches that are approaching their maximum range.
@ -3410,13 +3410,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Set how far from current pc the next constant pool check will be.
void SetNextConstPoolCheckIn(int instructions) {
next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
next_constant_pool_check_ = pc_offset() + instructions * kInstrSize;
}
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
STATIC_ASSERT(sizeof(instruction) == kInstrSize);
DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
memcpy(pc_, &instruction, sizeof(instruction));
@ -3601,7 +3601,7 @@ class PatchingAssembler : public Assembler {
// Note that the instruction cache will not be flushed.
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
: Assembler(options, start, count * kInstructionSize + kGap) {
: Assembler(options, start, count * kInstrSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}

View File

@ -215,7 +215,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// The entry hook is a Push (stp) instruction, followed by a near call.
static const unsigned int kProfileEntryHookCallSize =
(1 * kInstructionSize) + Assembler::kNearCallSize;
(1 * kInstrSize) + Assembler::kNearCallSize;
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {

View File

@ -28,10 +28,10 @@ namespace internal {
constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr size_t kLoadLiteralScaleLog2 = 2;
constexpr size_t kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;
@ -42,7 +42,7 @@ const int kFirstCalleeSavedRegisterIndex = 19;
const int kNumberOfCalleeSavedVRegisters = 8;
const int kFirstCalleeSavedVRegisterIndex = 8;
// Callee saved registers with no specific purpose in JS are x19-x25.
const unsigned kJSCalleeSavedRegList = 0x03f80000;
const size_t kJSCalleeSavedRegList = 0x03f80000;
const int kWRegSizeInBits = 32;
const int kWRegSizeInBitsLog2 = 5;
const int kWRegSize = kWRegSizeInBits >> 3;

View File

@ -277,7 +277,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Size of an entry of the second level deopt table. Since we do not generate
// a table for ARM64, the size is zero.
const int Deoptimizer::table_entry_size_ = 0 * kInstructionSize;
const int Deoptimizer::table_entry_size_ = 0 * kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());

View File

@ -3917,7 +3917,7 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
case 'e': offset = instr->ImmTestBranch(); break;
default: UNREACHABLE();
}
offset <<= kInstructionSizeLog2;
offset <<= kInstrSizeLog2;
char sign = '+';
if (offset < 0) {
sign = '-';
@ -4114,7 +4114,7 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
decoder.AppendVisitor(&disasm);
decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
return v8::internal::kInstructionSize;
return v8::internal::kInstrSize;
}
@ -4129,7 +4129,7 @@ void Disassembler::Disassemble(FILE* file, byte* start, byte* end,
v8::internal::PrintDisassembler disasm(file);
decoder.AppendVisitor(&disasm);
for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
for (byte* pc = start; pc < end; pc += v8::internal::kInstrSize) {
decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
}
}

View File

@ -159,7 +159,7 @@ double Instruction::ImmNEONFP64() const {
unsigned CalcLSDataSize(LoadStoreOp op) {
DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
kInstructionSize * 8);
kInstrSize * 8);
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
@ -197,16 +197,16 @@ int64_t Instruction::ImmPCOffset() {
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
offset = ImmBranch() << kInstrSizeLog2;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
offset = ImmUnresolvedInternalReference() << kInstructionSizeLog2;
offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
offset = ImmLLiteral() << kInstructionSizeLog2;
offset = ImmLLiteral() << kInstrSizeLog2;
}
return offset;
}
@ -260,10 +260,10 @@ void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
void Instruction::SetBranchImmTarget(Instruction* target) {
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(IsValidImmPCOffset(BranchType(),
DistanceTo(target) >> kInstructionSizeLog2));
int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
DCHECK(IsAligned(DistanceTo(target), kInstrSize));
DCHECK(
IsValidImmPCOffset(BranchType(), DistanceTo(target) >> kInstrSizeLog2));
int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
switch (BranchType()) {
@ -295,10 +295,10 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(
const AssemblerOptions& options, Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
DCHECK(IsAligned(DistanceTo(target), kInstrSize));
DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
int32_t target_offset =
static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@ -310,7 +310,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
DCHECK(IsAligned(DistanceTo(source), kInstrSize));
DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
Instr imm = Assembler::ImmLLiteral(
static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));

View File

@ -104,11 +104,11 @@ class Instruction {
}
V8_INLINE const Instruction* following(int count = 1) const {
return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
return InstructionAtOffset(count * static_cast<int>(kInstrSize));
}
V8_INLINE Instruction* following(int count = 1) {
return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
return InstructionAtOffset(count * static_cast<int>(kInstrSize));
}
V8_INLINE const Instruction* preceding(int count = 1) const {
@ -329,9 +329,8 @@ class Instruction {
// The range of the branch instruction, expressed as 'instr +- range'.
static int32_t ImmBranchRange(ImmBranchType branch_type) {
return
(1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
kInstructionSize;
return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 -
kInstrSize;
}
int ImmBranch() const {
@ -419,14 +418,14 @@ class Instruction {
V8_INLINE const Instruction* InstructionAtOffset(
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
// The FUZZ_disasm test relies on no check being done.
DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
return this + offset;
}
V8_INLINE Instruction* InstructionAtOffset(
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
// The FUZZ_disasm test relies on no check being done.
DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
return this + offset;
}
@ -534,9 +533,9 @@ const Instr kImmExceptionIsPrintf = 0xdeb1;
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
const unsigned kPrintfLength = 3 * kInstructionSize;
const unsigned kPrintfArgCountOffset = 1 * kInstrSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstrSize;
const unsigned kPrintfLength = 3 * kInstrSize;
const unsigned kPrintfMaxArgCount = 4;
@ -557,12 +556,12 @@ const Instr kImmExceptionIsDebug = 0xdeb0;
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a nullptr-terminated ASCII string, padded to
// kInstructionSize so that subsequent instructions are correctly aligned.
// kInstrSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
const unsigned kDebugCodeOffset = 1 * kInstructionSize;
const unsigned kDebugParamsOffset = 2 * kInstructionSize;
const unsigned kDebugMessageOffset = 3 * kInstructionSize;
const unsigned kDebugCodeOffset = 1 * kInstrSize;
const unsigned kDebugParamsOffset = 2 * kInstrSize;
const unsigned kDebugMessageOffset = 3 * kInstrSize;
// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only

View File

@ -1896,7 +1896,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstructionSize;
uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstrSize;
Mov(temp, Immediate(imm, rmode));
Br(temp);
}
@ -1916,8 +1916,8 @@ static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
// address at this point, and needs to be encoded as-is.
if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
offset -= reinterpret_cast<int64_t>(pc);
DCHECK_EQ(offset % kInstructionSize, 0);
offset = offset / static_cast<int>(kInstructionSize);
DCHECK_EQ(offset % kInstrSize, 0);
offset = offset / static_cast<int>(kInstrSize);
}
return offset;
}
@ -2064,12 +2064,12 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
movz(temp, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstructionSize, 0);
offset = offset / static_cast<int>(kInstructionSize);
DCHECK_EQ(offset % kInstrSize, 0);
offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
DCHECK_EQ(kNearCallSize + kInstructionSize, SizeOfCodeGeneratedSince(&start));
DCHECK_EQ(kNearCallSize + kInstrSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@ -3160,7 +3160,8 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
#ifdef USE_SIMULATOR
{ InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
{
InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
hlt(kImmExceptionIsPrintf);
dc32(arg_count); // kPrintfArgCountOffset

View File

@ -2046,7 +2046,7 @@ class InstructionAccurateScope BASE_EMBEDDED {
: tasm_(tasm)
#ifdef DEBUG
,
size_(count * kInstructionSize)
size_(count * kInstrSize)
#endif
{
// Before blocking the const pool, see if it needs to be emitted.

View File

@ -1081,7 +1081,7 @@ void Simulator::CheckBreakNext() {
void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
Instruction* end = start->InstructionAtOffset(count * kInstrSize);
for (Instruction* pc = start; pc < end; pc = pc->following()) {
disassembler_decoder_->Decode(pc);
}
@ -3415,7 +3415,7 @@ void Simulator::VisitException(Instruction* instr) {
// The stop parameters are inlined in the code. Skip them:
// - Skip to the end of the message string.
size_t size = kDebugMessageOffset + strlen(message) + 1;
pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstrSize));
// - Verify that the unreachable marker is present.
DCHECK(pc_->Mask(ExceptionMask) == HLT);
DCHECK_EQ(pc_->ImmException(), kImmExceptionIsUnreachable);

View File

@ -774,7 +774,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
void ExecuteInstruction() {
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstrSize));
CheckBreakNext();
Decode(pc_);
increment_pc();

View File

@ -2369,7 +2369,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// This branch-and-link sequence is needed to find the current PC on mips
// before r6, saved to the ra register.
__ bal(&find_ra); // bal exposes branch delay slot.
__ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
__ Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
}
__ bind(&find_ra);

View File

@ -2387,7 +2387,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// This branch-and-link sequence is needed to find the current PC on mips
// before r6, saved to the ra register.
__ bal(&find_ra); // bal exposes branch delay slot.
__ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
__ Daddu(ra, ra, kNumInstructionsToJump * kInstrSize);
}
__ bind(&find_ra);

View File

@ -2417,14 +2417,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
Label start_call;
constexpr int after_call_offset = 5 * Assembler::kInstrSize;
constexpr int after_call_offset = 5 * kInstrSize;
DCHECK_NE(r7, target);
__ LoadPC(r7);
__ bind(&start_call);
__ addi(r7, r7, Operand(after_call_offset));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ Call(target);
DCHECK_EQ(after_call_offset - Assembler::kInstrSize,
DCHECK_EQ(after_call_offset - kInstrSize,
__ SizeOfCodeGeneratedSince(&start_call));
// If return value is on the stack, pop it to registers.

View File

@ -1835,7 +1835,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
} else {
__ li(i.OutputRegister(1), Operand::Zero());
__ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
__ bc(v8::internal::kInstrSize * 2, BT, crbit);
__ li(i.OutputRegister(1), Operand(1));
}
}
@ -1862,7 +1862,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
} else {
__ li(i.OutputRegister(1), Operand::Zero());
__ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
__ bc(v8::internal::kInstrSize * 2, BT, crbit);
__ li(i.OutputRegister(1), Operand(1));
}
}

View File

@ -101,15 +101,12 @@ Address RelocInfo::target_address_address() {
// On R6 we don't move to the end of the instructions to be patched, but one
// instruction before, because if these instructions are at the end of the
// code object it can cause errors in the deserializer.
return pc_ + (Assembler::kInstructionsFor32BitConstant - 1) *
Assembler::kInstrSize;
return pc_ + (Assembler::kInstructionsFor32BitConstant - 1) * kInstrSize;
} else {
return pc_ +
Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize;
return pc_ + Assembler::kInstructionsFor32BitConstant * kInstrSize;
}
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
@ -144,8 +141,8 @@ int Assembler::deserialization_special_target_size(
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
Instr instr1 = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
Instr instr2 = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
Instr instr1 = Assembler::instr_at(pc + 0 * kInstrSize);
Instr instr2 = Assembler::instr_at(pc + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
instr1 &= ~kImm16Mask;
@ -157,16 +154,13 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
uint32_t lui_offset_u, jic_offset_u;
Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
instr1 | lui_offset_u);
Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
instr2 | jic_offset_u);
Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
// Encoded internal references are lui/ori load of 32-bit absolute address.
Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
Assembler::instr_at_put(pc + 0 * kInstrSize,
instr1 | ((imm >> kLuiShift) & kImm16Mask));
Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
instr2 | (imm & kImm16Mask));
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
}
// Currently used only by deserializer, and all code will be flushed
@ -230,8 +224,8 @@ Address RelocInfo::target_internal_reference() {
// Encoded internal references are lui/ori or lui/jic load of 32-bit
// absolute address.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
Instr instr1 = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
Instr instr2 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
Instr instr1 = Assembler::instr_at(pc_ + 0 * kInstrSize);
Instr instr2 = Assembler::instr_at(pc_ + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
if (Assembler::IsJicOrJialc(instr2)) {

View File

@ -861,8 +861,8 @@ int Assembler::target_at(int pos, bool is_internal) {
return AddBranchOffset(pos, instr);
} else if (IsMov(instr, t8, ra)) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 2 * kInstrSize);
Instr instr_ori = instr_at(pos + 3 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
@ -874,10 +874,10 @@ int Assembler::target_at(int pos, bool is_internal) {
return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
DCHECK(IsLui(instr));
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
if (IsBal(instr_at(pos + kInstrSize))) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 0 * kInstrSize);
Instr instr_ori = instr_at(pos + 2 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
@ -888,8 +888,8 @@ int Assembler::target_at(int pos, bool is_internal) {
}
return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr1 = instr_at(pos + 0 * kInstrSize);
Instr instr2 = instr_at(pos + 1 * kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
int32_t imm;
if (IsJicOrJialc(instr2)) {
@ -951,8 +951,8 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} else if (IsMov(instr, t8, ra)) {
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 2 * kInstrSize);
Instr instr_ori = instr_at(pos + 3 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
@ -965,7 +965,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_b = SetBranchOffset(pos, target_pos, instr_b);
instr_at_put(pos, instr_b);
instr_at_put(pos + 1 * Assembler::kInstrSize, 0);
instr_at_put(pos + 1 * kInstrSize, 0);
} else {
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
DCHECK_EQ(imm & 3, 0);
@ -973,16 +973,15 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_at_put(pos + 2 * kInstrSize,
instr_lui | ((imm >> kLuiShift) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
}
} else {
DCHECK(IsLui(instr));
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
if (IsBal(instr_at(pos + kInstrSize))) {
Instr instr_lui = instr_at(pos + 0 * kInstrSize);
Instr instr_ori = instr_at(pos + 2 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
@ -991,13 +990,12 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr_at_put(pos + 0 * kInstrSize,
instr_lui | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
} else {
Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr1 = instr_at(pos + 0 * kInstrSize);
Instr instr2 = instr_at(pos + 1 * kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
DCHECK_EQ(imm & 3, 0);
@ -1008,13 +1006,12 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr_at_put(pos + 0 * kInstrSize,
instr1 | ((imm & kHiMask) >> kLuiShift));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr2 | (imm & kImm16Mask));
instr_at_put(pos + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
}
}
}
@ -3732,8 +3729,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
} else {
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
if (IsLui(instr)) {
Instr instr1 = instr_at(pc + 0 * Assembler::kInstrSize);
Instr instr2 = instr_at(pc + 1 * Assembler::kInstrSize);
Instr instr1 = instr_at(pc + 0 * kInstrSize);
Instr instr2 = instr_at(pc + 1 * kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
int32_t imm;
if (IsJicOrJialc(instr2)) {
@ -3754,13 +3751,12 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
instr_at_put(pc + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
instr_at_put(pc + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
instr_at_put(pc + 0 * Assembler::kInstrSize,
instr_at_put(pc + 0 * kInstrSize,
instr1 | ((imm >> kLuiShift) & kImm16Mask));
instr_at_put(pc + 1 * Assembler::kInstrSize,
instr2 | (imm & kImm16Mask));
instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
}
return 2; // Number of instructions patched.
} else {

View File

@ -606,9 +606,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
static constexpr int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static constexpr int kBranchPCOffset = kInstrSize;

View File

@ -250,7 +250,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push ra" instruction, followed by a call.
// Note: on MIPS "push" is 2 instruction
const int32_t kReturnAddressDistanceFromFunctionStart =
Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
Assembler::kCallTargetAddressOffset + (2 * kInstrSize);
// This should contain all kJSCallerSaved registers.
const RegList kSavedRegs =

View File

@ -1244,11 +1244,12 @@ static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
}
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
class InstructionBase {
public:
enum {
kInstrSize = 4,
kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
// always the value of the current instruction being executed.
kPCReadOffset = 0
@ -1707,14 +1708,14 @@ class Instruction : public InstructionGetters<InstructionBase> {
// C/C++ argument slots size.
const int kCArgSlotCount = 4;
const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize;
const int kInvalidStackOffset = -1;
// JS argument slots size.
const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
const int kJSArgsSlotsSize = 0 * kInstrSize;
// Assembly builtins argument slots size.
const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
const int kBArgsSlotsSize = 0 * kInstrSize;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
const int kBranchReturnOffset = 2 * kInstrSize;
InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {

View File

@ -237,9 +237,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS32R6
const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
#else
const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
@ -252,10 +252,10 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
#ifdef _MIPS_ARCH_MIPS32R6
int kMaxEntriesBranchReach =
(1 << (kImm26Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
(1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
#else
int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2))/
(table_entry_size_ / Assembler::kInstrSize);
int kMaxEntriesBranchReach =
(1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
#endif
if (count() <= kMaxEntriesBranchReach) {

View File

@ -2704,7 +2704,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
UNSUPPORTED_MIPS();
}
}
return Instruction::kInstrSize;
return kInstrSize;
}

View File

@ -1204,7 +1204,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
bind(&here);
addu(scratch, scratch, ra);
pop(ra);
lw(scratch, MemOperand(scratch, 6 * v8::internal::Assembler::kInstrSize));
lw(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
}
jr(scratch);
nop(); // Branch delay slot nop.

View File

@ -395,7 +395,7 @@ void MipsDebugger::Debug() {
} else {
// Allow si to jump over generated breakpoints.
PrintF("/!\\ Jumping over generated breakpoint.\n");
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
sim_->set_pc(sim_->get_pc() + kInstrSize);
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
@ -562,7 +562,7 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
@ -571,7 +571,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@ -579,7 +579,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
end = cur + (value * Instruction::kInstrSize);
end = cur + (value * kInstrSize);
}
}
} else {
@ -587,7 +587,7 @@ void MipsDebugger::Debug() {
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
end = cur + (value2 * kInstrSize);
}
}
@ -595,7 +595,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
cur += Instruction::kInstrSize;
cur += kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@ -622,12 +622,10 @@ void MipsDebugger::Debug() {
PrintF("No flags on MIPS !\n");
} else if (strcmp(cmd, "stop") == 0) {
int32_t value;
intptr_t stop_pc = sim_->get_pc() -
2 * Instruction::kInstrSize;
intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
reinterpret_cast<Instruction*>(stop_pc +
Instruction::kInstrSize);
reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->IsStopInstruction(stop_instr)) {
@ -696,20 +694,20 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// no length parameter passed, assume 10 instructions
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
end = cur + (value2 * kInstrSize);
}
}
@ -717,7 +715,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
cur += Instruction::kInstrSize;
cur += kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
@ -871,8 +869,7 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset),
Instruction::kInstrSize));
cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@ -3797,7 +3794,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int32_t next_pc = rs();
int32_t current_pc = get_pc();
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_pc(next_pc);
pc_modified_ = true;
@ -3808,9 +3805,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int32_t return_addr_reg = rd_reg();
int32_t current_pc = get_pc();
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_register(return_addr_reg, current_pc + 2 * Instruction::kInstrSize);
set_register(return_addr_reg, current_pc + 2 * kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
@ -6306,10 +6303,10 @@ void Simulator::DecodeTypeImmediate() {
int32_t current_pc = get_pc();
if (do_branch) {
int16_t imm16 = this->instr_.Imm16Value();
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
set_register(31, current_pc + 2 * Instruction::kInstrSize);
next_pc = current_pc + (imm16 << 2) + kInstrSize;
set_register(31, current_pc + 2 * kInstrSize);
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
next_pc = current_pc + 2 * kInstrSize;
}
};
@ -6319,9 +6316,9 @@ void Simulator::DecodeTypeImmediate() {
int32_t current_pc = get_pc();
if (do_branch) {
int16_t imm16 = this->instr_.Imm16Value();
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
next_pc = current_pc + (imm16 << 2) + kInstrSize;
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
next_pc = current_pc + 2 * kInstrSize;
}
};
@ -6343,9 +6340,9 @@ void Simulator::DecodeTypeImmediate() {
// pc + kInstrSize + 511 * kInstrSize]
int16_t offset = static_cast<int16_t>(imm16 << (bitsIn16Int - 10)) >>
(bitsIn16Int - 12);
next_pc = current_pc + offset + Instruction::kInstrSize;
next_pc = current_pc + offset + kInstrSize;
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
next_pc = current_pc + 2 * kInstrSize;
}
};
@ -6356,8 +6353,8 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = this->instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
set_register(31, current_pc + Instruction::kInstrSize);
next_pc = current_pc + (imm << 2) + kInstrSize;
set_register(31, current_pc + kInstrSize);
}
};
@ -6368,7 +6365,7 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = this->instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
next_pc = get_pc() + (imm << 2) + kInstrSize;
}
};
@ -6568,7 +6565,7 @@ void Simulator::DecodeTypeImmediate() {
if (rs_reg != 0) { // BNEZC
BranchCompactHelper(rs != 0, 21);
} else { // JIALC
set_register(31, get_pc() + Instruction::kInstrSize);
set_register(31, get_pc() + kInstrSize);
next_pc = rt + imm16;
}
break;
@ -6864,7 +6861,7 @@ void Simulator::DecodeTypeImmediate() {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(get_pc() + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@ -6890,13 +6887,13 @@ void Simulator::DecodeTypeJump() {
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (simInstr.IsLinkingInstruction()) {
set_register(31, current_pc + 2 * Instruction::kInstrSize);
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
@ -6937,13 +6934,10 @@ void Simulator::InstructionDecode(Instruction* instr) {
trace_buf_.start());
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) +
Instruction::kInstrSize);
set_register(pc, reinterpret_cast<int32_t>(instr) + kInstrSize);
}
}
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.

View File

@ -447,7 +447,7 @@ class Simulator : public SimulatorBase {
// Compact branch guard.
void CheckForbiddenSlot(int32_t current_pc) {
Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
FATAL(
"Error: Unexpected instruction 0x%08x immediately after a "

View File

@ -96,7 +96,7 @@ Address RelocInfo::target_address_address() {
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
return pc_ + Assembler::kInstructionsFor64BitConstant * Assembler::kInstrSize;
return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
}
@ -128,7 +128,7 @@ int Assembler::deserialization_special_target_size(
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
// Encoded internal references are j/jal instructions.
Instr instr = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
Instr instr = Assembler::instr_at(pc + 0 * kInstrSize);
uint64_t imm28 = target & static_cast<uint64_t>(kImm28Mask);
@ -198,7 +198,7 @@ Address RelocInfo::target_internal_reference() {
} else {
// Encoded internal references are j/jal instructions.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
Instr instr = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
Instr instr = Assembler::instr_at(pc_ + 0 * kInstrSize);
instr &= kImm26Mask;
uint64_t imm28 = instr << 2;
uint64_t segment = pc_ & ~static_cast<uint64_t>(kImm28Mask);

View File

@ -795,8 +795,8 @@ int Assembler::target_at(int pos, bool is_internal) {
return AddBranchOffset(pos, instr);
} else if (IsMov(instr, t8, ra)) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 2 * kInstrSize);
Instr instr_ori = instr_at(pos + 3 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
@ -807,10 +807,10 @@ int Assembler::target_at(int pos, bool is_internal) {
}
return pos + Assembler::kLongBranchPCOffset + imm32;
} else if (IsLui(instr)) {
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
if (IsBal(instr_at(pos + kInstrSize))) {
int32_t imm32;
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 0 * kInstrSize);
Instr instr_ori = instr_at(pos + 2 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
@ -821,9 +821,9 @@ int Assembler::target_at(int pos, bool is_internal) {
}
return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 0 * kInstrSize);
Instr instr_ori = instr_at(pos + 1 * kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
@ -894,9 +894,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} else if (IsLui(instr)) {
if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
if (IsBal(instr_at(pos + kInstrSize))) {
Instr instr_lui = instr_at(pos + 0 * kInstrSize);
Instr instr_ori = instr_at(pos + 2 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
@ -905,14 +905,13 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr_at_put(pos + 0 * kInstrSize,
instr_lui | ((imm >> kLuiShift) & kImm16Mask));
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
} else {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 0 * kInstrSize);
Instr instr_ori = instr_at(pos + 1 * kInstrSize);
Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
@ -923,16 +922,15 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_ori &= ~kImm16Mask;
instr_ori2 &= ~kImm16Mask;
instr_at_put(pos + 0 * Assembler::kInstrSize,
instr_at_put(pos + 0 * kInstrSize,
instr_lui | ((imm >> 32) & kImm16Mask));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_at_put(pos + 1 * kInstrSize,
instr_ori | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
}
} else if (IsMov(instr, t8, ra)) {
Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pos + 2 * kInstrSize);
Instr instr_ori = instr_at(pos + 3 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
@ -945,7 +943,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_b = SetBranchOffset(pos, target_pos, instr_b);
instr_at_put(pos, instr_b);
instr_at_put(pos + 1 * Assembler::kInstrSize, 0);
instr_at_put(pos + 1 * kInstrSize, 0);
} else {
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
DCHECK_EQ(imm & 3, 0);
@ -953,10 +951,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
instr_at_put(pos + 2 * Assembler::kInstrSize,
instr_at_put(pos + 2 * kInstrSize,
instr_lui | ((imm >> kLuiShift) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
}
} else if (IsJ(instr) || IsJal(instr)) {
int32_t imm28 = target_pos - pos;
@ -4057,9 +4054,9 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
Instr instr = instr_at(pc);
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
if (IsLui(instr)) {
Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
Instr instr_lui = instr_at(pc + 0 * kInstrSize);
Instr instr_ori = instr_at(pc + 1 * kInstrSize);
Instr instr_ori2 = instr_at(pc + 3 * kInstrSize);
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
// TODO(plind): symbolic names for the shifts.
@ -4079,12 +4076,9 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
instr_ori &= ~kImm16Mask;
instr_ori2 &= ~kImm16Mask;
instr_at_put(pc + 0 * Assembler::kInstrSize,
instr_lui | ((imm >> 32) & kImm16Mask));
instr_at_put(pc + 1 * Assembler::kInstrSize,
instr_ori | (imm >> 16 & kImm16Mask));
instr_at_put(pc + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask));
instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask));
instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
return 4; // Number of instructions patched.
} else if (IsJ(instr) || IsJal(instr)) {
// Regular j/jal relocation.
@ -4366,7 +4360,7 @@ void Assembler::set_target_value_at(Address pc, uint64_t target,
(target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, 4 * Assembler::kInstrSize);
Assembler::FlushICache(pc, 4 * kInstrSize);
}
}

View File

@ -616,9 +616,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
static constexpr int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static constexpr int kBranchPCOffset = kInstrSize;

View File

@ -251,7 +251,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push ra" instruction, followed by a call.
// Note: on MIPS "push" is 2 instruction
const int32_t kReturnAddressDistanceFromFunctionStart =
Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
Assembler::kCallTargetAddressOffset + (2 * kInstrSize);
// This should contain all kJSCallerSaved registers.
const RegList kSavedRegs =

View File

@ -1278,11 +1278,12 @@ static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
}
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
class InstructionBase {
public:
enum {
kInstrSize = 4,
kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
// always the value of the current instruction being executed.
kPCReadOffset = 0
@ -1767,10 +1768,10 @@ const int kCArgSlotCount = 0;
// TODO(plind): below should be based on kPointerSize
// TODO(plind): find all usages and remove the needless instructions for n64.
const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2;
const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
const int kBranchReturnOffset = 2 * kInstrSize;
static const int kNegOffset = 0x00008000;

View File

@ -235,9 +235,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS64R6
const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
#else
const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
@ -249,10 +249,10 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&table_start);
#ifdef _MIPS_ARCH_MIPS64R6
int kMaxEntriesBranchReach =
(1 << (kImm26Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
(1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
#else
int kMaxEntriesBranchReach =
(1 << (kImm16Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
(1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
#endif
if (count() <= kMaxEntriesBranchReach) {

View File

@ -1079,15 +1079,14 @@ int Decoder::DecodeBreakInstr(Instruction* instr) {
Format(instr, "break, code: 'code");
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
static_cast<void*>(
reinterpret_cast<int32_t*>(instr + Instruction::kInstrSize)),
static_cast<void*>(reinterpret_cast<int32_t*>(instr + kInstrSize)),
reinterpret_cast<uint64_t>(
*reinterpret_cast<char**>(instr + Instruction::kInstrSize)));
*reinterpret_cast<char**>(instr + kInstrSize)));
// Size 3: the break_ instr, plus embedded 64-bit char pointer.
return 3 * Instruction::kInstrSize;
return 3 * kInstrSize;
} else {
Format(instr, "break, code: 'code");
return Instruction::kInstrSize;
return kInstrSize;
}
}
@ -1897,10 +1896,9 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
default:
UNREACHABLE();
}
return Instruction::kInstrSize;
return kInstrSize;
}
void Decoder::DecodeTypeImmediateCOP1(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case BC1:
@ -3023,10 +3021,9 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
UNSUPPORTED_MIPS();
}
}
return Instruction::kInstrSize;
return kInstrSize;
}
} // namespace internal
} // namespace v8

View File

@ -1281,7 +1281,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
bind(&here);
daddu(scratch, scratch, ra);
pop(ra);
Ld(scratch, MemOperand(scratch, 6 * v8::internal::Assembler::kInstrSize));
Ld(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
}
jr(scratch);
nop(); // Branch delay slot nop.

View File

@ -360,7 +360,7 @@ void MipsDebugger::Debug() {
} else {
// Allow si to jump over generated breakpoints.
PrintF("/!\\ Jumping over generated breakpoint.\n");
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
sim_->set_pc(sim_->get_pc() + kInstrSize);
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
@ -494,7 +494,7 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
@ -503,7 +503,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@ -511,7 +511,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
end = cur + (value * Instruction::kInstrSize);
end = cur + (value * kInstrSize);
}
}
} else {
@ -519,7 +519,7 @@ void MipsDebugger::Debug() {
int64_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
end = cur + (value2 * kInstrSize);
}
}
@ -527,7 +527,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
cur += Instruction::kInstrSize;
cur += kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@ -554,12 +554,10 @@ void MipsDebugger::Debug() {
PrintF("No flags on MIPS !\n");
} else if (strcmp(cmd, "stop") == 0) {
int64_t value;
intptr_t stop_pc = sim_->get_pc() -
2 * Instruction::kInstrSize;
intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
reinterpret_cast<Instruction*>(stop_pc +
Instruction::kInstrSize);
reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->IsStopInstruction(stop_instr)) {
@ -628,20 +626,20 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int64_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// no length parameter passed, assume 10 instructions
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
}
} else {
int64_t value1;
int64_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
end = cur + (value2 * kInstrSize);
}
}
@ -649,7 +647,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
cur += Instruction::kInstrSize;
cur += kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
@ -802,8 +800,7 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset),
Instruction::kInstrSize));
cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@ -3672,7 +3669,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int64_t next_pc = rs();
int64_t current_pc = get_pc();
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_pc(next_pc);
pc_modified_ = true;
@ -3683,9 +3680,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int64_t current_pc = get_pc();
int32_t return_addr_reg = rd_reg();
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_register(return_addr_reg, current_pc + 2 * Instruction::kInstrSize);
set_register(return_addr_reg, current_pc + 2 * kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
@ -6546,10 +6543,10 @@ void Simulator::DecodeTypeImmediate() {
int64_t current_pc = get_pc();
if (do_branch) {
int16_t imm16 = instr_.Imm16Value();
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
set_register(31, current_pc + 2 * Instruction::kInstrSize);
next_pc = current_pc + (imm16 << 2) + kInstrSize;
set_register(31, current_pc + 2 * kInstrSize);
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
next_pc = current_pc + 2 * kInstrSize;
}
};
@ -6559,9 +6556,9 @@ void Simulator::DecodeTypeImmediate() {
int64_t current_pc = get_pc();
if (do_branch) {
int16_t imm16 = instr_.Imm16Value();
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
next_pc = current_pc + (imm16 << 2) + kInstrSize;
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
next_pc = current_pc + 2 * kInstrSize;
}
};
@ -6583,9 +6580,9 @@ void Simulator::DecodeTypeImmediate() {
// pc + kInstrSize + 511 * kInstrSize]
int16_t offset = static_cast<int16_t>(imm16 << (bitsIn16Int - 10)) >>
(bitsIn16Int - 12);
next_pc = current_pc + offset + Instruction::kInstrSize;
next_pc = current_pc + offset + kInstrSize;
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
next_pc = current_pc + 2 * kInstrSize;
}
};
@ -6596,8 +6593,8 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
set_register(31, current_pc + Instruction::kInstrSize);
next_pc = current_pc + (imm << 2) + kInstrSize;
set_register(31, current_pc + kInstrSize);
}
};
@ -6608,7 +6605,7 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
next_pc = get_pc() + (imm << 2) + kInstrSize;
}
};
@ -6814,7 +6811,7 @@ void Simulator::DecodeTypeImmediate() {
BranchCompactHelper(rs != 0, 21);
} else { // JIALC
int64_t current_pc = get_pc();
set_register(31, current_pc + Instruction::kInstrSize);
set_register(31, current_pc + kInstrSize);
next_pc = rt + imm16;
}
break;
@ -7213,7 +7210,7 @@ void Simulator::DecodeTypeImmediate() {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(get_pc() + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@ -7238,13 +7235,13 @@ void Simulator::DecodeTypeJump() {
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (simInstr.IsLinkingInstruction()) {
set_register(31, current_pc + 2 * Instruction::kInstrSize);
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
@ -7290,8 +7287,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int64_t>(instr) +
Instruction::kInstrSize);
set_register(pc, reinterpret_cast<int64_t>(instr) + kInstrSize);
}
}

View File

@ -469,7 +469,7 @@ class Simulator : public SimulatorBase {
// Compact branch guard.
void CheckForbiddenSlot(int64_t current_pc) {
Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(current_pc + kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
FATAL(
"Error: Unexpected instruction 0x%08x immediately after a "

View File

@ -597,9 +597,6 @@ class Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
static constexpr int kInstrSize = sizeof(Instr);
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
// PPC platform, as Code, Embedded Object or External-reference pointers

View File

@ -222,9 +222,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
if (tasm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(tasm,
#if V8_TARGET_ARCH_PPC64
14 * Assembler::kInstrSize);
14 * kInstrSize);
#else
11 * Assembler::kInstrSize);
11 * kInstrSize);
#endif
tasm->mflr(r0);
tasm->Push(r0, ip);
@ -238,9 +238,9 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_PPC64
14 * Assembler::kInstrSize);
14 * kInstrSize);
#else
11 * Assembler::kInstrSize);
11 * kInstrSize);
#endif
ProfileEntryHookStub stub(masm->isolate());
__ mflr(r0);
@ -255,7 +255,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr, ip" instruction, followed by a call.
const int32_t kReturnAddressDistanceFromFunctionStart =
Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;
Assembler::kCallTargetAddressOffset + 3 * kInstrSize;
// This should contain all kJSCallerSaved registers.
const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.

View File

@ -2756,10 +2756,13 @@ const Instr rtCallRedirInstr = TWI;
// return ((type == 0) || (type == 1)) && instr->HasS();
// }
//
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kPcLoadDelta = 8;
class Instruction {
public:
enum { kInstrSize = 4, kInstrSizeLog2 = 2, kPcLoadDelta = 8 };
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \

View File

@ -1157,7 +1157,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
// The first field will be identified as a jump table entry. We
// emit the rest of the structure as zero, so just skip past them.
Format(instr, "constant");
return Instruction::kInstrSize;
return kInstrSize;
}
uint32_t opcode = instr->OpcodeValue() << 26;
@ -1466,7 +1466,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
}
return Instruction::kInstrSize;
return kInstrSize;
}
} // namespace internal
} // namespace v8

View File

@ -73,8 +73,7 @@ void PPCDebugger::Stop(Instruction* instr) {
// use of kStopCodeMask not right on PowerPC
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
char* msg =
*reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
char* msg = *reinterpret_cast<char**>(sim_->get_pc() + kInstrSize);
// Update this stop description.
if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
sim_->watched_stops_[code].desc = msg;
@ -85,7 +84,7 @@ void PPCDebugger::Stop(Instruction* instr) {
} else {
PrintF("Simulator hit %s\n", msg);
}
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
sim_->set_pc(sim_->get_pc() + kInstrSize + kPointerSize);
Debug();
}
@ -233,7 +232,7 @@ void PPCDebugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
sim_->set_pc(sim_->get_pc() + kInstrSize);
} else {
sim_->ExecuteInstruction(
reinterpret_cast<Instruction*>(sim_->get_pc()));
@ -257,7 +256,7 @@ void PPCDebugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
->InstructionBits() == 0x7D821008) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
sim_->set_pc(sim_->get_pc() + kInstrSize);
} else {
// Execute the one instruction we broke at with breakpoints disabled.
sim_->ExecuteInstruction(
@ -430,7 +429,7 @@ void PPCDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
@ -439,7 +438,7 @@ void PPCDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
end = cur + (10 * Instruction::kInstrSize);
end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@ -447,7 +446,7 @@ void PPCDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
end = cur + (value * Instruction::kInstrSize);
end = cur + (value * kInstrSize);
}
}
} else {
@ -455,7 +454,7 @@ void PPCDebugger::Debug() {
intptr_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
end = cur + (value2 * kInstrSize);
}
}
@ -498,11 +497,10 @@ void PPCDebugger::Debug() {
PrintF("FPSCR: %08x\n", sim_->fp_condition_reg_);
} else if (strcmp(cmd, "stop") == 0) {
intptr_t value;
intptr_t stop_pc =
sim_->get_pc() - (Instruction::kInstrSize + kPointerSize);
intptr_t stop_pc = sim_->get_pc() - (kInstrSize + kPointerSize);
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->isStopInstruction(stop_instr)) {
@ -725,9 +723,8 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK_EQ(0,
memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset), Instruction::kInstrSize));
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@ -1469,7 +1466,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PPCDebugger dbg(this);
dbg.Stop(instr);
} else {
set_pc(get_pc() + Instruction::kInstrSize + kPointerSize);
set_pc(get_pc() + kInstrSize + kPointerSize);
}
} else {
// This is not a valid svc code.
@ -3922,11 +3919,10 @@ void Simulator::ExecuteInstruction(Instruction* instr) {
ExecuteGeneric(instr);
}
if (!pc_modified_) {
set_pc(reinterpret_cast<intptr_t>(instr) + Instruction::kInstrSize);
set_pc(reinterpret_cast<intptr_t>(instr) + kInstrSize);
}
}
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.

View File

@ -116,8 +116,8 @@ void JumpTableAssembler::EmitJumpSlot(Address target) {
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
DCHECK_EQ(0, bytes % kInstructionSize);
for (; bytes > 0; bytes -= kInstructionSize) {
DCHECK_EQ(0, bytes % kInstrSize);
for (; bytes > 0; bytes -= kInstrSize) {
nop();
}
}

View File

@ -107,8 +107,8 @@ class JumpTableAssembler : public TurboAssembler {
static constexpr int kJumpTableLineSize = 5 * kInstrSize;
static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableLineSize = 3 * kInstructionSize;
static constexpr int kJumpTableSlotSize = 3 * kInstructionSize;
static constexpr int kJumpTableLineSize = 3 * kInstrSize;
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
static constexpr int kJumpTableLineSize = 20;
static constexpr int kJumpTableSlotSize = 20;

View File

@ -151,7 +151,7 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
instr->SetBranchImmTarget(
reinterpret_cast<Instruction*>(rinfo->pc() + tag * kInstructionSize));
reinterpret_cast<Instruction*>(rinfo->pc() + tag * kInstrSize));
}
#else
Address addr = static_cast<Address>(tag);
@ -175,7 +175,7 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
Memory::Address_at(rinfo->constant_pool_entry_address()));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
}
#else
Address addr;

View File

@ -1714,7 +1714,7 @@ TEST(adr_far) {
INIT_V8();
int max_range = 1 << (Instruction::ImmPCRelRangeBitwidth - 1);
SETUP_SIZE(max_range + 1000 * kInstructionSize);
SETUP_SIZE(max_range + 1000 * kInstrSize);
Label done, fail;
Label test_near, near_forward, near_backward;
@ -1744,7 +1744,7 @@ TEST(adr_far) {
__ Orr(x0, x0, 1 << 3);
__ B(&done);
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@ -1906,7 +1906,7 @@ TEST(branch_to_reg) {
RUN();
CHECK_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
CHECK_EQUAL_64(core.xreg(3) + kInstrSize, x0);
CHECK_EQUAL_64(42, x1);
CHECK_EQUAL_64(84, x2);
@ -2048,7 +2048,7 @@ TEST(far_branch_backward) {
std::max(Instruction::ImmBranchRange(CompareBranchType),
Instruction::ImmBranchRange(CondBranchType)));
SETUP_SIZE(max_range + 1000 * kInstructionSize);
SETUP_SIZE(max_range + 1000 * kInstrSize);
START();
@ -2074,7 +2074,7 @@ TEST(far_branch_backward) {
// Generate enough code to overflow the immediate range of the three types of
// branches below.
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@ -2095,7 +2095,7 @@ TEST(far_branch_backward) {
// For each out-of-range branch instructions, at least two instructions should
// have been generated.
CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
CHECK_GE(7 * kInstrSize, __ SizeOfCodeGeneratedSince(&test_tbz));
__ Bind(&fail);
__ Mov(x1, 0);
@ -2122,7 +2122,7 @@ TEST(far_branch_simple_veneer) {
std::max(Instruction::ImmBranchRange(CompareBranchType),
Instruction::ImmBranchRange(CondBranchType)));
SETUP_SIZE(max_range + 1000 * kInstructionSize);
SETUP_SIZE(max_range + 1000 * kInstrSize);
START();
@ -2144,7 +2144,7 @@ TEST(far_branch_simple_veneer) {
// Generate enough code to overflow the immediate range of the three types of
// branches below.
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@ -2198,7 +2198,7 @@ TEST(far_branch_veneer_link_chain) {
std::max(Instruction::ImmBranchRange(CompareBranchType),
Instruction::ImmBranchRange(CondBranchType)));
SETUP_SIZE(max_range + 1000 * kInstructionSize);
SETUP_SIZE(max_range + 1000 * kInstrSize);
START();
@ -2239,7 +2239,7 @@ TEST(far_branch_veneer_link_chain) {
// Generate enough code to overflow the immediate range of the three types of
// branches below.
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@ -2288,7 +2288,7 @@ TEST(far_branch_veneer_broken_link_chain) {
int max_range = Instruction::ImmBranchRange(TestBranchType);
int inter_range = max_range / 2 + max_range / 10;
SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
SETUP_SIZE(3 * inter_range + 1000 * kInstrSize);
START();
@ -2305,7 +2305,7 @@ TEST(far_branch_veneer_broken_link_chain) {
__ Mov(x0, 1);
__ B(&far_target);
for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
for (int i = 0; i < inter_range / kInstrSize; ++i) {
if (i % 100 == 0) {
// Do not allow generating veneers. They should not be needed.
__ b(&fail);
@ -2319,7 +2319,7 @@ TEST(far_branch_veneer_broken_link_chain) {
__ Mov(x0, 2);
__ Tbz(x10, 7, &far_target);
for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
for (int i = 0; i < inter_range / kInstrSize; ++i) {
if (i % 100 == 0) {
// Do not allow generating veneers. They should not be needed.
__ b(&fail);
@ -2334,7 +2334,7 @@ TEST(far_branch_veneer_broken_link_chain) {
__ Mov(x0, 3);
__ Tbz(x10, 7, &far_target);
for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
for (int i = 0; i < inter_range / kInstrSize; ++i) {
if (i % 100 == 0) {
// Allow generating veneers.
__ B(&fail);
@ -6757,7 +6757,7 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
if (option == NoJumpRequired) {
// Space for an explicit branch.
pool_guard_size = kInstructionSize;
pool_guard_size = kInstrSize;
} else {
pool_guard_size = 0;
}
@ -6771,26 +6771,26 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
LoadLiteral(&masm, x1, 0xABCDEF1234567890UL);
CHECK_CONSTANT_POOL_SIZE(16);
code_size += 2 * kInstructionSize;
code_size += 2 * kInstrSize;
// Check that the requested range (allowing space for a branch over the pool)
// can be handled by this test.
CHECK_LE(code_size + pool_guard_size, range);
// Emit NOPs up to 'range', leaving space for the pool guard.
while ((code_size + pool_guard_size + kInstructionSize) < range) {
while ((code_size + pool_guard_size + kInstrSize) < range) {
__ Nop();
code_size += kInstructionSize;
code_size += kInstrSize;
}
// Emit the guard sequence before the literal pool.
if (option == NoJumpRequired) {
__ B(&label_1);
code_size += kInstructionSize;
code_size += kInstrSize;
}
// The next instruction will trigger pool emission when expect_dump is true.
CHECK_EQ(code_size, range - kInstructionSize);
CHECK_EQ(code_size, range - kInstrSize);
CHECK_CONSTANT_POOL_SIZE(16);
// Possibly generate a literal pool.
@ -6834,8 +6834,7 @@ TEST(ldr_literal_range_1) {
TEST(ldr_literal_range_2) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
kInstructionSize,
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
NoJumpRequired, false);
}
@ -6850,7 +6849,7 @@ TEST(ldr_literal_range_3) {
TEST(ldr_literal_range_4) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetCheckConstPoolIntervalForTesting() - kInstructionSize,
MacroAssembler::GetCheckConstPoolIntervalForTesting() - kInstrSize,
JumpRequired, false);
}
#endif
@ -15251,7 +15250,7 @@ TEST(pool_size) {
}
__ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
for (unsigned i = 0; i < veneer_pool_size / kInstrSize; ++i) {
__ nop();
}
@ -15289,7 +15288,7 @@ TEST(jump_tables_forward) {
const int kNumCases = 512;
INIT_V8();
SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
SETUP_SIZE(kNumCases * 5 * kInstrSize + 8192);
START();
int32_t values[kNumCases];
@ -15353,7 +15352,7 @@ TEST(jump_tables_backward) {
const int kNumCases = 512;
INIT_V8();
SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
SETUP_SIZE(kNumCases * 5 * kInstrSize + 8192);
START();
int32_t values[kNumCases];

View File

@ -3164,8 +3164,7 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases + 7) * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
Label here;
__ bal(&here);
@ -3173,7 +3172,7 @@ TEST(jump_tables1) {
__ bind(&here);
__ sll(at, a0, 2);
__ addu(at, at, ra);
__ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
__ lw(at, MemOperand(at, 5 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@ -3243,8 +3242,7 @@ TEST(jump_tables2) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases + 7) * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
Label here;
__ bal(&here);
@ -3252,7 +3250,7 @@ TEST(jump_tables2) {
__ bind(&here);
__ sll(at, a0, 2);
__ addu(at, at, ra);
__ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
__ lw(at, MemOperand(at, 5 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@ -3319,8 +3317,7 @@ TEST(jump_tables3) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
PredictableCodeSizeScope predictable(
&assm, (kNumCases + 7) * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
Label here;
__ bal(&here);
@ -3328,7 +3325,7 @@ TEST(jump_tables3) {
__ bind(&here);
__ sll(at, a0, 2);
__ addu(at, at, ra);
__ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
__ lw(at, MemOperand(at, 5 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@ -5557,7 +5554,7 @@ TEST(Trampoline) {
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
Label done;
size_t nr_calls = kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
size_t nr_calls = kMaxBranchOffset / (2 * kInstrSize) + 2;
for (size_t i = 0; i < nr_calls; ++i) {
__ BranchShort(&done, eq, a0, Operand(a1));
@ -5715,8 +5712,7 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
Label code_start;
__ bind(&code_start);
__ Subu(v0, zero_reg, imm);
CHECK_EQ(assm.SizeOfCodeGeneratedSince(&code_start),
num_instr * Assembler::kInstrSize);
CHECK_EQ(assm.SizeOfCodeGeneratedSince(&code_start), num_instr * kInstrSize);
__ jr(ra);
__ nop();

View File

@ -3293,15 +3293,15 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
PredictableCodeSizeScope predictable(
&assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(&assm,
(kNumCases * 2 + 6) * kInstrSize);
Label here;
__ bal(&here);
__ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
__ daddu(at, at, ra);
__ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ Ld(at, MemOperand(at, 4 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@ -3373,15 +3373,15 @@ TEST(jump_tables2) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
PredictableCodeSizeScope predictable(
&assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(&assm,
(kNumCases * 2 + 6) * kInstrSize);
Label here;
__ bal(&here);
__ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
__ daddu(at, at, ra);
__ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ Ld(at, MemOperand(at, 4 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@ -3453,15 +3453,15 @@ TEST(jump_tables3) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
PredictableCodeSizeScope predictable(
&assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(&assm,
(kNumCases * 2 + 6) * kInstrSize);
Label here;
__ bal(&here);
__ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
__ daddu(at, at, ra);
__ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ Ld(at, MemOperand(at, 4 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@ -6263,7 +6263,7 @@ TEST(Trampoline) {
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
Label done;
size_t nr_calls = kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
size_t nr_calls = kMaxBranchOffset / (2 * kInstrSize) + 2;
for (size_t i = 0; i < nr_calls; ++i) {
__ BranchShort(&done, eq, a0, Operand(a1));

View File

@ -58,7 +58,7 @@ bool DisassembleAndCompare(byte* begin, UseRegex use_regex,
std::vector<std::string> expected_disassembly = {expected_strings...};
size_t n_expected = expected_disassembly.size();
byte* end = begin + (n_expected * Assembler::kInstrSize);
byte* end = begin + (n_expected * kInstrSize);
std::vector<std::string> disassembly;
for (byte* pc = begin; pc < end;) {

View File

@ -815,7 +815,7 @@ TEST_(adr) {
TEST_(branch) {
SET_UP_ASM();
#define INST_OFF(x) ((x) >> kInstructionSizeLog2)
#define INST_OFF(x) ((x) >> kInstrSizeLog2)
COMPARE_PREFIX(b(INST_OFF(0x4)), "b #+0x4");
COMPARE_PREFIX(b(INST_OFF(-0x4)), "b #-0x4");
COMPARE_PREFIX(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
@ -840,6 +840,7 @@ TEST_(branch) {
COMPARE_PREFIX(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
COMPARE_PREFIX(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
COMPARE_PREFIX(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
#undef INST_OFF
COMPARE(br(x0), "br x0");
COMPARE(blr(x1), "blr x1");
COMPARE(ret(x2), "ret x2");

View File

@ -41,7 +41,7 @@ TEST(FUZZ_decoder) {
seed48(seed);
Decoder<DispatchingDecoderVisitor> decoder;
Instruction buffer[kInstructionSize];
Instruction buffer[kInstrSize];
for (int i = 0; i < instruction_count; i++) {
uint32_t instr = static_cast<uint32_t>(mrand48());
@ -61,7 +61,7 @@ TEST(FUZZ_disasm) {
Decoder<DispatchingDecoderVisitor> decoder;
DisassemblingDecoder disasm;
Instruction buffer[kInstructionSize];
Instruction buffer[kInstrSize];
decoder.AppendVisitor(&disasm);
for (int i = 0; i < instruction_count; i++) {

View File

@ -236,7 +236,7 @@ TEST(jump_tables5) {
{
__ BlockTrampolinePoolFor(kNumCases + 6 + 1);
PredictableCodeSizeScope predictable(
masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
masm, kNumCases * kPointerSize + ((6 + 1) * kInstrSize));
__ addiupc(at, 6 + 1);
__ Lsa(at, at, a0, 2);
@ -294,7 +294,6 @@ TEST(jump_tables6) {
const int kSwitchTableCases = 40;
const int kInstrSize = Assembler::kInstrSize;
const int kMaxBranchOffset = Assembler::kMaxBranchOffset;
const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize;
const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize;

View File

@ -309,7 +309,7 @@ TEST(jump_tables5) {
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6 + 1);
PredictableCodeSizeScope predictable(
masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
masm, kNumCases * kPointerSize + ((6 + 1) * kInstrSize));
__ addiupc(at, 6 + 1);
__ Dlsa(at, at, a0, 3);
@ -368,7 +368,6 @@ TEST(jump_tables6) {
const int kSwitchTableCases = 40;
const int kInstrSize = Assembler::kInstrSize;
const int kMaxBranchOffset = Assembler::kMaxBranchOffset;
const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize;
const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize;