[wasm][liftoff][cleanup] Remove default parameter of GetUnusedRegister

This CL removes the default parameter of GetUnusedRegister to avoid bugs
where the default parameter is used accidentally. With "{}", the default
value of the parameter is easy to write and not much harder to read.
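
As a rough illustration of the pattern (a standalone toy sketch with made-up
types, not the actual Liftoff code): once the default is gone, a call site
that forgets to think about pinned registers no longer compiles, while "no
pins" stays easy to spell as "{}".

```cpp
#include <cstdint>

// Toy stand-in for LiftoffRegList; purely illustrative.
struct RegList {
  uint32_t bits = 0;
  void set(int reg) { bits |= 1u << reg; }
  bool has(int reg) const { return (bits >> reg) & 1u; }
};

// Before: int GetUnusedRegister(RegList pinned = {});
// After: the pinned list must be written out at every call site.
int GetUnusedRegister(RegList pinned) {
  for (int reg = 0; reg < 32; ++reg) {
    if (!pinned.has(reg)) return reg;  // first register that is not pinned
  }
  return -1;  // no register left; the real code would spill one
}

int main() {
  RegList pinned;
  int scratch = GetUnusedRegister({});       // "no pins", spelled explicitly
  pinned.set(scratch);
  int scratch2 = GetUnusedRegister(pinned);  // distinct from {scratch}
  return scratch2 == scratch ? 1 : 0;        // 0: the two registers differ
}
```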

R=clemensb@chromium.org

Bug: v8:10506
Change-Id: I3debe5eb91578c82abdac81dc6c252435fdf30d6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2202991
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67822}
Andreas Haas 2020-05-15 13:18:18 +02:00 committed by Commit Bot
parent 3b4bafa5a8
commit 6771d3e318
7 changed files with 53 additions and 52 deletions

View File

@ -462,7 +462,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
case ValueType::kF64: {
- Register extra_scratch = GetUnusedRegister(kGpReg).gp();
+ Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
}
@ -1196,7 +1196,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@ -1241,7 +1241,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
// The scratch register will be required by str if multiple instructions
// are required to encode the offset, and so we cannot use it in that case.
if (!ImmediateFitsAddrMode2Instruction(dst.offset())) {
- src = GetUnusedRegister(kGpReg).gp();
+ src = GetUnusedRegister(kGpReg, {}).gp();
} else {
src = temps.Acquire();
}
@ -1783,7 +1783,7 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovLow(scratch, lhs);
// Clear sign bit in {scratch}.
@ -1802,7 +1802,7 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
// On arm, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovHigh(scratch, lhs);
// Clear sign bit in {scratch}.

View File

@ -130,7 +130,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
}
inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@ -1349,7 +1349,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst,
// We need one tmp register to extract the sign bit. Get it right at the
// beginning, such that the spilling code is not accidentially jumped over.
- Register tmp = assm->GetUnusedRegister(kGpReg).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, {}).gp();
#define dop(name, ...) \
do { \
@ -1412,9 +1412,9 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
static constexpr int kF32SignBit = 1 << 31;
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
-     GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Movd(scratch, lhs); // move {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
Movd(scratch2, rhs); // move {rhs} into {scratch2}.
@ -1541,9 +1541,9 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
static constexpr int kF32SignBit = 1 << 31;
// On ia32, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
-     GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Pextrd(scratch, lhs, 1); // move UH of {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
@ -2381,7 +2381,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc);
+ LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@ -3320,7 +3320,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

View File

@ -535,7 +535,7 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
if (slot.is_reg()) {
return half == kLowWord ? slot.reg().low() : slot.reg().high();
}
- LiftoffRegister dst = GetUnusedRegister(kGpReg);
+ LiftoffRegister dst = GetUnusedRegister(kGpReg, {});
if (slot.is_stack()) {
FillI64Half(dst.gp(), slot.offset(), half);
return dst;
@ -574,7 +574,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
if (!slot.is_const()) continue;
RegClass rc =
kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
- LiftoffRegister reg = GetUnusedRegister(rc);
+ LiftoffRegister reg = GetUnusedRegister(rc, {});
LoadConstant(reg, slot.constant());
slot.MakeRegister(reg);
cache_state_.inc_used(reg);

View File

@ -340,7 +340,7 @@ class LiftoffAssembler : public TurboAssembler {
// possible.
LiftoffRegister GetUnusedRegister(
RegClass rc, std::initializer_list<LiftoffRegister> try_first,
- LiftoffRegList pinned = {}) {
+ LiftoffRegList pinned) {
for (LiftoffRegister reg : try_first) {
DCHECK_EQ(reg.reg_class(), rc);
if (cache_state_.is_free(reg)) return reg;
@ -349,7 +349,7 @@ class LiftoffAssembler : public TurboAssembler {
}
// Get an unused register for class {rc}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList candidates = kGpCacheRegList;
Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
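
The two declarations above are the overloads every call site in this CL now
has to satisfy explicitly. A self-contained toy sketch of how they interact
(made-up types again, not V8's real cache state): the hinted overload reuses
a register from {try_first} if it is still free and otherwise defers to the
general overload, which respects the pinned list.

```cpp
#include <initializer_list>
#include <vector>

// Toy model of the two GetUnusedRegister overloads; "free_" stands in for
// the real cache state, and int stands in for LiftoffRegister.
struct ToyAssembler {
  std::vector<bool> free_ = std::vector<bool>(16, true);

  // General overload: pick any register that is neither used nor pinned.
  int GetUnusedRegister(std::vector<int> pinned) {
    for (int reg = 0; reg < 16; ++reg) {
      bool is_pinned = false;
      for (int p : pinned) is_pinned = is_pinned || (p == reg);
      if (free_[reg] && !is_pinned) return reg;
    }
    return -1;  // the real implementation would spill a register here
  }

  // Hinted overload: prefer a register from {try_first} if it is still free,
  // otherwise fall back to the general overload.
  int GetUnusedRegister(std::initializer_list<int> try_first,
                        std::vector<int> pinned) {
    for (int reg : try_first) {
      if (free_[reg]) return reg;
    }
    return GetUnusedRegister(pinned);
  }
};

int main() {
  ToyAssembler assm;
  int dst = assm.GetUnusedRegister({5}, {});  // hint r5 is free, so dst == 5
  assm.free_[dst] = false;
  int tmp = assm.GetUnusedRegister({});       // explicit "no pins", as in this CL
  return (dst == 5 && tmp == 0) ? 0 : 1;
}
```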

View File

@ -495,7 +495,7 @@ class LiftoffCompiler {
position, __ cache_state()->used_registers,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
OutOfLineCode& ool = out_of_line_code_.back();
- Register limit_address = __ GetUnusedRegister(kGpReg).gp();
+ Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
@ -604,7 +604,7 @@ class LiftoffCompiler {
*next_breakpoint_ptr_ == decoder->position());
if (!has_breakpoint) {
DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg).gp();
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
kSystemPointerSize);
Label no_break;
@ -923,8 +923,8 @@ class LiftoffCompiler {
constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {src})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {src}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
__ PushRegister(ValueType(result_type), dst);
}
@ -951,8 +951,9 @@ class LiftoffCompiler {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
- LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
-                                        : __ GetUnusedRegister(dst_rc);
+ LiftoffRegister dst = src_rc == dst_rc
+                           ? __ GetUnusedRegister(dst_rc, {src}, {})
+                           : __ GetUnusedRegister(dst_rc, {});
DCHECK_EQ(!!can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
@ -1122,8 +1123,8 @@ class LiftoffCompiler {
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType(result_type), dst);
@ -1141,8 +1142,8 @@ class LiftoffCompiler {
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs, rhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
if (swap_lhs_rhs) std::swap(lhs, rhs);
@ -1483,20 +1484,20 @@ class LiftoffCompiler {
if (value_i32 == value) {
__ PushConstant(kWasmI64, value_i32);
} else {
- LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmI64, reg);
}
}
void F32Const(FullDecoder* decoder, Value* result, float value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
@ -1546,7 +1547,7 @@ class LiftoffCompiler {
break;
case kStack: {
auto rc = reg_class_for(imm.type);
- LiftoffRegister reg = __ GetUnusedRegister(rc);
+ LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ Fill(reg, slot.offset(), imm.type);
__ PushRegister(slot.type(), reg);
break;
@ -1570,7 +1571,7 @@ class LiftoffCompiler {
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
- LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
+ LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
@ -1609,7 +1610,7 @@ class LiftoffCompiler {
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
LiftoffRegList* pinned, uint32_t* offset) {
- Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
@ -1675,8 +1676,8 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
- LiftoffRegister dst =
-     __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
+ LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
+                                            {true_value, false_value}, {});
__ PushRegister(type, dst);
// Now emit the actual code to move either {true_value} or {false_value}
@ -2075,7 +2076,7 @@ class LiftoffCompiler {
}
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- Register mem_size = __ GetUnusedRegister(kGpReg).gp();
+ Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
__ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
@ -2344,7 +2345,7 @@ class LiftoffCompiler {
src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src3},
LiftoffRegList::ForRegs(src1, src2))
- : __ GetUnusedRegister(result_rc);
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
__ PushRegister(ValueType(result_type), dst);
}
@ -2360,14 +2361,14 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm);
__ PushRegister(kWasmS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count);
__ PushRegister(kWasmS128, dst);
@ -2695,8 +2696,8 @@ class LiftoffCompiler {
static constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
__ PushRegister(ValueType(result_type), dst);
}
@ -2722,7 +2723,7 @@ class LiftoffCompiler {
(src2_rc == result_rc || pin_src2)
? __ GetUnusedRegister(result_rc, {src1},
LiftoffRegList::ForRegs(src2))
- : __ GetUnusedRegister(result_rc, {src1});
+ : __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
__ PushRegister(kWasmS128, dst);
}

View File

@ -603,7 +603,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@ -646,13 +646,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);
+ LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@ -2273,7 +2273,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

View File

@ -532,7 +532,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@ -582,13 +582,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
break;
@ -2221,7 +2221,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}