ARM64: Enable shorten-64-to-32 warning

Enable clang's shorten-64-to-32 warning flag on ARM64, and fix the warnings
that arise.

BUG=

Review URL: https://codereview.chromium.org/1131573006

Cr-Commit-Position: refs/heads/master@{#28412}
martyn.capewell 2015-05-14 22:13:15 -07:00 committed by Commit bot
parent 670ff36d82
commit cdc43bc5fd
25 changed files with 282 additions and 229 deletions
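Before the per-file hunks, a minimal sketch of what -Wshorten-64-to-32 reports and the fix pattern applied throughout this patch. The function below is invented for illustration (it is not from V8); note that the warning only fires where the wider type is genuinely 64-bit, which is why the flag is enabled per target architecture.

```cpp
#include <cstdint>

// Invented example: with -Wshorten-64-to-32, clang reports
//   warning: implicit conversion loses integer precision:
//       'long' to 'int' [-Wshorten-64-to-32]
// for the commented-out return below. The fix used throughout this
// patch is to make the truncation explicit once the value is known
// to fit.
int BufferOffset(const uint8_t* start, const uint8_t* pc) {
  // return pc - start;                 // ptrdiff_t -> int: warns
  return static_cast<int>(pc - start);  // explicit cast: no warning
}
```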

View File

@@ -390,9 +390,7 @@
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ],
'conditions': [
# TODO(arm64): It'd be nice to enable this for arm64 as well,
# but the Assembler requires some serious fixing first.
[ 'clang==1 and v8_target_arch=="x64"', {
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64")', {
'cflags': [ '-Wshorten-64-to-32' ],
}],
[ 'host_arch=="ppc64" and OS!="aix"', {

View File

@@ -1084,13 +1084,14 @@ Instr Assembler::SF(Register rd) {
}
Instr Assembler::ImmAddSub(int64_t imm) {
Instr Assembler::ImmAddSub(int imm) {
DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
return imm << ImmAddSub_offset;
imm <<= ImmAddSub_offset;
} else {
return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
}
return imm;
}
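This hunk establishes the pattern used for most encoder helpers in the patch: narrow the parameter to int and keep the DCHECK, so the one explicit 64-to-32 truncation happens visibly at call sites that have already range-checked the immediate. A rough standalone sketch of the shape (types and field offsets are illustrative, not V8's actual encodings):

```cpp
#include <cassert>
#include <cstdint>

typedef uint32_t Instr;

// The encoder takes a plain int, so any 64-to-32 truncation happens
// visibly at the call site, after the range has been checked.
Instr ImmField(int imm) {
  assert(imm >= 0 && imm < (1 << 12));   // stands in for DCHECK(IsImmAddSub())
  return static_cast<Instr>(imm) << 10;  // illustrative field offset
}

Instr EncodeAddSub(int64_t immediate) {
  // The cast is explicit; ImmField's assert re-checks the narrowed value.
  return ImmField(static_cast<int>(immediate));
}
```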
@@ -1239,13 +1240,13 @@ LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
}
Instr Assembler::ImmMoveWide(uint64_t imm) {
Instr Assembler::ImmMoveWide(int imm) {
DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
Instr Assembler::ShiftMoveWide(int64_t shift) {
Instr Assembler::ShiftMoveWide(int shift) {
DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset;
}

View File

@@ -580,8 +580,9 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = reinterpret_cast<byte*>(buffer_);
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos();
desc->reloc_size =
static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos());
desc->origin = this;
}
}
@@ -600,13 +601,13 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n2) behaviour.
int links_checked = 0;
int linkoffset = label->pos();
int64_t linkoffset = label->pos();
bool end_of_chain = false;
while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset);
int linkpcoffset = link->ImmPCOffset();
int prevlinkoffset = linkoffset + linkpcoffset;
int64_t linkpcoffset = link->ImmPCOffset();
int64_t prevlinkoffset = linkoffset + linkpcoffset;
end_of_chain = (linkoffset == prevlinkoffset);
linkoffset = linkoffset + linkpcoffset;
@@ -645,7 +646,8 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
// currently referring to this label.
label->Unuse();
} else {
label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
label->link_to(
static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
}
} else if (branch == next_link) {
@@ -721,7 +723,7 @@ void Assembler::bind(Label* label) {
while (label->is_linked()) {
int linkoffset = label->pos();
Instruction* link = InstructionAt(linkoffset);
int prevlinkoffset = linkoffset + link->ImmPCOffset();
int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());
CheckLabelLinkChain(label);
@@ -811,12 +813,13 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
while (!end_of_chain) {
Instruction * link = InstructionAt(link_offset);
link_pcoffset = link->ImmPCOffset();
link_pcoffset = static_cast<int>(link->ImmPCOffset());
// ADR instructions are not handled by veneers.
if (link->IsImmBranch()) {
int max_reachable_pc = InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType());
int max_reachable_pc =
static_cast<int>(InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType()));
typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
std::pair<unresolved_info_it, unresolved_info_it> range;
range = unresolved_branches_.equal_range(max_reachable_pc);
@@ -909,7 +912,7 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
const char* message =
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
int size = kDebugMessageOffset + strlen(message) + 1;
int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstructionSize) / kInstructionSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
@@ -1599,9 +1602,11 @@ void Assembler::LoadStorePair(const CPURegister& rt,
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
int offset = static_cast<int>(addr.offset());
Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
ImmLSPair(offset, CalcLSPairDataSize(op));
Instr addrmodeop;
if (addr.IsImmediateOffset()) {
@@ -1645,11 +1650,11 @@ void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
DCHECK(!rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(addr.IsImmediateOffset());
LSDataSize size = CalcLSPairDataSize(
static_cast<LoadStorePairOp>(op & LoadStorePairMask));
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), size));
DCHECK(IsImmLSPair(addr.offset(), size));
int offset = static_cast<int>(addr.offset());
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
}
@@ -2137,13 +2142,13 @@ Instr Assembler::ImmFP64(double imm) {
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = double_to_rawbits(imm);
// bit7: a000.0000
uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000
uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
}
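ImmFP64 shows the complementary pattern: when the intermediates derive from a 64-bit value, promote them all to uint64_t and perform a single explicit truncation at the return. A hedged sketch of the style (the field layout here is invented for illustration):

```cpp
#include <cstdint>
#include <cstring>

typedef uint32_t Instr;

Instr TopBitsField(double imm) {
  uint64_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));  // stand-in for double_to_rawbits()
  uint64_t sign = (bits >> 63) & 0x1;      // intermediates stay 64-bit...
  uint64_t frac = (bits >> 48) & 0x7f;
  return static_cast<Instr>(((sign << 7) | frac) << 13);  // ...one cast
}
```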
@@ -2188,8 +2193,8 @@ void Assembler::MoveWide(const Register& rd,
DCHECK(is_uint16(imm));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
@@ -2205,7 +2210,7 @@ void Assembler::AddSub(const Register& rd,
DCHECK(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
ImmAddSub(immediate) | dest_reg | RnSP(rn));
ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) {
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR);
@@ -2259,7 +2264,7 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize);
@@ -2362,7 +2367,8 @@ void Assembler::ConditionalCompare(const Register& rn,
if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
DCHECK(IsImmConditionalCompare(immediate));
ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
ccmpop = ConditionalCompareImmediateFixed | op |
ImmCondCmp(static_cast<unsigned>(immediate));
} else {
DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
@@ -2502,15 +2508,16 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op);
if (IsImmLSScaled(offset, size)) {
if (IsImmLSScaled(addr.offset(), size)) {
int offset = static_cast<int>(addr.offset());
// Use the scaled addressing mode.
Emit(LoadStoreUnsignedOffsetFixed | memop |
ImmLSUnsigned(offset >> size));
} else if (IsImmLSUnscaled(offset)) {
} else if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
// Use the unscaled addressing mode.
Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
} else {
@@ -2536,7 +2543,8 @@ void Assembler::LoadStore(const CPURegister& rt,
} else {
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
if (IsImmLSUnscaled(offset)) {
if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
if (addr.IsPreIndex()) {
Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
} else {
@@ -2568,6 +2576,14 @@ bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
}
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstructionSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
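The new IsImmLLiteral helper checks alignment with a shift round-trip instead of a modulo. A tiny standalone equivalent of that test (the 4-byte instruction size is an assumption stated in the code):

```cpp
#include <cstdint>

// True when offset is a multiple of the 4-byte instruction size:
// shifting the low bits out and back in reconstructs the value only
// when those bits were zero (equivalent here to offset % 4 == 0).
bool IsInstructionAligned(int64_t offset) {
  const int kInstrSizeLog2 = 2;  // assumption: 4-byte fixed-width instructions
  return ((offset >> kInstrSizeLog2) << kInstrSizeLog2) == offset;
}
```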
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
@@ -2849,7 +2865,8 @@ void Assembler::GrowBuffer() {
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
desc.reloc_size =
static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer;
@@ -3065,7 +3082,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
}
// Record the veneer pool size.
int pool_size = SizeOfCodeGeneratedSince(&size_check);
int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
if (unresolved_branches_.empty()) {
@@ -3113,7 +3130,8 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int Assembler::buffer_space() const {
return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
return static_cast<int>(reloc_info_writer.pos() -
reinterpret_cast<byte*>(pc_));
}

View File

@@ -764,7 +764,7 @@ class ConstPool {
shared_entries_count(0) {}
void RecordEntry(intptr_t data, RelocInfo::Mode mode);
int EntryCount() const {
return shared_entries_count + unique_entries_.size();
return shared_entries_count + static_cast<int>(unique_entries_.size());
}
bool IsEmpty() const {
return shared_entries_.empty() && unique_entries_.empty();
@@ -951,7 +951,7 @@ class Assembler : public AssemblerBase {
// Return the number of instructions generated from label to the
// current position.
int InstructionsGeneratedSince(const Label* label) {
uint64_t InstructionsGeneratedSince(const Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
@@ -1774,7 +1774,7 @@ class Assembler : public AssemblerBase {
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const {
Instruction* InstructionAt(ptrdiff_t offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
}
@@ -1841,7 +1841,7 @@ class Assembler : public AssemblerBase {
// Data Processing encoding.
inline static Instr SF(Register rd);
inline static Instr ImmAddSub(int64_t imm);
inline static Instr ImmAddSub(int imm);
inline static Instr ImmS(unsigned imms, unsigned reg_size);
inline static Instr ImmR(unsigned immr, unsigned reg_size);
inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
@@ -1876,10 +1876,11 @@ class Assembler : public AssemblerBase {
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
static bool IsImmLLiteral(int64_t offset);
// Move immediates encoding.
inline static Instr ImmMoveWide(uint64_t imm);
inline static Instr ShiftMoveWide(int64_t shift);
inline static Instr ImmMoveWide(int imm);
inline static Instr ShiftMoveWide(int shift);
// FP Immediates.
static Instr ImmFP32(float imm);

View File

@@ -5442,7 +5442,7 @@ static const int kCallApiFunctionSpillSpace = 4;
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
return static_cast<int>(ref0.address() - ref1.address());
}

View File

@@ -138,8 +138,10 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
auto offset_to_incremental_noncompacting =
static_cast<int32_t>(instr1->ImmPCOffset());
auto offset_to_incremental_compacting =
static_cast<int32_t>(instr2->ImmPCOffset());
switch (mode) {
case STORE_BUFFER_ONLY:

View File

@@ -1369,11 +1369,12 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
if (format[5] == 'I') {
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
<< (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
}
@@ -1383,13 +1384,13 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
case 'L': {
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
AppendToOutput("pc%+" PRId64,
instr->ImmLLiteral() << kLoadLiteralScaleLog2);
AppendToOutput("pc%+" PRId32, instr->ImmLLiteral()
<< kLoadLiteralScaleLog2);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
if (instr->ImmLS() != 0) {
AppendToOutput(", #%" PRId64, instr->ImmLS());
AppendToOutput(", #%" PRId32, instr->ImmLS());
}
return 3;
}
@@ -1397,14 +1398,14 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
if (instr->ImmLSPair() != 0) {
// format[3] is the scale value. Convert to a number.
int scale = format[3] - 0x30;
AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
}
return 4;
}
case 'U': { // ILU - Immediate Load/Store Unsigned.
if (instr->ImmLSUnsigned() != 0) {
AppendToOutput(", #%" PRIu64,
instr->ImmLSUnsigned() << instr->SizeLS());
AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned()
<< instr->SizeLS());
}
return 3;
}
@@ -1427,7 +1428,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
AppendToOutput("#%d", 64 - instr->FPScale());
return 8;
} else {
AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
return 9;
}
@@ -1538,7 +1539,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
instr->ImmDPShift());
}
return 3;

View File

@@ -230,8 +230,8 @@ void FullCodeGenerator::Generate() {
// Update the write barrier.
if (need_write_barrier) {
__ RecordWriteContextSlot(
cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
__ RecordWriteContextSlot(cp, static_cast<int>(target.offset()), x10,
x11, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, &done);
@@ -404,7 +404,8 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
// we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
// the result).
int distance =
masm_->SizeOfCodeGeneratedSince(back_edge_target) + kCodeSizeMultiplier / 2;
static_cast<int>(masm_->SizeOfCodeGeneratedSince(back_edge_target) +
kCodeSizeMultiplier / 2);
int weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
@@ -790,12 +791,8 @@ void FullCodeGenerator::SetVar(Variable* var,
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
// scratch0 contains the correct context.
__ RecordWriteContextSlot(scratch0,
location.offset(),
src,
scratch1,
kLRHasBeenSaved,
kDontSaveFPRegs);
__ RecordWriteContextSlot(scratch0, static_cast<int>(location.offset()),
src, scratch1, kLRHasBeenSaved, kDontSaveFPRegs);
}
}

View File

@@ -93,9 +93,9 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
// met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
int64_t n = BitN();
int64_t imm_s = ImmSetBits();
int64_t imm_r = ImmRotate();
int32_t n = BitN();
int32_t imm_s = ImmSetBits();
int32_t imm_r = ImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
@@ -211,7 +211,7 @@ Instruction* Instruction::ImmPCOffsetTarget() {
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int32_t offset) {
ptrdiff_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
@@ -242,7 +242,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
ptrdiff_t target_offset = DistanceTo(target);
Instr imm;
if (Instruction::IsValidPCRelOffset(target_offset)) {
imm = Assembler::ImmPCRelAddress(target_offset);
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(this,
@@ -254,9 +254,11 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
void Instruction::SetBranchImmTarget(Instruction* target) {
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(IsValidImmPCOffset(BranchType(),
DistanceTo(target) >> kInstructionSizeLog2));
int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
switch (BranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
@@ -287,9 +289,9 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
ptrdiff_t target_offset = DistanceTo(target) >> kInstructionSizeLog2;
DCHECK(is_int32(target_offset));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
int32_t target_offset =
static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@@ -302,8 +304,9 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);
DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
Instr imm = Assembler::ImmLLiteral(
static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);

View File

@@ -137,8 +137,8 @@ class Instruction {
return following(-count);
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int64_t Name() const { return Func(HighBit, LowBit); }
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
@@ -146,8 +146,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}
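ImmPCRel assembles a 21-bit signed field from two sub-fields and then sign-extends it with signed_bitextract_32. For intuition only, the sign-extension step can be pictured as the following hypothetical re-implementation (this is not V8's definition):

```cpp
#include <cstdint>

// Extract the low `width` bits of x and sign-extend them, in pure
// 32-bit arithmetic: shift the field up to the top, then arithmetic-
// shift it back down (two's complement assumed).
int32_t SignExtend(uint32_t x, int width) {
  const int shift = 32 - width;
  return static_cast<int32_t>(x << shift) >> shift;
}
```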
@@ -369,7 +369,7 @@ class Instruction {
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
@@ -409,9 +409,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(int offset) {
return is_int21(offset);
}
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};

View File

@@ -743,8 +743,9 @@ bool LCodeGen::GeneratePrologue() {
__ Str(value, target);
// Update the write barrier. This clobbers value and scratch.
if (need_write_barrier) {
__ RecordWriteContextSlot(cp, target.offset(), value, scratch,
GetLinkRegisterState(), kSaveFPRegs);
__ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
value, scratch, GetLinkRegisterState(),
kSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, &done);
@@ -5138,14 +5139,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch,
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
scratch, GetLinkRegisterState(), kSaveFPRegs,
EMIT_REMEMBERED_SET, check_needed);
}
__ Bind(&skip_assignment);
}

View File

@@ -926,8 +926,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
masm_->PushPreamble(size_);
}
int count = queued_.size();
int index = 0;
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
// PushHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
@@ -949,8 +949,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
void MacroAssembler::PushPopQueue::PopQueued() {
if (queued_.empty()) return;
int count = queued_.size();
int index = 0;
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
// PopHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
@@ -1263,7 +1263,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * kXRegSize, PreIndex);
MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@@ -4693,7 +4693,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
Ldr(scratch2, FieldMemOperand(scratch1, offset));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
@@ -5115,7 +5115,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
// 'check' in the other bits. The possible offset is limited in that we
// use BitField to pack the data, and the underlying data type is a
// uint32_t.
uint32_t delta = __ InstructionsGeneratedSince(smi_check);
uint32_t delta =
static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
__ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
} else {
DCHECK(!smi_check->is_bound());
@@ -5136,9 +5137,10 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
// 32-bit values.
DCHECK(is_uint32(payload));
if (payload != 0) {
int reg_code = RegisterBits::decode(payload);
uint32_t payload32 = static_cast<uint32_t>(payload);
int reg_code = RegisterBits::decode(payload32);
reg_ = Register::XRegFromCode(reg_code);
uint64_t smi_check_delta = DeltaBits::decode(payload);
int smi_check_delta = DeltaBits::decode(payload32);
DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
}

View File

@@ -886,8 +886,8 @@ class MacroAssembler : public Assembler {
template<typename Field>
void DecodeField(Register dst, Register src) {
static const uint64_t shift = Field::kShift;
static const uint64_t setbits = CountSetBits(Field::kMask, 32);
static const int shift = Field::kShift;
static const int setbits = CountSetBits(Field::kMask, 32);
Ubfx(dst, src, shift, setbits);
}

View File

@@ -903,10 +903,11 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
return static_cast<unsignedT>(value) >> amount;
case ASR:
return value >> amount;
case ROR:
case ROR: {
unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
return (static_cast<unsignedT>(value) >> amount) |
((value & ((1L << amount) - 1L)) <<
(sizeof(unsignedT) * 8 - amount));
((value & mask) << (sizeof(mask) * 8 - amount));
}
default:
UNIMPLEMENTED();
return 0;
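The ROR case previously built its mask with a `1L` literal, whose width need not match the operand's; the fix derives the mask from unsignedT so mask and value widths always agree. The same idea as a free-standing rotate (a sketch, not the simulator's actual template):

```cpp
#include <cstdint>

// Mask and value share one unsigned type, so the mask width always
// matches the operand width, for 32- and 64-bit instantiations alike.
template <typename UnsignedT>
UnsignedT RotateRight(UnsignedT value, unsigned amount) {
  if (amount == 0) return value;  // avoid shifting by the full width
  const unsigned bits = sizeof(UnsignedT) * 8;
  UnsignedT mask = (static_cast<UnsignedT>(1) << amount) - 1;
  return (value >> amount) | ((value & mask) << (bits - amount));
}

// e.g. RotateRight<uint32_t>(0x80000001u, 1) == 0xC0000000u
```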
@@ -1399,7 +1400,8 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
AddSubHelper(instr, op2);
} else {
int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
int32_t op2 = static_cast<int32_t>(
ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount));
AddSubHelper(instr, op2);
}
}
@@ -1410,7 +1412,7 @@ void Simulator::VisitAddSubImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
AddSubHelper<int64_t>(instr, op2);
} else {
AddSubHelper<int32_t>(instr, op2);
AddSubHelper<int32_t>(instr, static_cast<int32_t>(op2));
}
}
@@ -1457,7 +1459,7 @@ void Simulator::VisitLogicalImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
LogicalHelper<int64_t>(instr, instr->ImmLogical());
} else {
LogicalHelper<int32_t>(instr, instr->ImmLogical());
LogicalHelper<int32_t>(instr, static_cast<int32_t>(instr->ImmLogical()));
}
}
@@ -1879,7 +1881,7 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
// Get the shifted immediate.
int64_t shift = instr->ShiftMoveWide() * 16;
int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
// Compute the new value.
switch (mov_op) {
@@ -1912,25 +1914,32 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
void Simulator::VisitConditionalSelect(Instruction* instr) {
uint64_t new_val = xreg(instr->Rn());
if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
uint64_t new_val = xreg(instr->Rm());
new_val = xreg(instr->Rm());
switch (instr->Mask(ConditionalSelectMask)) {
case CSEL_w: set_wreg(instr->Rd(), new_val); break;
case CSEL_x: set_xreg(instr->Rd(), new_val); break;
case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break;
case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break;
case CSINV_w: set_wreg(instr->Rd(), ~new_val); break;
case CSINV_x: set_xreg(instr->Rd(), ~new_val); break;
case CSNEG_w: set_wreg(instr->Rd(), -new_val); break;
case CSNEG_x: set_xreg(instr->Rd(), -new_val); break;
case CSEL_w:
case CSEL_x:
break;
case CSINC_w:
case CSINC_x:
new_val++;
break;
case CSINV_w:
case CSINV_x:
new_val = ~new_val;
break;
case CSNEG_w:
case CSNEG_x:
new_val = -new_val;
break;
default: UNIMPLEMENTED();
}
}
if (instr->SixtyFourBits()) {
set_xreg(instr->Rd(), new_val);
} else {
if (instr->SixtyFourBits()) {
set_xreg(instr->Rd(), xreg(instr->Rn()));
} else {
set_wreg(instr->Rd(), wreg(instr->Rn()));
}
set_wreg(instr->Rd(), static_cast<uint32_t>(new_val));
}
}
@@ -1940,13 +1949,27 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
unsigned src = instr->Rn();
switch (instr->Mask(DataProcessing1SourceMask)) {
case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
case RBIT_w:
set_wreg(dst, ReverseBits(wreg(src)));
break;
case RBIT_x:
set_xreg(dst, ReverseBits(xreg(src)));
break;
case REV16_w:
set_wreg(dst, ReverseBytes(wreg(src), 1));
break;
case REV16_x:
set_xreg(dst, ReverseBytes(xreg(src), 1));
break;
case REV_w:
set_wreg(dst, ReverseBytes(wreg(src), 2));
break;
case REV32_x:
set_xreg(dst, ReverseBytes(xreg(src), 2));
break;
case REV_x:
set_xreg(dst, ReverseBytes(xreg(src), 3));
break;
case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
break;
case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
@@ -1964,44 +1987,6 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
}
uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
uint64_t result = 0;
for (unsigned i = 0; i < num_bits; i++) {
result = (result << 1) | (value & 1);
value >>= 1;
}
return result;
}
uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
// Split the 64-bit value into an 8-bit array, where b[0] is the least
// significant byte, and b[7] is the most significant.
uint8_t bytes[8];
uint64_t mask = 0xff00000000000000UL;
for (int i = 7; i >= 0; i--) {
bytes[i] = (value & mask) >> (i * 8);
mask >>= 8;
}
// Permutation tables for REV instructions.
// permute_table[Reverse16] is used by REV16_x, REV16_w
// permute_table[Reverse32] is used by REV32_x, REV_w
// permute_table[Reverse64] is used by REV_x
DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7} };
uint64_t result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[mode][i]];
}
return result;
}
template <typename T>
void Simulator::DataProcessing2Source(Instruction* instr) {
Shift shift_op = NO_SHIFT;
@@ -2121,7 +2106,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
if (instr->SixtyFourBits()) {
set_xreg(instr->Rd(), result);
} else {
set_wreg(instr->Rd(), result);
set_wreg(instr->Rd(), static_cast<int32_t>(result));
}
}
@@ -2138,8 +2123,9 @@ void Simulator::BitfieldHelper(Instruction* instr) {
mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
: static_cast<T>(-1);
} else {
mask = ((1L << (S + 1)) - 1);
mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
uint64_t umask = ((1L << (S + 1)) - 1);
umask = (umask >> R) | (umask << (reg_size - R));
mask = static_cast<T>(umask);
diff += reg_size;
}
@@ -2563,7 +2549,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// Bail out early for zero inputs.
if (mantissa == 0) {
return sign << sign_offset;
return static_cast<T>(sign << sign_offset);
}
// If all bits in the exponent are set, the value is infinite or NaN.
@@ -2580,9 +2566,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// FPTieEven rounding mode handles overflows using infinities.
exponent = infinite_exponent;
mantissa = 0;
return (sign << sign_offset) |
(exponent << exponent_offset) |
(mantissa << mantissa_offset);
return static_cast<T>((sign << sign_offset) |
(exponent << exponent_offset) |
(mantissa << mantissa_offset));
}
// Calculate the shift required to move the top mantissa bit to the proper
@@ -2605,7 +2591,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// non-zero result after rounding.
if (shift > (highest_significant_bit + 1)) {
// The result will always be +/-0.0.
return sign << sign_offset;
return static_cast<T>(sign << sign_offset);
}
// Properly encode the exponent for a subnormal output.
@@ -2624,9 +2610,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
T result = (sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa >> shift) << mantissa_offset);
T result =
static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
((mantissa >> shift) << mantissa_offset));
// A very large mantissa can overflow during rounding. If this happens, the
// exponent should be incremented and the mantissa set to 1.0 (encoded as
@@ -2641,9 +2627,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// We have to shift the mantissa to the left (or not at all). The input
// mantissa is exactly representable in the output mantissa, so apply no
// rounding correction.
return (sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa << -shift) << mantissa_offset);
return static_cast<T>((sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa << -shift) << mantissa_offset));
}
}
@@ -2838,7 +2824,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
uint32_t sign = raw >> 63;
uint32_t exponent = (1 << 8) - 1;
uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
uint32_t payload =
static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
payload |= (1 << 22); // Force a quiet NaN.
return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
@@ -2859,7 +2846,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
// Extract the IEEE-754 double components.
uint32_t sign = raw >> 63;
// Extract the exponent and remove the IEEE-754 encoding bias.
int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
int32_t exponent =
static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
// Extract the mantissa and add the implicit '1' bit.
uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
if (std::fpclassify(value) == FP_NORMAL) {
@@ -3210,11 +3198,11 @@ void Simulator::VisitSystem(Instruction* instr) {
case MSR: {
switch (instr->ImmSystemRegister()) {
case NZCV:
nzcv().SetRawValue(xreg(instr->Rt()));
nzcv().SetRawValue(wreg(instr->Rt()));
LogSystemRegister(NZCV);
break;
case FPCR:
fpcr().SetRawValue(xreg(instr->Rt()));
fpcr().SetRawValue(wreg(instr->Rt()));
LogSystemRegister(FPCR);
break;
default: UNIMPLEMENTED();

View File

@@ -72,12 +72,6 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
enum ReverseByteMode {
Reverse16 = 0,
Reverse32 = 1,
Reverse64 = 2
};
// The proper way to initialize a simulated system register (such as NZCV) is as
// follows:
@@ -706,9 +700,6 @@ class Simulator : public DecoderVisitor {
template <typename T>
void BitfieldHelper(Instruction* instr);
uint64_t ReverseBits(uint64_t value, unsigned num_bits);
uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
template <typename T>
T FPDefaultNaN() const;
@@ -884,10 +875,10 @@ class Simulator : public DecoderVisitor {
FUNCTION_ADDR(entry), \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->CallRegExp( \
entry, \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
static_cast<int>( \
Simulator::current(Isolate::Current()) \
->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
// The simulator has its own stack. Thus it has a different stack limit from

View File

@@ -74,7 +74,7 @@ int CountSetBits(uint64_t value, int width) {
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
return value;
return static_cast<int>(value);
}

View File

@@ -61,6 +61,49 @@ uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
template <typename T>
T ReverseBits(T value) {
DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
(sizeof(value) == 8));
T result = 0;
for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
result = (result << 1) | (value & 1);
value >>= 1;
}
return result;
}
template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
DCHECK((1U << block_bytes_log2) <= sizeof(value));
// Split the 64-bit value into an 8-bit array, where b[0] is the least
// significant byte, and b[7] is the most significant.
uint8_t bytes[8];
uint64_t mask = 0xff00000000000000;
for (int i = 7; i >= 0; i--) {
bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
mask >>= 8;
}
// Permutation tables for REV instructions.
// permute_table[0] is used by REV16_x, REV16_w
// permute_table[1] is used by REV32_x, REV_w
// permute_table[2] is used by REV_x
DCHECK((0 < block_bytes_log2) && (block_bytes_log2 < 4));
static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7}};
T result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[block_bytes_log2 - 1][i]];
}
return result;
}
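A few hand-checked values for the relocated helpers (a usage sketch; it assumes the ReverseBits/ReverseBytes templates above are in scope):

```cpp
#include <cassert>
#include <cstdint>

void ReverseHelperExamples() {
  // Bit-reverse one byte: 11010010 -> 01001011.
  assert(ReverseBits<uint8_t>(0xD2) == 0x4B);
  // block_bytes_log2 == 1: swap bytes within each 16-bit block (REV16).
  assert(ReverseBytes<uint32_t>(0x11223344, 1) == 0x22114433);
  // block_bytes_log2 == 2: reverse all four bytes (REV_w / REV32).
  assert(ReverseBytes<uint32_t>(0x11223344, 2) == 0x44332211);
}
```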
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = double_to_rawbits(num);

View File

@@ -44,6 +44,17 @@ inline unsigned CountPopulation64(uint64_t value) {
}
// Overloaded versions of CountPopulation32/64.
inline unsigned CountPopulation(uint32_t value) {
return CountPopulation32(value);
}
inline unsigned CountPopulation(uint64_t value) {
return CountPopulation64(value);
}
// CountLeadingZeros32(value) returns the number of zero bits following the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
inline unsigned CountLeadingZeros32(uint32_t value) {

View File

@@ -337,7 +337,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
i.InputRegister##width(1)); \
} else { \
int64_t imm = i.InputOperand##width(1).immediate().value(); \
int imm = \
static_cast<int>(i.InputOperand##width(1).immediate().value()); \
__ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
} \
} while (0)

View File

@@ -278,7 +278,7 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
selector->Emit(negate_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.TempImmediate(-m.right().Value()));
g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
} else {
VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
}
@@ -595,7 +595,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Emit(kArm64Ubfx, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
g.UseImmediate(mleft.right().node()),
g.TempImmediate(static_cast<int32_t>(mask_width)));
return;
}
// Other cases fall through to the normal And operation.
@@ -731,8 +732,9 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if ((mask_msb + mask_width + lsb) == 64) {
DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
Emit(kArm64Ubfx, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
g.TempImmediate(mask_width));
g.UseRegister(mleft.left().node()),
g.TempImmediate(static_cast<int32_t>(lsb)),
g.TempImmediate(static_cast<int32_t>(mask_width)));
return;
}
}
@@ -1229,8 +1231,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
frame_state_descriptor = GetFrameStateDescriptor(
node->InputAt(static_cast<int>(descriptor->InputCount())));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
@@ -1242,8 +1244,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
InitializeCallBuffer(node, &buffer, true, false);
// Push the arguments to the stack.
bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
int aligned_push_count = buffer.pushed_nodes.size();
int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
bool pushed_count_uneven = aligned_push_count & 1;
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
@@ -1254,7 +1256,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
// Move arguments to the stack.
{
int slot = buffer.pushed_nodes.size() - 1;
int slot = aligned_push_count - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
@@ -1344,8 +1346,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
} else {
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
frame_state_descriptor = GetFrameStateDescriptor(
node->InputAt(static_cast<int>(descriptor->InputCount())));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
@@ -1357,8 +1359,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, false);
// Push the arguments to the stack.
bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
int aligned_push_count = buffer.pushed_nodes.size();
int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
bool pushed_count_uneven = aligned_push_count & 1;
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
@@ -1369,7 +1371,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
}
// Move arguments to the stack.
{
int slot = buffer.pushed_nodes.size() - 1;
int slot = aligned_push_count - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];

View File

@@ -1458,7 +1458,7 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
// -------------------------------------------------------------------------
int NumRegs(RegList reglist) { return base::bits::CountPopulation32(reglist); }
int NumRegs(RegList reglist) { return base::bits::CountPopulation(reglist); }
struct JSCallerSavedCodeData {

View File

@@ -142,8 +142,8 @@ int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
Simulator::CallArgument(from),
Simulator::CallArgument::End()
};
return Simulator::current(Isolate::Current())->CallInt64(
FUNCTION_ADDR(func), args);
return static_cast<int32_t>(Simulator::current(Isolate::Current())
->CallInt64(FUNCTION_ADDR(func), args));
#else
return (*func)(from);
#endif

View File

@@ -43,7 +43,7 @@ TEST(FUZZ_decoder) {
Instruction buffer[kInstructionSize];
for (int i = 0; i < instruction_count; i++) {
uint32_t instr = mrand48();
uint32_t instr = static_cast<uint32_t>(mrand48());
buffer->SetInstructionBits(instr);
decoder.Decode(buffer);
}
@@ -64,7 +64,7 @@ TEST(FUZZ_disasm) {
decoder.AppendVisitor(&disasm);
for (int i = 0; i < instruction_count; i++) {
uint32_t instr = mrand48();
uint32_t instr = static_cast<uint32_t>(mrand48());
buffer->SetInstructionBits(instr);
decoder.Decode(buffer);
}

View File

@@ -96,13 +96,13 @@ class RegisterDump {
return dump_.sp_;
}
inline int64_t wspreg() const {
inline int32_t wspreg() const {
DCHECK(SPRegAliasesMatch());
return dump_.wsp_;
return static_cast<int32_t>(dump_.wsp_);
}
// Flags accessors.
inline uint64_t flags_nzcv() const {
inline uint32_t flags_nzcv() const {
DCHECK(IsComplete());
DCHECK((dump_.flags_ & ~Flags_mask) == 0);
return dump_.flags_ & Flags_mask;

View File

@@ -47,7 +47,7 @@ Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
int64_t value) {
switch (type) {
case kMachInt32:
return m.Int32Constant(value);
return m.Int32Constant(static_cast<int32_t>(value));
break;
case kMachInt64: