[ubsan] Make ARM and ARM64 UBSan-clean
Bug: v8:3770, v8:9666
Change-Id: I7b7652887d6b60fbb80e1100834bc7c9df0544d8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1792909
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Martyn Capewell <martyn.capewell@arm.com>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63801}
parent 66f2519628 · commit 9f7ae50aa8
@@ -40,6 +40,7 @@

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/base/overflowing-math.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"

@@ -4802,15 +4803,17 @@ void Assembler::GrowBuffer() {
int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
MemMove(new_start, buffer_start_, pc_offset());
-MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
-reloc_size);
+byte* new_reloc_start = reinterpret_cast<byte*>(
+reinterpret_cast<Address>(reloc_info_writer.pos()) + rc_delta);
+MemMove(new_reloc_start, reloc_info_writer.pos(), reloc_size);

// Switch buffers.
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
-pc_ += pc_delta;
-reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
-reloc_info_writer.last_pc() + pc_delta);
+pc_ = reinterpret_cast<byte*>(reinterpret_cast<Address>(pc_) + pc_delta);
+byte* new_last_pc = reinterpret_cast<byte*>(
+reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
+reloc_info_writer.Reposition(new_reloc_start, new_last_pc);

// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need

@@ -4831,7 +4834,7 @@ void Assembler::dd(uint32_t data) {
// blocked before using dd.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
-*reinterpret_cast<uint32_t*>(pc_) = data;
+base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), data);
pc_ += sizeof(uint32_t);
}

@@ -4840,7 +4843,7 @@ void Assembler::dq(uint64_t value) {
// blocked before using dq.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
-*reinterpret_cast<uint64_t*>(pc_) = value;
+base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
pc_ += sizeof(uint64_t);
}
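Aside (illustration, not part of the patch): dd(), dq() and GrowBuffer() stop dereferencing possibly misaligned pointers and instead use base::WriteUnalignedValue or integer Address arithmetic. Helpers of this kind are conventionally built on memcpy, which has no alignment requirement and compiles down to a plain store where the hardware allows it. The names below are mine, for illustration only:

#include <cstdint>
#include <cstring>

// Minimal sketch of unaligned access helpers, assuming the address is
// carried as an integer (as V8's Address is). Not V8's implementation.
template <typename T>
void WriteUnalignedValueSketch(uintptr_t address, T value) {
  // memcpy is well-defined even when `address` is not aligned for T.
  std::memcpy(reinterpret_cast<void*>(address), &value, sizeof(T));
}

template <typename T>
T ReadUnalignedValueSketch(uintptr_t address) {
  T result;
  std::memcpy(&result, reinterpret_cast<const void*>(address), sizeof(T));
  return result;
}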
@@ -573,7 +573,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
-int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
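Aside (illustration, not part of the patch): the Ubfx() mask now uses unsigned literals because shifting a 1 into the sign bit of a signed int overflows the type, which is undefined behavior before C++20. A hedged sketch (the helper names and the width + lsb <= 31 assumption are mine):

#include <cstdint>

// 1 << 31 overflows int (UB pre-C++20); 1u << 31 is simply 0x80000000u.
uint32_t SignBit() { return 1u << 31; }

// The patched mask shape, assuming width + lsb <= 31 so every shift count
// stays strictly below the type width.
uint32_t UbfxMask(int lsb, int width) {
  return ((1u << (width + lsb)) - 1u) - ((1u << lsb) - 1u);
}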
@@ -3968,19 +3968,24 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr,
bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); }

bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) {
-bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+bool offset_is_size_multiple =
+(static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+offset);
return offset_is_size_multiple && is_uint12(offset >> size);
}

bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
-bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+bool offset_is_size_multiple =
+(static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) ==
+offset);
return offset_is_size_multiple && is_int7(offset >> size);
}

bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
-(((offset >> inst_size) << inst_size) == offset);
+(static_cast<int64_t>(static_cast<uint64_t>(offset >> inst_size)
+<< inst_size) == offset);
DCHECK_GT(offset, 0);
offset >>= kLoadLiteralScaleLog2;
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);

@@ -4179,9 +4184,9 @@ bool Assembler::IsImmLogical(uint64_t value, unsigned width, unsigned* n,
// 1110ss 4 UInt(ss)
// 11110s 2 UInt(s)
//
-// So we 'or' (-d << 1) with our computed s to form imms.
+// So we 'or' (-d * 2) with our computed s to form imms.
*n = out_n;
-*imm_s = ((-d << 1) | (s - 1)) & 0x3F;
+*imm_s = ((-d * 2) | (s - 1)) & 0x3F;
*imm_r = r;

return true;
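Aside (illustration, not part of the patch): IsImmLSScaled/IsImmLSPair/IsImmLLiteral test whether an offset is a multiple of 2^size by shifting it right and back left, but left-shifting a negative signed value is undefined, so the shift back is now routed through uint64_t. A hedged equivalent of the check (the name and the small-`size` assumption are mine):

#include <cstdint>

// Sketch: "is `offset` a multiple of 1 << size?" without a left shift of a
// negative signed value. Assumes `size` is small, as in the call sites.
bool IsSizeMultiple(int64_t offset, unsigned size) {
  int64_t rounded = static_cast<int64_t>(
      static_cast<uint64_t>(offset >> size) << size);
  return rounded == offset;
  // An equivalent formulation: (offset & ((int64_t{1} << size) - 1)) == 0.
}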
@@ -33,6 +33,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr uint8_t kLoadLiteralScaleLog2 = 2;
+constexpr uint8_t kLoadLiteralScale = 1 << kLoadLiteralScaleLog2;
constexpr int kMaxLoadLiteralRange = 1 * MB;

const int kNumberOfRegisters = 32;

@@ -71,6 +71,7 @@ static uint64_t RotateRight(uint64_t value, unsigned int rotate,
unsigned int width) {
DCHECK_LE(width, 64);
+rotate &= 63;
if (rotate == 0) return value;
return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
(value >> rotate);
}

@@ -191,16 +192,16 @@ int64_t Instruction::ImmPCOffset() {
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
-offset = ImmBranch() << kInstrSizeLog2;
+offset = ImmBranch() * kInstrSize;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
-offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
+offset = ImmUnresolvedInternalReference() * kInstrSize;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
-offset = ImmLLiteral() << kInstrSizeLog2;
+offset = ImmLLiteral() * kInstrSize;
}
return offset;
}
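Aside (illustration, not part of the patch): ImmBranch(), ImmUnresolvedInternalReference() and ImmLLiteral() return signed immediates that are negative for backward branches, so `imm << kInstrSizeLog2` was undefined; multiplying by kInstrSize computes the same byte offset without UB. Roughly (names and the fits-in-int64 assumption are mine):

#include <cstdint>

constexpr int kInstrSizeSketch = 4;  // 1 << kInstrSizeLog2

// Scaling a signed instruction-count immediate to a byte offset.
int64_t ScaleImmediate(int64_t imm) {
  // Multiplication is defined for negative imm (as long as the result fits
  // in int64_t), whereas `imm << 2` is UB for negative imm before C++20.
  return imm * kInstrSizeSketch;
}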
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_

+#include "src/base/memory.h"
#include "src/codegen/arm64/constants-arm64.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/arm64/utils-arm64.h"

@@ -82,11 +83,13 @@ enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
class Instruction {
public:
V8_INLINE Instr InstructionBits() const {
-return *reinterpret_cast<const Instr*>(this);
+// Usually this is aligned, but when de/serializing that's not guaranteed.
+return base::ReadUnalignedValue<Instr>(reinterpret_cast<Address>(this));
}

V8_INLINE void SetInstructionBits(Instr new_instr) {
-*reinterpret_cast<Instr*>(this) = new_instr;
+// Usually this is aligned, but when de/serializing that's not guaranteed.
+base::WriteUnalignedValue(reinterpret_cast<Address>(this), new_instr);
}

int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }

@@ -96,7 +99,9 @@ class Instruction {
}

int32_t SignedBits(int msb, int lsb) const {
-int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+// Usually this is aligned, but when de/serializing that's not guaranteed.
+int32_t bits =
+base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(this));
return signed_bitextract_32(msb, lsb, bits);
}

@@ -125,7 +130,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
-int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+int offset = (static_cast<uint32_t>(ImmPCRelHi()) << ImmPCRelLo_width) |
+ImmPCRelLo();
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}

@@ -404,7 +410,7 @@ class Instruction {
void SetImmLLiteral(Instruction* source);

uintptr_t LiteralAddress() {
-int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
+int offset = ImmLLiteral() * kLoadLiteralScale;
return reinterpret_cast<uintptr_t>(this) + offset;
}

@@ -118,7 +118,9 @@ int HighestSetBitPosition(uint64_t value) {
}

uint64_t LargestPowerOf2Divisor(uint64_t value) {
-return value & (-(int64_t)value);
+// Simulate two's complement (instead of casting to signed and negating) to
+// avoid undefined behavior on signed overflow.
+return value & ((~value) + 1);
}

int MaskToBit(uint64_t mask) {
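Aside (illustration, not part of the patch): `value & -(int64_t)value` negates a signed value that can be INT64_MIN, which overflows; computing the two's complement as `(~value) + 1` stays in unsigned arithmetic, where wraparound is defined. A self-contained sketch (the name is mine):

#include <cstdint>

// Lowest set bit, i.e. the largest power-of-two divisor, of an unsigned value.
uint64_t LowestSetBit(uint64_t value) {
  // (~value) + 1 is the two's-complement negation computed without ever
  // forming a signed value that could overflow; it also yields 0 for
  // value == 0.
  return value & ((~value) + 1);
}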
@@ -61,7 +61,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7}};
-T result = 0;
+typename std::make_unsigned<T>::type result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[block_bytes_log2 - 1][i]];

@@ -529,19 +529,19 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address(
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh,
-BUILTIN_FP_FP_CALL)
+BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_asin_function, base::ieee754::asin,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_asinh_function, base::ieee754::asinh,
-BUILTIN_FP_FP_CALL)
+BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan_function, base::ieee754::atan,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atanh_function, base::ieee754::atanh,
-BUILTIN_FP_FP_CALL)
+BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan2_function, base::ieee754::atan2,
BUILTIN_FP_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function, base::ieee754::cbrt,
-BUILTIN_FP_FP_CALL)
+BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,

@@ -549,7 +549,7 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh,
FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function, base::ieee754::exp,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_expm1_function, base::ieee754::expm1,
-BUILTIN_FP_FP_CALL)
+BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_log_function, base::ieee754::log,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_log1p_function, base::ieee754::log1p,

@@ -39,6 +39,7 @@

#include "src/codegen/ia32/assembler-ia32.h"

+#include "src/base/memory.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"

@@ -58,12 +59,12 @@ void RelocInfo::apply(intptr_t delta) {
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY)));
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) ||
IsOffHeapTarget(rmode_)) {
-int32_t* p = reinterpret_cast<int32_t*>(pc_);
-*p -= delta; // Relocate entry.
+base::WriteUnalignedValue(pc_,
+base::ReadUnalignedValue<int32_t>(pc_) - delta);
} else if (IsInternalReference(rmode_)) {
-// absolute code pointer inside code object moves with the code object.
-int32_t* p = reinterpret_cast<int32_t*>(pc_);
-*p += delta; // Relocate entry.
+// Absolute code pointer inside code object moves with the code object.
+base::WriteUnalignedValue(pc_,
+base::ReadUnalignedValue<int32_t>(pc_) + delta);
}
}

@@ -942,7 +942,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
uint32_t lsb = m.right().Value();
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
-uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+uint32_t value = static_cast<uint32_t>(mleft.right().Value() >> lsb)
+<< lsb;
uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if ((width != 0) && (msb + width + lsb == 32)) {

@@ -499,6 +499,7 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
Arm64OperandGenerator g(selector);
Matcher m(node);
if (m.right().HasValue() && (m.right().Value() < 0) &&
+(m.right().Value() > std::numeric_limits<int>::min()) &&
g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
selector->Emit(negate_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),

@@ -1048,7 +1049,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
-uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+uint32_t mask = static_cast<uint32_t>(mleft.right().Value() >> lsb)
+<< lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {

@@ -1091,7 +1093,8 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
-uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+uint64_t mask = static_cast<uint64_t>(mleft.right().Value() >> lsb)
+<< lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {

@@ -590,7 +590,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 't': { // 'target: target of branch instructions
DCHECK(STRING_STARTS_WITH(format, "target"));
-int off = (instr->SImmed24Value() << 2) + 8;
+int off = (static_cast<uint32_t>(instr->SImmed24Value()) << 2) + 8u;
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));

@@ -3840,8 +3840,8 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
case 'L': {
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
-AppendToOutput("pc%+" PRId32, instr->ImmLLiteral()
-<< kLoadLiteralScaleLog2);
+AppendToOutput("pc%+" PRId32,
+instr->ImmLLiteral() * kLoadLiteralScale);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.

@@ -4179,7 +4179,7 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
default:
UNREACHABLE();
}
-offset <<= kInstrSizeLog2;
+offset *= kInstrSize;
char sign = '+';
if (offset < 0) {
sign = '-';

@@ -12,6 +12,8 @@

#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/memory.h"
#include "src/base/overflowing-math.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -899,16 +901,14 @@ int Simulator::ReadW(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
-intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<intptr_t>(addr);
}

int Simulator::ReadExW(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
-intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<intptr_t>(addr);
}

void Simulator::WriteW(int32_t addr, int value) {

@@ -917,8 +917,7 @@ void Simulator::WriteW(int32_t addr, int value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
-intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue<intptr_t>(addr, value);
}

int Simulator::WriteExW(int32_t addr, int value) {

@@ -926,8 +925,7 @@ int Simulator::WriteExW(int32_t addr, int value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
-intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue<intptr_t>(addr, value);
return 0;
} else {
return 1;

@@ -939,8 +937,7 @@ uint16_t Simulator::ReadHU(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
-uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<uint16_t>(addr);
}

int16_t Simulator::ReadH(int32_t addr) {

@@ -948,16 +945,14 @@ int16_t Simulator::ReadH(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
-int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<int16_t>(addr);
}

uint16_t Simulator::ReadExHU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
-uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<uint16_t>(addr);
}

void Simulator::WriteH(int32_t addr, uint16_t value) {

@@ -966,8 +961,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
-uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue(addr, value);
}

void Simulator::WriteH(int32_t addr, int16_t value) {

@@ -976,8 +970,7 @@ void Simulator::WriteH(int32_t addr, int16_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
-int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue(addr, value);
}

int Simulator::WriteExH(int32_t addr, uint16_t value) {

@@ -985,8 +978,7 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
-uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue(addr, value);
return 0;
} else {
return 1;

@@ -996,39 +988,34 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
uint8_t Simulator::ReadBU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
-uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<uint8_t>(addr);
}

int8_t Simulator::ReadB(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
-int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<int8_t>(addr);
}

uint8_t Simulator::ReadExBU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
-uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-return *ptr;
+return base::ReadUnalignedValue<uint8_t>(addr);
}

void Simulator::WriteB(int32_t addr, uint8_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
-uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue(addr, value);
}

void Simulator::WriteB(int32_t addr, int8_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
-int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue(addr, value);
}

int Simulator::WriteExB(int32_t addr, uint8_t value) {

@@ -1036,8 +1023,7 @@ int Simulator::WriteExB(int32_t addr, uint8_t value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
-uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-*ptr = value;
+base::WriteUnalignedValue(addr, value);
return 0;
} else {
return 1;

@@ -1049,16 +1035,14 @@ int32_t* Simulator::ReadDW(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
-int32_t* ptr = reinterpret_cast<int32_t*>(addr);
-return ptr;
+return reinterpret_cast<int32_t*>(addr);
}

int32_t* Simulator::ReadExDW(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::DoubleWord);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
-int32_t* ptr = reinterpret_cast<int32_t*>(addr);
-return ptr;
+return reinterpret_cast<int32_t*>(addr);
}

void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {

@@ -1067,9 +1051,8 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
-int32_t* ptr = reinterpret_cast<int32_t*>(addr);
-*ptr++ = value1;
-*ptr = value2;
+base::WriteUnalignedValue(addr, value1);
+base::WriteUnalignedValue(addr + sizeof(value1), value2);
}

int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {

@@ -1077,9 +1060,8 @@ int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::DoubleWord) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
-intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-*ptr++ = value1;
-*ptr = value2;
+base::WriteUnalignedValue(addr, value1);
+base::WriteUnalignedValue(addr + sizeof(value1), value2);
return 0;
} else {
return 1;
@@ -1291,9 +1273,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
-result <<= (shift_amount - 1);
+result = static_cast<uint32_t>(result) << (shift_amount - 1);
*carry_out = (result < 0);
-result <<= 1;
+result = static_cast<uint32_t>(result) << 1;
}
break;
}

@@ -1316,9 +1298,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
-uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
-uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
-result = right | left;
+result = base::bits::RotateRight32(result, shift_amount);
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;

@@ -1358,9 +1338,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else if (shift_amount < 32) {
-result <<= (shift_amount - 1);
+result = static_cast<uint32_t>(result) << (shift_amount - 1);
*carry_out = (result < 0);
-result <<= 1;
+result = static_cast<uint32_t>(result) << 1;
} else if (shift_amount == 32) {
*carry_out = (result & 1) == 1;
result = 0;

@@ -1395,9 +1375,8 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
-uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
-uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
-result = right | left;
+// Avoid undefined behavior. Rotating by multiples of 32 is no-op.
+result = base::bits::RotateRight32(result, shift_amount & 31);
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;
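Aside (illustration, not part of the patch): the hand-rolled rotate used `x << (32 - shift)`, which shifts by the full type width when shift is a multiple of 32 and is undefined; the patch masks the amount and delegates to base::bits::RotateRight32. A hedged sketch of a UB-free rotate (name is mine; C++20 offers std::rotr for the same purpose):

#include <cstdint>

uint32_t RotateRight32Sketch(uint32_t value, uint32_t amount) {
  amount &= 31;  // Rotation by a multiple of 32 is a no-op.
  if (amount == 0) return value;
  // Both shift counts are now in [1, 31], so no shift equals the type width.
  return (value >> amount) | (value << (32 - amount));
}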
@@ -1580,6 +1559,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1,
void* arg2);

+// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// function to {SimulatorRuntimeCall} is undefined behavior; but since
+// the target function can indeed be any function that's exposed via
+// the "fast C call" mechanism, we can't reconstruct its signature here.
+int64_t UnsafeGenericFunctionCall(intptr_t function, int32_t arg0, int32_t arg1,
+int32_t arg2, int32_t arg3, int32_t arg4,
+int32_t arg5, int32_t arg6, int32_t arg7,
+int32_t arg8, int32_t arg9) {
+SimulatorRuntimeCall target =
+reinterpret_cast<SimulatorRuntimeCall>(function);
+return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+}
+void UnsafeDirectApiCall(intptr_t function, int32_t arg0) {
+SimulatorRuntimeDirectApiCall target =
+reinterpret_cast<SimulatorRuntimeDirectApiCall>(function);
+target(arg0);
+}
+void UnsafeProfilingApiCall(intptr_t function, int32_t arg0, int32_t arg1) {
+SimulatorRuntimeProfilingApiCall target =
+reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function);
+target(arg0, Redirection::ReverseRedirection(arg1));
+}
+void UnsafeDirectGetterCall(intptr_t function, int32_t arg0, int32_t arg1) {
+SimulatorRuntimeDirectGetterCall target =
+reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function);
+target(arg0, arg1);
+}

// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
void Simulator::SoftwareInterrupt(Instruction* instr) {

@@ -1710,9 +1717,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
-SimulatorRuntimeDirectApiCall target =
-reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
-target(arg0);
+UnsafeDirectApiCall(external, arg0);
} else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",

@@ -1723,9 +1728,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
-SimulatorRuntimeProfilingApiCall target =
-reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
-target(arg0, Redirection::ReverseRedirection(arg1));
+UnsafeProfilingApiCall(external, arg0, arg1);
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",

@@ -1736,9 +1739,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
-SimulatorRuntimeDirectGetterCall target =
-reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
-target(arg0, arg1);
+UnsafeDirectGetterCall(external, arg0, arg1);
} else if (redirection->type() ==
ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {

@@ -1757,14 +1758,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// builtin call.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
-SimulatorRuntimeCall target =
-reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
-reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
-arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+reinterpret_cast<void*>(external), arg0, arg1, arg2, arg3, arg4,
+arg5, arg6, arg7, arg8, arg9);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}

@@ -1772,7 +1771,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
int64_t result =
-target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+UnsafeGenericFunctionCall(external, arg0, arg1, arg2, arg3, arg4,
+arg5, arg6, arg7, arg8, arg9);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -1938,7 +1938,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Rn field to encode it.
// Format(instr, "mul'cond's 'rn, 'rm, 'rs");
int rd = rn; // Remap the rn field to the Rd register.
-int32_t alu_out = rm_val * rs_val;
+int32_t alu_out = base::MulWithWraparound(rm_val, rs_val);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);

@@ -1952,13 +1952,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Rn field to encode the Rd register and the Rd field to encode
// the Rn register.
// Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
-int32_t mul_out = rm_val * rs_val;
-int32_t result = acc_value + mul_out;
+int32_t mul_out = base::MulWithWraparound(rm_val, rs_val);
+int32_t result = base::AddWithWraparound(acc_value, mul_out);
set_register(rn, result);
} else {
// Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
-int32_t mul_out = rm_val * rs_val;
-int32_t result = acc_value - mul_out;
+int32_t mul_out = base::MulWithWraparound(rm_val, rs_val);
+int32_t result = base::SubWithWraparound(acc_value, mul_out);
set_register(rn, result);
}
}
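Aside (illustration, not part of the patch): the simulated ARM instructions need ordinary two's-complement wraparound, but signed overflow in C++ is undefined, hence the switch to base::MulWithWraparound / AddWithWraparound / SubWithWraparound here and in the addressing-mode and ALU cases below. Such helpers are conventionally written by detouring through the unsigned type; a sketch with my own names:

#include <cstdint>

int32_t MulWithWraparoundSketch(int32_t a, int32_t b) {
  // Unsigned arithmetic wraps mod 2^32 by definition; converting the result
  // back to int32_t is implementation-defined before C++20 (and defined as
  // two's complement from C++20 on), but never undefined.
  return static_cast<int32_t>(static_cast<uint32_t>(a) *
                              static_cast<uint32_t>(b));
}

int32_t AddWithWraparoundSketch(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}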
@@ -2096,7 +2096,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
DCHECK(!instr->HasW());
addr = rn_val;
-rn_val -= rm_val;
+rn_val = base::SubWithWraparound(rn_val, rm_val);
set_register(rn, rn_val);
break;
}

@@ -2104,13 +2104,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
DCHECK(!instr->HasW());
addr = rn_val;
-rn_val += rm_val;
+rn_val = base::AddWithWraparound(rn_val, rm_val);
set_register(rn, rn_val);
break;
}
case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
-rn_val -= rm_val;
+rn_val = base::SubWithWraparound(rn_val, rm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);

@@ -2119,7 +2119,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
-rn_val += rm_val;
+rn_val = base::AddWithWraparound(rn_val, rm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);

@@ -2139,7 +2139,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
DCHECK(!instr->HasW());
addr = rn_val;
-rn_val -= imm_val;
+rn_val = base::SubWithWraparound(rn_val, imm_val);
set_register(rn, rn_val);
break;
}

@@ -2147,13 +2147,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
DCHECK(!instr->HasW());
addr = rn_val;
-rn_val += imm_val;
+rn_val = base::AddWithWraparound(rn_val, imm_val);
set_register(rn, rn_val);
break;
}
case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
-rn_val -= imm_val;
+rn_val = base::SubWithWraparound(rn_val, imm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);

@@ -2162,7 +2162,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
-rn_val += imm_val;
+rn_val = base::AddWithWraparound(rn_val, imm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);

@@ -2328,7 +2328,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case SUB: {
// Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "sub'cond's 'rd, 'rn, 'imm");
-alu_out = rn_val - shifter_operand;
+alu_out = base::SubWithWraparound(rn_val, shifter_operand);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);

@@ -2341,7 +2341,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case RSB: {
// Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
-alu_out = shifter_operand - rn_val;
+alu_out = base::SubWithWraparound(shifter_operand, rn_val);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);

@@ -2354,7 +2354,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case ADD: {
// Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "add'cond's 'rd, 'rn, 'imm");
-alu_out = rn_val + shifter_operand;
+alu_out = base::AddWithWraparound(rn_val, shifter_operand);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);

@@ -2367,7 +2367,8 @@ void Simulator::DecodeType01(Instruction* instr) {
case ADC: {
// Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "adc'cond's 'rd, 'rn, 'imm");
-alu_out = rn_val + shifter_operand + GetCarry();
+alu_out = base::AddWithWraparound(
+base::AddWithWraparound(rn_val, shifter_operand), GetCarry());
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);

@@ -2380,7 +2381,9 @@ void Simulator::DecodeType01(Instruction* instr) {
case SBC: {
// Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
-alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1);
+alu_out = base::SubWithWraparound(
+base::SubWithWraparound(rn_val, shifter_operand),
+(GetCarry() ? 0 : 1));
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);

@@ -2430,7 +2433,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (instr->HasS()) {
// Format(instr, "cmp'cond 'rn, 'shift_rm");
// Format(instr, "cmp'cond 'rn, 'imm");
-alu_out = rn_val - shifter_operand;
+alu_out = base::SubWithWraparound(rn_val, shifter_operand);
SetNZFlags(alu_out);
SetCFlag(!BorrowFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));

@@ -2447,7 +2450,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (instr->HasS()) {
// Format(instr, "cmn'cond 'rn, 'shift_rm");
// Format(instr, "cmn'cond 'rn, 'imm");
-alu_out = rn_val + shifter_operand;
+alu_out = base::AddWithWraparound(rn_val, shifter_operand);
SetNZFlags(alu_out);
SetCFlag(CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));

@@ -2937,7 +2940,7 @@ void Simulator::DecodeType3(Instruction* instr) {
} else {
// sbfx - signed bitfield extract.
int32_t rm_val = get_register(instr->RmValue());
-int32_t extr_val = rm_val << (31 - msbit);
+int32_t extr_val = static_cast<uint32_t>(rm_val) << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdValue(), extr_val);
}

@@ -2969,7 +2972,7 @@ void Simulator::DecodeType3(Instruction* instr) {
return;
} else {
// Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
-addr = rn_val + shifter_operand;
+addr = base::AddWithWraparound(rn_val, shifter_operand);
if (instr->HasW()) {
set_register(rn, addr);
}

@@ -3010,7 +3013,8 @@ void Simulator::DecodeType4(Instruction* instr) {

void Simulator::DecodeType5(Instruction* instr) {
// Format(instr, "b'l'cond 'target");
-int off = (instr->SImmed24Value() << 2);
+int off =
+static_cast<int>(static_cast<uint32_t>(instr->SImmed24Value()) << 2);
intptr_t pc_address = get_pc();
if (instr->HasLink()) {
set_register(lr, pc_address + kInstrSize);

@@ -3259,14 +3263,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn).get_scalar();
double dm_value = get_double_from_d_register(vm).get_scalar();
-double dd_value = dn_value / dm_value;
+double dd_value = base::Divide(dn_value, dm_value);
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
-float sd_value = sn_value / sm_value;
+float sd_value = base::Divide(sn_value, sm_value);
div_zero_vfp_flag_ = (sm_value == 0);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3594,10 +3598,22 @@ int VFPConversionSaturate(double val, bool unsigned_res) {

int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
VFPRoundingMode mode) {
-// TODO(jkummerow): These casts are undefined behavior if the integral
-// part of {val} does not fit into the destination type.
-int32_t result =
-unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
+int32_t result;
+if (unsigned_integer) {
+// The FastD2UI helper does not have the rounding behavior we want here
+// (it doesn't guarantee any particular rounding, and it doesn't check
+// for or handle overflow), so do the conversion by hand.
+using limits = std::numeric_limits<uint32_t>;
+if (val > limits::max()) {
+result = limits::max();
+} else if (!(val >= 0)) { // Negation to catch NaNs.
+result = 0;
+} else {
+result = static_cast<uint32_t>(val);
+}
+} else {
+result = FastD2IChecked(val);
+}

inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);

@@ -3617,7 +3633,9 @@ int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
result += val_sign;
} else if (abs_diff == 0.5) {
// Round to even if exactly halfway.
-result = ((result % 2) == 0) ? result : result + val_sign;
+result = ((result % 2) == 0)
+? result
+: base::AddWithWraparound(result, val_sign);
}
break;
}
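Aside (illustration, not part of the patch): casting a double straight to uint32_t/int32_t is undefined when the truncated value is out of range (or NaN), which is why the unsigned path above clamps by hand. A compact sketch of the same idea, under the same range assumptions (the name is mine):

#include <cstdint>
#include <limits>

// double -> uint32_t with explicit range handling instead of relying on an
// out-of-range cast.
uint32_t DoubleToUint32Saturating(double val) {
  if (val > static_cast<double>(std::numeric_limits<uint32_t>::max())) {
    return std::numeric_limits<uint32_t>::max();
  }
  if (!(val >= 0)) {  // Also catches NaN.
    return 0;
  }
  return static_cast<uint32_t>(val);  // Now guaranteed to be in range.
}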
@@ -3873,7 +3891,11 @@ void Neg(Simulator* simulator, int Vd, int Vm) {
T src[kElems];
simulator->get_neon_register<T, SIZE>(Vm, src);
for (int i = 0; i < kElems; i++) {
-src[i] = -src[i];
+if (src[i] != std::numeric_limits<T>::min()) {
+src[i] = -src[i];
+} else {
+// The respective minimum (negative) value maps to itself.
+}
}
simulator->set_neon_register<T, SIZE>(Vd, src);
}

@@ -3998,6 +4020,17 @@ void Sub(Simulator* simulator, int Vd, int Vm, int Vn) {
simulator->set_neon_register<T, SIZE>(Vd, src1);
}

+namespace {
+uint32_t Multiply(uint32_t a, uint32_t b) { return a * b; }
+uint8_t Multiply(uint8_t a, uint8_t b) { return a * b; }
+// 16-bit integers are special due to C++'s implicit conversion rules.
+// See https://bugs.llvm.org/show_bug.cgi?id=25580.
+uint16_t Multiply(uint16_t a, uint16_t b) {
+uint32_t result = static_cast<uint32_t>(a) * static_cast<uint32_t>(b);
+return static_cast<uint16_t>(result);
+}
+} // namespace

template <typename T, int SIZE>
void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
static const int kElems = SIZE / sizeof(T);

@@ -4005,7 +4038,7 @@ void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
simulator->get_neon_register<T, SIZE>(Vn, src1);
simulator->get_neon_register<T, SIZE>(Vm, src2);
for (int i = 0; i < kElems; i++) {
-src1[i] *= src2[i];
+src1[i] = Multiply(src1[i], src2[i]);
}
simulator->set_neon_register<T, SIZE>(Vd, src1);
}

@@ -4090,7 +4123,8 @@ void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) {
if (shift_value >= size) {
src[i] = 0;
} else {
-src[i] <<= shift_value;
+using unsignedT = typename std::make_unsigned<T>::type;
+src[i] = static_cast<unsignedT>(src[i]) << shift_value;
}
} else {
// If the shift value is greater/equal than size, always end up with -1.

@@ -5721,7 +5755,7 @@ void Simulator::Execute() {
// should be stopping at a particular executed instruction.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
-icount_++;
+icount_ = base::AddWithWraparound(icount_, 1);
InstructionDecode(instr);
program_counter = get_pc();
}

@@ -5730,7 +5764,7 @@ void Simulator::Execute() {
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
-icount_++;
+icount_ = base::AddWithWraparound(icount_, 1);
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
ArmDebugger dbg(this);
dbg.Debug();

@@ -12,6 +12,7 @@
#include <type_traits>

#include "src/base/lazy-instance.h"
+#include "src/base/overflowing-math.h"
#include "src/codegen/arm64/decoder-arm64-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"

@@ -414,6 +415,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
void* arg2);

+// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// function to {SimulatorRuntimeCall} is undefined behavior; but since
+// the target function can indeed be any function that's exposed via
+// the "fast C call" mechanism, we can't reconstruct its signature here.
+ObjectPair UnsafeGenericFunctionCall(int64_t function, int64_t arg0,
+int64_t arg1, int64_t arg2, int64_t arg3,
+int64_t arg4, int64_t arg5, int64_t arg6,
+int64_t arg7, int64_t arg8, int64_t arg9) {
+SimulatorRuntimeCall target =
+reinterpret_cast<SimulatorRuntimeCall>(function);
+return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+}
+void UnsafeDirectApiCall(int64_t function, int64_t arg0) {
+SimulatorRuntimeDirectApiCall target =
+reinterpret_cast<SimulatorRuntimeDirectApiCall>(function);
+target(arg0);
+}
+void UnsafeProfilingApiCall(int64_t function, int64_t arg0, void* arg1) {
+SimulatorRuntimeProfilingApiCall target =
+reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function);
+target(arg0, arg1);
+}
+void UnsafeDirectGetterCall(int64_t function, int64_t arg0, int64_t arg1) {
+SimulatorRuntimeDirectGetterCall target =
+reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function);
+target(arg0, arg1);
+}

void Simulator::DoRuntimeCall(Instruction* instr) {
Redirection* redirection = Redirection::FromInstruction(instr);

@@ -515,10 +544,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
", "
"0x%016" PRIx64 ", 0x%016" PRIx64,
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
-SimulatorRuntimeCall target =
-reinterpret_cast<SimulatorRuntimeCall>(external);
-ObjectPair result =
-target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ObjectPair result = UnsafeGenericFunctionCall(
+external, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x),
reinterpret_cast<void*>(result.y));
#ifdef DEBUG

@@ -532,10 +559,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::DIRECT_API_CALL: {
// void f(v8::FunctionCallbackInfo&)
TraceSim("Type: DIRECT_API_CALL\n");
-SimulatorRuntimeDirectApiCall target =
-reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
-target(xreg(0));
+UnsafeDirectApiCall(external, xreg(0));
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();

@@ -606,11 +631,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::DIRECT_GETTER_CALL: {
// void f(Local<String> property, PropertyCallbackInfo& info)
TraceSim("Type: DIRECT_GETTER_CALL\n");
-SimulatorRuntimeDirectGetterCall target =
-reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n", xreg(0),
xreg(1));
-target(xreg(0), xreg(1));
+UnsafeDirectGetterCall(external, xreg(0), xreg(1));
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();

@@ -621,11 +644,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::PROFILING_API_CALL: {
// void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
TraceSim("Type: PROFILING_API_CALL\n");
-SimulatorRuntimeProfilingApiCall target =
-reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
void* arg1 = Redirection::ReverseRedirection(xreg(1));
TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
-target(xreg(0), arg1);
+UnsafeProfilingApiCall(external, xreg(0), arg1);
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -849,10 +870,12 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
if (amount == 0) {
return value;
}
+// Larger shift {amount}s would be undefined behavior in C++.
+DCHECK(amount < sizeof(value) * kBitsPerByte);

switch (shift_type) {
case LSL:
-return value << amount;
+return static_cast<unsignedT>(value) << amount;
case LSR:
return static_cast<unsignedT>(value) >> amount;
case ASR:

@@ -873,6 +896,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8;
const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8;
+using unsignedT = typename std::make_unsigned<T>::type;

switch (extend_type) {
case UXTB:

@@ -885,13 +909,19 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
value &= kWordMask;
break;
case SXTB:
-value = (value << kSignExtendBShift) >> kSignExtendBShift;
+value =
+static_cast<T>(static_cast<unsignedT>(value) << kSignExtendBShift) >>
+kSignExtendBShift;
break;
case SXTH:
-value = (value << kSignExtendHShift) >> kSignExtendHShift;
+value =
+static_cast<T>(static_cast<unsignedT>(value) << kSignExtendHShift) >>
+kSignExtendHShift;
break;
case SXTW:
-value = (value << kSignExtendWShift) >> kSignExtendWShift;
+value =
+static_cast<T>(static_cast<unsignedT>(value) << kSignExtendWShift) >>
+kSignExtendWShift;
break;
case UXTX:
case SXTX:

@@ -899,7 +929,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
default:
UNREACHABLE();
}
-return value << left_shift;
+return static_cast<T>(static_cast<unsignedT>(value) << left_shift);
}

template <typename T>
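Aside (illustration, not part of the patch): ExtendValue() sign-extends by shifting the field up to the top of the register and arithmetically shifting it back down; the upward shift must happen in the unsigned type so that it cannot overflow the signed one. A single-width sketch (name and width choice are mine):

#include <cstdint>

// Sign-extend the low 8 bits of a 64-bit value.
int64_t SignExtendByte(int64_t value) {
  const unsigned shift = 56;  // (sizeof(int64_t) - 1) * 8
  // Left shift in uint64_t (well-defined), convert back (implementation-
  // defined but not UB before C++20), then arithmetic right shift.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << shift) >> shift;
}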
@@ -2283,7 +2313,9 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
break;
case CSNEG_w:
case CSNEG_x:
-new_val = (uint64_t)(-(int64_t)new_val);
+// Simulate two's complement (instead of casting to signed and negating)
+// to avoid undefined behavior on signed overflow.
+new_val = (~new_val) + 1;
break;
default:
UNIMPLEMENTED();

@@ -2446,23 +2478,27 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
switch (instr->Mask(DataProcessing3SourceMask)) {
case MADD_w:
case MADD_x:
-result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+result = base::AddWithWraparound(
+xreg(instr->Ra()),
+base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
break;
case MSUB_w:
case MSUB_x:
-result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+result = base::SubWithWraparound(
+xreg(instr->Ra()),
+base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
break;
case SMADDL_x:
-result = xreg(instr->Ra()) + (rn_s32 * rm_s32);
+result = base::AddWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
break;
case SMSUBL_x:
-result = xreg(instr->Ra()) - (rn_s32 * rm_s32);
+result = base::SubWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
break;
case UMADDL_x:
-result = xreg(instr->Ra()) + (rn_u32 * rm_u32);
+result = static_cast<uint64_t>(xreg(instr->Ra())) + (rn_u32 * rm_u32);
break;
case UMSUBL_x:
-result = xreg(instr->Ra()) - (rn_u32 * rm_u32);
+result = static_cast<uint64_t>(xreg(instr->Ra())) - (rn_u32 * rm_u32);
break;
case SMULH_x:
DCHECK_EQ(instr->Ra(), kZeroRegCode);

@@ -2488,10 +2524,10 @@ void Simulator::BitfieldHelper(Instruction* instr) {
T diff = S - R;
T mask;
if (diff >= 0) {
-mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
+mask = diff < reg_size - 1 ? (static_cast<unsignedT>(1) << (diff + 1)) - 1
: static_cast<T>(-1);
} else {
-uint64_t umask = ((1LL << (S + 1)) - 1);
+uint64_t umask = ((1ULL << (S + 1)) - 1);
umask = (umask >> R) | (umask << (reg_size - R));
mask = static_cast<T>(umask);
diff += reg_size;

@@ -2522,11 +2558,15 @@ void Simulator::BitfieldHelper(Instruction* instr) {
T dst = inzero ? 0 : reg<T>(instr->Rd());
T src = reg<T>(instr->Rn());
// Rotate source bitfield into place.
-T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R));
+T result = R == 0 ? src
+: (static_cast<unsignedT>(src) >> R) |
+(static_cast<unsignedT>(src) << (reg_size - R));
// Determine the sign extension.
-T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1;
-T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0)
-<< (diff + 1);
+T topbits_preshift = (static_cast<unsignedT>(1) << (reg_size - diff - 1)) - 1;
+T signbits =
+diff >= reg_size - 1
+? 0
+: ((extend && ((src >> S) & 1) ? topbits_preshift : 0) << (diff + 1));

// Merge sign extension, dest/zero and bitfield.
result = signbits | (result & mask) | (dst & ~mask);

@@ -145,7 +145,12 @@ class StackFrame {
intptr_t type = marker >> kSmiTagSize;
// TODO(petermarshall): There is a bug in the arm simulators that causes
// invalid frame markers.
-#if !(defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM))
+#if defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)
+if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) {
+// Appease UBSan.
+return Type::NUMBER_OF_TYPES;
+}
+#else
DCHECK_LT(static_cast<uintptr_t>(type), Type::NUMBER_OF_TYPES);
#endif
return static_cast<Type>(type);

@@ -170,8 +170,11 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
}
case CACHED_MSW: {
Register to_advance = GetCachedRegister(reg);
-__ Add(to_advance, to_advance,
-static_cast<int64_t>(by) << kWRegSizeInBits);
+// Sign-extend to int64, shift as uint64, cast back to int64.
+__ Add(
+to_advance, to_advance,
+static_cast<int64_t>(static_cast<uint64_t>(static_cast<int64_t>(by))
+<< kWRegSizeInBits));
break;
}
default:

@@ -760,13 +760,8 @@ inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

-inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
-return (x << (31 - msb)) >> (lsb + 31 - msb);
-}
-
-inline int signed_bitextract_64(int msb, int lsb, int x) {
-// TODO(jbramley): This is broken for big bitfields.
-return (x << (63 - msb)) >> (lsb + 63 - msb);
+inline int32_t signed_bitextract_32(int msb, int lsb, uint32_t x) {
+return static_cast<int32_t>(x << (31 - msb)) >> (lsb + 31 - msb);
}

// Check number width.

@@ -247,6 +247,10 @@ int32_t int64_mod_wrapper(Address data) {
if (divisor == 0) {
return 0;
}
+if (divisor == -1 && dividend == std::numeric_limits<int64_t>::min()) {
+WriteUnalignedValue<int64_t>(data, 0);
+return 1;
+}
WriteUnalignedValue<int64_t>(data, dividend % divisor);
return 1;
}

@@ -37,6 +37,11 @@ enum TestAlignment {
#define A_GIG (1024ULL * 1024ULL * 1024ULL)

+namespace {
+byte* ComputeOffset(void* real_address, int32_t offset) {
+return reinterpret_cast<byte*>(reinterpret_cast<Address>(real_address) -
+offset);
+}

void RunLoadInt32(const TestAlignment t) {
RawMachineAssemblerTester<int32_t> m;

@@ -65,7 +70,7 @@ void RunLoadInt32Offset(TestAlignment t) {
for (size_t i = 0; i < arraysize(offsets); i++) {
RawMachineAssemblerTester<int32_t> m;
int32_t offset = offsets[i];
-byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
+byte* pointer = ComputeOffset(&p1, offset);

// generate load [#base + #index]
if (t == TestAlignment::kAligned) {

@@ -93,8 +98,8 @@ void RunLoadStoreFloat32Offset(TestAlignment t) {
base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = i;
-byte* from = reinterpret_cast<byte*>(&p1) - offset;
-byte* to = reinterpret_cast<byte*>(&p2) - offset;
+byte* from = ComputeOffset(&p1, offset);
+byte* to = ComputeOffset(&p2, offset);
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),

@@ -131,8 +136,8 @@ void RunLoadStoreFloat64Offset(TestAlignment t) {
base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(i, 3));
RawMachineAssemblerTester<int32_t> m;
int32_t offset = i;
-byte* from = reinterpret_cast<byte*>(&p1) - offset;
-byte* to = reinterpret_cast<byte*>(&p2) - offset;
+byte* from = ComputeOffset(&p1, offset);
+byte* to = ComputeOffset(&p2, offset);
// generate load [#base + #index]
if (t == TestAlignment::kAligned) {
Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),

@@ -259,7 +264,7 @@ void RunLoadImmIndex(MachineType type, TestAlignment t) {
for (int offset = -1; offset <= 200000; offset *= -5) {
for (int i = 0; i < kNumElems; i++) {
BufferedRawMachineAssemblerTester<CType> m;
-void* base_pointer = &buffer[0] - offset;
+void* base_pointer = ComputeOffset(&buffer[0], offset * sizeof(CType));
#ifdef V8_COMPRESS_POINTERS
if (type.IsTagged()) {
// When pointer compression is enabled then we need to access only
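Aside (illustration, not part of the patch): the ComputeOffset() test helper avoids forming `reinterpret_cast<byte*>(&p1) - offset`, a pointer far outside the object, which is undefined pointer arithmetic even if the offset is added back before any access. Doing the math on an integer address sidesteps that (assuming the eventual access lands back inside the object). A sketch with my own names:

#include <cstdint>

// Compute a displaced address without ever materializing an out-of-bounds
// pointer: subtract on an integer, cast back only at the end.
uint8_t* OffsetPointer(void* real_address, int32_t offset) {
  return reinterpret_cast<uint8_t*>(
      reinterpret_cast<uintptr_t>(real_address) - offset);
}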
@@ -9,11 +9,9 @@ function Alloc(size) {
}

function RunSomeAllocs(total, retained, size) {
-print(`-------iterations = ${total}, retained = $ { retained } -------`);
+print(`-------iterations = ${total}, retained = ${retained} -------`);
var array = new Array(retained);
for (var i = 0; i < total; i++) {
-if ((i % 25) == 0)
-print(`iteration $ { i }`);
let pair = Alloc(size);
// For some iterations, retain the memory, view, or both.
switch (i % 3) {

@@ -31,7 +31,6 @@ function foo() {
for (var j = 1; j < 100; j++) {
if (answer == i) answer = 0;
// Positive case.
-print(j + " % " + i + " = " + answer);
m = j % i;
assertEquals(answer, m, j + " % " + i);
m = j % (-i);

@@ -93,7 +93,6 @@ function fuzz() {
fuzz_index = 0;
seed = 49734321;
for (var i = 0; i < 1000; i++) {
-print(i);
var len = rand() & 0x1f;
var ranges = new Array(len);
var last = rand();

@@ -4131,7 +4131,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
TRACED_FORRANGE(int32_t, shift, -32, 63) {
int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
-uint32_t msk = (1 << width) - 1;
+uint32_t msk = (1u << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(msk)));

@@ -4147,7 +4147,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
TRACED_FORRANGE(int32_t, shift, -32, 63) {
int32_t lsb = shift & 0x1F;
TRACED_FORRANGE(int32_t, width, 1, 31) {
-uint32_t msk = (1 << width) - 1;
+uint32_t msk = (1u << width) - 1;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32And(m.Int32Constant(msk),

@@ -4394,7 +4394,7 @@ TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
-m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+m.Word32Shl(m.Word32And(p0, m.Int32Constant((1u << (31 - shift)) - 1)),
m.Int32Constant(shift + 1));
m.Return(r);
Stream s = m.Build();
@@ -9,3 +9,9 @@ fun:*v8*internal*InvokeAccessorGetterCallback*
# Bug 8735: WeakCallbackInfo<void> vs. WeakCallbackInfo<T>.
fun:*v8*internal*GlobalHandles*PendingPhantomCallback*Invoke*
fun:*v8*internal*GlobalHandles*Node*PostGarbageCollectionProcessing*
+
+# Simulators casting C++ functions to a generic signature.
+fun:*v8*internal*UnsafeDirectApiCall*
+fun:*v8*internal*UnsafeDirectGetterCall*
+fun:*v8*internal*UnsafeGenericFunctionCall*
+fun:*v8*internal*UnsafeProfilingApiCall*