[ptr-compr][arm64] Preparing for using smi-corrupting decompression

This CL fixes comparison operations that previously took the full-word
value into account instead of only the lower 32 bits.

Bug: v8:9706
Change-Id: I9176ea1ece7c0551b1fa6b9df58445ba49434234
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1824474
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64006}
This commit is contained in:
Igor Sheludko 2019-09-26 17:57:59 +02:00 committed by Commit Bot
parent 318d66d95f
commit 0cf720862a
6 changed files with 104 additions and 49 deletions

View File

@ -1001,7 +1001,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
OptimizationMarker marker, OptimizationMarker marker,
Runtime::FunctionId function_id) { Runtime::FunctionId function_id) {
Label no_match; Label no_match;
__ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match); __ CompareTaggedAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne,
&no_match);
GenerateTailCallToReturnedCode(masm, function_id); GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match); __ bind(&no_match);
} }
@ -1036,9 +1037,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Optimized code slot is a Smi optimization marker. // Optimized code slot is a Smi optimization marker.
// Fall through if no optimization trigger. // Fall through if no optimization trigger.
__ CompareAndBranch(optimized_code_entry, __ CompareTaggedAndBranch(optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq, Operand(Smi::FromEnum(OptimizationMarker::kNone)),
&fallthrough); eq, &fallthrough);
// TODO(v8:8394): The logging of first execution will break if // TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of // feedback vectors are not allocated. We need to find a different way of
@ -1058,7 +1059,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Otherwise, the marker is InOptimizationQueue, so fall through hoping // Otherwise, the marker is InOptimizationQueue, so fall through hoping
// that an interrupt will eventually update the slot with optimized code. // that an interrupt will eventually update the slot with optimized code.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ Cmp( __ CmpTagged(
optimized_code_entry, optimized_code_entry,
Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
__ Assert(eq, AbortReason::kExpectedOptimizationSentinel); __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
@ -1634,7 +1635,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Set flags for determining the value of smi-tagged argc. // Set flags for determining the value of smi-tagged argc.
// lt => 1, eq => 2, gt => 3. // lt => 1, eq => 2, gt => 3.
__ Cmp(argc, Smi::FromInt(2)); __ CmpTagged(argc, Smi::FromInt(2));
__ B(gt, &three_args); __ B(gt, &three_args);
// One or two arguments. // One or two arguments.
@ -1783,7 +1784,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller. // If the code object is null, just return to the caller.
Label skip; Label skip;
__ CompareAndBranch(x0, Smi::zero(), ne, &skip); __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &skip);
__ Ret(); __ Ret();
__ Bind(&skip); __ Bind(&skip);
@ -1879,8 +1880,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined. // 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments; Label no_arguments;
__ Cmp(arg_array, null_value); __ CmpTagged(arg_array, null_value);
__ Ccmp(arg_array, undefined_value, ZFlag, ne); __ CcmpTagged(arg_array, undefined_value, ZFlag, ne);
__ B(eq, &no_arguments); __ B(eq, &no_arguments);
// 4a. Apply the receiver to the given argArray. // 4a. Apply the receiver to the given argArray.
@ -2262,7 +2263,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Bind(&loop); __ Bind(&loop);
__ Sub(len, len, 1); __ Sub(len, len, 1);
__ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex)); __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
__ Cmp(scratch, the_hole_value); __ CmpTagged(scratch, the_hole_value);
__ Csel(scratch, scratch, undefined_value, ne); __ Csel(scratch, scratch, undefined_value, ne);
__ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2)); __ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
__ Cbnz(len, &loop); __ Cbnz(len, &loop);
@ -2320,7 +2321,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(x4, MemOperand(args_fp, __ Ldr(x4, MemOperand(args_fp,
CommonFrameConstants::kContextOrFrameTypeOffset)); CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); __ CmpTagged(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor); __ B(eq, &arguments_adaptor);
{ {
__ Ldr(scratch, __ Ldr(scratch,
@ -2711,7 +2712,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target. // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{ {
Label done; Label done;
__ Cmp(x1, x3); __ CmpTagged(x1, x3);
__ B(ne, &done); __ B(ne, &done);
__ LoadTaggedPointerField( __ LoadTaggedPointerField(
x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset)); x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));

View File

@ -308,6 +308,18 @@ Operand Operand::ToExtendedRegister() const {
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
} }
Operand Operand::ToW() const {
if (IsShiftedRegister()) {
DCHECK(reg_.Is64Bits());
return Operand(reg_.W(), shift(), shift_amount());
} else if (IsExtendedRegister()) {
DCHECK(reg_.Is64Bits());
return Operand(reg_.W(), extend(), shift_amount());
}
DCHECK(IsImmediate());
return *this;
}
Immediate Operand::immediate_for_heap_object_request() const { Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber && DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) || immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) ||

View File

@ -106,6 +106,9 @@ class Operand {
// which helps in the encoding of instructions that use the stack pointer. // which helps in the encoding of instructions that use the stack pointer.
inline Operand ToExtendedRegister() const; inline Operand ToExtendedRegister() const;
// Returns new Operand adapted for using with W registers.
inline Operand ToW() const;
inline Immediate immediate() const; inline Immediate immediate() const;
inline int64_t ImmediateValue() const; inline int64_t ImmediateValue() const;
inline RelocInfo::Mode ImmediateRMode() const; inline RelocInfo::Mode ImmediateRMode() const;

View File

@ -93,6 +93,15 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
} }
} }
void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) {
if (COMPRESS_POINTERS_BOOL) {
Ccmp(rn.W(), operand.ToW(), nzcv, cond);
} else {
Ccmp(rn, operand, nzcv, cond);
}
}
void MacroAssembler::Ccmn(const Register& rn, const Operand& operand, void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond) { StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions()); DCHECK(allow_macro_instructions());
@ -157,6 +166,14 @@ void TurboAssembler::Cmp(const Register& rn, const Operand& operand) {
Subs(AppropriateZeroRegFor(rn), rn, operand); Subs(AppropriateZeroRegFor(rn), rn, operand);
} }
void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) {
if (COMPRESS_POINTERS_BOOL) {
Cmp(rn.W(), operand.ToW());
} else {
Cmp(rn, operand);
}
}
void TurboAssembler::Neg(const Register& rd, const Operand& operand) { void TurboAssembler::Neg(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions()); DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero()); DCHECK(!rd.IsZero());
@ -982,7 +999,12 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
AssertSmi(src); AssertSmi(src);
} }
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Asr(dst, src, kSmiShift); if (COMPRESS_POINTERS_BOOL) {
Asr(dst.W(), src.W(), kSmiShift);
Sxtw(dst, dst);
} else {
Asr(dst, src, kSmiShift);
}
} }
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
@ -1002,11 +1024,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
} }
} else { } else {
DCHECK(SmiValuesAre31Bits()); DCHECK(SmiValuesAre31Bits());
#ifdef V8_COMPRESS_POINTERS if (COMPRESS_POINTERS_BOOL) {
Ldrsw(dst, src); Ldrsw(dst, src);
#else } else {
Ldr(dst, src); Ldr(dst, src);
#endif }
SmiUntag(dst); SmiUntag(dst);
} }
} }
@ -1190,6 +1212,16 @@ void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
} }
} }
void MacroAssembler::CompareTaggedAndBranch(const Register& lhs,
const Operand& rhs, Condition cond,
Label* label) {
if (COMPRESS_POINTERS_BOOL) {
CompareAndBranch(lhs.W(), rhs.ToW(), cond, label);
} else {
CompareAndBranch(lhs, rhs, cond, label);
}
}
void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern, const uint64_t bit_pattern,
Label* label) { Label* label) {

View File

@ -1923,21 +1923,25 @@ void TurboAssembler::Call(ExternalReference target) {
} }
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi. // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below. // Untagging is folded into the indexing operand below.
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) if (SmiValuesAre32Bits()) {
STATIC_ASSERT(kSmiShiftSize == 0); Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift); Add(builtin_index, builtin_index,
#else IsolateData::builtin_entry_table_offset());
STATIC_ASSERT(kSmiShiftSize == 31); Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); } else {
#endif DCHECK(SmiValuesAre31Bits());
Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset()); if (COMPRESS_POINTERS_BOOL) {
Ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); Add(builtin_index, kRootRegister,
Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift));
} else {
Add(builtin_index, kRootRegister,
Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiShift));
}
Ldr(builtin_index,
MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
}
} }
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@ -2636,7 +2640,7 @@ void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
Register temp = temps.AcquireX(); Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp)); DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index); LoadRoot(temp, index);
Cmp(obj, temp); CmpTagged(obj, temp);
} }
void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index, void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
@ -2669,20 +2673,20 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
void TurboAssembler::LoadTaggedPointerField(const Register& destination, void TurboAssembler::LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand) { const MemOperand& field_operand) {
#ifdef V8_COMPRESS_POINTERS if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand); DecompressTaggedPointer(destination, field_operand);
#else } else {
Ldr(destination, field_operand); Ldr(destination, field_operand);
#endif }
} }
void TurboAssembler::LoadAnyTaggedField(const Register& destination, void TurboAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand) { const MemOperand& field_operand) {
#ifdef V8_COMPRESS_POINTERS if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand); DecompressAnyTagged(destination, field_operand);
#else } else {
Ldr(destination, field_operand); Ldr(destination, field_operand);
#endif }
} }
void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
@ -2691,13 +2695,11 @@ void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
void TurboAssembler::StoreTaggedField(const Register& value, void TurboAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand) { const MemOperand& dst_field_operand) {
#ifdef V8_COMPRESS_POINTERS if (COMPRESS_POINTERS_BOOL) {
RecordComment("[ StoreTagged"); Str(value.W(), dst_field_operand);
Str(value.W(), dst_field_operand); } else {
RecordComment("]"); Str(value, dst_field_operand);
#else }
Str(value, dst_field_operand);
#endif
} }
void TurboAssembler::DecompressTaggedSigned(const Register& destination, void TurboAssembler::DecompressTaggedSigned(const Register& destination,

View File

@ -652,6 +652,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const Operand& operand); const Operand& operand);
inline void Blr(const Register& xn); inline void Blr(const Register& xn);
inline void Cmp(const Register& rn, const Operand& operand); inline void Cmp(const Register& rn, const Operand& operand);
inline void CmpTagged(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn, inline void Subs(const Register& rd, const Register& rn,
const Operand& operand); const Operand& operand);
void Csel(const Register& rd, const Register& rn, const Operand& operand, void Csel(const Register& rd, const Register& rn, const Operand& operand,
@ -1006,6 +1007,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Conditional macros. // Conditional macros.
inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv, inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond); Condition cond);
inline void CcmpTagged(const Register& rn, const Operand& operand,
StatusFlags nzcv, Condition cond);
inline void Clz(const Register& rd, const Register& rn); inline void Clz(const Register& rd, const Register& rn);
@ -1645,6 +1648,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// condition. May corrupt the status flags. // condition. May corrupt the status flags.
inline void CompareAndBranch(const Register& lhs, const Operand& rhs, inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label); Condition cond, Label* label);
inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
Condition cond, Label* label);
// Insert one or more instructions into the instruction stream that encode // Insert one or more instructions into the instruction stream that encode
// some caller-defined data. The instructions used will be executable with no // some caller-defined data. The instructions used will be executable with no