diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index 444f2c4a94..a0bcb77e6b 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -430,12 +430,16 @@ unsigned Operand::shift_amount() const {
 
 
 Operand Operand::UntagSmi(Register smi) {
+  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+                                                         kSmiValueSize));
   ASSERT(smi.Is64Bits());
   return Operand(smi, ASR, kSmiShift);
 }
 
 
 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
+  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+                                                         kSmiValueSize));
   ASSERT(smi.Is64Bits());
   ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
   if (scale > kSmiShift) {
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index 31b61f479d..9c5e3e9045 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -1246,7 +1246,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // TODO(jbramley): Check that the stack usage here is safe.
     __ Sub(x10, jssp, x10);
     // Check if the arguments will overflow the stack.
-    __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+    __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
     __ B(gt, &enough_stack_space);
     // There is not enough stack space, so use a builtin to throw an appropriate
     // error.
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 0e08907061..e24c7bd078 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -1988,9 +1988,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   Register caller_fp = x10;
   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   // Load and untag the context.
-  STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
-  __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
-                         (kSmiShift / kBitsPerByte)));
+  __ Ldr(w11, UntagSmiMemOperand(caller_fp,
+                                 StandardFrameConstants::kContextOffset));
   __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
   __ B(ne, &runtime);
 
@@ -2838,8 +2837,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Store the smi values in the last match info.
   __ SmiTag(x10, current_offset);
   // Clearing the 32 bottom bits gives us a Smi.
-  STATIC_ASSERT(kSmiShift == 32);
-  __ And(x11, current_offset, ~kWRegMask);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Bic(x11, current_offset, kSmiShiftMask);
   __ Stp(x10,
          x11,
          MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
@@ -3478,8 +3477,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ASCII char code.
-  STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
-  __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+  __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
   __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
   __ Bind(&exit_);
@@ -3848,7 +3846,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
   // hash = character + (character << 10);
   __ LoadRoot(hash, Heap::kHashSeedRootIndex);
   // Untag smi seed and add the character.
-  __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+  __ Add(hash, character, Operand::UntagSmi(hash));
 
   // Compute hashes modulo 2^32 using a 32-bit W register.
   Register hash_w = hash.W();
diff --git a/src/arm64/debug-arm64.cc b/src/arm64/debug-arm64.cc
index 43684d5157..1578452138 100644
--- a/src/arm64/debug-arm64.cc
+++ b/src/arm64/debug-arm64.cc
@@ -171,7 +171,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       // jssp[8]: 0x00000000 (SMI tag & padding)
       // jssp[4]: reg[31:0]
       // jssp[0]: 0x00000000 (SMI tag & padding)
-      STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+      STATIC_ASSERT(kSmiTag == 0);
+      STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
     }
 
     if (object_regs != 0) {
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index afd5f72988..f027dc68a4 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -2029,11 +2029,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
       break;
     case Token::MUL: {
       Label not_minus_zero, done;
+      STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2));
+      STATIC_ASSERT(kSmiTag == 0);
       __ Smulh(x10, left, right);
      __ Cbnz(x10, &not_minus_zero);
       __ Eor(x11, left, right);
       __ Tbnz(x11, kXSignBit, &stub_call);
-      STATIC_ASSERT(kSmiTag == 0);
       __ Mov(result, x10);
       __ B(&done);
       __ Bind(&not_minus_zero);
@@ -2592,9 +2593,10 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                         &if_true, &if_false, &fall_through);
 
+  uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
+
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
-                  if_false, fall_through);
+  __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index ed6fde31af..dd24b9c65e 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -3525,7 +3525,8 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
                                               ElementsKind elements_kind,
                                               Representation representation,
                                               int base_offset) {
-  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+  STATIC_ASSERT(kSmiTag == 0);
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
 
   // Even though the HLoad/StoreKeyed instructions force the input
@@ -3536,8 +3537,7 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
     if (representation.IsInteger32()) {
       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      // Read or write only the smi payload in the case of fast smi arrays.
       return UntagSmiMemOperand(base, base_offset);
     } else {
       return MemOperand(base, base_offset);
@@ -3548,8 +3548,7 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
    ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
    if (representation.IsInteger32()) {
      ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      // Read or write only the smi payload in the case of fast smi arrays.
      __ Add(base, elements, Operand(key, SXTW, element_size_shift));
      return UntagSmiMemOperand(base, base_offset);
    } else {
@@ -3612,8 +3611,8 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
                  ToInteger32(const_operand) * kPointerSize;
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
       mem_op = UntagSmiMemOperand(elements, offset);
     } else {
       mem_op = MemOperand(elements, offset);
@@ -3683,7 +3682,8 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   if (access.representation().IsSmi() &&
       instr->hydrogen()->representation().IsInteger32()) {
     // Read int value directly from upper half of the smi.
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
     __ Load(result, UntagSmiFieldMemOperand(source, offset),
             Representation::Integer32());
   } else {
@@ -5286,8 +5286,8 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
       mem_op = UntagSmiMemOperand(store_base, offset);
     } else {
       mem_op = MemOperand(store_base, offset);
@@ -5406,7 +5406,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
     }
 #endif
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
              Representation::Integer32());
   } else {
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index 3ce4855b8a..4a93053811 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -1308,6 +1308,8 @@ void MacroAssembler::InitializeRootRegister() {
 
 
 void MacroAssembler::SmiTag(Register dst, Register src) {
+  STATIC_ASSERT(kXRegSizeInBits ==
+                static_cast<unsigned>(kSmiShift + kSmiValueSize));
   ASSERT(dst.Is64Bits() && src.Is64Bits());
   Lsl(dst, src, kSmiShift);
 }
@@ -1317,6 +1319,8 @@ void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
 
 
 void MacroAssembler::SmiUntag(Register dst, Register src) {
+  STATIC_ASSERT(kXRegSizeInBits ==
+                static_cast<unsigned>(kSmiShift + kSmiValueSize));
   ASSERT(dst.Is64Bits() && src.Is64Bits());
   if (FLAG_enable_slow_asserts) {
     AssertSmi(src);
@@ -1351,13 +1355,17 @@ void MacroAssembler::SmiUntagToFloat(FPRegister dst,
 
 
 void MacroAssembler::SmiTagAndPush(Register src) {
-  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+                (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+                (kSmiTag == 0));
   Push(src.W(), wzr);
 }
 
 
 void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
-  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+                (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+                (kSmiTag == 0));
   Push(src1.W(), wzr, src2.W(), wzr);
 }
 
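Note (not part of the patch): the assertions added above all encode the same 64-bit smi layout — a 32-bit payload held in the upper half of an X register above an all-zero tag, so kSmiShift + kSmiValueSize fills the register. The stand-alone C++ sketch below, written under those assumed constants, mirrors what Operand::UntagSmi and Operand::UntagSmiAndScale compute with a single arithmetic shift; the helper names are plain C++ stand-ins for illustration, not V8 code.

// Minimal sketch of the assumed 64-bit smi layout; not V8 source.
#include <cassert>
#include <cstdint>
#include <iostream>

namespace {

// Assumed constants mirroring V8's 64-bit smi configuration.
const int kSmiTag = 0;         // tag value in the low bits
const int kSmiShift = 32;      // payload starts above the low 32 bits
const int kSmiValueSize = 32;  // 32-bit payload

// Tagging places the value in the upper half; the low half is the zero tag.
uint64_t SmiTag(int32_t value) {
  return static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift;
}

// Untagging is one arithmetic shift right, matching Operand(smi, ASR, kSmiShift).
// (Right-shifting a negative signed value is arithmetic on the relevant targets.)
int64_t UntagSmi(uint64_t smi) {
  return static_cast<int64_t>(smi) >> kSmiShift;
}

// Untag and scale by 2^scale in one shift, matching
// Operand(smi, ASR, kSmiShift - scale) for scale <= kSmiShift.
int64_t UntagSmiAndScale(uint64_t smi, int scale) {
  assert((scale >= 0) && (scale <= kSmiShift));
  return static_cast<int64_t>(smi) >> (kSmiShift - scale);
}

}  // namespace

int main() {
  static_assert(64 == kSmiShift + kSmiValueSize, "smi fills the X register");
  static_assert(kSmiTag == 0, "untagging needs no tag stripping");

  uint64_t smi = SmiTag(-42);
  std::cout << UntagSmi(smi) << "\n";             // -42
  std::cout << UntagSmiAndScale(smi, 3) << "\n";  // -42 * 8 == -336
  return 0;
}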