Fix LoadSpillAddress on big endian

BE (big-endian) machines use a 4-byte bias when spilling/filling
32-bit values on the stack. This is done because TF (TurboFan) always
fills 64-bit values, even if the spilled value was only 32 bits wide.
To keep this consistent between LO (Liftoff) and TF, a 4-byte bias
was added in this CL: crrev.com/c/2756712

LoadSpillAddress also needs to take this into account and add the
bias when the spilled value is 4 bytes wide.

Change-Id: Ibd2b2071ce1fb11a9c5884611ae8edd1f17cb0c9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891196
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83163}
Author: Milad Fa, 2022-09-12 11:21:42 -04:00 (committed by V8 LUCI CQ)
Parent: b568d4dcd0, commit: ac0cedf161
9 changed files with 18 additions and 9 deletions
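
For illustration only (not part of the CL): a minimal standalone C++
sketch of the slot layout that makes the bias necessary. A 32-bit
payload written through a 64-bit fill lands in the upper 4 bytes of an
8-byte slot on big-endian hosts, so reading it back as 32 bits needs
the +4 bias that LoadSpillAddress now applies.

// Minimal sketch, assuming nothing about V8 internals: a 32-bit value
// stored through a 64-bit spill ends up in the upper 4 bytes of the
// slot on a big-endian host, hence the +4 bias.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  std::uint64_t slot = 0x12345678u;  // TF-style 64-bit fill of a 32-bit value
  std::uint32_t at_base0 = 0;        // read at slot base + 0
  std::uint32_t at_base4 = 0;        // read at slot base + 4
  std::memcpy(&at_base0, reinterpret_cast<const char*>(&slot), 4);
  std::memcpy(&at_base4, reinterpret_cast<const char*>(&slot) + 4, 4);
  // Little endian: payload at offset 0. Big endian: payload at offset 4,
  // which is exactly the stack_bias applied for kI32 in the diffs below.
  assert(at_base0 == 0x12345678u || at_base4 == 0x12345678u);
  return 0;
}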

@@ -1550,7 +1550,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   pop(r0);
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   sub(dst, fp, Operand(offset));
 }

@@ -1033,7 +1033,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   Sub(dst, fp, offset);
 }

@@ -1286,7 +1286,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   lea(dst, liftoff::GetStackSlot(offset));
 }

@@ -658,7 +658,7 @@ class LiftoffAssembler : public TurboAssembler {
   void Spill(VarState* slot);
   void SpillLocals();
   void SpillAllRegisters();
-  inline void LoadSpillAddress(Register dst, int offset);
+  inline void LoadSpillAddress(Register dst, int offset, ValueKind kind);
 
   // Clear any uses of {reg} in both the cache and in {possible_uses}.
   // Any use in the stack is spilled. If any register in {possible_uses} matches

@@ -2360,7 +2360,7 @@ class LiftoffCompiler {
       __ Spill(&return_slot);
     }
     DCHECK(return_slot.is_stack());
-    __ LoadSpillAddress(param_reg, return_slot.offset());
+    __ LoadSpillAddress(param_reg, return_slot.offset(), return_slot.kind());
   }
 
   source_position_table_builder_.AddPosition(

@@ -1093,7 +1093,9 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind kind) {
+  if (kind == kI32) offset = offset + stack_bias;
   SubS64(dst, fp, Operand(offset));
 }

@@ -157,7 +157,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   GenPCRelativeJump(kScratchReg, imm32);
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   SubWord(dst, fp, offset);
 }

@@ -1552,7 +1552,9 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   pop(r0);
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind kind) {
+  if (kind == kI32) offset = offset + stack_bias;
   SubS64(dst, fp, Operand(offset));
 }

@@ -1018,7 +1018,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   leaq(dst, liftoff::GetStackSlot(offset));
 }