1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-25 04:02:41 +01:00

[AArch64LoadStoreOpt] Handle offsets correctly for post-indexed paired loads.

Trunk would try to create something like "stp x9, x8, [x0], #512", which isn't actually a valid instruction.

Differential revision: https://reviews.llvm.org/D23368

llvm-svn: 278559
This commit is contained in:
Eli Friedman 2016-08-12 20:28:02 +00:00
parent b05ac549be
commit 373e6bb017
2 changed files with 107 additions and 6 deletions

View File

@@ -1419,9 +1419,6 @@ bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
default:
break;
case AArch64::SUBXri:
// Negate the offset for a SUB instruction.
Offset *= -1;
// FALLTHROUGH
case AArch64::ADDXri:
// Make sure it's a vanilla immediate operand, not a relocation or
// anything else we can't handle.
@@ -1439,6 +1436,9 @@ bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
bool IsPairedInsn = isPairedLdSt(MemMI);
int UpdateOffset = MI.getOperand(2).getImm();
if (MI.getOpcode() == AArch64::SUBXri)
UpdateOffset = -UpdateOffset;
// For non-paired load/store instructions, the immediate must fit in a
// signed 9-bit integer.
if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
@@ -1453,13 +1453,13 @@ bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
break;
int ScaledOffset = UpdateOffset / Scale;
if (ScaledOffset > 64 || ScaledOffset < -64)
if (ScaledOffset > 63 || ScaledOffset < -64)
break;
}
// If we have a non-zero Offset, we check that it matches the amount
// we're adding to the register.
if (!Offset || Offset == MI.getOperand(2).getImm())
if (!Offset || Offset == UpdateOffset)
return true;
break;
}

View File

@@ -1,4 +1,4 @@
; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -o - %s | FileCheck %s
; This file contains tests for the AArch64 load/store optimizer.
@@ -1232,3 +1232,104 @@ for.body:
end:
ret void
}
define void @post-indexed-sub-doubleword-offset-min(i64* %a, i64* %b, i64 %count) nounwind {
; CHECK-LABEL: post-indexed-sub-doubleword-offset-min
; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #-256
; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #-256
br label %for.body
for.body:
%phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
%gep1 = getelementptr i64, i64* %phi1, i64 1
%load1 = load i64, i64* %gep1
%gep2 = getelementptr i64, i64* %phi2, i64 1
store i64 %load1, i64* %gep2
%load2 = load i64, i64* %phi1
store i64 %load2, i64* %phi2
%dec.i = add nsw i64 %i, -1
%gep3 = getelementptr i64, i64* %phi2, i64 -32
%gep4 = getelementptr i64, i64* %phi1, i64 -32
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
ret void
}
define void @post-indexed-doubleword-offset-out-of-range(i64* %a, i64* %b, i64 %count) nounwind {
; CHECK-LABEL: post-indexed-doubleword-offset-out-of-range
; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}]
; CHECK: add x{{[0-9]+}}, x{{[0-9]+}}, #256
; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}]
; CHECK: add x{{[0-9]+}}, x{{[0-9]+}}, #256
br label %for.body
for.body:
%phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
%gep1 = getelementptr i64, i64* %phi1, i64 1
%load1 = load i64, i64* %gep1
%gep2 = getelementptr i64, i64* %phi2, i64 1
store i64 %load1, i64* %gep2
%load2 = load i64, i64* %phi1
store i64 %load2, i64* %phi2
%dec.i = add nsw i64 %i, -1
%gep3 = getelementptr i64, i64* %phi2, i64 32
%gep4 = getelementptr i64, i64* %phi1, i64 32
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
ret void
}
define void @post-indexed-paired-min-offset(i64* %a, i64* %b, i64 %count) nounwind {
; CHECK-LABEL: post-indexed-paired-min-offset
; CHECK: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}], #-512
; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}], #-512
br label %for.body
for.body:
%phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
%gep1 = getelementptr i64, i64* %phi1, i64 1
%load1 = load i64, i64* %gep1
%gep2 = getelementptr i64, i64* %phi2, i64 1
%load2 = load i64, i64* %phi1
store i64 %load1, i64* %gep2
store i64 %load2, i64* %phi2
%dec.i = add nsw i64 %i, -1
%gep3 = getelementptr i64, i64* %phi2, i64 -64
%gep4 = getelementptr i64, i64* %phi1, i64 -64
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
ret void
}
define void @post-indexed-paired-offset-out-of-range(i64* %a, i64* %b, i64 %count) nounwind {
; CHECK-LABEL: post-indexed-paired-offset-out-of-range
; CHECK: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}]
; CHECK: add x{{[0-9]+}}, x{{[0-9]+}}, #512
; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}]
; CHECK: add x{{[0-9]+}}, x{{[0-9]+}}, #512
br label %for.body
for.body:
%phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
%gep1 = getelementptr i64, i64* %phi1, i64 1
%load1 = load i64, i64* %phi1
%gep2 = getelementptr i64, i64* %phi2, i64 1
%load2 = load i64, i64* %gep1
store i64 %load1, i64* %gep2
store i64 %load2, i64* %phi2
%dec.i = add nsw i64 %i, -1
%gep3 = getelementptr i64, i64* %phi2, i64 64
%gep4 = getelementptr i64, i64* %phi1, i64 64
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
ret void
}