Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-22 18:54:02 +01:00
Commit 6759d03795
When converting a BUILD_VECTOR or VECTOR_SHUFFLE to a splatting load (as of 1461fb6e783cb946b061f66689b419f74f7fad63), we inaccurately check for a single user of the load and neglect to update the users of the original load's output chain. As a result, we can emit a new load even though the original load is kept, and the new load can be reordered after a dependent store. This patch fixes both issues. Fixes https://bugs.llvm.org/show_bug.cgi?id=47891
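For context, the test below exercises a multi-word left shift by one bit. A plausible C source for @poly2_lshift1, reconstructed from the IR (the struct layout matches the test, but the loop shape and field name are assumptions, not part of the commit):

    #include <stdint.h>

    struct poly2 { uint64_t v[11]; };

    /* Shift an 11-limb little-endian big integer left by one bit; each
       limb receives the top bit of the limb below it. The IR in the test
       is the fully unrolled form, with the last two limbs vectorized. */
    void poly2_lshift1(struct poly2 *p) {
      for (int i = 10; i > 0; --i)
        p->v[i] = (p->v[i] << 1) | (p->v[i - 1] >> 63);
      p->v[0] <<= 1;
    }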
117 lines
5.1 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s

%struct.poly2 = type { [11 x i64] }

; Function Attrs: nofree norecurse nounwind
define dso_local void @poly2_lshift1(%struct.poly2* nocapture %p) local_unnamed_addr #0 {
; CHECK-LABEL: poly2_lshift1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li r4, 72
; CHECK-NEXT:    addis r5, r2, .LCPI0_0@toc@ha
; CHECK-NEXT:    addis r6, r2, .LCPI0_1@toc@ha
; CHECK-NEXT:    ld r7, 64(r3)
; CHECK-NEXT:    ld r8, 16(r3)
; CHECK-NEXT:    ld r10, 24(r3)
; CHECK-NEXT:    ld r11, 32(r3)
; CHECK-NEXT:    lxvd2x vs0, r3, r4
; CHECK-NEXT:    addi r5, r5, .LCPI0_0@toc@l
; CHECK-NEXT:    addi r6, r6, .LCPI0_1@toc@l
; CHECK-NEXT:    ld r12, 56(r3)
; CHECK-NEXT:    lxvd2x vs1, 0, r5
; CHECK-NEXT:    mtfprd f2, r7
; CHECK-NEXT:    ld r5, 0(r3)
; CHECK-NEXT:    xxswapd v2, vs0
; CHECK-NEXT:    lxvd2x vs0, 0, r6
; CHECK-NEXT:    ld r6, 8(r3)
; CHECK-NEXT:    rotldi r9, r5, 1
; CHECK-NEXT:    sldi r5, r5, 1
; CHECK-NEXT:    xxswapd v3, vs1
; CHECK-NEXT:    std r5, 0(r3)
; CHECK-NEXT:    rotldi r5, r10, 1
; CHECK-NEXT:    rldimi r9, r6, 1, 0
; CHECK-NEXT:    rotldi r6, r6, 1
; CHECK-NEXT:    xxpermdi v4, v2, vs2, 2
; CHECK-NEXT:    xxswapd v5, vs0
; CHECK-NEXT:    rldimi r6, r8, 1, 0
; CHECK-NEXT:    rotldi r8, r8, 1
; CHECK-NEXT:    std r9, 8(r3)
; CHECK-NEXT:    ld r9, 40(r3)
; CHECK-NEXT:    rldimi r8, r10, 1, 0
; CHECK-NEXT:    rldimi r5, r11, 1, 0
; CHECK-NEXT:    std r6, 16(r3)
; CHECK-NEXT:    rotldi r10, r11, 1
; CHECK-NEXT:    ld r11, 48(r3)
; CHECK-NEXT:    std r5, 32(r3)
; CHECK-NEXT:    rotldi r6, r12, 1
; CHECK-NEXT:    vsrd v3, v4, v3
; CHECK-NEXT:    rldimi r10, r9, 1, 0
; CHECK-NEXT:    rotldi r9, r9, 1
; CHECK-NEXT:    std r8, 24(r3)
; CHECK-NEXT:    vsld v2, v2, v5
; CHECK-NEXT:    rotldi r5, r11, 1
; CHECK-NEXT:    rldimi r9, r11, 1, 0
; CHECK-NEXT:    std r10, 40(r3)
; CHECK-NEXT:    rldimi r5, r12, 1, 0
; CHECK-NEXT:    rldimi r6, r7, 1, 0
; CHECK-NEXT:    std r9, 48(r3)
; CHECK-NEXT:    xxlor vs0, v2, v3
; CHECK-NEXT:    std r5, 56(r3)
; CHECK-NEXT:    std r6, 64(r3)
; CHECK-NEXT:    xxswapd vs0, vs0
; CHECK-NEXT:    stxvd2x vs0, r3, r4
; CHECK-NEXT:    blr
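; Note (our reading of the fixed output): the splat of limb 8 is built with
; mtfprd f2, r7 from the GPR already loaded via ld r7, 64(r3), rather than
; with a second load-splat from 64(r3); a duplicated load there could have
; been reordered past the dependent std r6, 64(r3) above.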
entry:
  %arrayidx = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 0
  %0 = load i64, i64* %arrayidx, align 8
  %shl = shl i64 %0, 1
  store i64 %shl, i64* %arrayidx, align 8
  %arrayidx.1 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 1
  %1 = load i64, i64* %arrayidx.1, align 8
  %or.1 = call i64 @llvm.fshl.i64(i64 %1, i64 %0, i64 1)
  store i64 %or.1, i64* %arrayidx.1, align 8
  %arrayidx.2 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 2
  %2 = load i64, i64* %arrayidx.2, align 8
  %or.2 = call i64 @llvm.fshl.i64(i64 %2, i64 %1, i64 1)
  store i64 %or.2, i64* %arrayidx.2, align 8
  %arrayidx.3 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 3
  %3 = load i64, i64* %arrayidx.3, align 8
  %or.3 = call i64 @llvm.fshl.i64(i64 %3, i64 %2, i64 1)
  store i64 %or.3, i64* %arrayidx.3, align 8
  %arrayidx.4 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 4
  %4 = load i64, i64* %arrayidx.4, align 8
  %or.4 = call i64 @llvm.fshl.i64(i64 %4, i64 %3, i64 1)
  store i64 %or.4, i64* %arrayidx.4, align 8
  %arrayidx.5 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 5
  %5 = load i64, i64* %arrayidx.5, align 8
  %or.5 = call i64 @llvm.fshl.i64(i64 %5, i64 %4, i64 1)
  store i64 %or.5, i64* %arrayidx.5, align 8
  %arrayidx.6 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 6
  %6 = load i64, i64* %arrayidx.6, align 8
  %or.6 = call i64 @llvm.fshl.i64(i64 %6, i64 %5, i64 1)
  store i64 %or.6, i64* %arrayidx.6, align 8
  %arrayidx.7 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 7
  %7 = load i64, i64* %arrayidx.7, align 8
  %or.7 = call i64 @llvm.fshl.i64(i64 %7, i64 %6, i64 1)
  store i64 %or.7, i64* %arrayidx.7, align 8
  %arrayidx.8 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 8
  %8 = load i64, i64* %arrayidx.8, align 8
  %or.8 = call i64 @llvm.fshl.i64(i64 %8, i64 %7, i64 1)
  store i64 %or.8, i64* %arrayidx.8, align 8
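  ; The last two limbs (indices 9 and 10) are shifted as one <2 x i64>:
  ; %12 pairs each element of %10 with the limb below it (limbs 8 and 9),
  ; so a single vector funnel shift produces both results.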
  %arrayidx.9 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 9
  %9 = bitcast i64* %arrayidx.9 to <2 x i64>*
  %10 = load <2 x i64>, <2 x i64>* %9, align 8
  %11 = insertelement <2 x i64> undef, i64 %8, i32 0
  %12 = shufflevector <2 x i64> %11, <2 x i64> %10, <2 x i32> <i32 0, i32 2>
  %13 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %10, <2 x i64> %12, <2 x i64> <i64 1, i64 1>)
  %14 = bitcast i64* %arrayidx.9 to <2 x i64>*
  store <2 x i64> %13, <2 x i64>* %14, align 8
  ret void
}

; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
declare i64 @llvm.fshl.i64(i64, i64, i64) #1

; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) #1
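As a reminder of the intrinsic's semantics: @llvm.fshl concatenates its first operand (high bits) with its second (low bits), shifts the double-width value left by the third operand taken modulo the bit width, and returns the high half. A C model of the i64 case, following the LangRef definition (fshl64 is an illustrative name, not an LLVM API):

    #include <stdint.h>

    /* Model of @llvm.fshl.i64: shift the 128-bit value (a:b) left by
       (c mod 64) and return the high 64 bits. For c == 0 the result
       is just a; the branch also avoids a shift by 64, which is
       undefined behavior in C. */
    uint64_t fshl64(uint64_t a, uint64_t b, uint64_t c) {
      c &= 63;
      if (c == 0)
        return a;
      return (a << c) | (b >> (64 - c));
    }

With c == 1, as used throughout the test, this reduces to (a << 1) | (b >> 63), i.e. each limb picks up the top bit of the limb below it.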