Mirror of https://github.com/RPCS3/llvm-mirror.git
Commit 5f5aa15bce

Resubmit after the following changes:

* Fix a latent bug related to unrolling with a required epilogue (see
  e49d65f). I believe this was the cause of the prior PPC buildbot failure.
* Disable non-latch exits for epilogue vectorization, to be safe (9ffa90d).
* Split out the assert movement (600624a) to reduce churn if this gets
  reverted again.

Previous commit message (try 3):

Resubmit after fixing test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll.

Previous commit message:

This is a resubmit of 3e5ce4 (which was reverted by 7fe41ac). The original
commit caused a PPC buildbot failure we never really got to the bottom of. I
can't reproduce the issue, and the bot owner was non-responsive. In the
meantime, we stumbled across an issue which seems possibly related and worked
around a latent bug in 80e8025. My best guess is that the original patch
exposed that latent issue at higher frequency, but it really is just a guess.

Original commit message follows:

If we know that the scalar epilogue is required to run, modify the CFG to end
the middle block with an unconditional branch to the scalar preheader, instead
of a conditional branch to either the preheader or the exit block.

The motivation for this is to support multiple exit blocks. Specifically, the
current structure forces us to identify immediate dominators and *which* exit
block to branch to in the middle terminator. For the multiple-exit case, where
we know the scalar epilogue is required, these questions are ill-formed.

This is the last change needed to support multiple-exit loops, but since the
diffs are already large enough, I'm going to land this and then enable the
functionality separately. You can think of this as NFCI-ish prep work, but the
changes are a bit too involved for me to feel comfortable tagging the review
that way.

Differential Revision: https://reviews.llvm.org/D94892
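The shape change is easy to see in the test below: the middle block now ends
in an unconditional branch straight to the scalar preheader. As a minimal
sketch (the block and value names here are illustrative, not copied from any
particular compiler output), the middle block goes from a conditional branch:

  middle.block:
    %cmp.n = icmp eq i64 %trip.count, %n.vec
    br i1 %cmp.n, label %for.end, label %scalar.ph

to an unconditional branch when the scalar epilogue is known to run:

  middle.block:
    br label %scalar.ph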
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -loop-vectorize -force-vector-interleave=2 | FileCheck %s

; Demonstrate a case where we unroll a loop, but don't vectorize it.
; The original loop runs stores in the latch block on iterations 0 to 1022,
; and exits when %indvars.iv = 1023. (That is, it actually runs the stores
; for an odd number of iterations.) If we unroll by two in the "vector.body"
; loop, we must exit to the epilogue on the iteration with %indvars.iv = 1022
; to avoid an out-of-bounds access.
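; A worked trace of the bound above (a sketch added for clarity, derived from
; the checks below): with -force-vector-interleave=2, the unrolled loop
; advances %index by 2 per trip and exits once %index.next reaches 1022, so
; it covers %indvars.iv = 0..1021. The scalar epilogue resumes at 1022,
; performs the final store, and takes the early exit at 1023 without storing.
; Running the unrolled loop to 1024 instead would store for %indvars.iv =
; 1023, i.e. through data[2*1023+1] = data[2047], the out-of-bounds access
; described above.
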
define void @test(double* %data) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[INDUCTION]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[INDUCTION1]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP0]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = or i64 [[TMP1]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds double, double* [[DATA:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds double, double* [[DATA]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = load double, double* [[TMP4]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = load double, double* [[TMP5]], align 8
; CHECK-NEXT:    [[TMP8:%.*]] = fneg double [[TMP6]]
; CHECK-NEXT:    [[TMP9:%.*]] = fneg double [[TMP7]]
; CHECK-NEXT:    store double [[TMP8]], double* [[TMP4]], align 8
; CHECK-NEXT:    store double [[TMP9]], double* [[TMP5]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1022
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1022, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_LATCH:%.*]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_LATCH]]
; CHECK:       for.latch:
; CHECK-NEXT:    [[T15:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[T16:%.*]] = or i64 [[T15]], 1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[DATA]], i64 [[T16]]
; CHECK-NEXT:    [[T17:%.*]] = load double, double* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[FNEG:%.*]] = fneg double [[T17]]
; CHECK-NEXT:    store double [[FNEG]], double* [[ARRAYIDX]], align 8
; CHECK-NEXT:    br label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.latch ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.latch

for.latch:
  %t15 = shl nuw nsw i64 %indvars.iv, 1
  %t16 = or i64 %t15, 1
  %arrayidx = getelementptr inbounds double, double* %data, i64 %t16
  %t17 = load double, double* %arrayidx, align 8
  %fneg = fneg double %t17
  store double %fneg, double* %arrayidx, align 8
  br label %for.body

for.end:
  ret void
}