From cdd50ed2ffbb6123a76ab720188748c62f314153 Mon Sep 17 00:00:00 2001
From: David Sherwood
Date: Fri, 23 Jul 2021 10:52:53 +0100
Subject: [PATCH] [LoopVectorize] Don't interleave scalar ordered reductions
 for inner loops

Consider the following loop:

  void foo(float *dst, float *src, int N) {
    for (int i = 0; i < N; i++) {
      dst[i] = 0.0;
      for (int j = 0; j < N; j++) {
        dst[i] += src[(i * N) + j];
      }
    }
  }

When we are not building with -Ofast we may still attempt to vectorise
the inner loop, using ordered reductions instead of unordered ones. In
addition we try to select an appropriate interleave count for the inner
loop. However, when a VF of 1 is chosen the inner loop will be scalar,
and there is existing code in selectInterleaveCount that limits the
interleave count to 2 for reductions due to concerns about increasing
the critical path. For ordered reductions this problem is even worse
because of the additional loop-carried data dependency, so I've added
code to simply disable interleaving for scalar ordered reductions for
now.

Test added here:

  Transforms/LoopVectorize/AArch64/strict-fadd-vf1.ll

Differential Revision: https://reviews.llvm.org/D106646
---
 lib/Transforms/Vectorize/LoopVectorize.cpp       | 16 ++++++-
 .../LoopVectorize/AArch64/strict-fadd-vf1.ll     | 42 +++++++++++++++++++
 2 files changed, 56 insertions(+), 2 deletions(-)
 create mode 100644 test/Transforms/LoopVectorize/AArch64/strict-fadd-vf1.ll

diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 73b75067380..f24ae6b100d 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -6473,9 +6473,21 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
 
   // If we have a scalar reduction (vector reductions are already dealt with
   // by this point), we can increase the critical path length if the loop
-  // we're interleaving is inside another loop. Limit, by default to 2, so the
-  // critical path only gets increased by one reduction operation.
+  // we're interleaving is inside another loop. For tree-wise reductions
+  // set the limit to 2, and for ordered reductions it's best to disable
+  // interleaving entirely.
   if (HasReductions && TheLoop->getLoopDepth() > 1) {
+    bool HasOrderedReductions =
+        any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
+          const RecurrenceDescriptor &RdxDesc = Reduction.second;
+          return RdxDesc.isOrdered();
+        });
+    if (HasOrderedReductions) {
+      LLVM_DEBUG(
+          dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
+      return 1;
+    }
+
     unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
     SmallIC = std::min(SmallIC, F);
     StoresIC = std::min(StoresIC, F);
diff --git a/test/Transforms/LoopVectorize/AArch64/strict-fadd-vf1.ll b/test/Transforms/LoopVectorize/AArch64/strict-fadd-vf1.ll
new file mode 100644
index 00000000000..a35bab85fed
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/strict-fadd-vf1.ll
@@ -0,0 +1,42 @@
+; REQUIRES: asserts
+; RUN: opt -loop-vectorize -enable-strict-reductions=true -force-vector-width=1 -S < %s -debug 2>log | FileCheck %s
+; RUN: cat log | FileCheck %s --check-prefix=CHECK-DEBUG
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; CHECK-DEBUG: LV: Not interleaving scalar ordered reductions.
+
+define void @foo(float* noalias nocapture %dst, float* noalias nocapture readonly %src, i64 %M, i64 %N) {
+; CHECK-LABEL: @foo(
+; CHECK-NOT: vector.body
+
+entry:
+  br label %for.body.us
+
+for.body.us:                                      ; preds = %entry, %for.cond3
+  %i.023.us = phi i64 [ %inc8.us, %for.cond3 ], [ 0, %entry ]
+  %arrayidx.us = getelementptr inbounds float, float* %dst, i64 %i.023.us
+  %mul.us = mul nsw i64 %i.023.us, %N
+  br label %for.body3.us
+
+for.body3.us:                                     ; preds = %for.body.us, %for.body3.us
+  %0 = phi float [ 0.000000e+00, %for.body.us ], [ %add6.us, %for.body3.us ]
+  %j.021.us = phi i64 [ 0, %for.body.us ], [ %inc.us, %for.body3.us ]
+  %add.us = add nsw i64 %j.021.us, %mul.us
+  %arrayidx4.us = getelementptr inbounds float, float* %src, i64 %add.us
+  %1 = load float, float* %arrayidx4.us, align 4
+  %add6.us = fadd float %1, %0
+  %inc.us = add nuw nsw i64 %j.021.us, 1
+  %exitcond.not = icmp eq i64 %inc.us, %N
+  br i1 %exitcond.not, label %for.cond3, label %for.body3.us
+
+for.cond3:                                        ; preds = %for.body3.us
+  %add6.us.lcssa = phi float [ %add6.us, %for.body3.us ]
+  store float %add6.us.lcssa, float* %arrayidx.us, align 4
+  %inc8.us = add nuw nsw i64 %i.023.us, 1
+  %exitcond26.not = icmp eq i64 %inc8.us, %M
+  br i1 %exitcond26.not, label %exit, label %for.body.us
+
+exit:                                             ; preds = %for.cond3
+  ret void
+}
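
A minimal C sketch, not taken from the patch (the function name, the size_t
types and the manual unrolling are invented for illustration), of why
interleaving cannot help here: for an ordered reduction the partial sums may
not be reassociated, so a 2-way interleaving of the inner loop from the
commit message still has to execute one serial chain of fadds.

  #include <stddef.h>

  /* Hypothetical sketch: the inner loop from the commit message, manually
   * interleaved by a factor of two while preserving the source order of the
   * additions. Every add consumes the result of the previous one, so the
   * serial dependency chain (the critical path) is exactly as long as in
   * the non-interleaved loop. */
  static float ordered_row_sum_interleaved(const float *src, size_t i,
                                           size_t N) {
    float sum = 0.0f;
    size_t j = 0;
    for (; j + 1 < N; j += 2) {
      sum = sum + src[i * N + j];     /* depends on the previous sum   */
      sum = sum + src[i * N + j + 1]; /* depends on the add just above */
    }
    if (j < N)                        /* remainder iteration for odd N */
      sum = sum + src[i * N + j];
    return sum;
  }

Splitting the work across two accumulators and adding them together after
the loop would shorten that chain, but it changes the rounding and is only
legal with -Ofast/reassociation, which is why the patch returns an
interleave count of 1 for scalar ordered reductions.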