From 8a9eb97aa0a9136f6e4141565a8e71c5b30b5a92 Mon Sep 17 00:00:00 2001 From: Michael Kuperstein <mkuper@google.com> Date: Mon, 27 Feb 2017 23:18:11 +0000 Subject: [PATCH] [SLP] Load sorting should not try to sort things that aren't loads. We may get a VL where the first element is a load, but the others aren't. Trying to sort such VLs can only lead to sorrow. llvm-svn: 296411 --- lib/Analysis/LoopAccessAnalysis.cpp | 5 +++ .../SLPVectorizer/X86/jumbled-same.ll | 43 +++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 test/Transforms/SLPVectorizer/X86/jumbled-same.ll diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp index 0e4e37fc727..a3ed412b738 100644 --- a/lib/Analysis/LoopAccessAnalysis.cpp +++ b/lib/Analysis/LoopAccessAnalysis.cpp @@ -1052,7 +1052,12 @@ bool llvm::sortMemAccesses(ArrayRef<Value *> VL, const DataLayout &DL, Value *Obj0 = GetUnderlyingObject(Ptr0, DL); for (auto *Val : VL) { + // The only kind of access we care about here is load. + if (!isa<LoadInst>(Val)) + return false; + Value *Ptr = getPointerOperand(Val); + assert(Ptr && "Expected value to have a pointer operand."); // If a pointer refers to a different underlying object, bail - the // pointers are by definition incomparable. 
diff --git a/test/Transforms/SLPVectorizer/X86/jumbled-same.ll b/test/Transforms/SLPVectorizer/X86/jumbled-same.ll new file mode 100644 index 00000000000..623ab1669c1 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/jumbled-same.ll @@ -0,0 +1,43 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-unknown-linux -mattr=+sse4.2 | FileCheck %s +
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" +
+@a = common local_unnamed_addr global [4 x i32] zeroinitializer, align 4 +@b = common local_unnamed_addr global [4 x i32] zeroinitializer, align 4 +
+define i32 @fn1() { +; CHECK-LABEL: @fn1( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> +; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP3]], i32 0 +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 8, i32 3 +; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP7]], <4 x i32> <i32 6, i32 0, i32 0, i32 0> +; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4 +; CHECK-NEXT: ret i32 0 +; +entry: + %0 = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @b, i64 0, i32 0), align 4 + %cmp = icmp sgt i32 %0, 0 + %cond = select i1 %cmp, i32 8, i32 0 + store i32 %cond, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i32 3), align 4 + %1 = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @b, i64 
0, i32 1), align 4 + %cmp1 = icmp sgt i32 %1, 0 + %. = select i1 %cmp1, i32 %1, i32 6 + store i32 %., i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i32 0), align 4 + %2 = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @b, i64 0, i32 2), align 4 + %cmp4 = icmp sgt i32 %2, 0 + %3 = select i1 %cmp4, i32 ptrtoint (i32 ()* @fn1 to i32), i32 0 + store i32 %3, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i32 1), align 4 + %4 = load i32, i32* getelementptr ([4 x i32], [4 x i32]* @b, i64 0, i32 3), align 4 + %cmp6 = icmp sgt i32 %4, 0 + %5 = select i1 %cmp6, i32 ptrtoint (i32 ()* @fn1 to i32), i32 0 + store i32 %5, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i32 2), align 4 + ret i32 0 +}