From 3137ffb894c69641c89c1687a33eaa23f1498aa3 Mon Sep 17 00:00:00 2001
From: Mohammad Shahid
Date: Sun, 27 Nov 2016 03:35:31 +0000
Subject: [PATCH] [SLP] Add a new lit test and update an existing one to
 provide more context for an incoming patch for vectorization of jumbled
 loads

Change-Id: Ifb9091bb0f84c1937c2c8bd2fc345734f250d2f9

llvm-svn: 287992
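
The new @jumbled-load test covers loads that are consecutive in memory but
used in a shuffled order. A minimal sketch of the kind of IR an incoming
jumbled-load vectorization patch could be expected to produce for such a
pattern (the value names, the pointer bitcast, and the shuffle mask below
are illustrative only and are not taken from that patch):

  ; illustrative sketch only: %in.vec, %wide, %reordered and the mask are hypothetical
  %in.vec = bitcast i32* %in to <4 x i32>*
  ; one wide load replaces the four scalar loads
  %wide = load <4 x i32>, <4 x i32>* %in.vec, align 4
  ; reorder the lanes to match the original (jumbled) scalar use order
  %reordered = shufflevector <4 x i32> %wide, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>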
---
 .../SLPVectorizer/X86/jumbled-load.ll         | 68 +++++++++++++++++++
 .../SLPVectorizer/X86/reduction_loads.ll      | 39 +++++++++--
 2 files changed, 103 insertions(+), 4 deletions(-)
 create mode 100644 test/Transforms/SLPVectorizer/X86/jumbled-load.ll

diff --git a/test/Transforms/SLPVectorizer/X86/jumbled-load.ll b/test/Transforms/SLPVectorizer/X86/jumbled-load.ll
new file mode 100644
index 00000000000..06e051a90b0
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/jumbled-load.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
+
+
+
+define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
+; CHECK-LABEL: @jumbled-load(
+; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* %in, i64 0
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
+; CHECK-NEXT:    [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
+; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
+; CHECK-NEXT:    [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
+; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
+; CHECK-NEXT:    [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
+; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* %inn, i64 0
+; CHECK-NEXT:    [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
+; CHECK-NEXT:    [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
+; CHECK-NEXT:    [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
+; CHECK-NEXT:    [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
+; CHECK-NEXT:    [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4
+; CHECK-NEXT:    [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
+; CHECK-NEXT:    [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4
+; CHECK-NEXT:    [[MUL_1:%.*]] = mul i32 [[LOAD_3]], [[LOAD_5]]
+; CHECK-NEXT:    [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_8]]
+; CHECK-NEXT:    [[MUL_3:%.*]] = mul i32 [[LOAD_4]], [[LOAD_7]]
+; CHECK-NEXT:    [[MUL_4:%.*]] = mul i32 [[LOAD_1]], [[LOAD_6]]
+; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* %out, i64 0
+; CHECK-NEXT:    store i32 [[MUL_1]], i32* [[GEP_7]], align 4
+; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* %out, i64 1
+; CHECK-NEXT:    store i32 [[MUL_2]], i32* [[GEP_8]], align 4
+; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* %out, i64 2
+; CHECK-NEXT:    store i32 [[MUL_3]], i32* [[GEP_9]], align 4
+; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* %out, i64 3
+; CHECK-NEXT:    store i32 [[MUL_4]], i32* [[GEP_10]], align 4
+; CHECK-NEXT:    ret i32 undef
+;
+  %in.addr = getelementptr inbounds i32, i32* %in, i64 0
+  %load.1 = load i32, i32* %in.addr, align 4
+  %gep.1 = getelementptr inbounds i32, i32* %in.addr, i64 3
+  %load.2 = load i32, i32* %gep.1, align 4
+  %gep.2 = getelementptr inbounds i32, i32* %in.addr, i64 1
+  %load.3 = load i32, i32* %gep.2, align 4
+  %gep.3 = getelementptr inbounds i32, i32* %in.addr, i64 2
+  %load.4 = load i32, i32* %gep.3, align 4
+  %inn.addr = getelementptr inbounds i32, i32* %inn, i64 0
+  %load.5 = load i32, i32* %inn.addr, align 4
+  %gep.4 = getelementptr inbounds i32, i32* %inn.addr, i64 2
+  %load.6 = load i32, i32* %gep.4, align 4
+  %gep.5 = getelementptr inbounds i32, i32* %inn.addr, i64 3
+  %load.7 = load i32, i32* %gep.5, align 4
+  %gep.6 = getelementptr inbounds i32, i32* %inn.addr, i64 1
+  %load.8 = load i32, i32* %gep.6, align 4
+  %mul.1 = mul i32 %load.3, %load.5
+  %mul.2 = mul i32 %load.2, %load.8
+  %mul.3 = mul i32 %load.4, %load.7
+  %mul.4 = mul i32 %load.1, %load.6
+  %gep.7 = getelementptr inbounds i32, i32* %out, i64 0
+  store i32 %mul.1, i32* %gep.7, align 4
+  %gep.8 = getelementptr inbounds i32, i32* %out, i64 1
+  store i32 %mul.2, i32* %gep.8, align 4
+  %gep.9 = getelementptr inbounds i32, i32* %out, i64 2
+  store i32 %mul.3, i32* %gep.9, align 4
+  %gep.10 = getelementptr inbounds i32, i32* %out, i64 3
+  store i32 %mul.4, i32* %gep.10, align 4
+
+  ret i32 undef
+}
diff --git a/test/Transforms/SLPVectorizer/X86/reduction_loads.ll b/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
index 644b05d5a5e..76b3b9174a5 100644
--- a/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
+++ b/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
@@ -1,11 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse4.2 | FileCheck %s
 
-; CHECK-LABEL: @test
-; CHECK: [[CAST:%.*]] = bitcast i32* %p to <8 x i32>*
-; CHECK: [[LOAD:%.*]] = load <8 x i32>, <8 x i32>* [[CAST]], align 4
-; CHECK: mul <8 x i32> , [[LOAD]]
 
 define i32 @test(i32* nocapture readonly %p) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* %p, i64 1
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* %p, i64 2
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* %p, i64 3
+; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* %p, i64 4
+; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* %p, i64 5
+; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* %p, i64 6
+; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* %p, i64 7
+; CHECK-NEXT:    br label %for.body
+; CHECK:       for.body:
+; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* %p to <8 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i32> , [[TMP1]]
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 undef, [[SUM]]
+; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 undef, [[ADD]]
+; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 undef, [[ADD_1]]
+; CHECK-NEXT:    [[ADD_3:%.*]] = add i32 undef, [[ADD_2]]
+; CHECK-NEXT:    [[ADD_4:%.*]] = add i32 undef, [[ADD_3]]
+; CHECK-NEXT:    [[ADD_5:%.*]] = add i32 undef, [[ADD_4]]
+; CHECK-NEXT:    [[ADD_6:%.*]] = add i32 undef, [[ADD_5]]
+; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> undef, <8 x i32>
+; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <8 x i32> [[TMP2]], [[RDX_SHUF]]
+; CHECK-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32>
+; CHECK-NEXT:    [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32>
+; CHECK-NEXT:    [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; CHECK-NEXT:    [[ADD_7:%.*]] = add i32 [[TMP3]], [[SUM]]
+; CHECK-NEXT:    br i1 true, label %for.end, label %for.body
+; CHECK:       for.end:
+; CHECK-NEXT:    ret i32 [[ADD_7]]
+;
 entry:
   %arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1
   %arrayidx.2 = getelementptr inbounds i32, i32* %p, i64 2