[SLP] Lit test that cannot get vectorized due to lack of look-ahead operand reordering heuristic.
The code in this test is not vectorized by SLP because its operand reordering
cannot look beyond the immediate predecessors. This will get fixed in a
follow-up patch that introduces the look-ahead operand reordering heuristic.

Committed on behalf of @vporpo (Vasileios Porpodas)

Differential Revision: https://reviews.llvm.org/D61283

llvm-svn: 359553
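The crux is that the two fadd instructions take their fsub operands in opposite
order, so the two scalar chains match only after commuting one fadd; a
reordering that inspects only the immediate predecessors cannot see this. The
relevant pair of instructions, copied from the test below:

  %addABCD_0 = fadd fast double %subAB_0, %subCD_0
  %addCDAB_1 = fadd fast double %subCD_1, %subAB_1  ; operands commuted vs. lane 0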
parent c0310342fc
commit ca0f8db8b4
test/Transforms/SLPVectorizer/X86/lookahead.ll (new file, 74 lines)
@@ -0,0 +1,74 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -mcpu=corei7-avx | FileCheck %s
;
; This checks the look-ahead operand reordering heuristic
;
; A[0] B[0] C[0] D[0]  C[1] D[1] A[1] B[1]
;    \  /    \  /          \  /    \  /
;     -       -              -      -
;      \     /                \    /
;        +                      +
;        |                      |
;       S[0]                   S[1]
;
define void @test(double* %array) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[IDX0:%.*]] = getelementptr inbounds double, double* [[ARRAY:%.*]], i64 0
; CHECK-NEXT:    [[IDX1:%.*]] = getelementptr inbounds double, double* [[ARRAY]], i64 1
; CHECK-NEXT:    [[IDX2:%.*]] = getelementptr inbounds double, double* [[ARRAY]], i64 2
; CHECK-NEXT:    [[IDX3:%.*]] = getelementptr inbounds double, double* [[ARRAY]], i64 3
; CHECK-NEXT:    [[IDX4:%.*]] = getelementptr inbounds double, double* [[ARRAY]], i64 4
; CHECK-NEXT:    [[IDX5:%.*]] = getelementptr inbounds double, double* [[ARRAY]], i64 5
; CHECK-NEXT:    [[IDX6:%.*]] = getelementptr inbounds double, double* [[ARRAY]], i64 6
; CHECK-NEXT:    [[IDX7:%.*]] = getelementptr inbounds double, double* [[ARRAY]], i64 7
; CHECK-NEXT:    [[A_0:%.*]] = load double, double* [[IDX0]], align 8
; CHECK-NEXT:    [[A_1:%.*]] = load double, double* [[IDX1]], align 8
; CHECK-NEXT:    [[B_0:%.*]] = load double, double* [[IDX2]], align 8
; CHECK-NEXT:    [[B_1:%.*]] = load double, double* [[IDX3]], align 8
; CHECK-NEXT:    [[C_0:%.*]] = load double, double* [[IDX4]], align 8
; CHECK-NEXT:    [[C_1:%.*]] = load double, double* [[IDX5]], align 8
; CHECK-NEXT:    [[D_0:%.*]] = load double, double* [[IDX6]], align 8
; CHECK-NEXT:    [[D_1:%.*]] = load double, double* [[IDX7]], align 8
; CHECK-NEXT:    [[SUBAB_0:%.*]] = fsub fast double [[A_0]], [[B_0]]
; CHECK-NEXT:    [[SUBCD_0:%.*]] = fsub fast double [[C_0]], [[D_0]]
; CHECK-NEXT:    [[SUBAB_1:%.*]] = fsub fast double [[A_1]], [[B_1]]
; CHECK-NEXT:    [[SUBCD_1:%.*]] = fsub fast double [[C_1]], [[D_1]]
; CHECK-NEXT:    [[ADDABCD_0:%.*]] = fadd fast double [[SUBAB_0]], [[SUBCD_0]]
; CHECK-NEXT:    [[ADDCDAB_1:%.*]] = fadd fast double [[SUBCD_1]], [[SUBAB_1]]
; CHECK-NEXT:    store double [[ADDABCD_0]], double* [[IDX0]], align 8
; CHECK-NEXT:    store double [[ADDCDAB_1]], double* [[IDX1]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %idx0 = getelementptr inbounds double, double* %array, i64 0
  %idx1 = getelementptr inbounds double, double* %array, i64 1
  %idx2 = getelementptr inbounds double, double* %array, i64 2
  %idx3 = getelementptr inbounds double, double* %array, i64 3
  %idx4 = getelementptr inbounds double, double* %array, i64 4
  %idx5 = getelementptr inbounds double, double* %array, i64 5
  %idx6 = getelementptr inbounds double, double* %array, i64 6
  %idx7 = getelementptr inbounds double, double* %array, i64 7

  %A_0 = load double, double *%idx0, align 8
  %A_1 = load double, double *%idx1, align 8
  %B_0 = load double, double *%idx2, align 8
  %B_1 = load double, double *%idx3, align 8
  %C_0 = load double, double *%idx4, align 8
  %C_1 = load double, double *%idx5, align 8
  %D_0 = load double, double *%idx6, align 8
  %D_1 = load double, double *%idx7, align 8

  %subAB_0 = fsub fast double %A_0, %B_0
  %subCD_0 = fsub fast double %C_0, %D_0

  %subAB_1 = fsub fast double %A_1, %B_1
  %subCD_1 = fsub fast double %C_1, %D_1

  %addABCD_0 = fadd fast double %subAB_0, %subCD_0
  %addCDAB_1 = fadd fast double %subCD_1, %subAB_1

  store double %addABCD_0, double *%idx0, align 8
  store double %addCDAB_1, double *%idx1, align 8
  ret void
}
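For context, once look-ahead reordering can commute the second fadd, all four
load pairs are consecutive and the whole tree maps onto <2 x double>
operations. A minimal sketch of the expected shape, assuming the heuristic
commutes lane 1's fadd so both lanes compute subAB + subCD (register names are
illustrative; this is not the actual output of the follow-up patch):

  ; hypothetical vectorized body of @test
  %ptrA = bitcast double* %idx0 to <2 x double>*
  %ptrB = bitcast double* %idx2 to <2 x double>*
  %ptrC = bitcast double* %idx4 to <2 x double>*
  %ptrD = bitcast double* %idx6 to <2 x double>*
  %vA = load <2 x double>, <2 x double>* %ptrA, align 8    ; A[0], A[1]
  %vB = load <2 x double>, <2 x double>* %ptrB, align 8    ; B[0], B[1]
  %vC = load <2 x double>, <2 x double>* %ptrC, align 8    ; C[0], C[1]
  %vD = load <2 x double>, <2 x double>* %ptrD, align 8    ; D[0], D[1]
  %vsubAB = fsub fast <2 x double> %vA, %vB
  %vsubCD = fsub fast <2 x double> %vC, %vD
  ; valid because fadd fast is commutative, so lane 1's
  ; subCD + subAB equals subAB + subCD
  %vadd = fadd fast <2 x double> %vsubAB, %vsubCD
  store <2 x double> %vadd, <2 x double>* %ptrA, align 8   ; S[0], S[1]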