Commit 46421cee58

As mentioned in D93793, there are quite a few places where unary `IRBuilder::CreateShuffleVector(X, Mask)` can be used instead of `IRBuilder::CreateShuffleVector(X, Undef, Mask)`. Let's update them.

Actually, it would have been more natural if the patches were made in this order:
(1) let them use unary CreateShuffleVector first
(2) update IRBuilder::CreateShuffleVector to use poison as a placeholder value (D93793)

The order is swapped, but in terms of correctness it is still fine.

Reviewed By: spatel

Differential Revision: https://reviews.llvm.org/D93923
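For reference, here is a minimal sketch of the API change in IRBuilder terms. The helper name splitLowHalf and the surrounding code are illustrative assumptions, not code taken from the patch; only the two CreateShuffleVector overloads are the point.

// A sketch of the change described above, assuming a hypothetical helper
// that splits off the first column of a flat <8 x double> matrix value.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static Value *splitLowHalf(IRBuilder<> &B, Value *In) {
  SmallVector<int, 4> Mask = {0, 1, 2, 3};

  // Old form: the unused second operand has to be spelled out explicitly.
  //   Value *Split = B.CreateShuffleVector(
  //       In, UndefValue::get(In->getType()), Mask, "split");

  // New (unary) form: IRBuilder supplies the placeholder operand itself.
  // Since D93793 that placeholder is poison, which is what the CHECK lines
  // in the test below expect for the second shuffle operand.
  return B.CreateShuffleVector(In, Mask, "split");
}

Either form selects only lanes of the first operand, so the placeholder is never read; the unary overload just makes that explicit at the call site.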
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -lower-matrix-intrinsics -S < %s | FileCheck %s
; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s

; Currently we only lower stores with shape information, but need to embed the
; matrix in a flat vector for function calls and returns.
define <8 x double> @strided_load_4x4(<8 x double> %in, <8 x double>* %Ptr) {
; CHECK-LABEL: @strided_load_4x4(
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x double> [[IN:%.*]], <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x double> [[IN]], <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x double> [[SPLIT]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> undef, double [[TMP1]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP2]], double [[TMP3]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <4 x double> [[SPLIT]], i64 1
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> undef, double [[TMP5]], i64 0
; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 1
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x double> [[TMP6]], double [[TMP7]], i64 1
; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x double> [[SPLIT]], i64 2
; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <2 x double> undef, double [[TMP9]], i64 0
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 2
; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <2 x double> [[TMP10]], double [[TMP11]], i64 1
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x double> [[SPLIT]], i64 3
; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <2 x double> undef, double [[TMP13]], i64 0
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x double> [[SPLIT1]], i64 3
; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <2 x double> [[TMP14]], double [[TMP15]], i64 1
; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> [[TMP8]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[TMP18:%.*]] = shufflevector <2 x double> [[TMP12]], <2 x double> [[TMP16]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[TMP19:%.*]] = shufflevector <4 x double> [[TMP17]], <4 x double> [[TMP18]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x double>* [[PTR:%.*]] to double*
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP20]], i64 2
; CHECK-NEXT:    [[VEC_CAST2:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP8]], <2 x double>* [[VEC_CAST2]], align 8
; CHECK-NEXT:    [[VEC_GEP3:%.*]] = getelementptr double, double* [[TMP20]], i64 4
; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast double* [[VEC_GEP3]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP12]], <2 x double>* [[VEC_CAST4]], align 8
; CHECK-NEXT:    [[VEC_GEP5:%.*]] = getelementptr double, double* [[TMP20]], i64 6
; CHECK-NEXT:    [[VEC_CAST6:%.*]] = bitcast double* [[VEC_GEP5]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP16]], <2 x double>* [[VEC_CAST6]], align 8
; CHECK-NEXT:    call void @foo(<8 x double> [[TMP19]])
; CHECK-NEXT:    ret <8 x double> [[TMP19]]
;
  %transposed = call <8 x double> @llvm.matrix.transpose(<8 x double> %in, i32 4, i32 2)
  store <8 x double> %transposed, <8 x double>* %Ptr, align 8
  call void @foo(<8 x double> %transposed)
  ret <8 x double> %transposed
}

declare <8 x double> @llvm.matrix.transpose(<8 x double>, i32, i32)

declare void @foo(<8 x double>)