; RUN: opt < %s -instcombine -S | FileCheck %s

;; MASKED LOADS

; If the mask isn't constant, do nothing.

define <4 x float> @mload(i8* %f, <4 x i32> %mask) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
  ret <4 x float> %ld

; CHECK-LABEL: @mload(
; CHECK-NEXT:  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
; CHECK-NEXT:  ret <4 x float> %ld
}

; Zero mask is a nop.

define <4 x float> @mload_zeros(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> zeroinitializer)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_zeros(
; CHECK-NEXT:  ret <4 x float> undef
}

; Only the sign bit of each mask element matters: these elements are non-zero,
; but every sign bit is clear, so no lane is loaded.

define <4 x float> @mload_fake_ones(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_fake_ones(
; CHECK-NEXT:  ret <4 x float> undef
}

; All mask sign bits are set, so this is just a vector load.

define <4 x float> @mload_real_ones(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_real_ones(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  %unmaskedload = load <4 x float>, <4 x float>* %castvec
; CHECK-NEXT:  ret <4 x float> %unmaskedload
}

; It's a constant mask, so convert to the generic LLVM intrinsic. The backend should optimize further.

define <4 x float> @mload_one_one(i8* %f) {
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x float> %ld

; CHECK-LABEL: @mload_one_one(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  %1 = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> undef)
; CHECK-NEXT:  ret <4 x float> %1
}

; Try doubles.

define <2 x double> @mload_one_one_double(i8* %f) {
  %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x double> %ld

; CHECK-LABEL: @mload_one_one_double(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x double>*
; CHECK-NEXT:  %1 = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> undef)
; CHECK-NEXT:  ret <2 x double> %1
}

; Try 256-bit FP ops.

define <8 x float> @mload_v8f32(i8* %f) {
  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 -1>)
  ret <8 x float> %ld

; CHECK-LABEL: @mload_v8f32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x float>*
; CHECK-NEXT:  %1 = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x float> undef)
; CHECK-NEXT:  ret <8 x float> %1
}

define <4 x double> @mload_v4f64(i8* %f) {
  %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x double> %ld

; CHECK-LABEL: @mload_v4f64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x double>*
; CHECK-NEXT:  %1 = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> undef)
; CHECK-NEXT:  ret <4 x double> %1
}
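; For reference, a minimal hand-written sketch of the generic intrinsic that the
; transforms above target. This function is illustrative only (it has no CHECK
; lines and its name and parameters are hypothetical, not part of the original
; test). It shows the operand order the CHECK lines above rely on: pointer,
; alignment (i32 1, since the x86 ops impose no alignment), i1 mask, and a
; passthru vector for the masked-off lanes; the transformed calls above pass
; undef as the passthru.

define <4 x float> @generic_mload_sketch(<4 x float>* %p, <4 x i1> %boolmask, <4 x float> %passthru) {
  %ld = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %p, i32 1, <4 x i1> %boolmask, <4 x float> %passthru)
  ret <4 x float> %ld
}

declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)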
; Try the AVX2 integer variants.

define <4 x i32> @mload_v4i32(i8* %f) {
  %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x i32> %ld

; CHECK-LABEL: @mload_v4i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i32>*
; CHECK-NEXT:  %1 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> undef)
; CHECK-NEXT:  ret <4 x i32> %1
}

define <2 x i64> @mload_v2i64(i8* %f) {
  %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x i64> %ld

; CHECK-LABEL: @mload_v2i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x i64>*
; CHECK-NEXT:  %1 = call <2 x i64> @llvm.masked.load.v2i64(<2 x i64>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> undef)
; CHECK-NEXT:  ret <2 x i64> %1
}

define <8 x i32> @mload_v8i32(i8* %f) {
  %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 -1>)
  ret <8 x i32> %ld

; CHECK-LABEL: @mload_v8i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x i32>*
; CHECK-NEXT:  %1 = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x i32> undef)
; CHECK-NEXT:  ret <8 x i32> %1
}

define <4 x i64> @mload_v4i64(i8* %f) {
  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x i64> %ld

; CHECK-LABEL: @mload_v4i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i64>*
; CHECK-NEXT:  %1 = call <4 x i64> @llvm.masked.load.v4i64(<4 x i64>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> undef)
; CHECK-NEXT:  ret <4 x i64> %1
}

;; MASKED STORES

; If the mask isn't constant, do nothing.

define void @mstore(i8* %f, <4 x i32> %mask, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore(
; CHECK-NEXT:  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
; CHECK-NEXT:  ret void
}

; Zero mask is a nop.

define void @mstore_zeros(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> zeroinitializer, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_zeros(
; CHECK-NEXT:  ret void
}

; Only the sign bit of each mask element matters: every sign bit is clear, so
; the store is dead.

define void @mstore_fake_ones(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_fake_ones(
; CHECK-NEXT:  ret void
}

; All mask sign bits are set, so this is just a vector store.

define void @mstore_real_ones(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_real_ones(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  store <4 x float> %v, <4 x float>* %castvec
; CHECK-NEXT:  ret void
}

; It's a constant mask, so convert to the generic LLVM intrinsic. The backend should optimize further.

define void @mstore_one_one(i8* %f, <4 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>, <4 x float> %v)
  ret void

; CHECK-LABEL: @mstore_one_one(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x float>*
; CHECK-NEXT:  call void @llvm.masked.store.v4f32(<4 x float> %v, <4 x float>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
; CHECK-NEXT:  ret void
}

; Try doubles.

define void @mstore_one_one_double(i8* %f, <2 x double> %v) {
  tail call void @llvm.x86.avx.maskstore.pd(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x double> %v)
  ret void

; CHECK-LABEL: @mstore_one_one_double(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x double>*
; CHECK-NEXT:  call void @llvm.masked.store.v2f64(<2 x double> %v, <2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:  ret void
}
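; For reference, a matching sketch of the generic masked store form that the
; store transforms produce. Illustrative only: no CHECK lines, and the function
; and parameter names are hypothetical. Unlike the load, the operand order is
; value, pointer, alignment, mask, and there is no passthru operand.

define void @generic_mstore_sketch(<4 x float>* %p, <4 x i1> %boolmask, <4 x float> %v) {
  call void @llvm.masked.store.v4f32(<4 x float> %v, <4 x float>* %p, i32 1, <4 x i1> %boolmask)
  ret void
}

declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)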
; Try 256-bit FP ops.

define void @mstore_v8f32(i8* %f, <8 x float> %v) {
  tail call void @llvm.x86.avx.maskstore.ps.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x float> %v)
  ret void

; CHECK-LABEL: @mstore_v8f32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x float>*
; CHECK-NEXT:  call void @llvm.masked.store.v8f32(<8 x float> %v, <8 x float>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:  ret void
}

define void @mstore_v4f64(i8* %f, <4 x double> %v) {
  tail call void @llvm.x86.avx.maskstore.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x double> %v)
  ret void

; CHECK-LABEL: @mstore_v4f64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x double>*
; CHECK-NEXT:  call void @llvm.masked.store.v4f64(<4 x double> %v, <4 x double>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:  ret void
}

; Try the AVX2 integer variants.

define void @mstore_v4i32(i8* %f, <4 x i32> %v) {
  tail call void @llvm.x86.avx2.maskstore.d(i8* %f, <4 x i32> <i32 0, i32 1, i32 -1, i32 -2>, <4 x i32> %v)
  ret void

; CHECK-LABEL: @mstore_v4i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i32>*
; CHECK-NEXT:  call void @llvm.masked.store.v4i32(<4 x i32> %v, <4 x i32>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 true, i1 true>)
; CHECK-NEXT:  ret void
}

define void @mstore_v2i64(i8* %f, <2 x i64> %v) {
  tail call void @llvm.x86.avx2.maskstore.q(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x i64> %v)
  ret void

; CHECK-LABEL: @mstore_v2i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x i64>*
; CHECK-NEXT:  call void @llvm.masked.store.v2i64(<2 x i64> %v, <2 x i64>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:  ret void
}

define void @mstore_v8i32(i8* %f, <8 x i32> %v) {
  tail call void @llvm.x86.avx2.maskstore.d.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x i32> %v)
  ret void

; CHECK-LABEL: @mstore_v8i32(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x i32>*
; CHECK-NEXT:  call void @llvm.masked.store.v8i32(<8 x i32> %v, <8 x i32>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:  ret void
}

define void @mstore_v4i64(i8* %f, <4 x i64> %v) {
  tail call void @llvm.x86.avx2.maskstore.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x i64> %v)
  ret void

; CHECK-LABEL: @mstore_v4i64(
; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i64>*
; CHECK-NEXT:  call void @llvm.masked.store.v4i64(<4 x i64> %v, <4 x i64>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:  ret void
}

declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>)
declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)
declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>)
declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>)
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>)
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>)

declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>)
declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>)
declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>)
declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>)
declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>)
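; For reference, a sketch of the sign-bit rule exercised by the fake_ones and
; real_ones tests above. Illustrative only: no CHECK lines, hypothetical name.
; A lane is enabled exactly when its mask element is negative, so converting an
; i32 mask vector to the generic i1 form is a signed compare against zero.

define <4 x i1> @sign_bits_to_bool_mask(<4 x i32> %mask) {
  %boolmask = icmp slt <4 x i32> %mask, zeroinitializer
  ret <4 x i1> %boolmask
}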