[AVX512] Remove masked palignr intrinsics and auto-upgrade them to native IR of vector shuffle and select.
llvm-svn: 271872
commit bb0d5ffb41
parent 311ae9cc2f
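For reference, here is a minimal sketch of the upgrade for the 128-bit variant; the value names %x0, %x1, %passthru and %mask are assumed for illustration. An old masked-intrinsic call such as

  %res = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0,
           <16 x i8> %x1, i32 2, <16 x i8> %passthru, i16 %mask)

is rewritten on load into native IR roughly along these lines:

  ; bytes 2..15 of %x1 followed by bytes 0..1 of %x0, i.e. the 32-byte
  ; concatenation %x0:%x1 shifted right by two bytes, low 16 bytes kept
  %palignr = shufflevector <16 x i8> %x1, <16 x i8> %x0,
             <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9,
                         i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
  ; apply the write mask: take %palignr where a mask bit is set, %passthru elsewhere
  %mask.vec = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %mask.vec, <16 x i8> %palignr, <16 x i8> %passthru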
include/llvm/IR/IntrinsicsX86.td
@@ -7255,19 +7255,16 @@ let TargetPrefix = "x86" in {
                     llvm_i8_ty], [IntrNoMem]>;

  def int_x86_avx512_mask_palignr_128 :
        GCCBuiltin<"__builtin_ia32_palignr128_mask">,
        Intrinsic<[llvm_v16i8_ty],
                  [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty,
                   llvm_i16_ty], [IntrNoMem]>;

  def int_x86_avx512_mask_palignr_256 :
        GCCBuiltin<"__builtin_ia32_palignr256_mask">,
        Intrinsic<[llvm_v32i8_ty],
                  [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v32i8_ty,
                   llvm_i32_ty], [IntrNoMem]>;

  def int_x86_avx512_mask_palignr_512 :
        GCCBuiltin<"__builtin_ia32_palignr512_mask">,
        Intrinsic<[llvm_v64i8_ty],
                  [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v64i8_ty,
                   llvm_i64_ty], [IntrNoMem]>;
lib/IR/AutoUpgrade.cpp
@@ -218,6 +218,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
      Name.startswith("x86.avx512.mask.load.q.") ||
      Name == "x86.sse42.crc32.64.8" ||
      Name.startswith("x86.avx.vbroadcast.s") ||
      Name.startswith("x86.avx512.mask.palignr.") ||
      Name.startswith("x86.sse2.psll.dq") ||
      Name.startswith("x86.sse2.psrl.dq") ||
      Name.startswith("x86.avx2.psll.dq") ||
@@ -366,6 +367,53 @@ static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *UpgradeX86PALIGNRIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
                                          Value *Op0, Value *Op1, Value *Shift,
                                          Value *Passthru, Value *Mask) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert(NumElts % 16 == 0);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  int Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that
  for (unsigned l = 0; l != NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (Idx >= 16)
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  // If the mask is all ones just emit the align operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Align;

  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
                                                   NumElts);
  Mask = Builder.CreateBitCast(Mask, MaskTy, "cast");
  return Builder.CreateSelect(Mask, Align, Passthru);
}

// Handles upgrading SSE2 and AVX2 PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
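The two early-outs in UpgradeX86PALIGNRIntrinsics are worth a worked example. A byte shift of 17..31 pulls data only from the first source operand plus zeros, so the helper reduces the shift by 16 and swaps a zero vector in; a shift of 32 or more runs past both inputs and folds to zero outright. As a hypothetical sketch (operand names assumed), an unmasked 128-bit call with an immediate of 20

  %res = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0,
           <16 x i8> %x1, i32 20, <16 x i8> %p, i16 -1)

upgrades with ShiftVal reduced to 4, zeros shifting in from the top, and no select at all, since the all-ones mask takes the fast path:

  ; bytes 4..15 of %x0 followed by four zero bytes
  %palignr = shufflevector <16 x i8> %x0, <16 x i8> zeroinitializer,
             <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11,
                         i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>

An immediate of 32 or more would instead produce a plain zeroinitializer result.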
@@ -725,6 +773,12 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
      Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
      Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
                                        Constant::getNullValue(MaskTy));
    } else if (Name.startswith("llvm.x86.avx512.mask.palignr.")) {
      Rep = UpgradeX86PALIGNRIntrinsics(Builder, C, CI->getArgOperand(0),
                                        CI->getArgOperand(1),
                                        CI->getArgOperand(2),
                                        CI->getArgOperand(3),
                                        CI->getArgOperand(4));
    } else if (Name == "llvm.x86.sse2.psll.dq" ||
               Name == "llvm.x86.avx2.psll.dq") {
      // 128/256-bit shift left specified in bits.
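The zero-masking (maskz) form needs no special case in this dispatch: the tests below simply pass zeroinitializer as the pass-through operand, so the select produced by the helper merges with zero. A minimal sketch (value names assumed):

  %res1 = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0,
            <16 x i8> %x1, i32 2, <16 x i8> zeroinitializer, i16 %m)

becomes, after the shuffle shown earlier,

  %m.vec = bitcast i16 %m to <16 x i1>
  %res1 = select <16 x i1> %m.vec, <16 x i8> %palignr, <16 x i8> zeroinitializer

and the backend re-forms it into vpalignr with {%k1} {z}, as the updated CHECK lines verify.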
test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -105,3 +105,33 @@ define <64 x i8>@test_int_x86_avx512_mask_loadu_b_512(i8* %ptr, i8* %ptr2, <64 x
  %res2 = add <64 x i8> %res, %res1
  ret <64 x i8> %res2
}

declare <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, <64 x i8>, i64)

define <64 x i8>@test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3, i64 %x4) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_palignr_512:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpaddb %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: vpaddb %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_palignr_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: vpaddb %zmm0, %zmm2, %zmm0
; AVX512F-32-NEXT: vpaddb %zmm3, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
  %res = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> %x3, i64 %x4)
  %res1 = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> zeroinitializer, i64 %x4)
  %res2 = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> %x3, i64 -1)
  %res3 = add <64 x i8> %res, %res1
  %res4 = add <64 x i8> %res3, %res2
  ret <64 x i8> %res4
}
test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F-32
@@ -2610,38 +2611,6 @@ define <32 x i16>@test_int_x86_avx512_mask_punpcklw_d_512(<32 x i16> %x0, <32 x
  ret <32 x i16> %res2
}

declare <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, <64 x i8>, i64)

define <64 x i8>@test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3, i64 %x4) {
; AVX512BW-LABEL: test_int_x86_avx512_mask_palignr_512:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: kmovq %rdi, %k1
; AVX512BW-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
; AVX512BW-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddb %zmm3, %zmm2, %zmm1
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_palignr_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
; AVX512F-32-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vpalignr $2, %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512F-32-NEXT: vpaddb %zmm0, %zmm2, %zmm0
; AVX512F-32-NEXT: vpaddb %zmm3, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
  %res = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> %x3, i64 %x4)
  %res1 = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> zeroinitializer, i64 %x4)
  %res2 = call <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <64 x i8> %x3, i64 -1)
  %res3 = add <64 x i8> %res, %res1
  %res4 = add <64 x i8> %res3, %res2
  ret <64 x i8> %res4
}

declare <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8>, <64 x i8>, i32, <32 x i16>, i32)

define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) {
test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -128,3 +128,43 @@ define <32 x i8>@test_int_x86_avx512_mask_loadu_b_256(i8* %ptr, i8* %ptr2, <32 x
  %res2 = add <32 x i8> %res, %res1
  ret <32 x i8> %res2
}

declare <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8>, <16 x i8>, i32, <16 x i8>, i16)

define <16 x i8>@test_int_x86_avx512_mask_palignr_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_palignr_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm3 ## encoding: [0x62,0xf3,0x7d,0x08,0x0f,0xd9,0x02]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x0f,0xd1,0x02]
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x0f,0xc1,0x02]
; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0x6d,0x08,0xfc,0xc0]
; CHECK-NEXT: vpaddb %xmm3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfc,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
  %res = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> %x3, i16 %x4)
  %res1 = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> zeroinitializer, i16 %x4)
  %res2 = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> %x3, i16 -1)
  %res3 = add <16 x i8> %res, %res1
  %res4 = add <16 x i8> %res3, %res2
  ret <16 x i8> %res4
}

declare <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8>, <32 x i8>, i32, <32 x i8>, i32)

define <32 x i8>@test_int_x86_avx512_mask_palignr_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_palignr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm3 ## encoding: [0x62,0xf3,0x7d,0x28,0x0f,0xd9,0x02]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x0f,0xd1,0x02]
; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x0f,0xc1,0x02]
; CHECK-NEXT: vpaddb %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0x6d,0x28,0xfc,0xc0]
; CHECK-NEXT: vpaddb %ymm3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0xfc,0xc3]
; CHECK-NEXT: retq ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> %x3, i32 %x4)
  %res1 = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> zeroinitializer, i32 %x4)
  %res2 = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> %x3, i32 -1)
  %res3 = add <32 x i8> %res, %res1
  %res4 = add <32 x i8> %res3, %res2
  ret <32 x i8> %res4
}
test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -5484,46 +5484,6 @@ define <16 x i16>@test_int_x86_avx512_mask_punpckhw_d_256(<16 x i16> %x0, <16 x
  ret <16 x i16> %res2
}

declare <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8>, <16 x i8>, i32, <16 x i8>, i16)

define <16 x i8>@test_int_x86_avx512_mask_palignr_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_palignr_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x0f,0xd1,0x02]
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x0f,0xd9,0x02]
; CHECK-NEXT: vpalignr $2, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x0f,0xc1,0x02]
; CHECK-NEXT: vpaddb %xmm3, %xmm2, %xmm1 ## encoding: [0x62,0xf1,0x6d,0x08,0xfc,0xcb]
; CHECK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ## encoding: [0x62,0xf1,0x75,0x08,0xfc,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
  %res = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> %x3, i16 %x4)
  %res1 = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> zeroinitializer, i16 %x4)
  %res2 = call <16 x i8> @llvm.x86.avx512.mask.palignr.128(<16 x i8> %x0, <16 x i8> %x1, i32 2, <16 x i8> %x3, i16 -1)
  %res3 = add <16 x i8> %res, %res1
  %res4 = add <16 x i8> %res3, %res2
  ret <16 x i8> %res4
}

declare <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8>, <32 x i8>, i32, <32 x i8>, i32)

define <32 x i8>@test_int_x86_avx512_mask_palignr_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_palignr_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x0f,0xd1,0x02]
; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x0f,0xd9,0x02]
; CHECK-NEXT: vpalignr $2, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x0f,0xc1,0x02]
; CHECK-NEXT: vpaddb %ymm3, %ymm2, %ymm1 ## encoding: [0x62,0xf1,0x6d,0x28,0xfc,0xcb]
; CHECK-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf1,0x75,0x28,0xfc,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
  %res = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> %x3, i32 %x4)
  %res1 = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> zeroinitializer, i32 %x4)
  %res2 = call <32 x i8> @llvm.x86.avx512.mask.palignr.256(<32 x i8> %x0, <32 x i8> %x1, i32 2, <32 x i8> %x3, i32 -1)
  %res3 = add <32 x i8> %res, %res1
  %res4 = add <32 x i8> %res3, %res2
  ret <32 x i8> %res4
}

declare <8 x i16> @llvm.x86.avx512.mask.dbpsadbw.128(<16 x i8>, <16 x i8>, i32, <8 x i16>, i8)

define <8 x i16>@test_int_x86_avx512_mask_dbpsadbw_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x3, i8 %x4) {