Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-01-31 12:41:49 +01:00)
[PowerPC] Implement Vector Extract Low/High Order Builtins in LLVM/Clang
This patch implements the function prototypes vec_extractl and vec_extracth in altivec.h to utilize the Vector Extract Double Element instructions introduced in Power10.

Differential Revision: https://reviews.llvm.org/D84622
parent b0aa16911a
commit f87266ec05
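For context, a minimal usage sketch of the new builtins (not part of this patch): it assumes a Power10 toolchain (e.g. clang with -mcpu=pwr10 -maltivec) and the vec_extractl/vec_extracth prototypes this change adds to altivec.h, using the unsigned-char overload that returns vector unsigned long long; the wrapper names extract_low/extract_high are illustrative only, and which doubleword of the result receives the extracted element depends on the target endianness, so the comments stay deliberately general.

#include <altivec.h>

// Sketch only: extract the byte at element index idx from the pair of source
// vectors a and b, zero-extended into a doubleword of a <2 x i64> result.
// On a Power10 target these calls lower to the vextdubvlx/vextdubvrx
// instructions defined in the diff below (the exact choice of left- vs.
// right-indexed form depends on endianness).
vector unsigned long long extract_low(vector unsigned char a,
                                      vector unsigned char b,
                                      unsigned int idx) {
  return vec_extractl(a, b, idx);
}

vector unsigned long long extract_high(vector unsigned char a,
                                       vector unsigned char b,
                                       unsigned int idx) {
  return vec_extracth(a, b, idx);
}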
@@ -531,6 +531,39 @@ let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
               Intrinsic<[llvm_v2i64_ty],
                         [llvm_v2i64_ty, llvm_i64_ty, llvm_i32_ty],
                         [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+  // P10 Vector Extract.
+  def int_ppc_altivec_vextdubvlx : GCCBuiltin<"__builtin_altivec_vextdubvlx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vextdubvrx : GCCBuiltin<"__builtin_altivec_vextdubvrx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vextduhvlx : GCCBuiltin<"__builtin_altivec_vextduhvlx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vextduhvrx : GCCBuiltin<"__builtin_altivec_vextduhvrx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vextduwvlx : GCCBuiltin<"__builtin_altivec_vextduwvlx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vextduwvrx : GCCBuiltin<"__builtin_altivec_vextduwvrx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vextddvlx : GCCBuiltin<"__builtin_altivec_vextddvlx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
+  def int_ppc_altivec_vextddvrx : GCCBuiltin<"__builtin_altivec_vextddvrx">,
+              Intrinsic<[llvm_v2i64_ty],
+                        [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+                        [IntrNoMem]>;
 }

 // Vector average.
@@ -948,37 +948,69 @@ let Predicates = [IsISA3_1] in {
                              (int_ppc_altivec_vinsdrx v2i64:$vDi, i64:$rA, i64:$rB))]>,
                   RegConstraint<"$vDi = $vD">, NoEncode<"$vDi">;
   def VEXTDUBVLX : VAForm_1a<24, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextdubvlx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextdubvlx v16i8:$vA,
+                                                               v16i8:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUBVRX : VAForm_1a<25, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextdubvrx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextdubvrx v16i8:$vA,
+                                                               v16i8:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUHVLX : VAForm_1a<26, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduhvlx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduhvlx v8i16:$vA,
+                                                               v8i16:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUHVRX : VAForm_1a<27, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduhvrx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduhvrx v8i16:$vA,
+                                                               v8i16:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUWVLX : VAForm_1a<28, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduwvlx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduwvlx v4i32:$vA,
+                                                               v4i32:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUWVRX : VAForm_1a<29, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduwvrx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduwvrx v4i32:$vA,
+                                                               v4i32:$vB,
+                                                               i32:$rC))]>;
   def VEXTDDVLX : VAForm_1a<30, (outs vrrc:$vD),
-                            (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                            (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                             "vextddvlx $vD, $vA, $vB, $rC",
-                            IIC_VecGeneral, []>;
+                            IIC_VecGeneral,
+                            [(set v2i64:$vD,
+                                  (int_ppc_altivec_vextddvlx v2i64:$vA,
+                                                             v2i64:$vB,
+                                                             i32:$rC))]>;
   def VEXTDDVRX : VAForm_1a<31, (outs vrrc:$vD),
-                            (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                            (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                             "vextddvrx $vD, $vA, $vB, $rC",
-                            IIC_VecGeneral, []>;
+                            IIC_VecGeneral,
+                            [(set v2i64:$vD,
+                                  (int_ppc_altivec_vextddvrx v2i64:$vA,
+                                                             v2i64:$vB,
+                                                             i32:$rC))]>;
   def VPDEPD : VXForm_1<1485, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                         "vpdepd $vD, $vA, $vB", IIC_VecGeneral,
                         [(set v2i64:$vD,
@@ -253,3 +253,91 @@ entry:
   ret <2 x i64> %0
 }
 declare <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64>, i64, i32 immarg)
+
+define <2 x i64> @testVEXTDUBVLX(<16 x i8> %a, <16 x i8> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUBVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextdubvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextdubvlx(<16 x i8> %a, <16 x i8> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextdubvlx(<16 x i8>, <16 x i8>, i32)
+
+define <2 x i64> @testVEXTDUBVRX(<16 x i8> %a, <16 x i8> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUBVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextdubvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextdubvrx(<16 x i8> %a, <16 x i8> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextdubvrx(<16 x i8>, <16 x i8>, i32)
+
+define <2 x i64> @testVEXTDUHVLX(<8 x i16> %a, <8 x i16> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUHVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduhvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduhvlx(<8 x i16> %a, <8 x i16> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduhvlx(<8 x i16>, <8 x i16>, i32)
+
+define <2 x i64> @testVEXTDUHVRX(<8 x i16> %a, <8 x i16> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUHVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduhvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduhvrx(<8 x i16> %a, <8 x i16> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduhvrx(<8 x i16>, <8 x i16>, i32)
+
+define <2 x i64> @testVEXTDUWVLX(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUWVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduwvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduwvlx(<4 x i32> %a, <4 x i32> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduwvlx(<4 x i32>, <4 x i32>, i32)
+
+define <2 x i64> @testVEXTDUWVRX(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUWVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduwvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduwvrx(<4 x i32> %a, <4 x i32> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduwvrx(<4 x i32>, <4 x i32>, i32)
+
+define <2 x i64> @testVEXTDDVLX(<2 x i64> %a, <2 x i64> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDDVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextddvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextddvlx(<2 x i64> %a, <2 x i64> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextddvlx(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @testVEXTDDVRX(<2 x i64> %a, <2 x i64> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDDVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextddvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextddvrx(<2 x i64> %a, <2 x i64> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextddvrx(<2 x i64>, <2 x i64>, i32)