1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-25 20:23:11 +01:00

[AArch64] Add combine for add(udot(0, x, y), z) -> udot(z, x, y).

Given a zero input for a udot, an add can be folded in to take the place
of the input, using the addition that the instruction naturally
performs.

Differential Revision: https://reviews.llvm.org/D97188
This commit is contained in:
David Green 2021-03-01 12:53:34 +00:00
parent 2e8c4023c8
commit 042f6e8e77
3 changed files with 57 additions and 32 deletions

View File

@ -13217,6 +13217,29 @@ static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, DL, MVT::i64));
}
// ADD(UDOT(zero, x, y), A) --> UDOT(A, x, y)
// A [us]dot instruction accumulates into its first operand. When that
// accumulator input is an all-zeros vector, an ADD of the dot's result with
// some other value A can be folded away by making A the accumulator instead:
// the dot instruction then performs the addition for free.
static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
// Only plain integer ADD nodes are candidates for this fold.
if (N->getOpcode() != ISD::ADD)
return SDValue();
SDValue Dot = N->getOperand(0);
SDValue A = N->getOperand(1);
// Handle commutativity: the dot may be either operand of the ADD, so try
// operand 0 first and swap if it is not a zero-accumulator dot.
auto isZeroDot = [](SDValue Dot) {
return (Dot.getOpcode() == AArch64ISD::UDOT ||
Dot.getOpcode() == AArch64ISD::SDOT) &&
ISD::isBuildVectorAllZeros(Dot.getOperand(0).getNode());
};
if (!isZeroDot(Dot))
std::swap(Dot, A);
if (!isZeroDot(Dot))
return SDValue();
// Rebuild the dot node with A as the accumulator (operand 0) in place of
// the all-zeros vector, keeping the original multiplicand operands.
return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
Dot.getOperand(2));
}
// The basic add/sub long vector instructions have variants with "2" on the end
// which act on the high-half of their inputs. They are normally matched by
// patterns like:
@ -13276,6 +13299,8 @@ static SDValue performAddSubCombine(SDNode *N,
// Try to change sum of two reductions.
if (SDValue Val = performUADDVCombine(N, DAG))
return Val;
if (SDValue Val = performAddDotCombine(N, DAG))
return Val;
return performAddSubLongCombine(N, DCI, DAG);
}

View File

@ -55,9 +55,7 @@ entry:
define <2 x i32> @test_vdot_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
; CHECK-LABEL: test_vdot_u32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: udot v3.2s, v1.8b, v2.8b
; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
; CHECK-NEXT: udot v0.2s, v1.8b, v2.8b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
@ -68,9 +66,7 @@ entry:
define <4 x i32> @test_vdotq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
; CHECK-LABEL: test_vdotq_u32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: udot v3.4s, v1.16b, v2.16b
; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-NEXT: udot v0.4s, v1.16b, v2.16b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
@ -81,9 +77,7 @@ entry:
define <2 x i32> @test_vdot_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
; CHECK-LABEL: test_vdot_s32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: sdot v3.2s, v1.8b, v2.8b
; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
; CHECK-NEXT: sdot v0.2s, v1.8b, v2.8b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
@ -94,9 +88,7 @@ entry:
define <4 x i32> @test_vdotq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
; CHECK-LABEL: test_vdotq_s32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: sdot v3.4s, v1.16b, v2.16b
; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-NEXT: sdot v0.4s, v1.16b, v2.16b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
@ -161,6 +153,11 @@ entry:
define <2 x i32> @test_vdot_lane_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: test_vdot_lane_u32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
@ -171,6 +168,11 @@ entry:
}
define <4 x i32> @test_vdotq_lane_u32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: test_vdotq_lane_u32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@ -183,9 +185,7 @@ entry:
define <2 x i32> @test_vdot_laneq_u32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdot_laneq_u32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: udot v3.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
@ -199,9 +199,7 @@ entry:
define <4 x i32> @test_vdotq_laneq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdotq_laneq_u32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: udot v3.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
@ -269,6 +267,11 @@ entry:
define <2 x i32> @test_vdot_lane_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: test_vdot_lane_s32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
@ -279,6 +282,11 @@ entry:
}
define <4 x i32> @test_vdotq_lane_s32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: test_vdotq_lane_s32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@ -291,9 +299,7 @@ entry:
define <2 x i32> @test_vdot_laneq_s32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdot_laneq_s32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: sdot v3.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
@ -307,9 +313,7 @@ entry:
define <4 x i32> @test_vdotq_laneq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdotq_laneq_s32_zero:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: sdot v3.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>

View File

@ -273,11 +273,9 @@ define i32 @test_udot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v1.16b, #1
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: movi v4.2d, #0000000000000000
; CHECK-NEXT: udot v4.4s, v1.16b, v0.16b
; CHECK-NEXT: udot v3.4s, v1.16b, v2.16b
; CHECK-NEXT: add v0.4s, v4.4s, v3.4s
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: udot v3.4s, v1.16b, v0.16b
; CHECK-NEXT: addv s0, v3.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
@ -390,11 +388,9 @@ define i32 @test_sdot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v1.16b, #1
; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: movi v4.2d, #0000000000000000
; CHECK-NEXT: sdot v4.4s, v1.16b, v0.16b
; CHECK-NEXT: sdot v3.4s, v1.16b, v2.16b
; CHECK-NEXT: add v0.4s, v4.4s, v3.4s
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: sdot v3.4s, v1.16b, v0.16b
; CHECK-NEXT: addv s0, v3.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry: