llvm-mirror/test/CodeGen/AArch64/neon-mla-mls.ll
Sanne Wouda, commit a94f7d9f21: [AArch64] Fix MUL/SUB fusing
Summary:
When MUL is the first operand of SUB, we can't use MLS because the accumulator
would have to be negated. Emit a NEG of the accumulator followed by an MLA
instead, similar to what we do for FMUL / FSUB fusing.

Reviewers: dmgreen, SjoerdMeijer, fhahn, Gerolf, mstorsjo, asbirlea

Reviewed By: asbirlea

Subscribers: kristof.beyls, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D71067
2019-12-05 18:10:06 +00:00
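
To illustrate the transformation the summary above describes (a minimal sketch, not part of the original commit): MLA computes Vd = Vd + Vn * Vm and MLS computes Vd = Vd - Vn * Vm, so only sub(C, mul(A, B)) maps onto MLS directly. When the multiply is the first operand of the subtract, the identity A * B - C = (-C) + A * B lets the backend negate the accumulator and then use MLA, which is what the mls2* tests below check. The function name @sketch_mul_first and the register assignment are illustrative assumptions, not part of the test file.

; Illustrative IR (hypothetical function, mirrors the mls2* tests below):
define <4 x i32> @sketch_mul_first(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
  %p = mul <4 x i32> %A, %B
  %r = sub <4 x i32> %p, %C     ; MUL is the first operand of SUB
  ret <4 x i32> %r
}
; Expected lowering (assuming the accumulator C arrives in v2, as in the tests
; below), followed by a mov of v2 into the return register v0:
;   neg v2.4s, v2.4s            ; negate the accumulator: v2 = -C
;   mla v2.4s, v0.4s, v1.4s     ; v2 = (-C) + A * B = A * B - C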


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
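; C + (A * B) selects MLA directly.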
define <8 x i8> @mla8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
; CHECK-LABEL: mla8xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: mla v2.8b, v0.8b, v1.8b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <8 x i8> %A, %B;
%tmp2 = add <8 x i8> %C, %tmp1;
ret <8 x i8> %tmp2
}
define <16 x i8> @mla16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
; CHECK-LABEL: mla16xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: mla v2.16b, v0.16b, v1.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <16 x i8> %A, %B;
%tmp2 = add <16 x i8> %C, %tmp1;
ret <16 x i8> %tmp2
}
define <4 x i16> @mla4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
; CHECK-LABEL: mla4xi16:
; CHECK: // %bb.0:
; CHECK-NEXT: mla v2.4h, v0.4h, v1.4h
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <4 x i16> %A, %B;
%tmp2 = add <4 x i16> %C, %tmp1;
ret <4 x i16> %tmp2
}
define <8 x i16> @mla8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
; CHECK-LABEL: mla8xi16:
; CHECK: // %bb.0:
; CHECK-NEXT: mla v2.8h, v0.8h, v1.8h
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <8 x i16> %A, %B;
%tmp2 = add <8 x i16> %C, %tmp1;
ret <8 x i16> %tmp2
}
define <2 x i32> @mla2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
; CHECK-LABEL: mla2xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: mla v2.2s, v0.2s, v1.2s
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <2 x i32> %A, %B;
%tmp2 = add <2 x i32> %C, %tmp1;
ret <2 x i32> %tmp2
}
define <4 x i32> @mla4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
; CHECK-LABEL: mla4xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: mla v2.4s, v0.4s, v1.4s
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <4 x i32> %A, %B;
%tmp2 = add <4 x i32> %C, %tmp1;
ret <4 x i32> %tmp2
}
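
; C - (A * B) selects MLS directly.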
define <8 x i8> @mls8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
; CHECK-LABEL: mls8xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: mls v2.8b, v0.8b, v1.8b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <8 x i8> %A, %B;
%tmp2 = sub <8 x i8> %C, %tmp1;
ret <8 x i8> %tmp2
}
define <16 x i8> @mls16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
; CHECK-LABEL: mls16xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: mls v2.16b, v0.16b, v1.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <16 x i8> %A, %B;
%tmp2 = sub <16 x i8> %C, %tmp1;
ret <16 x i8> %tmp2
}
define <4 x i16> @mls4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
; CHECK-LABEL: mls4xi16:
; CHECK: // %bb.0:
; CHECK-NEXT: mls v2.4h, v0.4h, v1.4h
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <4 x i16> %A, %B;
%tmp2 = sub <4 x i16> %C, %tmp1;
ret <4 x i16> %tmp2
}
define <8 x i16> @mls8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
; CHECK-LABEL: mls8xi16:
; CHECK: // %bb.0:
; CHECK-NEXT: mls v2.8h, v0.8h, v1.8h
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <8 x i16> %A, %B;
%tmp2 = sub <8 x i16> %C, %tmp1;
ret <8 x i16> %tmp2
}
define <2 x i32> @mls2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
; CHECK-LABEL: mls2xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: mls v2.2s, v0.2s, v1.2s
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <2 x i32> %A, %B;
%tmp2 = sub <2 x i32> %C, %tmp1;
ret <2 x i32> %tmp2
}
define <4 x i32> @mls4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
; CHECK-LABEL: mls4xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: mls v2.4s, v0.4s, v1.4s
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <4 x i32> %A, %B;
%tmp2 = sub <4 x i32> %C, %tmp1;
ret <4 x i32> %tmp2
}
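
; (A * B) - C cannot use MLS (which computes C - A * B); the accumulator is
; negated and MLA is selected instead.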
define <8 x i8> @mls2v8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
; CHECK-LABEL: mls2v8xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: neg v2.8b, v2.8b
; CHECK-NEXT: mla v2.8b, v0.8b, v1.8b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <8 x i8> %A, %B;
%tmp2 = sub <8 x i8> %tmp1, %C;
ret <8 x i8> %tmp2
}
define <16 x i8> @mls2v16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
; CHECK-LABEL: mls2v16xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: neg v2.16b, v2.16b
; CHECK-NEXT: mla v2.16b, v0.16b, v1.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <16 x i8> %A, %B;
%tmp2 = sub <16 x i8> %tmp1, %C;
ret <16 x i8> %tmp2
}
define <4 x i16> @mls2v4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
; CHECK-LABEL: mls2v4xi16:
; CHECK: // %bb.0:
; CHECK-NEXT: neg v2.4h, v2.4h
; CHECK-NEXT: mla v2.4h, v0.4h, v1.4h
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <4 x i16> %A, %B;
%tmp2 = sub <4 x i16> %tmp1, %C;
ret <4 x i16> %tmp2
}
define <8 x i16> @mls2v8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
; CHECK-LABEL: mls2v8xi16:
; CHECK: // %bb.0:
; CHECK-NEXT: neg v2.8h, v2.8h
; CHECK-NEXT: mla v2.8h, v0.8h, v1.8h
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <8 x i16> %A, %B;
%tmp2 = sub <8 x i16> %tmp1, %C;
ret <8 x i16> %tmp2
}
define <2 x i32> @mls2v2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
; CHECK-LABEL: mls2v2xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: neg v2.2s, v2.2s
; CHECK-NEXT: mla v2.2s, v0.2s, v1.2s
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <2 x i32> %A, %B;
%tmp2 = sub <2 x i32> %tmp1, %C;
ret <2 x i32> %tmp2
}
define <4 x i32> @mls2v4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
; CHECK-LABEL: mls2v4xi32:
; CHECK: // %bb.0:
; CHECK-NEXT: neg v2.4s, v2.4s
; CHECK-NEXT: mla v2.4s, v0.4s, v1.4s
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: ret
%tmp1 = mul <4 x i32> %A, %B;
%tmp2 = sub <4 x i32> %tmp1, %C;
ret <4 x i32> %tmp2
}