1
0
Mirror of https://github.com/RPCS3/llvm-mirror.git (last synced 2024-10-24 05:23:45 +02:00)

[X86][FMA4] Add test cases to demonstrate missed folding opportunities for FMA4 scalar intrinsics.

llvm-svn: 288008
Commit authored by Craig Topper on 2016-11-27 21:36:58 +00:00
parent ba29d9ef0b
commit 485d41b7f7

View File

@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
; RUN: llc < %s -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
; RUN: llc < %s -mattr=fma4 | FileCheck %s --check-prefix=FMA4
target triple = "x86_64-unknown-unknown"
@ -21,6 +22,14 @@ define void @fmadd_aab_ss(float* %a, float* %b) {
; CHECK-NEXT: vfmadd213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmadd_aab_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfmaddss %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -47,6 +56,14 @@ define void @fmadd_aba_ss(float* %a, float* %b) {
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmadd_aba_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfmaddss %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -73,6 +90,14 @@ define void @fmsub_aab_ss(float* %a, float* %b) {
; CHECK-NEXT: vfmsub213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmsub_aab_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfmsubss %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -99,6 +124,14 @@ define void @fmsub_aba_ss(float* %a, float* %b) {
; CHECK-NEXT: vfmsub132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmsub_aba_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfmsubss %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -125,6 +158,14 @@ define void @fnmadd_aab_ss(float* %a, float* %b) {
; CHECK-NEXT: vfnmadd213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmadd_aab_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfnmaddss %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -151,6 +192,14 @@ define void @fnmadd_aba_ss(float* %a, float* %b) {
; CHECK-NEXT: vfnmadd132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmadd_aba_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfnmaddss %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -177,6 +226,14 @@ define void @fnmsub_aab_ss(float* %a, float* %b) {
; CHECK-NEXT: vfnmsub213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmsub_aab_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfnmsubss %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -203,6 +260,14 @@ define void @fnmsub_aba_ss(float* %a, float* %b) {
; CHECK-NEXT: vfnmsub132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmsub_aba_ss:
; FMA4: # BB#0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; FMA4-NEXT: vfnmsubss %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovss %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@ -229,6 +294,14 @@ define void @fmadd_aab_sd(double* %a, double* %b) {
; CHECK-NEXT: vfmadd213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmadd_aab_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfmaddsd %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@ -251,6 +324,14 @@ define void @fmadd_aba_sd(double* %a, double* %b) {
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmadd_aba_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfmaddsd %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@ -273,6 +354,14 @@ define void @fmsub_aab_sd(double* %a, double* %b) {
; CHECK-NEXT: vfmsub213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmsub_aab_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfmsubsd %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@ -295,6 +384,14 @@ define void @fmsub_aba_sd(double* %a, double* %b) {
; CHECK-NEXT: vfmsub132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fmsub_aba_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfmsubsd %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@ -317,6 +414,14 @@ define void @fnmadd_aab_sd(double* %a, double* %b) {
; CHECK-NEXT: vfnmadd213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmadd_aab_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfnmaddsd %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@ -339,6 +444,14 @@ define void @fnmadd_aba_sd(double* %a, double* %b) {
; CHECK-NEXT: vfnmadd132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmadd_aba_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfnmaddsd %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@ -361,6 +474,14 @@ define void @fnmsub_aab_sd(double* %a, double* %b) {
; CHECK-NEXT: vfnmsub213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmsub_aab_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfnmsubsd %xmm1, %xmm0, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@ -383,6 +504,14 @@ define void @fnmsub_aba_sd(double* %a, double* %b) {
; CHECK-NEXT: vfnmsub132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
;
; FMA4-LABEL: fnmsub_aba_sd:
; FMA4: # BB#0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; FMA4-NEXT: vfnmsubsd %xmm0, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1