From bf274751c88ceeb794f816028cafd5e33ef25dbf Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sun, 8 Jul 2018 18:04:00 +0000
Subject: [PATCH] [X86] Enhance combineFMA to look for FNEG behind an
 EXTRACT_VECTOR_ELT.

llvm-svn: 336514
---
 lib/Target/X86/X86ISelLowering.cpp            |  14 ++-
 .../X86/avx512-intrinsics-fast-isel.ll        | 114 ++++--------------
 test/CodeGen/X86/fma4-fneg-combine.ll         |  28 ++---
 3 files changed, 45 insertions(+), 111 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 6d5923010cc..5c7b43cc086 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -37762,11 +37762,23 @@ static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
   SDValue B = N->getOperand(1);
   SDValue C = N->getOperand(2);
 
-  auto invertIfNegative = [](SDValue &V) {
+  auto invertIfNegative = [&DAG](SDValue &V) {
     if (SDValue NegVal = isFNEG(V.getNode())) {
       V = NegVal;
       return true;
     }
+    // Look through extract_vector_elts. If it comes from an FNEG, create a
+    // new extract from the FNEG input.
+    if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+        isa<ConstantSDNode>(V.getOperand(1)) &&
+        cast<ConstantSDNode>(V.getOperand(1))->getZExtValue() == 0) {
+      if (SDValue NegVal = isFNEG(V.getOperand(0).getNode())) {
+        V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
+                        NegVal, V.getOperand(1));
+        return true;
+      }
+    }
+
     return false;
   };
 
diff --git a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 1c7f633df38..24f08585a4d 100644
--- a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -5048,18 +5048,14 @@ define <4 x float> @test_mm_mask_fmsub_round_ss(<4 x float> %__W, i8 zeroext %__
 ; X86-LABEL: test_mm_mask_fmsub_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_fmsub_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <4 x float> %__W, i64 0
@@ -5104,18 +5100,14 @@ define <4 x float> @test_mm_maskz_fmsub_round_ss(i8 zeroext %__U, <4 x float> %_
 ; X86-LABEL: test_mm_maskz_fmsub_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_fmsub_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <4 x float> %__A, i64 0
@@ -5163,21 +5155,15 @@ define <4 x float> @test_mm_mask3_fmsub_round_ss(<4 x float> %__W, <4 x float> %
 ; X86-LABEL: test_mm_mask3_fmsub_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm2, %xmm3
-; X86-NEXT:    vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X86-NEXT:    vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X86-NEXT:    vmovaps %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask3_fmsub_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm2, %xmm3
-; X64-NEXT:    vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X64-NEXT:    vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X64-NEXT:    vmovaps %xmm2, %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -5224,18 +5210,14 @@ define <4 x float> @test_mm_mask_fnmadd_round_ss(<4 x float> %__W, i8 zeroext %_
 ; X86-LABEL: test_mm_mask_fnmadd_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm1, %xmm1
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_fnmadd_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm1, %xmm1
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <4 x float> %__W, i64 0
@@ -5280,18 +5262,14 @@ define <4 x float> @test_mm_maskz_fnmadd_round_ss(i8 zeroext %__U, <4 x float> %
 ; X86-LABEL: test_mm_maskz_fnmadd_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm1, %xmm1
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_fnmadd_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm1, %xmm1
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <4 x float> %__A, i64 0
@@ -5339,19 +5317,15 @@ define <4 x float> @test_mm_mask3_fnmadd_round_ss(<4 x float> %__W, <4 x float>
 ; X86-LABEL: test_mm_mask3_fnmadd_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm1, %xmm1
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vfnmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X86-NEXT:    vmovaps %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask3_fnmadd_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm1, %xmm1
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vfnmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X64-NEXT:    vmovaps %xmm2, %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -5399,20 +5373,14 @@ define <4 x float> @test_mm_mask_fnmsub_round_ss(<4 x float> %__W, i8 zeroext %_
 ; X86-LABEL: test_mm_mask_fnmsub_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm1, %xmm1
-; X86-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_fnmsub_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm1, %xmm1
-; X64-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <4 x float> %__W, i64 0
@@ -5459,20 +5427,14 @@ define <4 x float> @test_mm_maskz_fnmsub_round_ss(i8 zeroext %__U, <4 x float> %
 ; X86-LABEL: test_mm_maskz_fnmsub_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm1, %xmm1
-; X86-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_fnmsub_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm1, %xmm1
-; X64-NEXT:    vxorps %xmm3, %xmm2, %xmm2
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <4 x float> %__A, i64 0
@@ -5522,23 +5484,15 @@ define <4 x float> @test_mm_mask3_fnmsub_round_ss(<4 x float> %__W, <4 x float>
 ; X86-LABEL: test_mm_mask3_fnmsub_round_ss:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT:    vxorps %xmm3, %xmm1, %xmm1
-; X86-NEXT:    vxorps %xmm3, %xmm2, %xmm3
-; X86-NEXT:    vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X86-NEXT:    vfnmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X86-NEXT:    vmovaps %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask3_fnmsub_round_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT:    vxorps %xmm3, %xmm1, %xmm1
-; X64-NEXT:    vxorps %xmm3, %xmm2, %xmm3
-; X64-NEXT:    vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X64-NEXT:    vfnmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X64-NEXT:    vmovaps %xmm2, %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -6083,20 +6037,14 @@ define <2 x double> @test_mm_mask_fnmsub_round_sd(<2 x double> %__W, i8 zeroext
 ; X86-LABEL: test_mm_mask_fnmsub_round_sd:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X86-NEXT:    vxorpd %xmm3, %xmm1, %xmm1
-; X86-NEXT:    vxorpd %xmm3, %xmm2, %xmm2
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_fnmsub_round_sd:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X64-NEXT:    vxorpd %xmm3, %xmm1, %xmm1
-; X64-NEXT:    vxorpd %xmm3, %xmm2, %xmm2
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <2 x double> %__W, i64 0
@@ -6143,20 +6091,14 @@ define <2 x double> @test_mm_maskz_fnmsub_round_sd(i8 zeroext %__U, <2 x double>
 ; X86-LABEL: test_mm_maskz_fnmsub_round_sd:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X86-NEXT:    vxorpd %xmm3, %xmm1, %xmm1
-; X86-NEXT:    vxorpd %xmm3, %xmm2, %xmm2
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_fnmsub_round_sd:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X64-NEXT:    vxorpd %xmm3, %xmm1, %xmm1
-; X64-NEXT:    vxorpd %xmm3, %xmm2, %xmm2
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT:    retq
 entry:
   %0 = extractelement <2 x double> %__A, i64 0
@@ -6206,23 +6148,15 @@ define <2 x double> @test_mm_mask3_fnmsub_round_sd(<2 x double> %__W, <2 x doubl
 ; X86-LABEL: test_mm_mask3_fnmsub_round_sd:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X86-NEXT:    vxorpd %xmm3, %xmm1, %xmm1
-; X86-NEXT:    vxorpd %xmm3, %xmm2, %xmm3
-; X86-NEXT:    vfmadd213sd %xmm3, %xmm0, %xmm1
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vmovsd %xmm1, %xmm2, %xmm2 {%k1}
+; X86-NEXT:    vfnmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X86-NEXT:    vmovapd %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask3_fnmsub_round_sd:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X64-NEXT:    vxorpd %xmm3, %xmm1, %xmm1
-; X64-NEXT:    vxorpd %xmm3, %xmm2, %xmm3
-; X64-NEXT:    vfmadd213sd %xmm3, %xmm0, %xmm1
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vmovsd %xmm1, %xmm2, %xmm2 {%k1}
+; X64-NEXT:    vfnmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
 ; X64-NEXT:    vmovapd %xmm2, %xmm0
 ; X64-NEXT:    retq
 entry:
diff --git a/test/CodeGen/X86/fma4-fneg-combine.ll b/test/CodeGen/X86/fma4-fneg-combine.ll
index f29908678a7..771162a2c99 100644
--- a/test/CodeGen/X86/fma4-fneg-combine.ll
+++ b/test/CodeGen/X86/fma4-fneg-combine.ll
@@ -20,8 +20,7 @@ define <4 x float> @test1(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorps {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-NEXT:    vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfmsubss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
   %res = tail call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i)
@@ -31,8 +30,7 @@ define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorps {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-NEXT:    vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
   %res = tail call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %c)
@@ -42,8 +40,7 @@ define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 define <4 x float> @test4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorps {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT:    vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
   %res = tail call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %sub.i, <4 x float> %b, <4 x float> %c)
@@ -53,10 +50,7 @@ define <4 x float> @test4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 define <4 x float> @test5(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
 ; CHECK-LABEL: test5:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
-; CHECK-NEXT:    vxorps %xmm3, %xmm0, %xmm0
-; CHECK-NEXT:    vxorps %xmm3, %xmm2, %xmm2
-; CHECK-NEXT:    vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
   %sub.i.2 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
@@ -78,8 +72,7 @@ define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 define <2 x double> @test7(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorpd {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-NEXT:    vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
   %res = tail call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i)
@@ -89,8 +82,7 @@ define <2 x double> @test7(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 define <2 x double> @test8(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorpd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-NEXT:    vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %b
   %res = tail call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a, <2 x double> %sub.i, <2 x double> %c)
@@ -100,8 +92,7 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 define <2 x double> @test9(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test9:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorpd {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT:    vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
   %res = tail call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c)
@@ -111,10 +102,7 @@ define <2 x double> @test9(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test10:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; CHECK-NEXT:    vxorpd %xmm3, %xmm0, %xmm0
-; CHECK-NEXT:    vxorpd %xmm3, %xmm2, %xmm2
-; CHECK-NEXT:    vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
   %sub.i.2 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
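
Aside for readers of this patch (not part of the diff itself): the rewrites in the checks above are legal because every FMSUB/FNMADD/FNMSUB opcode is plain FMA with the addend and/or the product negated. That identity is what lets combineFMA absorb an FNEG found behind an EXTRACT_VECTOR_ELT of element 0 into the FMA opcode, rather than leaving a vxorps/vxorpd against a sign-bit constant in front of the instruction. The following is a minimal standalone C++ sketch of those identities; nothing in it is LLVM API, and the inputs are deliberately chosen so every product and sum is exactly representable, making the fused and unfused sides compare equal.

// fma_negation_identities.cpp - illustration only, not LLVM code.
// Build: c++ -std=c++11 fma_negation_identities.cpp && ./a.out
#include <cassert>
#include <cmath>

int main() {
  // Exactly representable values: a*b = -3.375, and all sums below are
  // exact, so the single-rounding fma matches the separately rounded form.
  const float a = 1.5f, b = -2.25f, c = 4.0f;

  // vfmsub*:  a*b - c    == fma(a, b, fneg(c))
  assert(std::fma(a, b, -c) == a * b - c);

  // vfnmadd*: -(a*b) + c == fma(fneg(a), b, c)
  assert(std::fma(-a, b, c) == c - a * b);

  // vfnmsub*: -(a*b) - c == fma(fneg(a), b, fneg(c))
  assert(std::fma(-a, b, -c) == -(a * b) - c);

  return 0;
}

The combine only looks through a constant extract index of 0, which is exactly the pattern the scalar *ss/*sd intrinsics in these tests produce: they read element 0 of their vector operands, so re-extracting element 0 from the FNEG's vector input preserves the value while exposing the negation to the FMA opcode selection.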