Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-23 11:13:28 +01:00
c275334e12
I had manually removed unused prefixes from more than 100 tests in the CodeGen/X86 directory. At first I checked the change history for each test; after that I focused mainly on the format, since every unused prefix I found resulted from either careless copying or leftovers from a functional update. I think it is now reasonable to clean up the remaining X86 tests by script. I wrote a rough script that works for me on most tests and put it in llvm/utils temporarily for review, in the hope that it may also help the owners of other components. The tests in this patch were all generated by that tool and then verified with the update script for autogenerated tests. I skimmed them, inspected about 30 in detail, and found no unexpected changes.

Reviewed By: mtrofin, MaskRay

Differential Revision: https://reviews.llvm.org/D91496
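The script itself is not shown on this page. Purely as an illustration of the workflow described above, the sketch below (hypothetical; not the utility reviewed in D91496 — the `unused_prefixes` helper and both regexes are assumptions) flags FileCheck prefixes that are declared on a RUN line via --check-prefix/--check-prefixes but never referenced by any CHECK-style directive in the test body:

```python
#!/usr/bin/env python3
# Hypothetical sketch -- NOT the llvm/utils script reviewed in D91496.
# A prefix is "declared" by --check-prefix=FOO / --check-prefixes=FOO,BAR on a
# RUN line and is "used" only if a directive such as "; FOO:", "; FOO-NEXT:"
# or "; FOO-LABEL:" appears elsewhere in the same test file.
import re
import sys

PREFIX_ARG_RE = re.compile(r"--?check-prefix(?:es)?[= ]([A-Za-z0-9_,-]+)")
DIRECTIVE_RE = re.compile(
    r";\s*([A-Za-z0-9_]+)(?:-(?:NEXT|SAME|DAG|NOT|LABEL|EMPTY|COUNT-\d+))?:")

def unused_prefixes(path):
    declared, used = set(), set()
    with open(path, encoding="utf-8") as test_file:
        for line in test_file:
            if "RUN:" in line:
                # RUN lines declare prefixes; they do not count as uses.
                for match in PREFIX_ARG_RE.finditer(line):
                    declared.update(match.group(1).split(","))
            else:
                for match in DIRECTIVE_RE.finditer(line):
                    used.add(match.group(1))
    return sorted(declared - used)

if __name__ == "__main__":
    for test in sys.argv[1:]:
        for prefix in unused_prefixes(test):
            print(f"{test}: prefix '{prefix}' is never used")
```

For the file shown below, such a check would report nothing: the single declared prefix FMA4 is used throughout. The real script and its heuristics may differ substantially.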
564 lines · 23 KiB · LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=bdver2 -mtriple=x86_64-pc-win32 | FileCheck %s --check-prefix=FMA4
attributes #0 = { nounwind }
declare <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmadd_baa_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_ss:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fmadd_aba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_ss:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fmadd_bba_ss(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_ss:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm0) + mem
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfmaddps {{.*#+}} xmm0 = (xmm0 * xmm0) + mem
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmaddps {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmaddps {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfmaddps {{.*#+}} ymm0 = (ymm0 * ymm0) + mem
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
declare <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmadd_baa_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_sd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fmadd_aba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_sd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fmadd_bba_sd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_sd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * xmm0) + mem
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfmaddpd {{.*#+}} xmm0 = (xmm0 * xmm0) + mem
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_baa_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmaddpd {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_aba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmaddpd {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmadd_bba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfmaddpd {{.*#+}} ymm0 = (ymm0 * ymm0) + mem
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}
declare <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmadd_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmaddps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fnmadd_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmaddps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fnmadd_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfnmaddps {{.*#+}} xmm0 = -(xmm0 * xmm0) + mem
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fnmadd_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmaddps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fnmadd_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmaddps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fnmadd_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfnmaddps {{.*#+}} ymm0 = -(ymm0 * ymm0) + mem
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
declare <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmadd_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmaddpd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fnmadd_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmaddpd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fnmadd_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfnmaddpd {{.*#+}} xmm0 = -(xmm0 * xmm0) + mem
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fnmadd_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_baa_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmaddpd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fnmadd_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_aba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmaddpd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fnmadd_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmadd_bba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfnmaddpd {{.*#+}} ymm0 = -(ymm0 * ymm0) + mem
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}
declare <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfmsubps {{.*#+}} xmm0 = (xmm0 * xmm0) - mem
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmsubps {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfmsubps {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfmsubps {{.*#+}} ymm0 = (ymm0 * ymm0) - mem
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
declare <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmsubpd {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfmsubpd {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfmsubpd {{.*#+}} xmm0 = (xmm0 * xmm0) - mem
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_baa_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmsubpd {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_aba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfmsubpd {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fmsub_bba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfmsubpd {{.*#+}} ymm0 = (ymm0 * ymm0) - mem
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}
declare <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_fnmsub_baa_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fnmsub_aba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %xmm0
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
define <4 x float> @test_x86_fnmsub_bba_ps(<4 x float> %a, <4 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_ps:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %xmm0
; FMA4-NEXT: vfnmsubps {{.*#+}} xmm0 = -(xmm0 * xmm0) - mem
; FMA4-NEXT: retq
%res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
ret <4 x float> %res
}
declare <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <8 x float> @test_x86_fnmsub_baa_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmsubps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fnmsub_aba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rcx), %ymm0
; FMA4-NEXT: vfnmsubps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
define <8 x float> @test_x86_fnmsub_bba_ps_y(<8 x float> %a, <8 x float> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_ps_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovaps (%rdx), %ymm0
; FMA4-NEXT: vfnmsubps {{.*#+}} ymm0 = -(ymm0 * ymm0) - mem
; FMA4-NEXT: retq
%res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
ret <8 x float> %res
}
declare <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
define <2 x double> @test_x86_fnmsub_baa_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmsubpd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fnmsub_aba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %xmm0
; FMA4-NEXT: vfnmsubpd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
define <2 x double> @test_x86_fnmsub_bba_pd(<2 x double> %a, <2 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_pd:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %xmm0
; FMA4-NEXT: vfnmsubpd {{.*#+}} xmm0 = -(xmm0 * xmm0) - mem
; FMA4-NEXT: retq
%res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
ret <2 x double> %res
}
declare <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
define <4 x double> @test_x86_fnmsub_baa_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_baa_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmsubpd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fnmsub_aba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_aba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rcx), %ymm0
; FMA4-NEXT: vfnmsubpd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}
define <4 x double> @test_x86_fnmsub_bba_pd_y(<4 x double> %a, <4 x double> %b) #0 {
; FMA4-LABEL: test_x86_fnmsub_bba_pd_y:
; FMA4: # %bb.0:
; FMA4-NEXT: vmovapd (%rdx), %ymm0
; FMA4-NEXT: vfnmsubpd {{.*#+}} ymm0 = -(ymm0 * ymm0) - mem
; FMA4-NEXT: retq
%res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
ret <4 x double> %res
}