; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=X64
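
; These tests exercise the strict floating-point vector compares: the quiet
; @llvm.experimental.constrained.fcmp and signaling
; @llvm.experimental.constrained.fcmps intrinsics, for every fcmp predicate,
; on <16 x float> and <8 x double>, with the resulting mask selecting between
; two integer vectors. With AVX-512 the compare lowers to vcmpps/vcmppd into a
; mask register and the select becomes vpblendmd/vpblendmq under that mask.
; On i686 the second float vector is passed on the stack, so the compare folds
; a 64-byte-aligned memory operand at 8(%ebp); on x86-64 the greater-than
; forms are instead commuted to the corresponding less-than compares.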

define <16 x i32> @test_v16f32_oeq_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_oeq_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_oeq_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpeqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"oeq",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ogt_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ogt_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpgt_oqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ogt_q:
; X64: # %bb.0:
; X64-NEXT:    vcmplt_oqps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ogt",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_oge_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_oge_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpge_oqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_oge_q:
; X64: # %bb.0:
; X64-NEXT:    vcmple_oqps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"oge",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_olt_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_olt_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmplt_oqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_olt_q:
; X64: # %bb.0:
; X64-NEXT:    vcmplt_oqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"olt",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ole_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ole_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmple_oqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ole_q:
; X64: # %bb.0:
; X64-NEXT:    vcmple_oqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ole",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_one_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_one_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneq_oqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_one_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpneq_oqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"one",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ord_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ord_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpordps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ord_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpordps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ord",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ueq_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ueq_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeq_uqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ueq_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpeq_uqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ueq",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ugt_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ugt_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnle_uqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ugt_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnle_uqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ugt",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_uge_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_uge_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnlt_uqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_uge_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnlt_uqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"uge",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ult_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ult_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnge_uqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ult_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnle_uqps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ult",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ule_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ule_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpngt_uqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ule_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnlt_uqps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ule",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_une_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_une_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneqps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_une_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpneqps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"une",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_uno_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_uno_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpunordps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_uno_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpunordps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"uno",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}
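
; The same quiet comparisons on <8 x double>, selecting between <8 x i64>.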

define <8 x i64> @test_v8f64_oeq_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_oeq_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_oeq_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpeqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"oeq",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ogt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ogt_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpgt_oqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ogt_q:
; X64: # %bb.0:
; X64-NEXT:    vcmplt_oqpd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ogt",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_oge_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_oge_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpge_oqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_oge_q:
; X64: # %bb.0:
; X64-NEXT:    vcmple_oqpd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"oge",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_olt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_olt_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmplt_oqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_olt_q:
; X64: # %bb.0:
; X64-NEXT:    vcmplt_oqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"olt",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ole_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ole_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmple_oqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ole_q:
; X64: # %bb.0:
; X64-NEXT:    vcmple_oqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ole",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_one_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_one_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneq_oqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_one_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpneq_oqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"one",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ord_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ord_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpordpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ord_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpordpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ord",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ueq_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ueq_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeq_uqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ueq_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpeq_uqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ueq",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ugt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ugt_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnle_uqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ugt_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnle_uqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ugt",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_uge_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_uge_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnlt_uqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_uge_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnlt_uqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"uge",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ult_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ult_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnge_uqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ult_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnle_uqpd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ult",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ule_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ule_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpngt_uqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ule_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpnlt_uqpd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ule",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_une_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_une_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneqpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_une_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpneqpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"une",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_uno_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_uno_q:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpunordpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_uno_q:
; X64: # %bb.0:
; X64-NEXT:    vcmpunordpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"uno",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}
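
; Signaling (fcmps) comparisons on <16 x float>: the ordered/unordered
; predicates map to the signaling compare immediates (lt, le, nlt, nle,
; eq_os, neq_us, ...) instead of the quiet _oq/_uq forms used above.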

define <16 x i32> @test_v16f32_oeq_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_oeq_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeq_osps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_oeq_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpeq_osps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"oeq",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ogt_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ogt_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpgtps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ogt_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpltps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ogt",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_oge_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_oge_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpgeps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_oge_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpleps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"oge",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_olt_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_olt_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpltps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_olt_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpltps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"olt",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ole_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ole_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpleps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ole_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpleps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ole",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_one_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_one_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneq_osps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_one_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpneq_osps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"one",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ord_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ord_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpord_sps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ord_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpord_sps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ord",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ueq_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ueq_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeq_usps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ueq_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpeq_usps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ueq",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ugt_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ugt_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnleps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ugt_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnleps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ugt",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_uge_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_uge_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnltps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_uge_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnltps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"uge",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ult_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ult_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpngeps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ult_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnleps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ult",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_ule_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_ule_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpngtps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_ule_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnltps %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"ule",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_une_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_une_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneq_usps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_une_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpneq_usps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"une",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}

define <16 x i32> @test_v16f32_uno_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
; X86-LABEL: test_v16f32_uno_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpunord_sps 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v16f32_uno_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpunord_sps %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
      <16 x float> %f1, <16 x float> %f2, metadata !"uno",
      metadata !"fpexcept.strict") #0
  %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
  ret <16 x i32> %res
}
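
; And the signaling comparisons on <8 x double>, selecting between <8 x i64>.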

define <8 x i64> @test_v8f64_oeq_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_oeq_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeq_ospd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_oeq_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpeq_ospd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"oeq",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ogt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ogt_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpgtpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ogt_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpltpd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ogt",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_oge_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_oge_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpgepd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_oge_s:
; X64: # %bb.0:
; X64-NEXT:    vcmplepd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"oge",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_olt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_olt_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpltpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_olt_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpltpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"olt",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ole_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ole_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmplepd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ole_s:
; X64: # %bb.0:
; X64-NEXT:    vcmplepd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ole",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_one_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_one_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneq_ospd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_one_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpneq_ospd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"one",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ord_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ord_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpord_spd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ord_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpord_spd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ord",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ueq_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ueq_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpeq_uspd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ueq_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpeq_uspd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ueq",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ugt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ugt_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnlepd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ugt_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnlepd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ugt",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_uge_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_uge_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpnltpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_uge_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnltpd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"uge",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ult_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ult_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpngepd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ult_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnlepd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ult",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_ule_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_ule_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpngtpd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_ule_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpnltpd %zmm2, %zmm3, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"ule",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_une_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_une_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpneq_uspd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_une_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpneq_uspd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"une",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

define <8 x i64> @test_v8f64_uno_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
; X86-LABEL: test_v8f64_uno_s:
; X86: # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vcmpunord_spd 8(%ebp), %zmm2, %k1
; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test_v8f64_uno_s:
; X64: # %bb.0:
; X64-NEXT:    vcmpunord_spd %zmm3, %zmm2, %k1
; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; X64-NEXT:    retq
  %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
      <8 x double> %f1, <8 x double> %f2, metadata !"uno",
      metadata !"fpexcept.strict") #0
  %res = select <8 x i1> %cond, <8 x i64> %a, <8 x i64> %b
  ret <8 x i64> %res
}

attributes #0 = { strictfp nounwind }

declare <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(<16 x float>, <16 x float>, metadata, metadata)
declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(<8 x double>, <8 x double>, metadata, metadata)
declare <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(<16 x float>, <16 x float>, metadata, metadata)
declare <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(<8 x double>, <8 x double>, metadata, metadata)