; Source: mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-23 19:23:23 +01:00), commit 8957a377cc.
; Commit message: Based on the discussion in
; http://lists.llvm.org/pipermail/llvm-dev/2019-October/135574.html, the
; conclusion was reached that the ARM backend should produce vcmp instead of
; vcmpe instructions by default, i.e. not be producing an Invalid Operation
; exception when either argument in a floating point compare is a quiet NaN.
; In the future, after constrained floating point intrinsics for floating
; point compare have been introduced, vcmpe instructions probably should be
; produced for those intrinsics - depending on the exact semantics they'll be
; defined to have.
; This patch logically consists of the following parts:
; - Revert http://llvm.org/viewvc/llvm-project?rev=294945&view=rev and
;   http://llvm.org/viewvc/llvm-project?rev=294968&view=rev, which implemented
;   fine-tuning for when to produce vcmpe (i.e. not do it for equality
;   comparisons). The complexity introduced by those patches isn't needed
;   anymore if we just always produce vcmp instead. Maybe these patches need
;   to be reintroduced again once support is needed to map potential LLVM-IR
;   constrained floating point compare intrinsics to the ARM instruction set.
; - Simply select vcmp, instead of vcmpe, see simple changes in
;   lib/Target/ARM/ARMInstrVFP.td
; - Adapt lots of tests that tested for vcmpe (instead of vcmp). For all of
;   these tests, the intent of what is tested for isn't related to whether the
;   vcmp should produce an Invalid Operation exception or not.
; Fixes PR43374.
; Differential Revision: https://reviews.llvm.org/D68463
; llvm-svn: 374025
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=armv8a--none-eabi -mattr=+fullfp16 -float-abi=hard | FileCheck %s

; Globals used as store targets so the selected value is observable.
@varhalf = global half 0.0
@vardouble = global double 0.0
; Integer sgt: condition flags come from the integer cmp, selected with vselgt.f16.
define void @test_vsel32sgt(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32sgt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp sgt i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; Integer sge: integer cmp then vselge.f16.
define void @test_vsel32sge(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32sge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp sge i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; Integer eq: integer cmp then vseleq.f16.
define void @test_vsel32eq(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32eq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp eq i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; Integer slt: expressed as vselge.f16 with the select operands swapped.
define void @test_vsel32slt(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32slt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp slt i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; Integer sle: expressed as vselgt.f16 with the select operands swapped.
define void @test_vsel32sle(i32 %lhs, i32 %rhs, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32sle:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %tst1 = icmp sle i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp ogt: vcmp.f16 (not vcmpe) of lhs,rhs then vselgt.f16.
define void @test_vsel32ogt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ogt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ogt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp oge: vcmp.f16 of lhs,rhs then vselge.f16.
define void @test_vsel32oge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp oge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp oeq: vcmp.f16 of lhs,rhs then vseleq.f16.
define void @test_vsel32oeq(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oeq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp oeq half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp ugt: compare operands swapped (vcmp s6,s4) and vselge with swapped
; select operands, so the unordered case picks %a.
define void @test_vsel32ugt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ugt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ugt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp uge: compare operands swapped (vcmp s6,s4) and vselgt with swapped
; select operands.
define void @test_vsel32uge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp uge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp olt: compare operands swapped (vcmp s6,s4) then vselgt.f16.
define void @test_vsel32olt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32olt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp olt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp ult: vcmp s4,s6 with vselge and swapped select operands, so the
; unordered case picks %a.
define void @test_vsel32ult(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ult:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ult half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp ole: compare operands swapped (vcmp s6,s4) then vselge.f16.
define void @test_vsel32ole(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ole:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ole half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp ule: vcmp s4,s6 with vselgt and swapped select operands.
define void @test_vsel32ule(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ule:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ule half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp ord: ordered test via vselvs (V set = unordered) with swapped select
; operands.
define void @test_vsel32ord(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ord:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp ord half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp une: vseleq with swapped select operands (not-equal picks %a).
define void @test_vsel32une(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32une:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp une half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp uno: unordered test selects directly via vselvs.f16.
define void @test_vsel32uno(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uno:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp uno half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}

; The following tests repeat the compares with the nnan fast-math flag, which
; lets ordered and unordered predicates lower to the same code.
; fcmp nnan ogt: same lowering as the non-nnan ogt case (vcmp + vselgt).
define void @test_vsel32ogt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ogt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ogt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan oge: vcmp + vselge.
define void @test_vsel32oge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oge_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan oge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan oeq: vcmp + vseleq.
define void @test_vsel32oeq_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32oeq_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan oeq half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan ugt: with nnan, lowers identically to ogt (vcmp + vselgt).
define void @test_vsel32ugt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ugt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ugt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan uge: with nnan, lowers identically to oge (vcmp + vselge).
define void @test_vsel32uge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uge_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan uge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan olt: compare operands swapped (vcmp s6,s4) + vselgt.
define void @test_vsel32olt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32olt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan olt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan ult: with nnan, lowers identically to olt (swapped vcmp + vselgt).
define void @test_vsel32ult_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ult_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ult half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan ole: compare operands swapped (vcmp s6,s4) + vselge.
define void @test_vsel32ole_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ole_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ole half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan ule: with nnan, lowers identically to ole (swapped vcmp + vselge).
define void @test_vsel32ule_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ule_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ule half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan ord: still emits vselvs with swapped select operands.
define void @test_vsel32ord_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32ord_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan ord half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan une: vseleq with swapped select operands.
define void @test_vsel32une_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32une_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan une half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}
; fcmp nnan uno: still emits vselvs.f16 directly.
define void @test_vsel32uno_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* %b_ptr) {
; CHECK-LABEL: test_vsel32uno_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, half* %a_ptr
  %b = load volatile half, half* %b_ptr
  %lhs = load volatile half, half* %lhs_ptr
  %rhs = load volatile half, half* %rhs_ptr
  %tst1 = fcmp nnan uno half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, half* @varhalf
  ret void
}