mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-23 03:02:36 +01:00
8957a377cc
Based on the discussion in http://lists.llvm.org/pipermail/llvm-dev/2019-October/135574.html, the conclusion was reached that the ARM backend should produce vcmp instead of vcmpe instructions by default, i.e. not be producing an Invalid Operation exception when either arguments in a floating point compare are quiet NaNs. In the future, after constrained floating point intrinsics for floating point compare have been introduced, vcmpe instructions probably should be produced for those intrinsics - depending on the exact semantics they'll be defined to have. This patch logically consists of the following parts: - Revert http://llvm.org/viewvc/llvm-project?rev=294945&view=rev and http://llvm.org/viewvc/llvm-project?rev=294968&view=rev, which implemented fine-tuning for when to produce vcmpe (i.e. not do it for equality comparisons). The complexity introduced by those patches isn't needed anymore if we just always produce vcmp instead. Maybe these patches need to be reintroduced again once support is needed to map potential LLVM-IR constrained floating point compare intrinsics to the ARM instruction set. - Simply select vcmp, instead of vcmpe, see simple changes in lib/Target/ARM/ARMInstrVFP.td - Adapt lots of tests that tested for vcmpe (instead of vcmp). For all of these test, the intent of what is tested for isn't related to whether the vcmp should produce an Invalid Operation exception or not. Fixes PR43374. Differential Revision: https://reviews.llvm.org/D68463 llvm-svn: 374025
60 lines
2.0 KiB
LLVM
; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 | FileCheck %s

; rdar://8598427
; Adjust if-converter heuristics to avoid predicating vmrs which can cause
; significant regression.

; Three doubles per element: fields 0/1/2 are read below as x/y/z candidates.
%struct.xyz_t = type { double, double, double }
; Loop over %tsets elements; each iteration does two fp compares whose
; vmrs (flag transfer) must NOT be if-converted/predicated (see CHECK-NOTs).
define i32 @effie(i32 %tsets, %struct.xyz_t* nocapture %p, i32 %a, i32 %b, i32 %c) nounwind readonly noinline {
; CHECK-LABEL: effie:
entry:
  %0 = icmp sgt i32 %tsets, 0
  br i1 %0, label %bb.nph, label %bb6

bb.nph:                                           ; preds = %entry
  %1 = add nsw i32 %b, %a
  %2 = add nsw i32 %1, %c
  br label %bb

bb:                                               ; preds = %bb4, %bb.nph
; CHECK: vcmp.f64
; CHECK: vmrs APSR_nzcv, fpscr
  %r.19 = phi i32 [ 0, %bb.nph ], [ %r.0, %bb4 ]
  %n.08 = phi i32 [ 0, %bb.nph ], [ %10, %bb4 ]
  %scevgep10 = getelementptr inbounds %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 0
  %scevgep11 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 1
  %3 = load double, double* %scevgep10, align 4
  %4 = load double, double* %scevgep11, align 4
  %5 = fcmp uge double %3, %4
  br i1 %5, label %bb3, label %bb1

bb1:                                              ; preds = %bb
; The second compare must not be predicated on the first (no IT block,
; no conditional vcmp/vmrs forms).
; CHECK-NOT: it
; CHECK-NOT: vcmpmi
; CHECK-NOT: vmrsmi
; CHECK: vcmp.f64
; CHECK: vmrs APSR_nzcv, fpscr
  %scevgep12 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 2
  %6 = load double, double* %scevgep12, align 4
  %7 = fcmp uge double %3, %6
  br i1 %7, label %bb3, label %bb2

bb2:                                              ; preds = %bb1
  %8 = add nsw i32 %2, %r.19
  br label %bb4

bb3:                                              ; preds = %bb1, %bb
  %9 = add nsw i32 %r.19, 1
  br label %bb4

bb4:                                              ; preds = %bb3, %bb2
  %r.0 = phi i32 [ %9, %bb3 ], [ %8, %bb2 ]
  %10 = add nsw i32 %n.08, 1
  %exitcond = icmp eq i32 %10, %tsets
  br i1 %exitcond, label %bb6, label %bb

bb6:                                              ; preds = %bb4, %entry
  %r.1.lcssa = phi i32 [ 0, %entry ], [ %r.0, %bb4 ]
  ret i32 %r.1.lcssa
}