Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-24 19:52:54 +01:00
02638731c1
This is effectively a revert of:
http://reviews.llvm.org/rL249702 - [InstCombine] transform masking off of an FP sign bit into a fabs() intrinsic call (PR24886)
and:
http://reviews.llvm.org/rL249701 - [ValueTracking] teach computeKnownBits that a fabs() clears sign bits
and a reimplementation as a DAG combine for targets that have IEEE754-compliant fabs/fneg instructions.

This is intended to resolve the objections raised on the dev list:
http://lists.llvm.org/pipermail/llvm-dev/2016-April/098154.html
and:
https://llvm.org/bugs/show_bug.cgi?id=24886#c4

In the interest of patch minimalism, I've only partly enabled AArch64. PowerPC, MIPS, x86 and others can enable later.

Differential Revision: http://reviews.llvm.org/D19391

llvm-svn: 271573
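For context, a minimal sketch (not part of this commit or of the test file below; the function name is made up) of the sign-bit-masking pattern that rL249702 used to fold into a fabs() intrinsic at the IR level, and which after this change is left to a DAG combine on targets with IEEE754-compliant fabs/fneg:

define float @clear_sign_bit_sketch(float %x) {
  %bc = bitcast float %x to i32
  %and = and i32 %bc, 2147483647   ; 0x7FFFFFFF: clears only the sign bit
  %res = bitcast i32 %and to float ; for IEEE754 floats this matches fabs(%x)
  ret float %res
}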
101 lines
2.7 KiB
LLVM
; RUN: opt < %s -instcombine -S | FileCheck %s

; Make sure all library calls are eliminated when the input is known positive.

declare float @fabsf(float)
declare double @fabs(double)
declare fp128 @fabsl(fp128)

define float @square_fabs_call_f32(float %x) {
  %mul = fmul float %x, %x
  %fabsf = tail call float @fabsf(float %mul)
  ret float %fabsf

; CHECK-LABEL: square_fabs_call_f32(
; CHECK-NEXT: %mul = fmul float %x, %x
; CHECK-NEXT: ret float %mul
}

define double @square_fabs_call_f64(double %x) {
  %mul = fmul double %x, %x
  %fabs = tail call double @fabs(double %mul)
  ret double %fabs

; CHECK-LABEL: square_fabs_call_f64(
; CHECK-NEXT: %mul = fmul double %x, %x
; CHECK-NEXT: ret double %mul
}

define fp128 @square_fabs_call_f128(fp128 %x) {
  %mul = fmul fp128 %x, %x
  %fabsl = tail call fp128 @fabsl(fp128 %mul)
  ret fp128 %fabsl

; CHECK-LABEL: square_fabs_call_f128(
; CHECK-NEXT: %mul = fmul fp128 %x, %x
; CHECK-NEXT: ret fp128 %mul
}

; Make sure all intrinsic calls are eliminated when the input is known positive.

declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
declare fp128 @llvm.fabs.f128(fp128)

define float @square_fabs_intrinsic_f32(float %x) {
  %mul = fmul float %x, %x
  %fabsf = tail call float @llvm.fabs.f32(float %mul)
  ret float %fabsf

; CHECK-LABEL: square_fabs_intrinsic_f32(
; CHECK-NEXT: %mul = fmul float %x, %x
; CHECK-NEXT: ret float %mul
}

define double @square_fabs_intrinsic_f64(double %x) {
  %mul = fmul double %x, %x
  %fabs = tail call double @llvm.fabs.f64(double %mul)
  ret double %fabs

; CHECK-LABEL: square_fabs_intrinsic_f64(
; CHECK-NEXT: %mul = fmul double %x, %x
; CHECK-NEXT: ret double %mul
}

define fp128 @square_fabs_intrinsic_f128(fp128 %x) {
  %mul = fmul fp128 %x, %x
  %fabsl = tail call fp128 @llvm.fabs.f128(fp128 %mul)
  ret fp128 %fabsl

; CHECK-LABEL: square_fabs_intrinsic_f128(
; CHECK-NEXT: %mul = fmul fp128 %x, %x
; CHECK-NEXT: ret fp128 %mul
}

; Shrinking a library call to a smaller type should not be inhibited by nor inhibit the square optimization.

define float @square_fabs_shrink_call1(float %x) {
  %ext = fpext float %x to double
  %sq = fmul double %ext, %ext
  %fabs = call double @fabs(double %sq)
  %trunc = fptrunc double %fabs to float
  ret float %trunc

; CHECK-LABEL: square_fabs_shrink_call1(
; CHECK-NEXT: %trunc = fmul float %x, %x
; CHECK-NEXT: ret float %trunc
}

define float @square_fabs_shrink_call2(float %x) {
  %sq = fmul float %x, %x
  %ext = fpext float %sq to double
  %fabs = call double @fabs(double %ext)
  %trunc = fptrunc double %fabs to float
  ret float %trunc

; CHECK-LABEL: square_fabs_shrink_call2(
; CHECK-NEXT: %sq = fmul float %x, %x
; CHECK-NEXT: ret float %sq
}