llvm-mirror/test/CodeGen/PowerPC/lround-conv.ll
Adhemerval Zanella c00c3084e9 [CodeGen] Add lround/llround builtins
This patch adds the ISD::LROUND and ISD::LLROUND nodes along with new
intrinsics.  The changes are straightforward, as for the other
floating-point rounding functions, with just some adjustments
required to handle the return value being an integer.

The idea is to optimize lround/llround generation for AArch64
in a subsequent patch.  The current semantics simply route the
calls to the corresponding libm symbols.

llvm-svn: 360889
2019-05-16 13:15:27 +00:00
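
For reference, the commit introduces llvm.llround alongside llvm.lround, even though only lround is exercised by this test. The sketch below is a minimal illustration, not part of the test file, assuming the companion intrinsic follows the same type-suffix convention; on PowerPC it likewise lowers to a libm call (llround/llroundf):

; Minimal sketch (assumed example, not from the test): calling the
; companion llvm.llround intrinsic, which already returns i64 so no
; truncation is needed.
define i64 @llround_example(double %x) {
entry:
  %0 = tail call i64 @llvm.llround.i64.f64(double %x)
  ret i64 %0
}

declare i64 @llvm.llround.i64.f64(double) nounwind readnone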

; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s

; CHECK-LABEL: testmsws:
; CHECK: bl lroundf
define signext i32 @testmsws(float %x) {
entry:
  %0 = tail call i64 @llvm.lround.i64.f32(float %x)
  %conv = trunc i64 %0 to i32
  ret i32 %conv
}

; CHECK-LABEL: testmsxs:
; CHECK: bl lroundf
define i64 @testmsxs(float %x) {
entry:
  %0 = tail call i64 @llvm.lround.i64.f32(float %x)
  ret i64 %0
}

; CHECK-LABEL: testmswd:
; CHECK: bl lround
define signext i32 @testmswd(double %x) {
entry:
  %0 = tail call i64 @llvm.lround.i64.f64(double %x)
  %conv = trunc i64 %0 to i32
  ret i32 %conv
}

; CHECK-LABEL: testmsxd:
; CHECK: bl lround
define i64 @testmsxd(double %x) {
entry:
  %0 = tail call i64 @llvm.lround.i64.f64(double %x)
  ret i64 %0
}

; CHECK-LABEL: testmswl:
; CHECK: bl lroundl
define signext i32 @testmswl(ppc_fp128 %x) {
entry:
  %0 = tail call i64 @llvm.lround.i64.ppcf128(ppc_fp128 %x)
  %conv = trunc i64 %0 to i32
  ret i32 %conv
}

; CHECK-LABEL: testmsll:
; CHECK: bl lroundl
define i64 @testmsll(ppc_fp128 %x) {
entry:
  %0 = tail call i64 @llvm.lround.i64.ppcf128(ppc_fp128 %x)
  ret i64 %0
}

declare i64 @llvm.lround.i64.f32(float) nounwind readnone
declare i64 @llvm.lround.i64.f64(double) nounwind readnone
declare i64 @llvm.lround.i64.ppcf128(ppc_fp128) nounwind readnone