llvm-mirror/test/CodeGen/AArch64/fp-cond-sel.ll
Commit 778da94f90 by Matthias Braun: AArch64: Change modeling of zero cycle zeroing.
On CPUs with the zero cycle zeroing feature enabled, "movi v.2d" should
be used to zero a vector register. This was previously done at
instruction selection time; however, the register coalescer sometimes
widened multiple vregs to the Q width because of that, leading to extra
spills. This patch leaves the decision on how to zero a register to the
AsmPrinter phase, where it no longer affects register allocation.

This patch also sets isAsCheapAsAMove=1 on FMOVS0, FMOVD0.

This fixes http://llvm.org/PR27454, rdar://25866262

Differential Revision: http://reviews.llvm.org/D21826

llvm-svn: 274686
2016-07-06 21:39:33 +00:00
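The behavior the commit describes can be sketched with a minimal IR function; the function name @return_zero and the register numbers are illustrative assumptions, not part of the test file. Under -mcpu=cyclone the zero cycle zeroing feature is enabled, so a floating-point zero is expected to be materialized by expanding the FMOVS0/FMOVD0 pseudo in the AsmPrinter into a vector "movi", which is what the CHECK-DAG line in the test below matches.

; Illustrative sketch only: the expansion to "movi" happens in the
; AsmPrinter, after register allocation, so the allocator still sees an
; S/D-width definition instead of a widened Q register.
define float @return_zero() {
  ret float 0.0
}
; Expected assembly on a zero-cycle-zeroing core (sketch):
;   movi  v0.2d, #0
;   ret

Keeping the expansion out of instruction selection is the point of the change: the register coalescer no longer sees Q-width zeroing definitions, so it has no reason to widen other vregs and introduce extra spills.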

; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -mcpu=cyclone | FileCheck %s

@varfloat = global float 0.0
@vardouble = global double 0.0

declare void @use_float(float)
declare void @use_double(double)

define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK-LABEL: test_csel:

  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %val1 = select i1 %tst1, float 0.0, float 1.0
  store float %val1, float* @varfloat
; CHECK-DAG: movi v[[FLT0:[0-9]+]].2d, #0
; CHECK-DAG: fmov s[[FLT1:[0-9]+]], #1.0
; CHECK: fcsel {{s[0-9]+}}, s[[FLT0]], s[[FLT1]], hi

  %rhs64 = sext i32 %rhs32 to i64
  %tst2 = icmp sle i64 %lhs64, %rhs64
  %val2 = select i1 %tst2, double 1.0, double 0.0
  store double %val2, double* @vardouble
; FLT0 is reused from above on ARM64.
; CHECK: fmov d[[FLT1:[0-9]+]], #1.0
; CHECK: fcsel {{d[0-9]+}}, d[[FLT1]], d[[FLT0]], le

  call void @use_float(float 0.0)
  call void @use_float(float 1.0)
  call void @use_double(double 0.0)
  call void @use_double(double 1.0)

  ret void
; CHECK: ret
}