; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s

; Ensure we use the CC result of SVE compare instructions when branching.
define void @sve_cmplt_setcc(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: @sve_cmplt_setcc
; CHECK: cmplt p1.h, p0/z, z0.h, #0
; CHECK-NEXT: b.eq
entry:
  %0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %in, <vscale x 2 x i64> zeroinitializer)
  %1 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv8i1(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %0)
  br i1 %1, label %if.then, label %if.end

if.then:
  tail call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
  br label %if.end

if.end:
  ret void
}

; Ensure we use the inverted CC result of SVE compare instructions when branching.
define void @sve_cmplt_setcc_inverted(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: @sve_cmplt_setcc_inverted
; CHECK: cmplt p1.h, p0/z, z0.h, #0
; CHECK-NEXT: b.ne
entry:
  %0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %in, <vscale x 2 x i64> zeroinitializer)
  %1 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv8i1(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %0)
  br i1 %1, label %if.end, label %if.then

if.then:
  tail call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
  br label %if.end

if.end:
  ret void
}

; Ensure we combine setcc and csel so as to not end up with an extra compare.
define void @sve_cmplt_setcc_hslo(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: @sve_cmplt_setcc_hslo
; CHECK: cmplt p1.h, p0/z, z0.h, #0
; CHECK-NEXT: b.hs
entry:
  %0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %in, <vscale x 2 x i64> zeroinitializer)
  %1 = tail call i1 @llvm.aarch64.sve.ptest.last.nxv8i1(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %0)
  br i1 %1, label %if.then, label %if.end

if.then:
  tail call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
  br label %if.end

if.end:
  ret void
}

declare i1 @llvm.aarch64.sve.ptest.any.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>)
declare i1 @llvm.aarch64.sve.ptest.last.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>)

declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)

declare void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
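
; Background on the branch conditions checked above: flag-setting SVE
; predicate operations set NZCV so that the SVE condition aliases map onto
; the base condition codes (NONE -> EQ, ANY -> NE, LAST -> LO, NLAST -> HS).
; Hence ptest.any folds into b.eq/b.ne and ptest.last into b.lo/b.hs, with
; the sense inverted when the branch skips over the taken block.
;
; A rough sketch of the full codegen expected for @sve_cmplt_setcc, beyond
; what the CHECK lines above verify (the st1h lowering of the masked store
; and the label names are assumptions, not checked output):
;
;   sve_cmplt_setcc:
;     cmplt p1.h, p0/z, z0.h, #0   // flag-setting compare, no ptest needed
;     b.eq  .LBB0_2                // NONE active -> skip the store
;     st1h  { z0.h }, p0, [x0]     // %if.then: masked store of %in to %out
;   .LBB0_2:                       // %if.end
;     ret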