Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-23 03:02:36 +01:00

Commit 745bb7deaa: This patch folds an AND of a masked load and a build vector into a zero-extended masked load. Differential Revision: https://reviews.llvm.org/D86789

109 lines, 4.2 KiB, LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs -o - %s | FileCheck %s
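
; The fold under test (D86789) turns an AND of a masked load with a
; build vector of lane masks into a zero-extending masked load. The tests
; below reach that DAG pattern through uitofp: the narrow loaded lanes
; must be zero-extended before the unsigned conversion, and after type
; legalization that extension shows up as an AND with a constant splat.
; A minimal IR-level sketch of the pre-fold shape (illustrative only;
; the combine itself runs on SelectionDAG nodes, and %p/%m here are
; hypothetical):
;
;   %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %p, i32 2, <4 x i1> %m, <4 x i16> undef)
;   %e = zext <4 x i16> %l to <4 x i32>   ; legalizes to and(load, splat 0xffff)
;
; With the fold, the load and the extension become one predicated
; instruction (vldrht.u32 below) instead of a load followed by a vand.
;
; foo_v4i16 is the simplest case: a predicated halfword load whose
; zero-extension to i32 is folded into vldrht.u32.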
define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSrc, <4 x i16> %a) {
; CHECK-LABEL: foo_v4i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vpt.s32 lt, q0, zr
; CHECK-NEXT:    vldrht.u32 q0, [r0]
; CHECK-NEXT:    vcvt.f32.u32 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %active.lane.mask = icmp slt <4 x i16> %a, zeroinitializer
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
  %0 = uitofp <4 x i16> %wide.masked.load to <4 x float>
  ret <4 x float> %0
}
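
; foo_v8i8 exercises the same fold one width down: the predicated byte
; load and the zero-extension needed for the u16->f16 conversion fold
; into a single vldrbt.u16 (the vmovlb.s8 only sign-extends the compare
; operand used to build the predicate).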
define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
; CHECK-LABEL: foo_v8i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    vpt.s16 lt, q0, zr
; CHECK-NEXT:    vldrbt.u16 q0, [r0]
; CHECK-NEXT:    vcvt.f16.u16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %active.lane.mask = icmp slt <8 x i8> %a, zeroinitializer
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %0 = uitofp <8 x i8> %wide.masked.load to <8 x half>
  ret <8 x half> %0
}
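
; foo_v4i8 loads i8 lanes into 32-bit lanes, so building the predicate
; takes two sign-extension steps (vmovlb.s8 then vmovlb.s16), while the
; load plus zero-extension is still a single vldrbt.u32.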
define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
; CHECK-LABEL: foo_v4i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vpt.s32 lt, q0, zr
; CHECK-NEXT:    vldrbt.u32 q0, [r0]
; CHECK-NEXT:    vcvt.f32.u32 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %active.lane.mask = icmp slt <4 x i8> %a, zeroinitializer
  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
  %0 = uitofp <4 x i8> %wide.masked.load to <4 x float>
  ret <4 x float> %0
}
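
; foo_v4i32 shows the limit of the fold's benefit: the masked load still
; becomes one predicated vldrwt.u32, but MVE has no u32->f64 conversion,
; so the loaded lanes are widened to i64 by hand (vmov.i64 q5, #0xffffffff
; plus vand) and each lane goes through the __aeabi_ul2d libcall (the AEABI
; unsigned 64-bit integer to double helper).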
define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
; CHECK-LABEL: foo_v4i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    vpt.s32 lt, q0, zr
; CHECK-NEXT:    vldrwt.u32 q4, [r0]
; CHECK-NEXT:    vmov.f64 d0, d8
; CHECK-NEXT:    vmov.i64 q5, #0xffffffff
; CHECK-NEXT:    vmov.f32 s2, s17
; CHECK-NEXT:    vand q6, q0, q5
; CHECK-NEXT:    vmov r0, s24
; CHECK-NEXT:    vmov r1, s25
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r5, r1
; CHECK-NEXT:    vmov r0, s26
; CHECK-NEXT:    vmov r1, s27
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    vmov.f64 d0, d9
; CHECK-NEXT:    vmov.f32 s2, s19
; CHECK-NEXT:    vand q0, q0, q5
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    vmov r2, s2
; CHECK-NEXT:    vmov r3, s3
; CHECK-NEXT:    vmov r6, s0
; CHECK-NEXT:    vmov r7, s1
; CHECK-NEXT:    vmov d8, r4, r5
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    vmov d11, r0, r1
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    vmov d10, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vmov q1, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
entry:
  %active.lane.mask = icmp slt <4 x i32> %a, zeroinitializer
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %0 = uitofp <4 x i32> %wide.masked.load to <4 x double>
  ret <4 x double> %0
}

declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)

declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)

declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)

declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
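
; For @llvm.masked.load.*, the i32 immarg is the load alignment in bytes
; (2 for the i16 test, 1 for the i8 tests, 4 for the i32 test), the
; <N x i1> operand is the per-lane predicate, and the trailing vector is
; the passthru value substituted into masked-off lanes (undef here).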