[ARM] Add vrev32 NEON fp16 patterns

Fill in the gaps for vrev32.16 f16 patterns, extending the existing i16
patterns.
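
As an illustration (not taken from the committed diff; %v and %r are
placeholder names), with +fullfp16 a half-precision shuffle that swaps
the 16-bit lanes within each 32-bit word, such as

  %r = shufflevector <4 x half> %v, <4 x half> undef,
                     <4 x i32> <i32 1, i32 0, i32 3, i32 2>

should now select a single vrev32.16, just as the equivalent <4 x i16>
shuffle already did.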

Differential Revision: https://reviews.llvm.org/D69508
David Green 2019-11-04 13:33:22 +00:00
parent 5f211f835c
commit 1941ea029d
2 changed files with 215 additions and 44 deletions

lib/Target/ARM/ARMInstrNEON.td

@@ -6797,9 +6797,12 @@ def VREV64q16 : VREV64Q<0b01, "vrev64", "16", v8i16>;
 def VREV64q32 : VREV64Q<0b10, "vrev64", "32", v4i32>;

 let Predicates = [HasNEON] in {
-def : Pat<(v4f32 (ARMvrev64 (v4f32 QPR:$Vm))), (VREV64q32 QPR:$Vm)>;
-def : Pat<(v8f16 (ARMvrev64 (v8f16 QPR:$Vm))), (VREV64q16 QPR:$Vm)>;
-def : Pat<(v4f16 (ARMvrev64 (v4f16 DPR:$Vm))), (VREV64d16 DPR:$Vm)>;
+def : Pat<(v4f32 (ARMvrev64 (v4f32 QPR:$Vm))),
+          (VREV64q32 QPR:$Vm)>;
+def : Pat<(v8f16 (ARMvrev64 (v8f16 QPR:$Vm))),
+          (VREV64q16 QPR:$Vm)>;
+def : Pat<(v4f16 (ARMvrev64 (v4f16 DPR:$Vm))),
+          (VREV64d16 DPR:$Vm)>;
 }

 // VREV32 : Vector Reverse elements within 32-bit words
@@ -6821,6 +6824,13 @@ def VREV32d16 : VREV32D<0b01, "vrev32", "16", v4i16>;
 def VREV32q8 : VREV32Q<0b00, "vrev32", "8", v16i8>;
 def VREV32q16 : VREV32Q<0b01, "vrev32", "16", v8i16>;

+let Predicates = [HasNEON] in {
+def : Pat<(v8f16 (ARMvrev32 (v8f16 QPR:$Vm))),
+          (VREV32q16 QPR:$Vm)>;
+def : Pat<(v4f16 (ARMvrev32 (v4f16 DPR:$Vm))),
+          (VREV32d16 DPR:$Vm)>;
+}
+
 // VREV16 : Vector Reverse elements within 16-bit halfwords
 class VREV16D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>

test/CodeGen/ARM/vrev.ll

@@ -1,112 +1,226 @@
-; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=arm-eabi -mattr=+neon,+fullfp16 %s -o - | FileCheck %s

 define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: test_vrev64D8:
-;CHECK: vrev64.8
+; CHECK-LABEL: test_vrev64D8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <8 x i8>, <8 x i8>* %A
 %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 ret <8 x i8> %tmp2
 }

 define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: test_vrev64D16:
-;CHECK: vrev64.16
+; CHECK-LABEL: test_vrev64D16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <4 x i16>, <4 x i16>* %A
 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 ret <4 x i16> %tmp2
 }

+define <4 x half> @test_vrev64Df16(<4 x half>* %A) nounwind {
+; CHECK-LABEL: test_vrev64Df16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+%tmp1 = load <4 x half>, <4 x half>* %A
+%tmp2 = shufflevector <4 x half> %tmp1, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ret <4 x half> %tmp2
+}
+
 define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
-;CHECK-LABEL: test_vrev64D32:
-;CHECK: vrev64.32
+; CHECK-LABEL: test_vrev64D32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <2 x i32>, <2 x i32>* %A
 %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
 ret <2 x i32> %tmp2
 }

 define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
-;CHECK-LABEL: test_vrev64Df:
-;CHECK: vrev64.32
+; CHECK-LABEL: test_vrev64Df:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <2 x float>, <2 x float>* %A
 %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
 ret <2 x float> %tmp2
 }

 define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: test_vrev64Q8:
-;CHECK: vrev64.8
+; CHECK-LABEL: test_vrev64Q8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev64.8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <16 x i8>, <16 x i8>* %A
 %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
 ret <16 x i8> %tmp2
 }

 define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
-;CHECK-LABEL: test_vrev64Q16:
-;CHECK: vrev64.16
+; CHECK-LABEL: test_vrev64Q16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev64.16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <8 x i16>, <8 x i16>* %A
 %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
 ret <8 x i16> %tmp2
 }

+define <8 x half> @test_vrev64Qf16(<8 x half>* %A) nounwind {
+; CHECK-LABEL: test_vrev64Qf16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev64.16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+%tmp1 = load <8 x half>, <8 x half>* %A
+%tmp2 = shufflevector <8 x half> %tmp1, <8 x half> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ret <8 x half> %tmp2
+}
+
 define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
-;CHECK-LABEL: test_vrev64Q32:
-;CHECK: vrev64.32
+; CHECK-LABEL: test_vrev64Q32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev64.32 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <4 x i32>, <4 x i32>* %A
 %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 ret <4 x i32> %tmp2
 }

 define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
-;CHECK-LABEL: test_vrev64Qf:
-;CHECK: vrev64.32
+; CHECK-LABEL: test_vrev64Qf:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev64.32 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <4 x float>, <4 x float>* %A
 %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 ret <4 x float> %tmp2
 }

 define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: test_vrev32D8:
-;CHECK: vrev32.8
+; CHECK-LABEL: test_vrev32D8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev32.8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <8 x i8>, <8 x i8>* %A
 %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
 ret <8 x i8> %tmp2
 }

 define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: test_vrev32D16:
-;CHECK: vrev32.16
+; CHECK-LABEL: test_vrev32D16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev32.16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <4 x i16>, <4 x i16>* %A
 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 ret <4 x i16> %tmp2
 }

+define <4 x half> @test_vrev32Df16(<4 x half>* %A) nounwind {
+; CHECK-LABEL: test_vrev32Df16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev32.16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+%tmp1 = load <4 x half>, <4 x half>* %A
+%tmp2 = shufflevector <4 x half> %tmp1, <4 x half> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ret <4 x half> %tmp2
+}
+
 define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: test_vrev32Q8:
-;CHECK: vrev32.8
+; CHECK-LABEL: test_vrev32Q8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev32.8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <16 x i8>, <16 x i8>* %A
 %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
 ret <16 x i8> %tmp2
 }

 define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
-;CHECK-LABEL: test_vrev32Q16:
-;CHECK: vrev32.16
+; CHECK-LABEL: test_vrev32Q16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev32.16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <8 x i16>, <8 x i16>* %A
 %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 ret <8 x i16> %tmp2
 }

+define <8 x half> @test_vrev32Qf16(<8 x half>* %A) nounwind {
+; CHECK-LABEL: test_vrev32Qf16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev32.16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+%tmp1 = load <8 x half>, <8 x half>* %A
+%tmp2 = shufflevector <8 x half> %tmp1, <8 x half> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ret <8 x half> %tmp2
+}
+
 define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: test_vrev16D8:
-;CHECK: vrev16.8
+; CHECK-LABEL: test_vrev16D8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev16.8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <8 x i8>, <8 x i8>* %A
 %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 ret <8 x i8> %tmp2
 }

 define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: test_vrev16Q8:
-;CHECK: vrev16.8
+; CHECK-LABEL: test_vrev16Q8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev16.8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <16 x i8>, <16 x i8>* %A
 %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
 ret <16 x i8> %tmp2
@@ -115,27 +229,54 @@ define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
 ; Undef shuffle indices should not prevent matching to VREV:

 define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: test_vrev64D8_undef:
-;CHECK: vrev64.8
+; CHECK-LABEL: test_vrev64D8_undef:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <8 x i8>, <8 x i8>* %A
 %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
 ret <8 x i8> %tmp2
 }

 define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
-;CHECK-LABEL: test_vrev32Q16_undef:
-;CHECK: vrev32.16
+; CHECK-LABEL: test_vrev32Q16_undef:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev32.16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <8 x i16>, <8 x i16>* %A
 %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
 ret <8 x i16> %tmp2
 }

+define <8 x half> @test_vrev32Qf16_undef(<8 x half>* %A) nounwind {
+; CHECK-LABEL: test_vrev32Qf16_undef:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vrev32.16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+%tmp1 = load <8 x half>, <8 x half>* %A
+%tmp2 = shufflevector <8 x half> %tmp1, <8 x half> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
+ret <8 x half> %tmp2
+}
+
 ; A vcombine feeding a VREV should not obscure things. Radar 8597007.

 define void @test_with_vcombine(<4 x float>* %v) nounwind {
-;CHECK-LABEL: test_with_vcombine:
-;CHECK-NOT: vext
-;CHECK: vrev64.32
+; CHECK-LABEL: test_with_vcombine:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0:128]
+; CHECK-NEXT:    vadd.f32 d18, d17, d17
+; CHECK-NEXT:    vrev64.32 d16, d16
+; CHECK-NEXT:    vrev64.32 d17, d18
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r0:128]
+; CHECK-NEXT:    mov pc, lr
 %tmp1 = load <4 x float>, <4 x float>* %v, align 16
 %tmp2 = bitcast <4 x float> %tmp1 to <2 x double>
 %tmp3 = extractelement <2 x double> %tmp2, i32 0
@@ -152,7 +293,15 @@ define void @test_with_vcombine(<4 x float>* %v) nounwind {
 ; to <2 x i16> when stored to memory.
 define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst) nounwind ssp {
 ; CHECK-LABEL: test_vrev64:
-; CHECK: vst1.32
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vld1.32 {d16, d17}, [r0]
+; CHECK-NEXT:    vmov.u16 r0, d17[2]
+; CHECK-NEXT:    vmov.u16 r2, d17[1]
+; CHECK-NEXT:    vmov.32 d16[0], r0
+; CHECK-NEXT:    vmov.32 d16[1], r2
+; CHECK-NEXT:    vuzp.16 d16, d17
+; CHECK-NEXT:    vst1.32 {d16[0]}, [r1:32]
+; CHECK-NEXT:    mov pc, lr
 entry:
 %0 = bitcast <4 x i16>* %source to <8 x i16>*
 %tmp2 = load <8 x i16>, <8 x i16>* %0, align 4
@@ -166,9 +315,15 @@ entry:

 ; Test vrev of float4
 define void @float_vrev64(float* nocapture %source, <4 x float>* nocapture %dest) nounwind noinline ssp {
-; CHECK: float_vrev64
-; CHECK: vext.32
-; CHECK: vrev64.32
+; CHECK-LABEL: float_vrev64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q8, #0x0
+; CHECK-NEXT:    vld1.32 {d18, d19}, [r0]
+; CHECK-NEXT:    add r0, r1, #176
+; CHECK-NEXT:    vext.32 q8, q9, q8, #3
+; CHECK-NEXT:    vrev64.32 q8, q8
+; CHECK-NEXT:    vst1.32 {d16, d17}, [r0]
+; CHECK-NEXT:    mov pc, lr
 entry:
 %0 = bitcast float* %source to <4 x float>*
 %tmp2 = load <4 x float>, <4 x float>* %0, align 4
@@ -180,7 +335,13 @@ entry:

 define <4 x i32> @test_vrev32_bswap(<4 x i32> %source) nounwind {
 ; CHECK-LABEL: test_vrev32_bswap:
-; CHECK: vrev32.8
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrev32.8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 %bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %source)
 ret <4 x i32> %bswap
 }