llvm-mirror/test/CodeGen/AArch64/vecreduce-and-legalization.ll
Nikita Popov b4de4b44fe [SDAG][AArch64] Legalize VECREDUCE
Fixes https://bugs.llvm.org/show_bug.cgi?id=36796.

Implement basic legalizations (PromoteIntRes, PromoteIntOp,
ExpandIntRes, ScalarizeVecOp, WidenVecOp) for VECREDUCE opcodes.
There are more legalizations missing (especially float legalizations),
but there's no way to test them right now, so I'm not adding them.
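
To make the ExpandIntRes case concrete, here is a rough IR-level sketch
of what expanding the <2 x i128> AND reduction amounts to once each i128
element has been split into i64 halves. The function name and signature
are made up for illustration, and the real transform operates on SDAG
nodes rather than IR; compare test_v2i128 in the test file below:

  define i128 @reduce_and_v2i128_sketch(i64 %a.lo, i64 %a.hi, i64 %b.lo, i64 %b.hi) {
    ; Reduce each half independently; on AArch64 these become the two
    ; scalar instructions 'and x0, x0, x2' and 'and x1, x1, x3'.
    %lo = and i64 %a.lo, %b.lo
    %hi = and i64 %a.hi, %b.hi
    ; Reassemble the i128 result from the two i64 halves.
    %lo.z = zext i64 %lo to i128
    %hi.z = zext i64 %hi to i128
    %hi.s = shl i128 %hi.z, 64
    %r = or i128 %hi.s, %lo.z
    ret i128 %r
  }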

This also includes a few more changes to make this work somewhat
reasonably:

 * Add support for expanding VECREDUCE in SDAG. Usually,
   experimental.vector.reduce is expanded prior to codegen, but if the
   target does have native vector reduce, it may of course still be
   necessary to expand due to legalization issues. This uses a shuffle
   reduction if possible, followed by a naive scalar reduction (see the
   IR sketch after this list).
 * Allow the result type of integer VECREDUCE to be larger than the
   vector element type. For example, we need to be able to reduce a v8i8
   into a (nominally) i32 result type on AArch64.
 * Use the vector operand type rather than the scalar result type to
   determine the action, so we can control exactly which vector types are
   supported. Also change the vector op legalization code to handle
   operations that have vector operands but no vector results, as is the
   case for VECREDUCE.
 * Default VECREDUCE to Expand. On AArch64 (the only target using
   VECREDUCE), explicitly specify for which vector types the reductions
   are supported.
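
A minimal sketch of that shuffle expansion, written as straight-line IR
for an AND reduction of <4 x i32>. The expansion actually runs on SDAG
nodes, and the function name and shuffle masks here are purely
illustrative:

  define i32 @reduce_and_v4i32_sketch(<4 x i32> %v) {
    ; Halve the vector: move the high two lanes down and AND lanewise,
    ; leaving [a&c, b&d, undef, undef].
    %h1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
    %s1 = and <4 x i32> %v, %h1
    ; Halve again: move lane 1 down to lane 0, giving a&b&c&d in lane 0.
    %h2 = shufflevector <4 x i32> %s1, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
    %s2 = and <4 x i32> %s1, %h2
    ; The fully reduced value lives in lane 0.
    %r = extractelement <4 x i32> %s2, i32 0
    ret i32 %r
  }

For a non-power-of-two type like v9i8, the input is first widened,
padding the new lanes with -1 (the identity for AND), after which the
same halving plus a final scalar chain finish the reduction; that is
exactly the sequence visible in test_v9i8 below.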

This does not handle anything related to VECREDUCE_STRICT_*.

Differential Revision: https://reviews.llvm.org/D58015

llvm-svn: 355860
2019-03-11 20:22:13 +00:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK

declare i1 @llvm.experimental.vector.reduce.and.i1.v1i1(<1 x i1> %a)
declare i8 @llvm.experimental.vector.reduce.and.i8.v1i8(<1 x i8> %a)
declare i16 @llvm.experimental.vector.reduce.and.i16.v1i16(<1 x i16> %a)
declare i24 @llvm.experimental.vector.reduce.and.i24.v1i24(<1 x i24> %a)
declare i32 @llvm.experimental.vector.reduce.and.i32.v1i32(<1 x i32> %a)
declare i64 @llvm.experimental.vector.reduce.and.i64.v1i64(<1 x i64> %a)
declare i128 @llvm.experimental.vector.reduce.and.i128.v1i128(<1 x i128> %a)
declare i8 @llvm.experimental.vector.reduce.and.i8.v3i8(<3 x i8> %a)
declare i8 @llvm.experimental.vector.reduce.and.i8.v9i8(<9 x i8> %a)
declare i32 @llvm.experimental.vector.reduce.and.i32.v3i32(<3 x i32> %a)
declare i1 @llvm.experimental.vector.reduce.and.i1.v4i1(<4 x i1> %a)
declare i24 @llvm.experimental.vector.reduce.and.i24.v4i24(<4 x i24> %a)
declare i128 @llvm.experimental.vector.reduce.and.i128.v2i128(<2 x i128> %a)
declare i32 @llvm.experimental.vector.reduce.and.i32.v16i32(<16 x i32> %a)

define i1 @test_v1i1(<1 x i1> %a) nounwind {
; CHECK-LABEL: test_v1i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x1
; CHECK-NEXT:    ret
  %b = call i1 @llvm.experimental.vector.reduce.and.i1.v1i1(<1 x i1> %a)
  ret i1 %b
}

; <1 x N> reductions are scalarized; the result is just the single lane.
define i8 @test_v1i8(<1 x i8> %a) nounwind {
; CHECK-LABEL: test_v1i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    umov w0, v0.b[0]
; CHECK-NEXT:    ret
  %b = call i8 @llvm.experimental.vector.reduce.and.i8.v1i8(<1 x i8> %a)
  ret i8 %b
}

define i16 @test_v1i16(<1 x i16> %a) nounwind {
; CHECK-LABEL: test_v1i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    umov w0, v0.h[0]
; CHECK-NEXT:    ret
  %b = call i16 @llvm.experimental.vector.reduce.and.i16.v1i16(<1 x i16> %a)
  ret i16 %b
}

define i24 @test_v1i24(<1 x i24> %a) nounwind {
; CHECK-LABEL: test_v1i24:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %b = call i24 @llvm.experimental.vector.reduce.and.i24.v1i24(<1 x i24> %a)
  ret i24 %b
}

define i32 @test_v1i32(<1 x i32> %a) nounwind {
; CHECK-LABEL: test_v1i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %b = call i32 @llvm.experimental.vector.reduce.and.i32.v1i32(<1 x i32> %a)
  ret i32 %b
}

define i64 @test_v1i64(<1 x i64> %a) nounwind {
; CHECK-LABEL: test_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %b = call i64 @llvm.experimental.vector.reduce.and.i64.v1i64(<1 x i64> %a)
  ret i64 %b
}

define i128 @test_v1i128(<1 x i128> %a) nounwind {
; CHECK-LABEL: test_v1i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %b = call i128 @llvm.experimental.vector.reduce.and.i128.v1i128(<1 x i128> %a)
  ret i128 %b
}

; <3 x i8> arrives in w0-w2, so the reduction is done with scalar ANDs.
define i8 @test_v3i8(<3 x i8> %a) nounwind {
; CHECK-LABEL: test_v3i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, w1
; CHECK-NEXT:    and w8, w8, w2
; CHECK-NEXT:    and w0, w8, #0xff
; CHECK-NEXT:    ret
  %b = call i8 @llvm.experimental.vector.reduce.and.i8.v3i8(<3 x i8> %a)
  ret i8 %b
}

; Non-power-of-two vector: widened to v16i8, padding the new lanes with
; -1 (the AND identity), then one shuffle halving and a scalar chain.
define i8 @test_v9i8(<9 x i8> %a) nounwind {
; CHECK-LABEL: test_v9i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    mov v0.b[9], w8
; CHECK-NEXT:    mov v0.b[10], w8
; CHECK-NEXT:    mov v0.b[11], w8
; CHECK-NEXT:    mov v0.b[12], w8
; CHECK-NEXT:    mov v0.b[13], w8
; CHECK-NEXT:    mov v0.b[14], w8
; CHECK-NEXT:    mov v0.b[15], w8
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    umov w8, v0.b[1]
; CHECK-NEXT:    umov w9, v0.b[0]
; CHECK-NEXT:    and w8, w9, w8
; CHECK-NEXT:    umov w9, v0.b[2]
; CHECK-NEXT:    and w8, w8, w9
; CHECK-NEXT:    umov w9, v0.b[3]
; CHECK-NEXT:    and w8, w8, w9
; CHECK-NEXT:    umov w9, v0.b[4]
; CHECK-NEXT:    and w8, w8, w9
; CHECK-NEXT:    umov w9, v0.b[5]
; CHECK-NEXT:    and w8, w8, w9
; CHECK-NEXT:    umov w9, v0.b[6]
; CHECK-NEXT:    and w8, w8, w9
; CHECK-NEXT:    umov w9, v0.b[7]
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %b = call i8 @llvm.experimental.vector.reduce.and.i8.v9i8(<9 x i8> %a)
  ret i8 %b
}

; v3i32 is padded to v4i32 with -1 before the shuffle reduction.
define i32 @test_v3i32(<3 x i32> %a) nounwind {
; CHECK-LABEL: test_v3i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    mov v0.s[3], w8
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    mov w8, v0.s[1]
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %b = call i32 @llvm.experimental.vector.reduce.and.i32.v3i32(<3 x i32> %a)
  ret i32 %b
}

; The promoted i1 result is reduced over h lanes and masked to one bit.
define i1 @test_v4i1(<4 x i1> %a) nounwind {
; CHECK-LABEL: test_v4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    umov w10, v0.h[1]
; CHECK-NEXT:    umov w11, v0.h[0]
; CHECK-NEXT:    umov w9, v0.h[2]
; CHECK-NEXT:    and w10, w11, w10
; CHECK-NEXT:    umov w8, v0.h[3]
; CHECK-NEXT:    and w9, w10, w9
; CHECK-NEXT:    and w8, w9, w8
; CHECK-NEXT:    and w0, w8, #0x1
; CHECK-NEXT:    ret
  %b = call i1 @llvm.experimental.vector.reduce.and.i1.v4i1(<4 x i1> %a)
  ret i1 %b
}

define i24 @test_v4i24(<4 x i24> %a) nounwind {
; CHECK-LABEL: test_v4i24:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    mov w8, v0.s[1]
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %b = call i24 @llvm.experimental.vector.reduce.and.i24.v4i24(<4 x i24> %a)
  ret i24 %b
}

; i128 elements live in GPR pairs, so expansion yields two scalar ANDs.
define i128 @test_v2i128(<2 x i128> %a) nounwind {
; CHECK-LABEL: test_v2i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, x2
; CHECK-NEXT:    and x1, x1, x3
; CHECK-NEXT:    ret
  %b = call i128 @llvm.experimental.vector.reduce.and.i128.v2i128(<2 x i128> %a)
  ret i128 %b
}

; A wider-than-128-bit vector is first combined down to one q register.
define i32 @test_v16i32(<16 x i32> %a) nounwind {
; CHECK-LABEL: test_v16i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and v1.16b, v1.16b, v3.16b
; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    mov w8, v0.s[1]
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %b = call i32 @llvm.experimental.vector.reduce.and.i32.v16i32(<16 x i32> %a)
  ret i32 %b
}