1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 18:54:02 +01:00
llvm-mirror/test/Transforms/InstCombine/shuffle-cast.ll
Sanjay Patel 6660b7b194 [InstCombine] convert bitcast-shuffle to vector trunc
As discussed in D76983, that patch can turn a chain of insert/extract
with scalar trunc ops into bitcast+extract and existing instcombine
vector transforms end up creating a shuffle out of that (see the
PhaseOrdering test for an example). Currently, that process requires
at least this sequence: -instcombine -early-cse -instcombine.

Before D76983, the sequence of insert/extract would reach the SLP
vectorizer and become a vector trunc there.

Based on a small sampling of public targets/types, converting the
shuffle to a trunc is better for codegen in most cases (and a
regression of that form is the reason this was noticed). The trunc is
clearly better for IR-level analysis as well.

This means that we can induce "spontaneous vectorization" without
invoking any explicit vectorizer passes (at least a vector cast op
may be created out of scalar casts), but that seems to be the right
choice given that we started with a chain of insert/extract, and the
backend would expand back to that chain if a target does not support
the op.

Differential Revision: https://reviews.llvm.org/D77299
2020-04-05 09:48:02 -04:00

124 lines
4.8 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S -data-layout="e" | FileCheck %s --check-prefixes=ANY,LE
; RUN: opt < %s -instcombine -S -data-layout="E" | FileCheck %s --check-prefixes=ANY,BE
; With a little-endian data layout, the low i16 half of each i32 lane lands at
; the even index after the bitcast, so taking indices <0,2,4,6> is exactly
; `trunc <4 x i32> to <4 x i16>` and instcombine folds it (LE checks).
; With a big-endian layout the even indices are the *high* halves, so the
; bitcast+shuffle must be left alone (BE checks).
define <4 x i16> @trunc_little_endian(<4 x i32> %x) {
; LE-LABEL: @trunc_little_endian(
; LE-NEXT: [[R:%.*]] = trunc <4 x i32> [[X:%.*]] to <4 x i16>
; LE-NEXT: ret <4 x i16> [[R]]
;
; BE-LABEL: @trunc_little_endian(
; BE-NEXT: [[B:%.*]] = bitcast <4 x i32> [[X:%.*]] to <8 x i16>
; BE-NEXT: [[R:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; BE-NEXT: ret <4 x i16> [[R]]
;
%b = bitcast <4 x i32> %x to <8 x i16> ; reinterpret 4 i32 lanes as 8 i16 halves
%r = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6> ; keep one half per original lane (even indices)
ret <4 x i16> %r
}
; Mirror of the test above: the odd indices <1,3,5,7> select the low i16 half
; of each i32 lane only under a big-endian layout, so the fold to a vector
; trunc fires for BE and the shuffle is kept unchanged for LE.
define <4 x i16> @trunc_big_endian(<4 x i32> %x) {
; LE-LABEL: @trunc_big_endian(
; LE-NEXT: [[B:%.*]] = bitcast <4 x i32> [[X:%.*]] to <8 x i16>
; LE-NEXT: [[R:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; LE-NEXT: ret <4 x i16> [[R]]
;
; BE-LABEL: @trunc_big_endian(
; BE-NEXT: [[R:%.*]] = trunc <4 x i32> [[X:%.*]] to <4 x i16>
; BE-NEXT: ret <4 x i16> [[R]]
;
%b = bitcast <4 x i32> %x to <8 x i16> ; reinterpret 4 i32 lanes as 8 i16 halves
%r = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7> ; odd indices = low halves on BE only
ret <4 x i16> %r
}
declare void @use_v8i16(<8 x i16>)
; Extra use is ok.
; The bitcast has a second use (the call), but the fold can still fire because
; the replacement trunc is computed from %x directly, leaving the bitcast in
; place for its other user (see LE checks). Also exercises a wider ratio:
; i64 -> i16 means 4 pieces per lane, so LE picks indices <0,4> (piece 0 of
; each of the 2 lanes). BE keeps the shuffle since those are the high pieces.
define <2 x i16> @trunc_little_endian_extra_use(<2 x i64> %x) {
; LE-LABEL: @trunc_little_endian_extra_use(
; LE-NEXT: [[B:%.*]] = bitcast <2 x i64> [[X:%.*]] to <8 x i16>
; LE-NEXT: call void @use_v8i16(<8 x i16> [[B]])
; LE-NEXT: [[R:%.*]] = trunc <2 x i64> [[X]] to <2 x i16>
; LE-NEXT: ret <2 x i16> [[R]]
;
; BE-LABEL: @trunc_little_endian_extra_use(
; BE-NEXT: [[B:%.*]] = bitcast <2 x i64> [[X:%.*]] to <8 x i16>
; BE-NEXT: call void @use_v8i16(<8 x i16> [[B]])
; BE-NEXT: [[R:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> undef, <2 x i32> <i32 0, i32 4>
; BE-NEXT: ret <2 x i16> [[R]]
;
%b = bitcast <2 x i64> %x to <8 x i16> ; each i64 lane becomes 4 i16 pieces
call void @use_v8i16(<8 x i16> %b) ; extra use of the bitcast must not block the fold
%r = shufflevector <8 x i16> %b, <8 x i16> undef, <2 x i32> <i32 0, i32 4> ; first piece of each lane
ret <2 x i16> %r
}
declare void @use_v12i11(<12 x i11>)
; Weird types are ok.
; Non-byte-sized element types are still accepted by the fold: each i33 lane
; splits into three i11 pieces, and mask <2,5,8,11> selects piece 2 of each
; lane — which is the low piece under a big-endian layout, so BE folds to a
; vector trunc while LE keeps the shuffle (see checks). The bitcast also has
; an extra use here, as in the previous test.
define <4 x i11> @trunc_big_endian_extra_use(<4 x i33> %x) {
; LE-LABEL: @trunc_big_endian_extra_use(
; LE-NEXT: [[B:%.*]] = bitcast <4 x i33> [[X:%.*]] to <12 x i11>
; LE-NEXT: call void @use_v12i11(<12 x i11> [[B]])
; LE-NEXT: [[R:%.*]] = shufflevector <12 x i11> [[B]], <12 x i11> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
; LE-NEXT: ret <4 x i11> [[R]]
;
; BE-LABEL: @trunc_big_endian_extra_use(
; BE-NEXT: [[B:%.*]] = bitcast <4 x i33> [[X:%.*]] to <12 x i11>
; BE-NEXT: call void @use_v12i11(<12 x i11> [[B]])
; BE-NEXT: [[R:%.*]] = trunc <4 x i33> [[X]] to <4 x i11>
; BE-NEXT: ret <4 x i11> [[R]]
;
%b = bitcast <4 x i33> %x to <12 x i11> ; weird (non-byte-sized) types: 4 x i33 -> 12 x i11
call void @use_v12i11(<12 x i11> %b) ; extra use keeps the bitcast alive after the fold
%r = shufflevector <12 x i11> %b, <12 x i11> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11> ; last piece of each lane
ret <4 x i11> %r
}
; Negative test: the bitcast source is a scalar (i128), not a vector, so there
; is no source vector to trunc. The IR must stay unchanged under both layouts
; (ANY prefix covers LE and BE).
define <4 x i16> @wrong_cast1(i128 %x) {
; ANY-LABEL: @wrong_cast1(
; ANY-NEXT: [[B:%.*]] = bitcast i128 [[X:%.*]] to <8 x i16>
; ANY-NEXT: [[R:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; ANY-NEXT: ret <4 x i16> [[R]]
;
%b = bitcast i128 %x to <8 x i16> ; scalar -> vector cast; fold requires a vector source
%r = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
ret <4 x i16> %r
}
; Negative test: the bitcast source elements are floating-point (<4 x float>),
; and an integer trunc of %x is not possible, so no transform happens on
; either layout (ANY checks show the IR unchanged).
define <4 x i16> @wrong_cast2(<4 x float> %x) {
; ANY-LABEL: @wrong_cast2(
; ANY-NEXT: [[B:%.*]] = bitcast <4 x float> [[X:%.*]] to <8 x i16>
; ANY-NEXT: [[R:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; ANY-NEXT: ret <4 x i16> [[R]]
;
%b = bitcast <4 x float> %x to <8 x i16> ; FP source elements block the integer-trunc fold
%r = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
ret <4 x i16> %r
}
; Negative test: the destination elements are floating-point (half). A trunc
; from <4 x i32> to <4 x half> is not a valid integer trunc, so the
; bitcast+shuffle must remain on both layouts (ANY checks).
define <4 x half> @wrong_cast3(<4 x i32> %x) {
; ANY-LABEL: @wrong_cast3(
; ANY-NEXT: [[B:%.*]] = bitcast <4 x i32> [[X:%.*]] to <8 x half>
; ANY-NEXT: [[R:%.*]] = shufflevector <8 x half> [[B]], <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; ANY-NEXT: ret <4 x half> [[R]]
;
%b = bitcast <4 x i32> %x to <8 x half> ; FP destination elements block the fold
%r = shufflevector <8 x half> %b, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
ret <4 x half> %r
}
; Negative test: the mask <0,2> only draws from the first two i32 lanes of the
; 4-lane source, so the result is not a truncation of the whole <4 x i32>
; input — no equivalent `trunc <4 x i32> to <2 x i16>` exists. Unchanged on
; both layouts (ANY checks).
define <2 x i16> @wrong_shuffle(<4 x i32> %x) {
; ANY-LABEL: @wrong_shuffle(
; ANY-NEXT: [[B:%.*]] = bitcast <4 x i32> [[X:%.*]] to <8 x i16>
; ANY-NEXT: [[R:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> undef, <2 x i32> <i32 0, i32 2>
; ANY-NEXT: ret <2 x i16> [[R]]
;
%b = bitcast <4 x i32> %x to <8 x i16>
%r = shufflevector <8 x i16> %b, <8 x i16> undef, <2 x i32> <i32 0, i32 2> ; mask skips lanes 2 and 3 entirely
ret <2 x i16> %r
}