llvm-mirror/test/CodeGen/WebAssembly/simd-widening.ll
Thomas Lively, commit e2b0ae5192: [WebAssembly] Remove intrinsics for SIMD widening ops
Instead, pattern match extends of extract_subvectors to generate
widening operations. Since extract_subvector is not a legal node, this
is implemented via a custom combine that recognizes extract_subvector
nodes before they are legalized. The combine produces custom ISD nodes
that are later pattern matched directly, just like the intrinsic was.

Also removes the clang builtins for these operations since the
instructions can now be generated from portable code sequences.
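As a rough illustration of such a portable sequence (not part of this commit; the type and function names below are hypothetical), clang's generic vector extensions can express the extend-of-low-half pattern that the test cases below check in IR form, e.g. for i16x8.widen_low_i8x16_s:

typedef signed char i8x16 __attribute__((vector_size(16)));
typedef signed char i8x8  __attribute__((vector_size(8)));
typedef short       i16x8 __attribute__((vector_size(16)));

/* Hypothetical helper: widen the low 8 lanes of an i8x16 vector to i16x8.
   The shuffle becomes a shufflevector of lanes 0-7 in IR and the conversion
   becomes a sext, which the backend can now select as a widening op. */
static inline i16x8 widen_low_s(i8x16 v) {
  i8x8 low = __builtin_shufflevector(v, v, 0, 1, 2, 3, 4, 5, 6, 7);
  return __builtin_convertvector(low, i16x8);
}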

Differential Revision: https://reviews.llvm.org/D84556
2020-07-28 18:25:55 -07:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+simd128 | FileCheck %s

;; Test that SIMD widening operations can be successfully selected

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

define <8 x i16> @widen_low_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: widen_low_i8x16_s:
; CHECK: .functype widen_low_i8x16_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_low_i8x16_s
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <16 x i8> %v, <16 x i8> undef,
    <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %widened = sext <8 x i8> %low to <8 x i16>
  ret <8 x i16> %widened
}

define <8 x i16> @widen_low_i8x16_u(<16 x i8> %v) {
; CHECK-LABEL: widen_low_i8x16_u:
; CHECK: .functype widen_low_i8x16_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_low_i8x16_u
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <16 x i8> %v, <16 x i8> undef,
    <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %widened = zext <8 x i8> %low to <8 x i16>
  ret <8 x i16> %widened
}

define <8 x i16> @widen_high_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: widen_high_i8x16_s:
; CHECK: .functype widen_high_i8x16_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_high_i8x16_s
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <16 x i8> %v, <16 x i8> undef,
    <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %widened = sext <8 x i8> %low to <8 x i16>
  ret <8 x i16> %widened
}

define <8 x i16> @widen_high_i8x16_u(<16 x i8> %v) {
; CHECK-LABEL: widen_high_i8x16_u:
; CHECK: .functype widen_high_i8x16_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_high_i8x16_u
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <16 x i8> %v, <16 x i8> undef,
    <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %widened = zext <8 x i8> %low to <8 x i16>
  ret <8 x i16> %widened
}

define <4 x i32> @widen_low_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: widen_low_i16x8_s:
; CHECK: .functype widen_low_i16x8_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_low_i16x8_s
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <8 x i16> %v, <8 x i16> undef,
    <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %widened = sext <4 x i16> %low to <4 x i32>
  ret <4 x i32> %widened
}

define <4 x i32> @widen_low_i16x8_u(<8 x i16> %v) {
; CHECK-LABEL: widen_low_i16x8_u:
; CHECK: .functype widen_low_i16x8_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_low_i16x8_u
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <8 x i16> %v, <8 x i16> undef,
    <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %widened = zext <4 x i16> %low to <4 x i32>
  ret <4 x i32> %widened
}

define <4 x i32> @widen_high_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: widen_high_i16x8_s:
; CHECK: .functype widen_high_i16x8_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_high_i16x8_s
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <8 x i16> %v, <8 x i16> undef,
    <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %widened = sext <4 x i16> %low to <4 x i32>
  ret <4 x i32> %widened
}

define <4 x i32> @widen_high_i16x8_u(<8 x i16> %v) {
; CHECK-LABEL: widen_high_i16x8_u:
; CHECK: .functype widen_high_i16x8_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_high_i16x8_u
; CHECK-NEXT: # fallthrough-return
  %low = shufflevector <8 x i16> %v, <8 x i16> undef,
    <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %widened = zext <4 x i16> %low to <4 x i32>
  ret <4 x i32> %widened
}

;; Also test that similar patterns with offsets not corresponding to
;; the low or high half are correctly expanded.

define <8 x i16> @widen_lowish_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: widen_lowish_i8x16_s:
; CHECK: .functype widen_lowish_i8x16_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 1
; CHECK-NEXT: i16x8.splat
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 2
; CHECK-NEXT: i16x8.replace_lane 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 3
; CHECK-NEXT: i16x8.replace_lane 2
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 4
; CHECK-NEXT: i16x8.replace_lane 3
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 5
; CHECK-NEXT: i16x8.replace_lane 4
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 6
; CHECK-NEXT: i16x8.replace_lane 5
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 7
; CHECK-NEXT: i16x8.replace_lane 6
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 8
; CHECK-NEXT: i16x8.replace_lane 7
; CHECK-NEXT: i32.const 8
; CHECK-NEXT: i16x8.shl
; CHECK-NEXT: i32.const 8
; CHECK-NEXT: i16x8.shr_s
; CHECK-NEXT: # fallthrough-return
  %lowish = shufflevector <16 x i8> %v, <16 x i8> undef,
    <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  %widened = sext <8 x i8> %lowish to <8 x i16>
  ret <8 x i16> %widened
}

define <4 x i32> @widen_lowish_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: widen_lowish_i16x8_s:
; CHECK: .functype widen_lowish_i16x8_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.extract_lane_u 1
; CHECK-NEXT: i32x4.splat
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.extract_lane_u 2
; CHECK-NEXT: i32x4.replace_lane 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.extract_lane_u 3
; CHECK-NEXT: i32x4.replace_lane 2
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.extract_lane_u 4
; CHECK-NEXT: i32x4.replace_lane 3
; CHECK-NEXT: i32.const 16
; CHECK-NEXT: i32x4.shl
; CHECK-NEXT: i32.const 16
; CHECK-NEXT: i32x4.shr_s
; CHECK-NEXT: # fallthrough-return
  %lowish = shufflevector <8 x i16> %v, <8 x i16> undef,
    <4 x i32> <i32 1, i32 2, i32 3, i32 4>
  %widened = sext <4 x i16> %lowish to <4 x i32>
  ret <4 x i32> %widened
}