From e2b0ae5192feb33a10d12ecf9f2cff3aec0c8938 Mon Sep 17 00:00:00 2001 From: Thomas Lively Date: Tue, 28 Jul 2020 18:25:55 -0700 Subject: [PATCH] [WebAssembly] Remove intrinsics for SIMD widening ops Instead, pattern match extends of extract_subvectors to generate widening operations. Since extract_subvector is not a legal node, this is implemented via a custom combine that recognizes extract_subvector nodes before they are legalized. The combine produces custom ISD nodes that are later pattern matched directly, just like the intrinsic was. Also removes the clang builtins for these operations since the instructions can now be generated from portable code sequences. Differential Revision: https://reviews.llvm.org/D84556 --- include/llvm/IR/IntrinsicsWebAssembly.td | 16 -- lib/Target/WebAssembly/WebAssemblyISD.def | 4 + .../WebAssembly/WebAssemblyISelLowering.cpp | 50 +++++ .../WebAssembly/WebAssemblyInstrSIMD.td | 14 +- test/CodeGen/WebAssembly/simd-intrinsics.ll | 80 -------- test/CodeGen/WebAssembly/simd-widening.ll | 180 ++++++++++++++++++ 6 files changed, 244 insertions(+), 100 deletions(-) create mode 100644 test/CodeGen/WebAssembly/simd-widening.ll diff --git a/include/llvm/IR/IntrinsicsWebAssembly.td b/include/llvm/IR/IntrinsicsWebAssembly.td index 7c9ceb148a4..9cc9f9eb6f1 100644 --- a/include/llvm/IR/IntrinsicsWebAssembly.td +++ b/include/llvm/IR/IntrinsicsWebAssembly.td @@ -159,22 +159,6 @@ def int_wasm_narrow_unsigned : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>], [IntrNoMem, IntrSpeculatable]>; -def int_wasm_widen_low_signed : - Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty], - [IntrNoMem, IntrSpeculatable]>; -def int_wasm_widen_high_signed : - Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty], - [IntrNoMem, IntrSpeculatable]>; -def int_wasm_widen_low_unsigned : - Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty], - [IntrNoMem, IntrSpeculatable]>; -def int_wasm_widen_high_unsigned : - Intrinsic<[llvm_anyvector_ty], - 
[llvm_anyvector_ty], - [IntrNoMem, IntrSpeculatable]>; // TODO: Replace these intrinsics with normal ISel patterns def int_wasm_pmin : diff --git a/lib/Target/WebAssembly/WebAssemblyISD.def b/lib/Target/WebAssembly/WebAssemblyISD.def index dee1c4e2814..5720d3e5afb 100644 --- a/lib/Target/WebAssembly/WebAssemblyISD.def +++ b/lib/Target/WebAssembly/WebAssemblyISD.def @@ -29,6 +29,10 @@ HANDLE_NODETYPE(SWIZZLE) HANDLE_NODETYPE(VEC_SHL) HANDLE_NODETYPE(VEC_SHR_S) HANDLE_NODETYPE(VEC_SHR_U) +HANDLE_NODETYPE(WIDEN_LOW_S) +HANDLE_NODETYPE(WIDEN_LOW_U) +HANDLE_NODETYPE(WIDEN_HIGH_S) +HANDLE_NODETYPE(WIDEN_HIGH_U) HANDLE_NODETYPE(THROW) HANDLE_NODETYPE(MEMORY_COPY) HANDLE_NODETYPE(MEMORY_FILL) diff --git a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index d6197e9ea84..cdfbfe388ab 100644 --- a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -123,6 +123,10 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( // Hoist bitcasts out of shuffles setTargetDAGCombine(ISD::VECTOR_SHUFFLE); + // Combine extends of extract_subvectors into widening ops + setTargetDAGCombine(ISD::SIGN_EXTEND); + setTargetDAGCombine(ISD::ZERO_EXTEND); + // Support saturating add for i8x16 and i16x8 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT}) for (auto T : {MVT::v16i8, MVT::v8i16}) @@ -1745,6 +1749,49 @@ performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { return DAG.getBitcast(DstType, NewShuffle); } +static SDValue performVectorWidenCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { + auto &DAG = DCI.DAG; + assert(N->getOpcode() == ISD::SIGN_EXTEND || + N->getOpcode() == ISD::ZERO_EXTEND); + + // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if + // possible before the extract_subvector can be expanded. 
+ auto Extract = N->getOperand(0); + if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR) + return SDValue(); + auto Source = Extract.getOperand(0); + auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1)); + if (IndexNode == nullptr) + return SDValue(); + auto Index = IndexNode->getZExtValue(); + + // Only v8i8 and v4i16 extracts can be widened, and only if the extracted + // subvector is the low or high half of its source. + EVT ResVT = N->getValueType(0); + if (ResVT == MVT::v8i16) { + if (Extract.getValueType() != MVT::v8i8 || + Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8)) + return SDValue(); + } else if (ResVT == MVT::v4i32) { + if (Extract.getValueType() != MVT::v4i16 || + Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4)) + return SDValue(); + } else { + return SDValue(); + } + + bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND; + bool IsLow = Index == 0; + + unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::WIDEN_LOW_S + : WebAssemblyISD::WIDEN_HIGH_S) + : (IsLow ? 
WebAssemblyISD::WIDEN_LOW_U + : WebAssemblyISD::WIDEN_HIGH_U); + + return DAG.getNode(Op, SDLoc(N), ResVT, Source); +} + SDValue WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { @@ -1753,5 +1800,8 @@ WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); case ISD::VECTOR_SHUFFLE: return performVECTOR_SHUFFLECombine(N, DCI); + case ISD::SIGN_EXTEND: + case ISD::ZERO_EXTEND: + return performVectorWidenCombine(N, DCI); } } diff --git a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index 16bfc81af8b..9bbccecffaa 100644 --- a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -892,15 +892,21 @@ def : Pat<(v4i32 (int_wasm_trunc_saturate_unsigned (v4f32 V128:$src))), (fp_to_uint_v4i32_v4f32 (v4f32 V128:$src))>; // Widening operations +def widen_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>; +def widen_low_s : SDNode<"WebAssemblyISD::WIDEN_LOW_S", widen_t>; +def widen_high_s : SDNode<"WebAssemblyISD::WIDEN_HIGH_S", widen_t>; +def widen_low_u : SDNode<"WebAssemblyISD::WIDEN_LOW_U", widen_t>; +def widen_high_u : SDNode<"WebAssemblyISD::WIDEN_HIGH_U", widen_t>; + multiclass SIMDWiden<ValueType vec_t, string vec, ValueType arg_t, string arg, bits<32> baseInst> { - defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_low_signed, + defm "" : SIMDConvert<vec_t, arg_t, widen_low_s, "widen_low_"#arg#"_s", baseInst>; - defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_high_signed, + defm "" : SIMDConvert<vec_t, arg_t, widen_high_s, "widen_high_"#arg#"_s", !add(baseInst, 1)>; - defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_low_unsigned, + defm "" : SIMDConvert<vec_t, arg_t, widen_low_u, "widen_low_"#arg#"_u", !add(baseInst, 2)>; - defm "" : SIMDConvert<vec_t, arg_t, int_wasm_widen_high_unsigned, + defm "" : SIMDConvert<vec_t, arg_t, widen_high_u, "widen_high_"#arg#"_u", !add(baseInst, 3)>; } diff --git a/test/CodeGen/WebAssembly/simd-intrinsics.ll b/test/CodeGen/WebAssembly/simd-intrinsics.ll index 05d256fa552..63092a8c517 100644 --- a/test/CodeGen/WebAssembly/simd-intrinsics.ll +++ b/test/CodeGen/WebAssembly/simd-intrinsics.ll @@ -294,46 +294,6 @@ define <8 x i16> @narrow_unsigned_v8i16(<4 x i32> %low, <4 x i32> %high) { ret <8 x i16> %a } -; CHECK-LABEL: widen_low_signed_v8i16: -; SIMD128-NEXT: .functype widen_low_signed_v8i16 (v128) -> (v128){{$}} -; SIMD128-NEXT: i16x8.widen_low_i8x16_s $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <8 x i16> 
@llvm.wasm.widen.low.signed.v8i16.v16i8(<16 x i8>) -define <8 x i16> @widen_low_signed_v8i16(<16 x i8> %v) { - %a = call <8 x i16> @llvm.wasm.widen.low.signed.v8i16.v16i8(<16 x i8> %v) - ret <8 x i16> %a -} - -; CHECK-LABEL: widen_high_signed_v8i16: -; SIMD128-NEXT: .functype widen_high_signed_v8i16 (v128) -> (v128){{$}} -; SIMD128-NEXT: i16x8.widen_high_i8x16_s $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <8 x i16> @llvm.wasm.widen.high.signed.v8i16.v16i8(<16 x i8>) -define <8 x i16> @widen_high_signed_v8i16(<16 x i8> %v) { - %a = call <8 x i16> @llvm.wasm.widen.high.signed.v8i16.v16i8(<16 x i8> %v) - ret <8 x i16> %a -} - -; CHECK-LABEL: widen_low_unsigned_v8i16: -; SIMD128-NEXT: .functype widen_low_unsigned_v8i16 (v128) -> (v128){{$}} -; SIMD128-NEXT: i16x8.widen_low_i8x16_u $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <8 x i16> @llvm.wasm.widen.low.unsigned.v8i16.v16i8(<16 x i8>) -define <8 x i16> @widen_low_unsigned_v8i16(<16 x i8> %v) { - %a = call <8 x i16> @llvm.wasm.widen.low.unsigned.v8i16.v16i8(<16 x i8> %v) - ret <8 x i16> %a -} - -; CHECK-LABEL: widen_high_unsigned_v8i16: -; SIMD128-NEXT: .functype widen_high_unsigned_v8i16 (v128) -> (v128){{$}} -; SIMD128-NEXT: i16x8.widen_high_i8x16_u $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <8 x i16> @llvm.wasm.widen.high.unsigned.v8i16.v16i8(<16 x i8>) -define <8 x i16> @widen_high_unsigned_v8i16(<16 x i8> %v) { - %a = call <8 x i16> @llvm.wasm.widen.high.unsigned.v8i16.v16i8(<16 x i8> %v) - ret <8 x i16> %a -} - ; ============================================================================== ; 4 x i32 ; ============================================================================== @@ -411,46 +371,6 @@ define <4 x i32> @trunc_sat_u_v4i32(<4 x float> %x) { ret <4 x i32> %a } -; CHECK-LABEL: widen_low_signed_v4i32: -; SIMD128-NEXT: .functype widen_low_signed_v4i32 (v128) -> (v128){{$}} -; SIMD128-NEXT: 
i32x4.widen_low_i16x8_s $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <4 x i32> @llvm.wasm.widen.low.signed.v4i32.v8i16(<8 x i16>) -define <4 x i32> @widen_low_signed_v4i32(<8 x i16> %v) { - %a = call <4 x i32> @llvm.wasm.widen.low.signed.v4i32.v8i16(<8 x i16> %v) - ret <4 x i32> %a -} - -; CHECK-LABEL: widen_high_signed_v4i32: -; SIMD128-NEXT: .functype widen_high_signed_v4i32 (v128) -> (v128){{$}} -; SIMD128-NEXT: i32x4.widen_high_i16x8_s $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <4 x i32> @llvm.wasm.widen.high.signed.v4i32.v8i16(<8 x i16>) -define <4 x i32> @widen_high_signed_v4i32(<8 x i16> %v) { - %a = call <4 x i32> @llvm.wasm.widen.high.signed.v4i32.v8i16(<8 x i16> %v) - ret <4 x i32> %a -} - -; CHECK-LABEL: widen_low_unsigned_v4i32: -; SIMD128-NEXT: .functype widen_low_unsigned_v4i32 (v128) -> (v128){{$}} -; SIMD128-NEXT: i32x4.widen_low_i16x8_u $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <4 x i32> @llvm.wasm.widen.low.unsigned.v4i32.v8i16(<8 x i16>) -define <4 x i32> @widen_low_unsigned_v4i32(<8 x i16> %v) { - %a = call <4 x i32> @llvm.wasm.widen.low.unsigned.v4i32.v8i16(<8 x i16> %v) - ret <4 x i32> %a -} - -; CHECK-LABEL: widen_high_unsigned_v4i32: -; SIMD128-NEXT: .functype widen_high_unsigned_v4i32 (v128) -> (v128){{$}} -; SIMD128-NEXT: i32x4.widen_high_i16x8_u $push[[R:[0-9]+]]=, $0{{$}} -; SIMD128-NEXT: return $pop[[R]]{{$}} -declare <4 x i32> @llvm.wasm.widen.high.unsigned.v4i32.v8i16(<8 x i16>) -define <4 x i32> @widen_high_unsigned_v4i32(<8 x i16> %v) { - %a = call <4 x i32> @llvm.wasm.widen.high.unsigned.v4i32.v8i16(<8 x i16> %v) - ret <4 x i32> %a -} - ; ============================================================================== ; 2 x i64 ; ============================================================================== diff --git a/test/CodeGen/WebAssembly/simd-widening.ll b/test/CodeGen/WebAssembly/simd-widening.ll new file mode 100644 index 
00000000000..c9a7ffbbfca --- /dev/null +++ b/test/CodeGen/WebAssembly/simd-widening.ll @@ -0,0 +1,180 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mattr=+simd128 | FileCheck %s + +;; Test that SIMD widening operations can be successfully selected + +target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +target triple = "wasm32-unknown-unknown" + +define <8 x i16> @widen_low_i8x16_s(<16 x i8> %v) { +; CHECK-LABEL: widen_low_i8x16_s: +; CHECK: .functype widen_low_i8x16_s (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.widen_low_i8x16_s +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <16 x i8> %v, <16 x i8> undef, + <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %widened = sext <8 x i8> %low to <8 x i16> + ret <8 x i16> %widened +} + +define <8 x i16> @widen_low_i8x16_u(<16 x i8> %v) { +; CHECK-LABEL: widen_low_i8x16_u: +; CHECK: .functype widen_low_i8x16_u (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.widen_low_i8x16_u +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <16 x i8> %v, <16 x i8> undef, + <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %widened = zext <8 x i8> %low to <8 x i16> + ret <8 x i16> %widened +} + +define <8 x i16> @widen_high_i8x16_s(<16 x i8> %v) { +; CHECK-LABEL: widen_high_i8x16_s: +; CHECK: .functype widen_high_i8x16_s (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.widen_high_i8x16_s +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <16 x i8> %v, <16 x i8> undef, + <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %widened = sext <8 x i8> %low to <8 x i16> + ret <8 x i16> %widened +} + +define <8 x i16> @widen_high_i8x16_u(<16 x i8> %v) { +; CHECK-LABEL: widen_high_i8x16_u: +; CHECK: .functype widen_high_i8x16_u (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.widen_high_i8x16_u +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <16 x i8> %v, <16 x i8> undef, + <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %widened = zext <8 x i8> %low to <8 x i16> + ret <8 x i16> %widened +} + +define <4 x i32> @widen_low_i16x8_s(<8 x i16> %v) { +; CHECK-LABEL: widen_low_i16x8_s: +; CHECK: .functype widen_low_i16x8_s (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32x4.widen_low_i16x8_s +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <8 x i16> %v, <8 x i16> undef, + <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %widened = sext <4 x i16> %low to <4 x i32> + ret <4 x i32> %widened +} + +define <4 x i32> @widen_low_i16x8_u(<8 x i16> %v) { +; CHECK-LABEL: widen_low_i16x8_u: +; CHECK: .functype widen_low_i16x8_u (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32x4.widen_low_i16x8_u +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <8 x i16> %v, <8 x i16> undef, + <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %widened = zext <4 x i16> %low to <4 x i32> + ret <4 x i32> %widened +} + +define <4 x i32> @widen_high_i16x8_s(<8 x i16> %v) { +; CHECK-LABEL: widen_high_i16x8_s: +; CHECK: .functype widen_high_i16x8_s (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32x4.widen_high_i16x8_s +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <8 x i16> %v, <8 x i16> undef, + <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %widened = sext <4 x i16> %low to <4 x i32> + ret <4 x i32> %widened +} + +define <4 x i32> @widen_high_i16x8_u(<8 x i16> %v) { +; CHECK-LABEL: widen_high_i16x8_u: +; CHECK: .functype widen_high_i16x8_u (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32x4.widen_high_i16x8_u +; CHECK-NEXT: # fallthrough-return + %low = shufflevector <8 x i16> %v, <8 x i16> undef, + <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %widened = zext <4 x i16> %low to <4 x i32> + ret <4 x i32> %widened +} + +;; Also test that similar patterns with offsets not corresponding to +;; the low or high half are correctly expanded. 
+ +define <8 x i16> @widen_lowish_i8x16_s(<16 x i8> %v) { +; CHECK-LABEL: widen_lowish_i8x16_s: +; CHECK: .functype widen_lowish_i8x16_s (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 1 +; CHECK-NEXT: i16x8.splat +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 2 +; CHECK-NEXT: i16x8.replace_lane 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 3 +; CHECK-NEXT: i16x8.replace_lane 2 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 4 +; CHECK-NEXT: i16x8.replace_lane 3 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 5 +; CHECK-NEXT: i16x8.replace_lane 4 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 6 +; CHECK-NEXT: i16x8.replace_lane 5 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 7 +; CHECK-NEXT: i16x8.replace_lane 6 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.extract_lane_u 8 +; CHECK-NEXT: i16x8.replace_lane 7 +; CHECK-NEXT: i32.const 8 +; CHECK-NEXT: i16x8.shl +; CHECK-NEXT: i32.const 8 +; CHECK-NEXT: i16x8.shr_s +; CHECK-NEXT: # fallthrough-return + %lowish = shufflevector <16 x i8> %v, <16 x i8> undef, + <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8> + %widened = sext <8 x i8> %lowish to <8 x i16> + ret <8 x i16> %widened +} + +define <4 x i32> @widen_lowish_i16x8_s(<8 x i16> %v) { +; CHECK-LABEL: widen_lowish_i16x8_s: +; CHECK: .functype widen_lowish_i16x8_s (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.extract_lane_u 1 +; CHECK-NEXT: i32x4.splat +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.extract_lane_u 2 +; CHECK-NEXT: i32x4.replace_lane 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.extract_lane_u 3 +; CHECK-NEXT: i32x4.replace_lane 2 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i16x8.extract_lane_u 4 +; CHECK-NEXT: i32x4.replace_lane 3 +; CHECK-NEXT: i32.const 16 +; CHECK-NEXT: i32x4.shl +; CHECK-NEXT: i32.const 16 +; CHECK-NEXT: i32x4.shr_s +; CHECK-NEXT: # fallthrough-return + %lowish = shufflevector <8 x i16> %v, <8 x i16> undef, + <4 x i32> <i32 1, i32 2, i32 3, i32 4> + %widened = sext <4 x i16> %lowish to <4 x i32> + ret <4 x i32> %widened +}