From 1dd8fe9b9b67eb965ecc5f48825daf7f6266f605 Mon Sep 17 00:00:00 2001
From: Thomas Lively
Date: Thu, 15 Oct 2020 15:33:10 +0000
Subject: [PATCH] [WebAssembly] v128.load{8,16,32,64}_lane instructions

Prototype the newly proposed load_lane instructions, as specified in
https://github.com/WebAssembly/simd/pull/350. Since these instructions are not
available to origin trial users on Chrome stable, make them opt-in by only
selecting them from intrinsics rather than normal ISel patterns.

Since we only need rough prototypes to measure performance right now, this
commit does not implement all the load and store patterns that would be
necessary to make full use of the offset immediate. However, the full suite of
offset tests is included to make it easy to track improvements in the future.

Since these are the first instructions to have a memarg immediate as well as an
additional immediate, the disassembler needed some additional hacks to be able
to parse them correctly. Making that code more principled is left as future
work.

Differential Revision: https://reviews.llvm.org/D89366
---
 include/llvm/IR/IntrinsicsWebAssembly.td      |  46 +
 .../AsmParser/WebAssemblyAsmParser.cpp        |   6 +
 .../MCTargetDesc/WebAssemblyMCTargetDesc.h    |  16 +-
 .../WebAssembly/WebAssemblyISelLowering.cpp   |  50 +
 .../WebAssembly/WebAssemblyInstrSIMD.td       |  97 +-
 .../WebAssembly/simd-load-lane-offset.ll      | 968 ++++++++++++++++++
 test/MC/WebAssembly/simd-encodings.s          |  24 +
 7 files changed, 1201 insertions(+), 6 deletions(-)
 create mode 100644 test/CodeGen/WebAssembly/simd-load-lane-offset.ll

diff --git a/include/llvm/IR/IntrinsicsWebAssembly.td b/include/llvm/IR/IntrinsicsWebAssembly.td
index a7d86e2d2ce..8298312491f 100644
--- a/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -208,6 +208,52 @@ def int_wasm_load64_zero :
             [IntrReadMem, IntrArgMemOnly],
             "", [SDNPMemOperand]>;
 
+// These intrinsics do not mark their lane index arguments as immediate because
+// that changes the corresponding SDNode from ISD::Constant to
+// ISD::TargetConstant, which would require extra complications in the ISel
+// tablegen patterns. TODO: Replace these intrinsics with normal ISel patterns
+// once the load_lane instructions are merged to the proposal.
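+//
+// For reference, the CodeGen test added by this patch invokes these intrinsics
+// from LLVM IR as in the sketch below (usage sketch only, copied from the
+// test; the i32 lane index must be a constant for the ISel patterns to match):
+//
+//   declare <16 x i8> @llvm.wasm.load8.lane(i8*, <16 x i8>, i32)
+//   %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %p, <16 x i8> %v, i32 0)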
+def int_wasm_load8_lane :
+  Intrinsic<[llvm_v16i8_ty],
+            [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load16_lane :
+  Intrinsic<[llvm_v8i16_ty],
+            [LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load32_lane :
+  Intrinsic<[llvm_v4i32_ty],
+            [LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load64_lane :
+  Intrinsic<[llvm_v2i64_ty],
+            [LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store8_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store16_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store32_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store64_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+
 //===----------------------------------------------------------------------===//
 // Thread-local storage intrinsics
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
index 14908a630a0..92e855972e8 100644
--- a/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
+++ b/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -421,6 +421,12 @@ public:
           return error("Expected integer constant");
         parseSingleInteger(false, Operands);
       } else {
+        // v128.{load,store}{8,16,32,64}_lane has both a memarg and a lane
+        // index. We need to avoid parsing an extra alignment operand for the
+        // lane index.
+        auto IsLoadStoreLane = InstName.find("_lane") != StringRef::npos;
+        if (IsLoadStoreLane && Operands.size() == 4)
+          return false;
         // Alignment not specified (or atomics, must use default alignment).
// We can't just call WebAssembly::GetDefaultP2Align since we don't have // an opcode until after the assembly matcher, so set a default to fix diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h index 631e96dd924..d4f3a2853d2 100644 --- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h +++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h @@ -177,7 +177,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) { WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I32) WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I64) WASM_LOAD_STORE(LOAD_SPLAT_v8x16) - return 0; + WASM_LOAD_STORE(LOAD_LANE_v16i8) + WASM_LOAD_STORE(STORE_LANE_v16i8) + return 0; WASM_LOAD_STORE(LOAD16_S_I32) WASM_LOAD_STORE(LOAD16_U_I32) WASM_LOAD_STORE(LOAD16_S_I64) @@ -203,7 +205,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) { WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I32) WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I64) WASM_LOAD_STORE(LOAD_SPLAT_v16x8) - return 1; + WASM_LOAD_STORE(LOAD_LANE_v8i16) + WASM_LOAD_STORE(STORE_LANE_v8i16) + return 1; WASM_LOAD_STORE(LOAD_I32) WASM_LOAD_STORE(LOAD_F32) WASM_LOAD_STORE(STORE_I32) @@ -233,7 +237,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) { WASM_LOAD_STORE(ATOMIC_WAIT_I32) WASM_LOAD_STORE(LOAD_SPLAT_v32x4) WASM_LOAD_STORE(LOAD_ZERO_v4i32) - return 2; + WASM_LOAD_STORE(LOAD_LANE_v4i32) + WASM_LOAD_STORE(STORE_LANE_v4i32) + return 2; WASM_LOAD_STORE(LOAD_I64) WASM_LOAD_STORE(LOAD_F64) WASM_LOAD_STORE(STORE_I64) @@ -256,7 +262,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) { WASM_LOAD_STORE(LOAD_EXTEND_S_v2i64) WASM_LOAD_STORE(LOAD_EXTEND_U_v2i64) WASM_LOAD_STORE(LOAD_ZERO_v2i64) - return 3; + WASM_LOAD_STORE(LOAD_LANE_v2i64) + WASM_LOAD_STORE(STORE_LANE_v2i64) + return 3; WASM_LOAD_STORE(LOAD_V128) WASM_LOAD_STORE(STORE_V128) return 4; diff --git a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index 8a3f2f16c24..91a79b5985d 100644 --- a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -685,6 +685,56 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, Info.align = Info.memVT == MVT::i32 ? 
Align(4) : Align(8); Info.flags = MachineMemOperand::MOLoad; return true; + case Intrinsic::wasm_load8_lane: + case Intrinsic::wasm_load16_lane: + case Intrinsic::wasm_load32_lane: + case Intrinsic::wasm_load64_lane: + case Intrinsic::wasm_store8_lane: + case Intrinsic::wasm_store16_lane: + case Intrinsic::wasm_store32_lane: + case Intrinsic::wasm_store64_lane: { + MVT MemVT; + Align MemAlign; + switch (Intrinsic) { + case Intrinsic::wasm_load8_lane: + case Intrinsic::wasm_store8_lane: + MemVT = MVT::i8; + MemAlign = Align(1); + break; + case Intrinsic::wasm_load16_lane: + case Intrinsic::wasm_store16_lane: + MemVT = MVT::i16; + MemAlign = Align(2); + break; + case Intrinsic::wasm_load32_lane: + case Intrinsic::wasm_store32_lane: + MemVT = MVT::i32; + MemAlign = Align(4); + break; + case Intrinsic::wasm_load64_lane: + case Intrinsic::wasm_store64_lane: + MemVT = MVT::i64; + MemAlign = Align(8); + break; + default: + llvm_unreachable("unexpected intrinsic"); + } + if (Intrinsic == Intrinsic::wasm_load8_lane || + Intrinsic == Intrinsic::wasm_load16_lane || + Intrinsic == Intrinsic::wasm_load32_lane || + Intrinsic == Intrinsic::wasm_load64_lane) { + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.flags = MachineMemOperand::MOLoad; + } else { + Info.opc = ISD::INTRINSIC_VOID; + Info.flags = MachineMemOperand::MOStore; + } + Info.ptrVal = I.getArgOperand(0); + Info.memVT = MemVT; + Info.offset = 0; + Info.align = MemAlign; + return true; + } default: return false; } diff --git a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index 544eed54415..634f958d6ca 100644 --- a/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -53,7 +53,7 @@ defm LOAD_V128_A64 : "v128.load\t$off$p2align", 0>; } -// Def load and store patterns from WebAssemblyInstrMemory.td for vector types +// Def load patterns from WebAssemblyInstrMemory.td for vector types foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { defm : LoadPatNoOffset; defm : LoadPatImmOff; @@ -201,6 +201,51 @@ defm : LoadPatOffsetOnly; defm : LoadPatGlobalAddrOffOnly; defm : LoadPatGlobalAddrOffOnly; +// Load lane +multiclass SIMDLoadLane simdop> { + let mayLoad = 1, UseNamedOperandTable = 1 in { + defm LOAD_LANE_#vec_t#_A32 : + SIMD_I<(outs V128:$dst), + (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx, + I32:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx), + [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + defm LOAD_LANE_#vec_t#_A64 : + SIMD_I<(outs V128:$dst), + (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx, + I64:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx), + [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + } // mayLoad = 1, UseNamedOperandTable = 1 +} + +// TODO: Also support v4f32 and v2f64 once the instructions are merged +// to the proposal +defm "" : SIMDLoadLane; +defm "" : SIMDLoadLane; +defm "" : SIMDLoadLane; +defm "" : SIMDLoadLane; + +// Select loads with no constant offset. 
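+// (The two leading zeros in each output pattern below are the $p2align and
+// $off immediates, so only this no-offset addressing form is selected until
+// the remaining load patterns are implemented.)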
+multiclass LoadLanePatNoOffset { + def : Pat<(ty (kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))), + (!cast("LOAD_LANE_"#ty#"_A32") 0, 0, imm:$idx, I32:$addr, V128:$vec)>, + Requires<[HasAddr32]>; + def : Pat<(ty (kind (i64 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))), + (!cast("LOAD_LANE_"#ty#"_A64") 0, 0, imm:$idx, I64:$addr, V128:$vec)>, + Requires<[HasAddr64]>; +} + +defm : LoadLanePatNoOffset; +defm : LoadLanePatNoOffset; +defm : LoadLanePatNoOffset; +defm : LoadLanePatNoOffset; + +// TODO: Also support the other load patterns for load_lane once the instructions +// are merged to the proposal. + // Store: v128.store let mayStore = 1, UseNamedOperandTable = 1 in { defm STORE_V128_A32 : @@ -214,8 +259,9 @@ defm STORE_V128_A64 : "v128.store\t${off}(${addr})$p2align, $vec", "v128.store\t$off$p2align", 11>; } + +// Def store patterns from WebAssemblyInstrMemory.td for vector types foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { -// Def load and store patterns from WebAssemblyInstrMemory.td for vector types defm : StorePatNoOffset; defm : StorePatImmOff; defm : StorePatImmOff; @@ -223,6 +269,53 @@ defm : StorePatOffsetOnly; defm : StorePatGlobalAddrOffOnly; } +// Store lane +multiclass SIMDStoreLane simdop> { + let mayStore = 1, UseNamedOperandTable = 1 in { + defm STORE_LANE_#vec_t#_A32 : + SIMD_I<(outs), + (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx, + I32:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx), + [], name#"\t${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + defm STORE_LANE_#vec_t#_A64 : + SIMD_I<(outs V128:$dst), + (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx, + I64:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx), + [], name#"\t${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + } // mayStore = 1, UseNamedOperandTable = 1 +} + +// TODO: Also support v4f32 and v2f64 once the instructions are merged +// to the proposal +defm "" : SIMDStoreLane; +defm "" : SIMDStoreLane; +defm "" : SIMDStoreLane; +defm "" : SIMDStoreLane; + +// Select stores with no constant offset. +multiclass StoreLanePatNoOffset { + def : Pat<(kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)), + (!cast("STORE_LANE_"#ty#"_A32") + 0, 0, imm:$idx, I32:$addr, ty:$vec)>, + Requires<[HasAddr32]>; + def : Pat<(kind (i32 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)), + (!cast("STORE_LANE_"#ty#"_A64") + 0, 0, imm:$idx, I64:$addr, ty:$vec)>, + Requires<[HasAddr64]>; +} + +defm : StoreLanePatNoOffset; +defm : StoreLanePatNoOffset; +defm : StoreLanePatNoOffset; +defm : StoreLanePatNoOffset; + +// TODO: Also support the other store patterns for store_lane once the +// instructions are merged to the proposal. + //===----------------------------------------------------------------------===// // Constructing SIMD values //===----------------------------------------------------------------------===// diff --git a/test/CodeGen/WebAssembly/simd-load-lane-offset.ll b/test/CodeGen/WebAssembly/simd-load-lane-offset.ll new file mode 100644 index 00000000000..08c3f80e57b --- /dev/null +++ b/test/CodeGen/WebAssembly/simd-load-lane-offset.ll @@ -0,0 +1,968 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s + +; Test SIMD v128.load{8,16,32,64}_lane instructions. 
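+; Each load_lane test replaces a single lane of the input vector with a value
+; loaded from memory, and each store_lane test stores a single extracted lane
+; to memory; all tests use lane index 0 via the intrinsics declared below.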
+ +; TODO: Use the offset field by supporting more patterns. Right now only the +; equivalents of LoadPatNoOffset/StorePatNoOffset are supported. + +target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +target triple = "wasm32-unknown-unknown" + +declare <16 x i8> @llvm.wasm.load8.lane(i8*, <16 x i8>, i32) +declare <8 x i16> @llvm.wasm.load16.lane(i16*, <8 x i16>, i32) +declare <4 x i32> @llvm.wasm.load32.lane(i32*, <4 x i32>, i32) +declare <2 x i64> @llvm.wasm.load64.lane(i64*, <2 x i64>, i32) + +declare void @llvm.wasm.store8.lane(i8*, <16 x i8>, i32) +declare void @llvm.wasm.store16.lane(i16*, <8 x i16>, i32) +declare void @llvm.wasm.store32.lane(i32*, <4 x i32>, i32) +declare void @llvm.wasm.store64.lane(i64*, <2 x i64>, i32) + +;===---------------------------------------------------------------------------- +; v128.load8_lane / v128.store8_lane +;===---------------------------------------------------------------------------- + +define <16 x i8> @load_lane_i8_no_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_no_offset: +; CHECK: .functype load_lane_i8_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %p, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_folded_offset: +; CHECK: .functype load_lane_i8_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i8* + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_folded_gep_offset: +; CHECK: .functype load_lane_i8_with_folded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 6 + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i8_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 -6 + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_unfolded_offset: +; CHECK: .functype load_lane_i8_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = 
add nsw i32 %q, 24 + %s = inttoptr i32 %r to i8* + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i8_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i8, i8* %p, i32 6 + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_from_numeric_address(<16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_from_numeric_address: +; CHECK: .functype load_lane_i8_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i8* + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +@gv_i8 = global i8 0 +define <16 x i8> @load_lane_i8_from_global_address(<16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_from_global_address: +; CHECK: .functype load_lane_i8_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i8 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* @gv_i8, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_no_offset: +; CHECK: .functype store_lane_i8_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store8.lane(i8* %p, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_folded_offset: +; CHECK: .functype store_lane_i8_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_folded_gep_offset: +; CHECK: .functype store_lane_i8_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 6 + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i8_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 
0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 -6 + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_unfolded_offset: +; CHECK: .functype store_lane_i8_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i8_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i8, i8* %p, i32 6 + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_to_numeric_address(<16 x i8> %v) { +; CHECK-LABEL: store_lane_i8_to_numeric_address: +; CHECK: .functype store_lane_i8_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i8* + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_from_global_address(<16 x i8> %v) { +; CHECK-LABEL: store_lane_i8_from_global_address: +; CHECK: .functype store_lane_i8_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i8 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store8.lane(i8* @gv_i8, <16 x i8> %v, i32 0) + ret void +} + +;===---------------------------------------------------------------------------- +; v128.load16_lane / v128.store16_lane +;===---------------------------------------------------------------------------- + +define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_no_offset: +; CHECK: .functype load_lane_i16_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %p, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_folded_offset: +; CHECK: .functype load_lane_i16_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i16* + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_folded_gep_offset: +; CHECK: .functype 
load_lane_i16_with_folded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 6 + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i16_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 -6 + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_unfolded_offset: +; CHECK: .functype load_lane_i16_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i16* + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i16_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i16, i16* %p, i32 6 + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_from_numeric_address(<8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_from_numeric_address: +; CHECK: .functype load_lane_i16_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i16* + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +@gv_i16 = global i16 0 +define <8 x i16> @load_lane_i16_from_global_address(<8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_from_global_address: +; CHECK: .functype load_lane_i16_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i16 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* @gv_i16, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_no_offset: +; CHECK: .functype store_lane_i16_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store16.lane(i16* %p, <8 x i16> %v, i32 0) + ret void 
+} + +define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_folded_offset: +; CHECK: .functype store_lane_i16_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i16* + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_folded_gep_offset: +; CHECK: .functype store_lane_i16_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 6 + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i16_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 -6 + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_unfolded_offset: +; CHECK: .functype store_lane_i16_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i16* + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i16_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i16, i16* %p, i32 6 + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_to_numeric_address(<8 x i16> %v) { +; CHECK-LABEL: store_lane_i16_to_numeric_address: +; CHECK: .functype store_lane_i16_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i16* + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_from_global_address(<8 x i16> %v) { +; CHECK-LABEL: store_lane_i16_from_global_address: +; CHECK: .functype store_lane_i16_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i16 +; CHECK-NEXT: local.get 
0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store16.lane(i16* @gv_i16, <8 x i16> %v, i32 0) + ret void +} + +;===---------------------------------------------------------------------------- +; v128.load32_lane / v128.store32_lane +;===---------------------------------------------------------------------------- + +define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_no_offset: +; CHECK: .functype load_lane_i32_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %p, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_folded_offset: +; CHECK: .functype load_lane_i32_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i32* + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_folded_gep_offset: +; CHECK: .functype load_lane_i32_with_folded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 6 + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i32_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 -6 + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_unfolded_offset: +; CHECK: .functype load_lane_i32_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i32* + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i32_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: 
v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i32, i32* %p, i32 6 + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_from_numeric_address(<4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_from_numeric_address: +; CHECK: .functype load_lane_i32_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i32* + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +@gv_i32 = global i32 0 +define <4 x i32> @load_lane_i32_from_global_address(<4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_from_global_address: +; CHECK: .functype load_lane_i32_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i32 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* @gv_i32, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_no_offset: +; CHECK: .functype store_lane_i32_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store32.lane(i32* %p, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_folded_offset: +; CHECK: .functype store_lane_i32_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i32* + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_folded_gep_offset: +; CHECK: .functype store_lane_i32_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 6 + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i32_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 -6 + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_unfolded_offset: +; CHECK: .functype store_lane_i32_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 
24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i32* + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i32_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i32, i32* %p, i32 6 + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_to_numeric_address(<4 x i32> %v) { +; CHECK-LABEL: store_lane_i32_to_numeric_address: +; CHECK: .functype store_lane_i32_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i32* + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_from_global_address(<4 x i32> %v) { +; CHECK-LABEL: store_lane_i32_from_global_address: +; CHECK: .functype store_lane_i32_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i32 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store32.lane(i32* @gv_i32, <4 x i32> %v, i32 0) + ret void +} + +;===---------------------------------------------------------------------------- +; v128.load64_lane / v128.store64_lane +;===---------------------------------------------------------------------------- + +define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_no_offset: +; CHECK: .functype load_lane_i64_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %p, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_folded_offset: +; CHECK: .functype load_lane_i64_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i64* + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_folded_gep_offset: +; CHECK: .functype load_lane_i64_with_folded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 6 + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> 
@load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i64_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 -6 + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_unfolded_offset: +; CHECK: .functype load_lane_i64_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i64* + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i64_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i64, i64* %p, i32 6 + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_from_numeric_address(<2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_from_numeric_address: +; CHECK: .functype load_lane_i64_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i64* + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +@gv_i64 = global i64 0 +define <2 x i64> @load_lane_i64_from_global_address(<2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_from_global_address: +; CHECK: .functype load_lane_i64_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i64 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* @gv_i64, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_no_offset: +; CHECK: .functype store_lane_i64_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store64.lane(i64* %p, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_folded_offset: +; CHECK: .functype store_lane_i64_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 
+ %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i64* + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_folded_gep_offset: +; CHECK: .functype store_lane_i64_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 6 + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i64_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 -6 + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_unfolded_offset: +; CHECK: .functype store_lane_i64_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i64* + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i64_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i64, i64* %p, i32 6 + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_to_numeric_address(<2 x i64> %v) { +; CHECK-LABEL: store_lane_i64_to_numeric_address: +; CHECK: .functype store_lane_i64_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i64* + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_from_global_address(<2 x i64> %v) { +; CHECK-LABEL: store_lane_i64_from_global_address: +; CHECK: .functype store_lane_i64_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i64 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store64.lane(i64* @gv_i64, <2 x i64> %v, i32 0) + ret void +} diff --git a/test/MC/WebAssembly/simd-encodings.s b/test/MC/WebAssembly/simd-encodings.s index 57b42f2753f..f18a4f196d6 100644 --- a/test/MC/WebAssembly/simd-encodings.s +++ b/test/MC/WebAssembly/simd-encodings.s @@ -280,6 +280,30 @@ main: # CHECK: v128.bitselect # 
encoding: [0xfd,0x52]
     v128.bitselect
 
+    # CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01]
+    v128.load8_lane 32, 1
+
+    # CHECK: v128.load16_lane 32, 1 # encoding: [0xfd,0x59,0x01,0x20,0x01]
+    v128.load16_lane 32, 1
+
+    # CHECK: v128.load32_lane 32, 1 # encoding: [0xfd,0x5a,0x02,0x20,0x01]
+    v128.load32_lane 32, 1
+
+    # CHECK: v128.load64_lane 32, 1 # encoding: [0xfd,0x5b,0x03,0x20,0x01]
+    v128.load64_lane 32, 1
+
+    # CHECK: v128.store8_lane 32, 1 # encoding: [0xfd,0x5c,0x00,0x20,0x01]
+    v128.store8_lane 32, 1
+
+    # CHECK: v128.store16_lane 32, 1 # encoding: [0xfd,0x5d,0x01,0x20,0x01]
+    v128.store16_lane 32, 1
+
+    # CHECK: v128.store32_lane 32, 1 # encoding: [0xfd,0x5e,0x02,0x20,0x01]
+    v128.store32_lane 32, 1
+
+    # CHECK: v128.store64_lane 32, 1 # encoding: [0xfd,0x5f,0x03,0x20,0x01]
+    v128.store64_lane 32, 1
+
     # CHECK: i8x16.abs # encoding: [0xfd,0x60]
     i8x16.abs