
[WebAssembly] v128.load{8,16,32,64}_lane instructions

Prototype the newly proposed load_lane instructions, as specified in
https://github.com/WebAssembly/simd/pull/350. Since these instructions are not
available to origin trial users on Chrome stable, make them opt-in by only
selecting them from intrinsics rather than normal ISel patterns. Since we only
need rough prototypes to measure performance right now, this commit does not
implement all the load and store patterns that would be necessary to make full
use of the offset immediate. However, the full suite of offset tests is included
to make it easy to track improvements in the future.

Since these are the first instructions to have a memarg immediate as well as an
additional immediate, the disassembler needed some additional hacks to be able
to parse them correctly. Making that code more principled is left as future
work.

Differential Revision: https://reviews.llvm.org/D89366
Thomas Lively 2020-10-15 15:33:10 +00:00
parent d5acf960dc
commit 1dd8fe9b9b
7 changed files with 1201 additions and 6 deletions
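
Because the new instructions are only reachable through intrinsics, a frontend opts in by emitting the intrinsic calls directly rather than relying on pattern-based selection. A minimal LLVM IR sketch (the function name is illustrative; the declaration matches the one used in the new CodeGen test below):

target triple = "wasm32-unknown-unknown"

declare <16 x i8> @llvm.wasm.load8.lane(i8*, <16 x i8>, i32)

define <16 x i8> @replace_lane_from_memory(i8* %p, <16 x i8> %v) {
  ; Loads one byte from %p and inserts it into lane 5 of %v.
  %r = call <16 x i8> @llvm.wasm.load8.lane(i8* %p, <16 x i8> %v, i32 5)
  ret <16 x i8> %r
}

Run through llc with -mattr=+simd128, this selects v128.load8_lane, as exercised by the added test file.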


@@ -208,6 +208,52 @@ def int_wasm_load64_zero :
[IntrReadMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
// These intrinsics do not mark their lane index arguments as immediate because
// that changes the corresponding SDNode from ISD::Constant to
// ISD::TargetConstant, which would require extra complications in the ISel
// tablegen patterns. TODO: Replace these intrinsics with normal ISel patterns
// once the load_lane instructions are merged to the proposal.
def int_wasm_load8_lane :
Intrinsic<[llvm_v16i8_ty],
[LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_load16_lane :
Intrinsic<[llvm_v8i16_ty],
[LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_load32_lane :
Intrinsic<[llvm_v4i32_ty],
[LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_load64_lane :
Intrinsic<[llvm_v2i64_ty],
[LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_store8_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_store16_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_store32_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_store64_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
//===----------------------------------------------------------------------===//
// Thread-local storage intrinsics
//===----------------------------------------------------------------------===//


@@ -421,6 +421,12 @@ public:
return error("Expected integer constant");
parseSingleInteger(false, Operands);
} else {
// v128.{load,store}{8,16,32,64}_lane has both a memarg and a lane
// index. We need to avoid parsing an extra alignment operand for the
// lane index.
auto IsLoadStoreLane = InstName.find("_lane") != StringRef::npos;
if (IsLoadStoreLane && Operands.size() == 4)
return false;
// Alignment not specified (or atomics, must use default alignment).
// We can't just call WebAssembly::GetDefaultP2Align since we don't have
// an opcode until after the assembly matcher, so set a default to fix


@@ -177,7 +177,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I32)
WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I64)
WASM_LOAD_STORE(LOAD_SPLAT_v8x16)
WASM_LOAD_STORE(LOAD_LANE_v16i8)
WASM_LOAD_STORE(STORE_LANE_v16i8)
return 0;
WASM_LOAD_STORE(LOAD16_S_I32)
WASM_LOAD_STORE(LOAD16_U_I32)
WASM_LOAD_STORE(LOAD16_S_I64)
@@ -203,7 +205,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I32)
WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I64)
WASM_LOAD_STORE(LOAD_SPLAT_v16x8)
WASM_LOAD_STORE(LOAD_LANE_v8i16)
WASM_LOAD_STORE(STORE_LANE_v8i16)
return 1;
WASM_LOAD_STORE(LOAD_I32)
WASM_LOAD_STORE(LOAD_F32)
WASM_LOAD_STORE(STORE_I32)
@@ -233,7 +237,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
WASM_LOAD_STORE(ATOMIC_WAIT_I32)
WASM_LOAD_STORE(LOAD_SPLAT_v32x4)
WASM_LOAD_STORE(LOAD_ZERO_v4i32)
WASM_LOAD_STORE(LOAD_LANE_v4i32)
WASM_LOAD_STORE(STORE_LANE_v4i32)
return 2;
WASM_LOAD_STORE(LOAD_I64)
WASM_LOAD_STORE(LOAD_F64)
WASM_LOAD_STORE(STORE_I64)
@@ -256,7 +262,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
WASM_LOAD_STORE(LOAD_EXTEND_S_v2i64)
WASM_LOAD_STORE(LOAD_EXTEND_U_v2i64)
WASM_LOAD_STORE(LOAD_ZERO_v2i64)
WASM_LOAD_STORE(LOAD_LANE_v2i64)
WASM_LOAD_STORE(STORE_LANE_v2i64)
return 3;
WASM_LOAD_STORE(LOAD_V128)
WASM_LOAD_STORE(STORE_V128)
return 4;


@@ -685,6 +685,56 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
Info.flags = MachineMemOperand::MOLoad;
return true;
case Intrinsic::wasm_load8_lane:
case Intrinsic::wasm_load16_lane:
case Intrinsic::wasm_load32_lane:
case Intrinsic::wasm_load64_lane:
case Intrinsic::wasm_store8_lane:
case Intrinsic::wasm_store16_lane:
case Intrinsic::wasm_store32_lane:
case Intrinsic::wasm_store64_lane: {
MVT MemVT;
Align MemAlign;
switch (Intrinsic) {
case Intrinsic::wasm_load8_lane:
case Intrinsic::wasm_store8_lane:
MemVT = MVT::i8;
MemAlign = Align(1);
break;
case Intrinsic::wasm_load16_lane:
case Intrinsic::wasm_store16_lane:
MemVT = MVT::i16;
MemAlign = Align(2);
break;
case Intrinsic::wasm_load32_lane:
case Intrinsic::wasm_store32_lane:
MemVT = MVT::i32;
MemAlign = Align(4);
break;
case Intrinsic::wasm_load64_lane:
case Intrinsic::wasm_store64_lane:
MemVT = MVT::i64;
MemAlign = Align(8);
break;
default:
llvm_unreachable("unexpected intrinsic");
}
if (Intrinsic == Intrinsic::wasm_load8_lane ||
Intrinsic == Intrinsic::wasm_load16_lane ||
Intrinsic == Intrinsic::wasm_load32_lane ||
Intrinsic == Intrinsic::wasm_load64_lane) {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.flags = MachineMemOperand::MOLoad;
} else {
Info.opc = ISD::INTRINSIC_VOID;
Info.flags = MachineMemOperand::MOStore;
}
Info.ptrVal = I.getArgOperand(0);
Info.memVT = MemVT;
Info.offset = 0;
Info.align = MemAlign;
return true;
}
default:
return false;
}


@@ -53,7 +53,7 @@ defm LOAD_V128_A64 :
"v128.load\t$off$p2align", 0>;
}
// Def load patterns from WebAssemblyInstrMemory.td for vector types
foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
defm : LoadPatNoOffset<vec_t, load, "LOAD_V128">;
defm : LoadPatImmOff<vec_t, load, regPlusImm, "LOAD_V128">;
@@ -201,6 +201,51 @@ defm : LoadPatOffsetOnly<v2i64, int_wasm_load64_zero, "LOAD_ZERO_v2i64">;
defm : LoadPatGlobalAddrOffOnly<v4i32, int_wasm_load32_zero, "LOAD_ZERO_v4i32">;
defm : LoadPatGlobalAddrOffOnly<v2i64, int_wasm_load64_zero, "LOAD_ZERO_v2i64">;
// Load lane
multiclass SIMDLoadLane<ValueType vec_t, string name, bits<32> simdop> {
let mayLoad = 1, UseNamedOperandTable = 1 in {
defm LOAD_LANE_#vec_t#_A32 :
SIMD_I<(outs V128:$dst),
(ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
I32:$addr, V128:$vec),
(outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
[], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx",
name#"\t$off$p2align, $idx", simdop>;
defm LOAD_LANE_#vec_t#_A64 :
SIMD_I<(outs V128:$dst),
(ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
I64:$addr, V128:$vec),
(outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
[], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx",
name#"\t$off$p2align, $idx", simdop>;
} // mayLoad = 1, UseNamedOperandTable = 1
}
// TODO: Also support v4f32 and v2f64 once the instructions are merged
// to the proposal
defm "" : SIMDLoadLane<v16i8, "v128.load8_lane", 88>;
defm "" : SIMDLoadLane<v8i16, "v128.load16_lane", 89>;
defm "" : SIMDLoadLane<v4i32, "v128.load32_lane", 90>;
defm "" : SIMDLoadLane<v2i64, "v128.load64_lane", 91>;
// Select loads with no constant offset.
multiclass LoadLanePatNoOffset<ValueType ty, PatFrag kind, ImmLeaf lane_imm_t> {
def : Pat<(ty (kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))),
(!cast<NI>("LOAD_LANE_"#ty#"_A32") 0, 0, imm:$idx, I32:$addr, V128:$vec)>,
Requires<[HasAddr32]>;
def : Pat<(ty (kind (i64 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))),
(!cast<NI>("LOAD_LANE_"#ty#"_A64") 0, 0, imm:$idx, I64:$addr, V128:$vec)>,
Requires<[HasAddr64]>;
}
defm : LoadLanePatNoOffset<v16i8, int_wasm_load8_lane, LaneIdx16>;
defm : LoadLanePatNoOffset<v8i16, int_wasm_load16_lane, LaneIdx8>;
defm : LoadLanePatNoOffset<v4i32, int_wasm_load32_lane, LaneIdx4>;
defm : LoadLanePatNoOffset<v2i64, int_wasm_load64_lane, LaneIdx2>;
// TODO: Also support the other load patterns for load_lane once the instructions
// are merged to the proposal.
// Store: v128.store
let mayStore = 1, UseNamedOperandTable = 1 in {
defm STORE_V128_A32 :
@@ -214,8 +259,9 @@ defm STORE_V128_A64 :
"v128.store\t${off}(${addr})$p2align, $vec",
"v128.store\t$off$p2align", 11>;
}
// Def store patterns from WebAssemblyInstrMemory.td for vector types
foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
defm : StorePatNoOffset<vec_t, store, "STORE_V128">;
defm : StorePatImmOff<vec_t, store, regPlusImm, "STORE_V128">;
defm : StorePatImmOff<vec_t, store, or_is_add, "STORE_V128">;
@@ -223,6 +269,53 @@ defm : StorePatOffsetOnly<vec_t, store, "STORE_V128">;
defm : StorePatGlobalAddrOffOnly<vec_t, store, "STORE_V128">;
}
// Store lane
multiclass SIMDStoreLane<ValueType vec_t, string name, bits<32> simdop> {
let mayStore = 1, UseNamedOperandTable = 1 in {
defm STORE_LANE_#vec_t#_A32 :
SIMD_I<(outs),
(ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
I32:$addr, V128:$vec),
(outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
[], name#"\t${off}(${addr})$p2align, $vec, $idx",
name#"\t$off$p2align, $idx", simdop>;
defm STORE_LANE_#vec_t#_A64 :
SIMD_I<(outs),
(ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
I64:$addr, V128:$vec),
(outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
[], name#"\t${off}(${addr})$p2align, $vec, $idx",
name#"\t$off$p2align, $idx", simdop>;
} // mayStore = 1, UseNamedOperandTable = 1
}
// TODO: Also support v4f32 and v2f64 once the instructions are merged
// to the proposal
defm "" : SIMDStoreLane<v16i8, "v128.store8_lane", 92>;
defm "" : SIMDStoreLane<v8i16, "v128.store16_lane", 93>;
defm "" : SIMDStoreLane<v4i32, "v128.store32_lane", 94>;
defm "" : SIMDStoreLane<v2i64, "v128.store64_lane", 95>;
// Select stores with no constant offset.
multiclass StoreLanePatNoOffset<ValueType ty, PatFrag kind, ImmLeaf lane_imm_t> {
def : Pat<(kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)),
(!cast<NI>("STORE_LANE_"#ty#"_A32")
0, 0, imm:$idx, I32:$addr, ty:$vec)>,
Requires<[HasAddr32]>;
def : Pat<(kind (i64 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)),
(!cast<NI>("STORE_LANE_"#ty#"_A64")
0, 0, imm:$idx, I64:$addr, ty:$vec)>,
Requires<[HasAddr64]>;
}
defm : StoreLanePatNoOffset<v16i8, int_wasm_store8_lane, LaneIdx16>;
defm : StoreLanePatNoOffset<v8i16, int_wasm_store16_lane, LaneIdx8>;
defm : StoreLanePatNoOffset<v4i32, int_wasm_store32_lane, LaneIdx4>;
defm : StoreLanePatNoOffset<v2i64, int_wasm_store64_lane, LaneIdx2>;
// TODO: Also support the other store patterns for store_lane once the
// instructions are merged to the proposal.
//===----------------------------------------------------------------------===//
// Constructing SIMD values
//===----------------------------------------------------------------------===//

View File

@@ -0,0 +1,968 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s
; Test SIMD v128.load{8,16,32,64}_lane instructions.
; TODO: Use the offset field by supporting more patterns. Right now only the
; equivalents of LoadPatNoOffset/StorePatNoOffset are supported.
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"
declare <16 x i8> @llvm.wasm.load8.lane(i8*, <16 x i8>, i32)
declare <8 x i16> @llvm.wasm.load16.lane(i16*, <8 x i16>, i32)
declare <4 x i32> @llvm.wasm.load32.lane(i32*, <4 x i32>, i32)
declare <2 x i64> @llvm.wasm.load64.lane(i64*, <2 x i64>, i32)
declare void @llvm.wasm.store8.lane(i8*, <16 x i8>, i32)
declare void @llvm.wasm.store16.lane(i16*, <8 x i16>, i32)
declare void @llvm.wasm.store32.lane(i32*, <4 x i32>, i32)
declare void @llvm.wasm.store64.lane(i64*, <2 x i64>, i32)
;===----------------------------------------------------------------------------
; v128.load8_lane / v128.store8_lane
;===----------------------------------------------------------------------------
define <16 x i8> @load_lane_i8_no_offset(i8* %p, <16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_no_offset:
; CHECK: .functype load_lane_i8_no_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %p, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p, <16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_with_folded_offset:
; CHECK: .functype load_lane_i8_with_folded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i8* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i8*
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p, <16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_with_folded_gep_offset:
; CHECK: .functype load_lane_i8_with_folded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 6
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i8, i8* %p, i32 6
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p, <16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_with_unfolded_gep_negative_offset:
; CHECK: .functype load_lane_i8_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const -6
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i8, i8* %p, i32 -6
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p, <16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_with_unfolded_offset:
; CHECK: .functype load_lane_i8_with_unfolded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i8* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i8*
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p, <16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_with_unfolded_gep_offset:
; CHECK: .functype load_lane_i8_with_unfolded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 6
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i8, i8* %p, i32 6
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
define <16 x i8> @load_lane_i8_from_numeric_address(<16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_from_numeric_address:
; CHECK: .functype load_lane_i8_from_numeric_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i8*
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
@gv_i8 = global i8 0
define <16 x i8> @load_lane_i8_from_global_address(<16 x i8> %v) {
; CHECK-LABEL: load_lane_i8_from_global_address:
; CHECK: .functype load_lane_i8_from_global_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i8
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* @gv_i8, <16 x i8> %v, i32 0)
ret <16 x i8> %t
}
define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_no_offset:
; CHECK: .functype store_lane_i8_no_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store8.lane(i8* %p, <16 x i8> %v, i32 0)
ret void
}
define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_with_folded_offset:
; CHECK: .functype store_lane_i8_with_folded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i8* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i8*
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
ret void
}
define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_with_folded_gep_offset:
; CHECK: .functype store_lane_i8_with_folded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 6
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i8, i8* %p, i32 6
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
ret void
}
define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_with_unfolded_gep_negative_offset:
; CHECK: .functype store_lane_i8_with_unfolded_gep_negative_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const -6
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i8, i8* %p, i32 -6
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
ret void
}
define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_with_unfolded_offset:
; CHECK: .functype store_lane_i8_with_unfolded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i8* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i8*
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
ret void
}
define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_with_unfolded_gep_offset:
; CHECK: .functype store_lane_i8_with_unfolded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 6
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i8, i8* %p, i32 6
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
ret void
}
define void @store_lane_i8_to_numeric_address(<16 x i8> %v) {
; CHECK-LABEL: store_lane_i8_to_numeric_address:
; CHECK: .functype store_lane_i8_to_numeric_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i8*
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
ret void
}
define void @store_lane_i8_from_global_address(<16 x i8> %v) {
; CHECK-LABEL: store_lane_i8_from_global_address:
; CHECK: .functype store_lane_i8_from_global_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i8
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store8.lane(i8* @gv_i8, <16 x i8> %v, i32 0)
ret void
}
;===----------------------------------------------------------------------------
; v128.load16_lane / v128.store16_lane
;===----------------------------------------------------------------------------
define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_no_offset:
; CHECK: .functype load_lane_i16_no_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %p, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_with_folded_offset:
; CHECK: .functype load_lane_i16_with_folded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i16*
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_with_folded_gep_offset:
; CHECK: .functype load_lane_i16_with_folded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 6
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_with_unfolded_gep_negative_offset:
; CHECK: .functype load_lane_i16_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const -12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 -6
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_with_unfolded_offset:
; CHECK: .functype load_lane_i16_with_unfolded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i16*
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_with_unfolded_gep_offset:
; CHECK: .functype load_lane_i16_with_unfolded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i16, i16* %p, i32 6
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
define <8 x i16> @load_lane_i16_from_numeric_address(<8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_from_numeric_address:
; CHECK: .functype load_lane_i16_from_numeric_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i16*
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
@gv_i16 = global i16 0
define <8 x i16> @load_lane_i16_from_global_address(<8 x i16> %v) {
; CHECK-LABEL: load_lane_i16_from_global_address:
; CHECK: .functype load_lane_i16_from_global_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i16
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
ret <8 x i16> %t
}
define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_no_offset:
; CHECK: .functype store_lane_i16_no_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store16.lane(i16* %p, <8 x i16> %v, i32 0)
ret void
}
define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_with_folded_offset:
; CHECK: .functype store_lane_i16_with_folded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i16*
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
ret void
}
define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_with_folded_gep_offset:
; CHECK: .functype store_lane_i16_with_folded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
ret void
}
define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_with_unfolded_gep_negative_offset:
; CHECK: .functype store_lane_i16_with_unfolded_gep_negative_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const -12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 -6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
ret void
}
define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_with_unfolded_offset:
; CHECK: .functype store_lane_i16_with_unfolded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i16*
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
ret void
}
define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_with_unfolded_gep_offset:
; CHECK: .functype store_lane_i16_with_unfolded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i16, i16* %p, i32 6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
ret void
}
define void @store_lane_i16_to_numeric_address(<8 x i16> %v) {
; CHECK-LABEL: store_lane_i16_to_numeric_address:
; CHECK: .functype store_lane_i16_to_numeric_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i16*
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
ret void
}
define void @store_lane_i16_from_global_address(<8 x i16> %v) {
; CHECK-LABEL: store_lane_i16_from_global_address:
; CHECK: .functype store_lane_i16_from_global_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i16
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
ret void
}
;===----------------------------------------------------------------------------
; v128.load32_lane / v128.store32_lane
;===----------------------------------------------------------------------------
define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_no_offset:
; CHECK: .functype load_lane_i32_no_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %p, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_with_folded_offset:
; CHECK: .functype load_lane_i32_with_folded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i32*
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_with_folded_gep_offset:
; CHECK: .functype load_lane_i32_with_folded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 6
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_with_unfolded_gep_negative_offset:
; CHECK: .functype load_lane_i32_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const -24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 -6
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_with_unfolded_offset:
; CHECK: .functype load_lane_i32_with_unfolded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i32*
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_with_unfolded_gep_offset:
; CHECK: .functype load_lane_i32_with_unfolded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i32, i32* %p, i32 6
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
define <4 x i32> @load_lane_i32_from_numeric_address(<4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_from_numeric_address:
; CHECK: .functype load_lane_i32_from_numeric_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i32*
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
@gv_i32 = global i32 0
define <4 x i32> @load_lane_i32_from_global_address(<4 x i32> %v) {
; CHECK-LABEL: load_lane_i32_from_global_address:
; CHECK: .functype load_lane_i32_from_global_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i32
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
ret <4 x i32> %t
}
define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_no_offset:
; CHECK: .functype store_lane_i32_no_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store32.lane(i32* %p, <4 x i32> %v, i32 0)
ret void
}
define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_with_folded_offset:
; CHECK: .functype store_lane_i32_with_folded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i32*
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
ret void
}
define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_with_folded_gep_offset:
; CHECK: .functype store_lane_i32_with_folded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
ret void
}
define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_with_unfolded_gep_negative_offset:
; CHECK: .functype store_lane_i32_with_unfolded_gep_negative_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const -24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 -6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
ret void
}
define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_with_unfolded_offset:
; CHECK: .functype store_lane_i32_with_unfolded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i32*
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
ret void
}
define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_with_unfolded_gep_offset:
; CHECK: .functype store_lane_i32_with_unfolded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i32, i32* %p, i32 6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
ret void
}
define void @store_lane_i32_to_numeric_address(<4 x i32> %v) {
; CHECK-LABEL: store_lane_i32_to_numeric_address:
; CHECK: .functype store_lane_i32_to_numeric_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i32*
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
ret void
}
define void @store_lane_i32_from_global_address(<4 x i32> %v) {
; CHECK-LABEL: store_lane_i32_from_global_address:
; CHECK: .functype store_lane_i32_from_global_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i32
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
ret void
}
;===----------------------------------------------------------------------------
; v128.load64_lane / v128.store64_lane
;===----------------------------------------------------------------------------
define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_no_offset:
; CHECK: .functype load_lane_i64_no_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %p, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_with_folded_offset:
; CHECK: .functype load_lane_i64_with_folded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i64*
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_with_folded_gep_offset:
; CHECK: .functype load_lane_i64_with_folded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 6
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_with_unfolded_gep_negative_offset:
; CHECK: .functype load_lane_i64_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const -48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 -6
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_with_unfolded_offset:
; CHECK: .functype load_lane_i64_with_unfolded_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i64*
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_with_unfolded_gep_offset:
; CHECK: .functype load_lane_i64_with_unfolded_gep_offset (i32, v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 1
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i64, i64* %p, i32 6
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
define <2 x i64> @load_lane_i64_from_numeric_address(<2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_from_numeric_address:
; CHECK: .functype load_lane_i64_from_numeric_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i64*
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
@gv_i64 = global i64 0
define <2 x i64> @load_lane_i64_from_global_address(<2 x i64> %v) {
; CHECK-LABEL: load_lane_i64_from_global_address:
; CHECK: .functype load_lane_i64_from_global_address (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i64
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.load64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
ret <2 x i64> %t
}
define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_no_offset:
; CHECK: .functype store_lane_i64_no_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store64.lane(i64* %p, <2 x i64> %v, i32 0)
ret void
}
define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_with_folded_offset:
; CHECK: .functype store_lane_i64_with_folded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i64*
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
ret void
}
define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_with_folded_gep_offset:
; CHECK: .functype store_lane_i64_with_folded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
ret void
}
define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_with_unfolded_gep_negative_offset:
; CHECK: .functype store_lane_i64_with_unfolded_gep_negative_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const -48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 -6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
ret void
}
define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_with_unfolded_offset:
; CHECK: .functype store_lane_i64_with_unfolded_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i64*
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
ret void
}
define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_with_unfolded_gep_offset:
; CHECK: .functype store_lane_i64_with_unfolded_gep_offset (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i64, i64* %p, i32 6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
ret void
}
define void @store_lane_i64_to_numeric_address(<2 x i64> %v) {
; CHECK-LABEL: store_lane_i64_to_numeric_address:
; CHECK: .functype store_lane_i64_to_numeric_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i64*
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
ret void
}
define void @store_lane_i64_from_global_address(<2 x i64> %v) {
; CHECK-LABEL: store_lane_i64_from_global_address:
; CHECK: .functype store_lane_i64_from_global_address (v128) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i64
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
ret void
}

View File

@@ -280,6 +280,30 @@ main:
# CHECK: v128.bitselect # encoding: [0xfd,0x52]
v128.bitselect
# CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01]
v128.load8_lane 32, 1
# CHECK: v128.load16_lane 32, 1 # encoding: [0xfd,0x59,0x01,0x20,0x01]
v128.load16_lane 32, 1
# CHECK: v128.load32_lane 32, 1 # encoding: [0xfd,0x5a,0x02,0x20,0x01]
v128.load32_lane 32, 1
# CHECK: v128.load64_lane 32, 1 # encoding: [0xfd,0x5b,0x03,0x20,0x01]
v128.load64_lane 32, 1
# CHECK: v128.store8_lane 32, 1 # encoding: [0xfd,0x5c,0x00,0x20,0x01]
v128.store8_lane 32, 1
# CHECK: v128.store16_lane 32, 1 # encoding: [0xfd,0x5d,0x01,0x20,0x01]
v128.store16_lane 32, 1
# CHECK: v128.store32_lane 32, 1 # encoding: [0xfd,0x5e,0x02,0x20,0x01]
v128.store32_lane 32, 1
# CHECK: v128.store64_lane 32, 1 # encoding: [0xfd,0x5f,0x03,0x20,0x01]
v128.store64_lane 32, 1
# CHECK: i8x16.abs # encoding: [0xfd,0x60]
i8x16.abs
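
For reference, the new encodings above follow the memarg-plus-lane-index operand layout the commit message describes. A sketch of the byte breakdown for the first case, inferred from the simdop values and default alignments added in this commit:

# v128.load8_lane 32, 1  -->  [0xfd, 0x58, 0x00, 0x20, 0x01]
#   0xfd  SIMD opcode prefix
#   0x58  load8_lane opcode (88, the simdop given to SIMDLoadLane)
#   0x00  memarg alignment (log2 alignment, 0 for the 1-byte default)
#   0x20  memarg offset (32)
#   0x01  lane index (1)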