mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-25 04:02:41 +01:00)
[WebAssembly] Codegen for v128.storeX_lane instructions
Replace the experimental clang builtins and LLVM intrinsics for these instructions with normal codegen patterns. Resolves PR50435.

Differential Revision: https://reviews.llvm.org/D106019
This commit is contained in:
parent
daf8a095a1
commit
3c50e4a7a7
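For illustration, a minimal sketch of what this change means at the IR level (the function names @with_intrinsic and @with_plain_ir are hypothetical; the intrinsic signature and the extract-plus-store form are taken from the tests in this diff): before the change, v128.store8_lane was emitted via the experimental intrinsic; after it, an ordinary lane extract followed by a scalar store selects to the same instruction through the new patterns.

declare void @llvm.wasm.store8.lane(i8*, <16 x i8>, i32)

; Before this change: the instruction was reached through the (now removed) intrinsic.
define void @with_intrinsic(<16 x i8> %v, i8* %p) {
  call void @llvm.wasm.store8.lane(i8* %p, <16 x i8> %v, i32 0)
  ret void
}

; After this change: plain IR selects to v128.store8_lane via the new store-lane patterns.
define void @with_plain_ir(<16 x i8> %v, i8* %p) {
  %x = extractelement <16 x i8> %v, i32 0
  store i8 %x, i8* %p, align 1
  ret void
}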
@@ -186,32 +186,6 @@ def int_wasm_load64_zero :
[IntrReadMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;

// These intrinsics do not mark their lane index arguments as immediate because
// that changes the corresponding SDNode from ISD::Constant to
// ISD::TargetConstant, which would require extra complications in the ISel
// tablegen patterns. TODO: Replace these intrinsic with normal ISel patterns
// once the load_lane instructions are merged to the proposal.
def int_wasm_store8_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_store16_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_store32_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
def int_wasm_store64_lane :
Intrinsic<[],
[LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;

// TODO: Replace this intrinsic with normal ISel patterns once popcnt is merged
// to the proposal.
def int_wasm_popcnt :
@@ -767,35 +767,6 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.align = Align(1);
Info.flags = MachineMemOperand::MOLoad;
return true;
case Intrinsic::wasm_store8_lane:
case Intrinsic::wasm_store16_lane:
case Intrinsic::wasm_store32_lane:
case Intrinsic::wasm_store64_lane: {
MVT MemVT;
switch (Intrinsic) {
case Intrinsic::wasm_store8_lane:
MemVT = MVT::i8;
break;
case Intrinsic::wasm_store16_lane:
MemVT = MVT::i16;
break;
case Intrinsic::wasm_store32_lane:
MemVT = MVT::i32;
break;
case Intrinsic::wasm_store64_lane:
MemVT = MVT::i64;
break;
default:
llvm_unreachable("unexpected intrinsic");
}
Info.opc = ISD::INTRINSIC_VOID;
Info.memVT = MemVT;
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.align = Align(1);
Info.flags = MachineMemOperand::MOStore;
return true;
}
default:
return false;
}
@@ -385,15 +385,13 @@ multiclass SIMDStoreLane<Vec vec, bits<32> simdop> {
} // mayStore = 1, UseNamedOperandTable = 1
}

// TODO: Also support v4f32 and v2f64 once the instructions are merged
// to the proposal
defm "" : SIMDStoreLane<I8x16, 0x58>;
defm "" : SIMDStoreLane<I16x8, 0x59>;
defm "" : SIMDStoreLane<I32x4, 0x5a>;
defm "" : SIMDStoreLane<I64x2, 0x5b>;

// Select stores with no constant offset.
multiclass StoreLanePatNoOffset<Vec vec, Intrinsic kind> {
multiclass StoreLanePatNoOffset<Vec vec, SDPatternOperator kind> {
def : Pat<(kind (i32 I32:$addr), (vec.vt V128:$vec), (i32 vec.lane_idx:$idx)),
(!cast<NI>("STORE_LANE_"#vec#"_A32") 0, 0, imm:$idx, $addr, $vec)>,
Requires<[HasAddr32]>;
@@ -402,13 +400,26 @@ multiclass StoreLanePatNoOffset<Vec vec, Intrinsic kind> {
Requires<[HasAddr64]>;
}

defm : StoreLanePatNoOffset<I8x16, int_wasm_store8_lane>;
defm : StoreLanePatNoOffset<I16x8, int_wasm_store16_lane>;
defm : StoreLanePatNoOffset<I32x4, int_wasm_store32_lane>;
defm : StoreLanePatNoOffset<I64x2, int_wasm_store64_lane>;
def store8_lane :
PatFrag<(ops node:$ptr, node:$vec, node:$idx),
(truncstorei8 (i32 (vector_extract $vec, $idx)), $ptr)>;
def store16_lane :
PatFrag<(ops node:$ptr, node:$vec, node:$idx),
(truncstorei16 (i32 (vector_extract $vec, $idx)), $ptr)>;
def store32_lane :
PatFrag<(ops node:$ptr, node:$vec, node:$idx),
(store (i32 (vector_extract $vec, $idx)), $ptr)>;
def store64_lane :
PatFrag<(ops node:$ptr, node:$vec, node:$idx),
(store (i64 (vector_extract $vec, $idx)), $ptr)>;
// TODO: floating point lanes as well

// TODO: Also support the other store patterns for store_lane once the
// instructions are merged to the proposal.
let AddedComplexity = 1 in {
defm : StoreLanePatNoOffset<I8x16, store8_lane>;
defm : StoreLanePatNoOffset<I16x8, store16_lane>;
defm : StoreLanePatNoOffset<I32x4, store32_lane>;
defm : StoreLanePatNoOffset<I64x2, store64_lane>;
}

//===----------------------------------------------------------------------===//
// Constructing SIMD values
@@ -13,8 +13,7 @@ target triple = "wasm32-unknown-unknown"
; t8: ch = store<(store 8 into `i64* undef`, align 1)> t3:1, t24, undef:i32, undef:i32
; t9: ch = WebAssemblyISD::RETURN t8

; CHECK: i64x2.extract_lane
; CHECK-NEXT: i64.store
; CHECK: v128.store64_lane
define void @build_pair_i32s() {
entry:
%0 = load <4 x i32>, <4 x i32>* undef, align 16
@@ -8,11 +8,6 @@

target triple = "wasm32-unknown-unknown"

declare void @llvm.wasm.store8.lane(i8*, <16 x i8>, i32)
declare void @llvm.wasm.store16.lane(i16*, <8 x i16>, i32)
declare void @llvm.wasm.store32.lane(i32*, <4 x i32>, i32)
declare void @llvm.wasm.store64.lane(i64*, <2 x i64>, i32)

;===----------------------------------------------------------------------------
; v128.load8_lane / v128.store8_lane
;===----------------------------------------------------------------------------
@@ -150,7 +145,8 @@ define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) {
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store8.lane(i8* %p, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %p
ret void
}

@@ -167,7 +163,8 @@ define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) {
%q = ptrtoint i8* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i8*
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %s
ret void
}

@@ -182,7 +179,8 @@ define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) {
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i8, i8* %p, i32 6
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %s
ret void
}

@@ -197,7 +195,8 @@ define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i8, i8* %p, i32 -6
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %s
ret void
}

@@ -214,7 +213,8 @@ define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) {
%q = ptrtoint i8* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i8*
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %s
ret void
}

@@ -229,7 +229,8 @@ define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) {
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i8, i8* %p, i32 6
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %s
ret void
}

@@ -242,7 +243,8 @@ define void @store_lane_i8_to_numeric_address(<16 x i8> %v) {
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i8*
tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %s
ret void
}

@@ -254,7 +256,8 @@ define void @store_lane_i8_from_global_address(<16 x i8> %v) {
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store8.lane(i8* @gv_i8, <16 x i8> %v, i32 0)
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* @gv_i8
ret void
}

@@ -393,9 +396,10 @@ define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store16.lane(i16* %p, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %p
ret void
}

@@ -407,12 +411,13 @@ define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i16*
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %s
ret void
}

@@ -424,10 +429,11 @@ define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) {
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %s
ret void
}

@@ -439,10 +445,11 @@ define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16*
; CHECK-NEXT: i32.const -12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i16, i16* %p, i32 -6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %s
ret void
}

@@ -454,12 +461,13 @@ define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i16* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i16*
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %s
ret void
}

@@ -471,10 +479,11 @@ define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) {
; CHECK-NEXT: i32.const 12
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i16, i16* %p, i32 6
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %s
ret void
}

@@ -484,10 +493,11 @@ define void @store_lane_i16_to_numeric_address(<8 x i16> %v) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i16*
tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %s
ret void
}

@@ -497,9 +507,10 @@ define void @store_lane_i16_from_global_address(<8 x i16> %v) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i16
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* @gv_i16
ret void
}

@@ -638,9 +649,10 @@ define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store32.lane(i32* %p, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %p
ret void
}

@@ -652,12 +664,13 @@ define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i32*
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %s
ret void
}

@@ -669,10 +682,11 @@ define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %s
ret void
}

@@ -684,10 +698,11 @@ define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32*
; CHECK-NEXT: i32.const -24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i32, i32* %p, i32 -6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %s
ret void
}

@@ -699,12 +714,13 @@ define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i32*
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %s
ret void
}

@@ -716,10 +732,11 @@ define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i32, i32* %p, i32 6
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %s
ret void
}

@@ -729,10 +746,11 @@ define void @store_lane_i32_to_numeric_address(<4 x i32> %v) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i32*
tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %s
ret void
}

@@ -742,9 +760,10 @@ define void @store_lane_i32_from_global_address(<4 x i32> %v) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i32
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* @gv_i32
ret void
}

@@ -883,9 +902,10 @@ define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store64.lane(i64* %p, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %p
ret void
}

@@ -897,12 +917,13 @@ define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i64*
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %s
ret void
}

@@ -914,10 +935,11 @@ define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) {
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %s
ret void
}

@@ -929,10 +951,11 @@ define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64*
; CHECK-NEXT: i32.const -48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr inbounds i64, i64* %p, i32 -6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %s
ret void
}

@@ -944,12 +967,13 @@ define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) {
; CHECK-NEXT: i32.const 24
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%q = ptrtoint i64* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i64*
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %s
ret void
}

@@ -961,10 +985,11 @@ define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) {
; CHECK-NEXT: i32.const 48
; CHECK-NEXT: i32.add
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = getelementptr i64, i64* %p, i32 6
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %s
ret void
}

@@ -974,10 +999,11 @@ define void @store_lane_i64_to_numeric_address(<2 x i64> %v) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const 42
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%s = inttoptr i32 42 to i64*
tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %s
ret void
}

@@ -987,8 +1013,9 @@ define void @store_lane_i64_from_global_address(<2 x i64> %v) {
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: i32.const gv_i64
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
tail call void @llvm.wasm.store64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* @gv_i64
ret void
}
@@ -161,6 +161,34 @@ define <16 x i8> @load_lane_i8_a2(i8* %p, <16 x i8> %v) {
ret <16 x i8> %v1
}

; 1 is the default alignment for v128.store8_lane so no attribute is needed.
define void @store_lane_i8_a1(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_a1:
; CHECK: .functype store_lane_i8_a1 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %p, align 1
ret void
}

; 2 is greater than the default alignment so it is ignored.
define void @store_lane_i8_a2(<16 x i8> %v, i8* %p) {
; CHECK-LABEL: store_lane_i8_a2:
; CHECK: .functype store_lane_i8_a2 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store8_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <16 x i8> %v, i32 0
store i8 %x, i8* %p, align 2
ret void
}

; ==============================================================================
; 8 x i16
; ==============================================================================
@@ -462,6 +490,47 @@ define <8 x i16> @load_lane_i16_a4(i16* %p, <8 x i16> %v) {
ret <8 x i16> %v1
}

define void @store_lane_i16_a1(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_a1:
; CHECK: .functype store_lane_i16_a1 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %p, align 1
ret void
}

; 2 is the default alignment for v128.store16_lane so no attribute is needed.
define void @store_lane_i16_a2(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_a2:
; CHECK: .functype store_lane_i16_a2 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %p, align 2
ret void
}

; 4 is greater than the default alignment so it is ignored.
define void @store_lane_i16_a4(<8 x i16> %v, i16* %p) {
; CHECK-LABEL: store_lane_i16_a4:
; CHECK: .functype store_lane_i16_a4 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store16_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <8 x i16> %v, i32 0
store i16 %x, i16* %p, align 4
ret void
}

; ==============================================================================
; 4 x i32
; ==============================================================================
@@ -789,6 +858,60 @@ define <4 x i32> @load_lane_i32_a8(i32* %p, <4 x i32> %v) {
ret <4 x i32> %v1
}

define void @store_lane_i32_a1(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_a1:
; CHECK: .functype store_lane_i32_a1 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %p, align 1
ret void
}

define void @store_lane_i32_a2(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_a2:
; CHECK: .functype store_lane_i32_a2 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0:p2align=1, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %p, align 2
ret void
}

; 4 is the default alignment for v128.store32_lane so no attribute is needed.
define void @store_lane_i32_a4(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_a4:
; CHECK: .functype store_lane_i32_a4 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %p, align 4
ret void
}

; 8 is greater than the default alignment so it is ignored.
define void @store_lane_i32_a8(<4 x i32> %v, i32* %p) {
; CHECK-LABEL: store_lane_i32_a8:
; CHECK: .functype store_lane_i32_a8 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store32_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <4 x i32> %v, i32 0
store i32 %x, i32* %p, align 8
ret void
}

; ==============================================================================
; 2 x i64
; ==============================================================================
@@ -1023,6 +1146,73 @@ define <2 x i64> @load_lane_i64_a16(i64* %p, <2 x i64> %v) {
ret <2 x i64> %v1
}

define void @store_lane_i64_a1(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_a1:
; CHECK: .functype store_lane_i64_a1 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %p, align 1
ret void
}

define void @store_lane_i64_a2(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_a2:
; CHECK: .functype store_lane_i64_a2 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=1, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %p, align 2
ret void
}

define void @store_lane_i64_a4(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_a4:
; CHECK: .functype store_lane_i64_a4 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0:p2align=2, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %p, align 4
ret void
}

; 8 is the default alignment for v128.store64_lane so no attribute is needed.
define void @store_lane_i64_a8(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_a8:
; CHECK: .functype store_lane_i64_a8 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %p, align 8
ret void
}

; 16 is greater than the default alignment so it is ignored.
define void @store_lane_i64_a16(<2 x i64> %v, i64* %p) {
; CHECK-LABEL: store_lane_i64_a16:
; CHECK: .functype store_lane_i64_a16 (v128, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 1
; CHECK-NEXT: local.get 0
; CHECK-NEXT: v128.store64_lane 0, 0
; CHECK-NEXT: # fallthrough-return
%x = extractelement <2 x i64> %v, i32 0
store i64 %x, i64* %p, align 16
ret void
}

; ==============================================================================
; 4 x float
; ==============================================================================