[WebAssembly] Codegen for v128.loadX_lane instructions

Replace the experimental clang builtin and LLVM intrinsics for these
instructions with normal codegen patterns. Resolves PR50433.

Differential Revision: https://reviews.llvm.org/D105950

parent 81bb5f99ad
commit cf44692539
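Illustrative sketch (not part of the commit): with the new ISel patterns, a
plain scalar load feeding an insertelement is selected directly to the
corresponding v128.loadX_lane instruction, with no @llvm.wasm.loadX.lane
intrinsic involved. The function name below is hypothetical; the IR shape
mirrors the updated tests in this diff.

; Assumes llc -mtriple=wasm32-unknown-unknown -mattr=+simd128
define <8 x i16> @demo_load16_lane(i16* %p, <8 x i16> %v) {
  %x = load i16, i16* %p                          ; scalar lane value
  %t = insertelement <8 x i16> %v, i16 %x, i32 0  ; replace lane 0
  ret <8 x i16> %t                                ; selects to: v128.load16_lane 0, 0
}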
include/llvm/IR/IntrinsicsWebAssembly.td
@@ -191,26 +191,6 @@ def int_wasm_load64_zero :
 // ISD::TargetConstant, which would require extra complications in the ISel
 // tablegen patterns. TODO: Replace these intrinsic with normal ISel patterns
 // once the load_lane instructions are merged to the proposal.
-def int_wasm_load8_lane :
-  Intrinsic<[llvm_v16i8_ty],
-            [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
-            [IntrReadMem, IntrArgMemOnly],
-            "", [SDNPMemOperand]>;
-def int_wasm_load16_lane :
-  Intrinsic<[llvm_v8i16_ty],
-            [LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
-            [IntrReadMem, IntrArgMemOnly],
-            "", [SDNPMemOperand]>;
-def int_wasm_load32_lane :
-  Intrinsic<[llvm_v4i32_ty],
-            [LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
-            [IntrReadMem, IntrArgMemOnly],
-            "", [SDNPMemOperand]>;
-def int_wasm_load64_lane :
-  Intrinsic<[llvm_v2i64_ty],
-            [LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
-            [IntrReadMem, IntrArgMemOnly],
-            "", [SDNPMemOperand]>;
 def int_wasm_store8_lane :
   Intrinsic<[],
             [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -767,49 +767,33 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.align = Align(1);
     Info.flags = MachineMemOperand::MOLoad;
     return true;
-  case Intrinsic::wasm_load8_lane:
-  case Intrinsic::wasm_load16_lane:
-  case Intrinsic::wasm_load32_lane:
-  case Intrinsic::wasm_load64_lane:
   case Intrinsic::wasm_store8_lane:
   case Intrinsic::wasm_store16_lane:
   case Intrinsic::wasm_store32_lane:
   case Intrinsic::wasm_store64_lane: {
     MVT MemVT;
     switch (Intrinsic) {
-    case Intrinsic::wasm_load8_lane:
     case Intrinsic::wasm_store8_lane:
       MemVT = MVT::i8;
       break;
-    case Intrinsic::wasm_load16_lane:
     case Intrinsic::wasm_store16_lane:
       MemVT = MVT::i16;
       break;
-    case Intrinsic::wasm_load32_lane:
     case Intrinsic::wasm_store32_lane:
       MemVT = MVT::i32;
      break;
-    case Intrinsic::wasm_load64_lane:
     case Intrinsic::wasm_store64_lane:
       MemVT = MVT::i64;
       break;
     default:
       llvm_unreachable("unexpected intrinsic");
     }
-    if (Intrinsic == Intrinsic::wasm_load8_lane ||
-        Intrinsic == Intrinsic::wasm_load16_lane ||
-        Intrinsic == Intrinsic::wasm_load32_lane ||
-        Intrinsic == Intrinsic::wasm_load64_lane) {
-      Info.opc = ISD::INTRINSIC_W_CHAIN;
-      Info.flags = MachineMemOperand::MOLoad;
-    } else {
-      Info.opc = ISD::INTRINSIC_VOID;
-      Info.flags = MachineMemOperand::MOStore;
-    }
-    Info.ptrVal = I.getArgOperand(0);
+    Info.opc = ISD::INTRINSIC_VOID;
     Info.memVT = MemVT;
+    Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
     Info.align = Align(1);
+    Info.flags = MachineMemOperand::MOStore;
     return true;
   }
   default:
lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -300,8 +300,6 @@ multiclass SIMDLoadLane<Vec vec, bits<32> simdop> {
 } // mayLoad = 1, UseNamedOperandTable = 1
 }

-// TODO: Also support v4f32 and v2f64 once the instructions are merged
-// to the proposal
 defm "" : SIMDLoadLane<I8x16, 0x54>;
 defm "" : SIMDLoadLane<I16x8, 0x55>;
 defm "" : SIMDLoadLane<I32x4, 0x56>;
@@ -321,10 +319,24 @@ multiclass LoadLanePatNoOffset<Vec vec, SDPatternOperator kind> {
         Requires<[HasAddr64]>;
 }

-defm : LoadLanePatNoOffset<I8x16, int_wasm_load8_lane>;
-defm : LoadLanePatNoOffset<I16x8, int_wasm_load16_lane>;
-defm : LoadLanePatNoOffset<I32x4, int_wasm_load32_lane>;
-defm : LoadLanePatNoOffset<I64x2, int_wasm_load64_lane>;
+def load8_lane :
+  PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+          (vector_insert $vec, (i32 (extloadi8 $ptr)), $idx)>;
+def load16_lane :
+  PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+          (vector_insert $vec, (i32 (extloadi16 $ptr)), $idx)>;
+def load32_lane :
+  PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+          (vector_insert $vec, (i32 (load $ptr)), $idx)>;
+def load64_lane :
+  PatFrag<(ops node:$ptr, node:$vec, node:$idx),
+          (vector_insert $vec, (i64 (load $ptr)), $idx)>;
+// TODO: floating point lanes as well
+
+defm : LoadLanePatNoOffset<I8x16, load8_lane>;
+defm : LoadLanePatNoOffset<I16x8, load16_lane>;
+defm : LoadLanePatNoOffset<I32x4, load32_lane>;
+defm : LoadLanePatNoOffset<I64x2, load64_lane>;

 // TODO: Also support the other load patterns for load_lane once the instructions
 // are merged to the proposal.
test/CodeGen/WebAssembly/simd-build-vector.ll
@@ -211,7 +211,7 @@ define <16 x i8> @mashup_swizzle_i8x16(<16 x i8> %src, <16 x i8> %mask, i8 %splatted) {
 ; CHECK-LABEL: mashup_const_i8x16:
 ; CHECK-NEXT:  .functype mashup_const_i8x16 (v128, v128, i32) -> (v128)
 ; CHECK:       v128.const $push[[L0:[0-9]+]]=, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0
-; CHECK:       i8x16.replace_lane
+; CHECK:       v128.load8_lane
 ; CHECK:       i8x16.replace_lane
 ; CHECK:       i8x16.replace_lane
 ; CHECK:       return
@@ -234,7 +234,7 @@ define <16 x i8> @mashup_const_i8x16(<16 x i8> %src, <16 x i8> %mask, i8 %splatted) {
 ; CHECK-LABEL: mashup_splat_i8x16:
 ; CHECK-NEXT:  .functype mashup_splat_i8x16 (v128, v128, i32) -> (v128)
 ; CHECK:       i8x16.splat $push[[L0:[0-9]+]]=, $2
-; CHECK:       i8x16.replace_lane
+; CHECK:       v128.load8_lane
 ; CHECK:       i8x16.replace_lane
 ; CHECK:       return
 define <16 x i8> @mashup_splat_i8x16(<16 x i8> %src, <16 x i8> %mask, i8 %splatted) {
test/CodeGen/WebAssembly/simd-load-lane-offset.ll
@@ -8,11 +8,6 @@

 target triple = "wasm32-unknown-unknown"

-declare <16 x i8> @llvm.wasm.load8.lane(i8*, <16 x i8>, i32)
-declare <8 x i16> @llvm.wasm.load16.lane(i16*, <8 x i16>, i32)
-declare <4 x i32> @llvm.wasm.load32.lane(i32*, <4 x i32>, i32)
-declare <2 x i64> @llvm.wasm.load64.lane(i64*, <2 x i64>, i32)
-
 declare void @llvm.wasm.store8.lane(i8*, <16 x i8>, i32)
 declare void @llvm.wasm.store16.lane(i16*, <8 x i16>, i32)
 declare void @llvm.wasm.store32.lane(i32*, <4 x i32>, i32)
@@ -30,7 +25,8 @@ define <16 x i8> @load_lane_i8_no_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %p, <16 x i8> %v, i32 0)
+  %x = load i8, i8* %p
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -47,7 +43,8 @@ define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p, <16 x i8> %v) {
   %q = ptrtoint i8* %p to i32
   %r = add nuw i32 %q, 24
   %s = inttoptr i32 %r to i8*
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  %x = load i8, i8* %s
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -62,7 +59,8 @@ define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i8, i8* %p, i32 6
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  %x = load i8, i8* %s
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -77,7 +75,8 @@ define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i8, i8* %p, i32 -6
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  %x = load i8, i8* %s
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -94,7 +93,8 @@ define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p, <16 x i8> %v) {
   %q = ptrtoint i8* %p to i32
   %r = add nsw i32 %q, 24
   %s = inttoptr i32 %r to i8*
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  %x = load i8, i8* %s
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -109,7 +109,8 @@ define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i8, i8* %p, i32 6
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  %x = load i8, i8* %s
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -122,7 +123,8 @@ define <16 x i8> @load_lane_i8_from_numeric_address(<16 x i8> %v) {
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i8*
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  %x = load i8, i8* %s
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -135,7 +137,8 @@ define <16 x i8> @load_lane_i8_from_global_address(<16 x i8> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* @gv_i8, <16 x i8> %v, i32 0)
+  %x = load i8, i8* @gv_i8
+  %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }

@@ -265,9 +268,10 @@ define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %p, <8 x i16> %v, i32 0)
+  %x = load i16, i16* %p
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -279,12 +283,13 @@ define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i16* %p to i32
   %r = add nuw i32 %q, 24
   %s = inttoptr i32 %r to i16*
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  %x = load i16, i16* %s
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -296,10 +301,11 @@ define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const 12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i16, i16* %p, i32 6
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  %x = load i16, i16* %s
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -311,10 +317,11 @@ define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const -12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i16, i16* %p, i32 -6
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  %x = load i16, i16* %s
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -326,12 +333,13 @@ define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i16* %p to i32
   %r = add nsw i32 %q, 24
   %s = inttoptr i32 %r to i16*
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  %x = load i16, i16* %s
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -343,10 +351,11 @@ define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const 12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i16, i16* %p, i32 6
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  %x = load i16, i16* %s
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -356,10 +365,11 @@ define <8 x i16> @load_lane_i16_from_numeric_address(<8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i16*
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  %x = load i16, i16* %s
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -370,9 +380,10 @@ define <8 x i16> @load_lane_i16_from_global_address(<8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i16
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
+  %x = load i16, i16* @gv_i16
+  %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }

@@ -502,9 +513,10 @@ define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %p, <4 x i32> %v, i32 0)
+  %x = load i32, i32* %p
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -516,12 +528,13 @@ define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
   %s = inttoptr i32 %r to i32*
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  %x = load i32, i32* %s
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -533,10 +546,11 @@ define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 6
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  %x = load i32, i32* %s
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -548,10 +562,11 @@ define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const -24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  %x = load i32, i32* %s
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -563,12 +578,13 @@ define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nsw i32 %q, 24
   %s = inttoptr i32 %r to i32*
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  %x = load i32, i32* %s
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -580,10 +596,11 @@ define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i32, i32* %p, i32 6
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  %x = load i32, i32* %s
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -593,10 +610,11 @@ define <4 x i32> @load_lane_i32_from_numeric_address(<4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i32*
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  %x = load i32, i32* %s
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -607,9 +625,10 @@ define <4 x i32> @load_lane_i32_from_global_address(<4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i32
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
+  %x = load i32, i32* @gv_i32
+  %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }

@@ -739,9 +758,10 @@ define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %p, <2 x i64> %v, i32 0)
+  %x = load i64, i64* %p
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }

@@ -753,12 +773,13 @@ define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nuw i32 %q, 24
   %s = inttoptr i32 %r to i64*
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  %x = load i64, i64* %s
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }

@@ -770,10 +791,11 @@ define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const 48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i32 6
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  %x = load i64, i64* %s
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }

@@ -785,10 +807,11 @@ define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const -48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i32 -6
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  %x = load i64, i64* %s
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }

@@ -800,12 +823,13 @@ define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nsw i32 %q, 24
   %s = inttoptr i32 %r to i64*
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  %x = load i64, i64* %s
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }

@@ -817,10 +841,11 @@ define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const 48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i64, i64* %p, i32 6
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  %x = load i64, i64* %s
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }

@@ -830,10 +855,11 @@ define <2 x i64> @load_lane_i64_from_numeric_address(<2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i64*
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  %x = load i64, i64* %s
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }

@@ -844,9 +870,10 @@ define <2 x i64> @load_lane_i64_from_global_address(<2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i64
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
+  %x = load i64, i64* @gv_i64
+  %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
test/CodeGen/WebAssembly/simd-load-store-alignment.ll
@@ -133,6 +133,34 @@ define <16 x i8> @load_splat_v16i8_a2(i8* %p) {
   ret <16 x i8> %v2
 }

+; 1 is the default alignment for v128.load8_lane so no attribute is needed.
+define <16 x i8> @load_lane_i8_a1(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_a1:
+; CHECK:         .functype load_lane_i8_a1 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i8, i8* %p, align 1
+  %v1 = insertelement <16 x i8> %v, i8 %e, i32 0
+  ret <16 x i8> %v1
+}
+
+; 2 is greater than the default alignment so it is ignored.
+define <16 x i8> @load_lane_i8_a2(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_a2:
+; CHECK:         .functype load_lane_i8_a2 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i8, i8* %p, align 2
+  %v1 = insertelement <16 x i8> %v, i8 %e, i32 0
+  ret <16 x i8> %v1
+}
+
 ; ==============================================================================
 ; 8 x i16
 ; ==============================================================================
@@ -393,6 +421,47 @@ define <8 x i16> @load_splat_v8i16_a4(i16* %p) {
   ret <8 x i16> %v2
 }

+define <8 x i16> @load_lane_i16_a1(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_a1:
+; CHECK:         .functype load_lane_i16_a1 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i16, i16* %p, align 1
+  %v1 = insertelement <8 x i16> %v, i16 %e, i32 0
+  ret <8 x i16> %v1
+}
+
+; 2 is the default alignment for v128.load16_lane so no attribute is needed.
+define <8 x i16> @load_lane_i16_a2(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_a2:
+; CHECK:         .functype load_lane_i16_a2 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i16, i16* %p, align 2
+  %v1 = insertelement <8 x i16> %v, i16 %e, i32 0
+  ret <8 x i16> %v1
+}
+
+; 4 is greater than the default alignment so it is ignored.
+define <8 x i16> @load_lane_i16_a4(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_a4:
+; CHECK:         .functype load_lane_i16_a4 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i16, i16* %p, align 4
+  %v1 = insertelement <8 x i16> %v, i16 %e, i32 0
+  ret <8 x i16> %v1
+}
+
 ; ==============================================================================
 ; 4 x i32
 ; ==============================================================================
@@ -666,6 +735,60 @@ define <4 x i32> @load_splat_v4i32_a8(i32* %addr) {
   ret <4 x i32> %v2
 }

+define <4 x i32> @load_lane_i32_a1(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_a1:
+; CHECK:         .functype load_lane_i32_a1 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i32, i32* %p, align 1
+  %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
+  ret <4 x i32> %v1
+}
+
+define <4 x i32> @load_lane_i32_a2(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_a2:
+; CHECK:         .functype load_lane_i32_a2 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0:p2align=1, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i32, i32* %p, align 2
+  %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
+  ret <4 x i32> %v1
+}
+
+; 4 is the default alignment for v128.load32_lane so no attribute is needed.
+define <4 x i32> @load_lane_i32_a4(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_a4:
+; CHECK:         .functype load_lane_i32_a4 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i32, i32* %p, align 4
+  %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
+  ret <4 x i32> %v1
+}
+
+; 8 is greater than the default alignment so it is ignored.
+define <4 x i32> @load_lane_i32_a8(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_a8:
+; CHECK:         .functype load_lane_i32_a8 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i32, i32* %p, align 8
+  %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
+  ret <4 x i32> %v1
+}
+
 ; ==============================================================================
 ; 2 x i64
 ; ==============================================================================
@@ -833,6 +956,73 @@ define <2 x i64> @load_splat_v2i64_a16(i64* %p) {
   ret <2 x i64> %v2
 }

+define <2 x i64> @load_lane_i64_a1(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_a1:
+; CHECK:         .functype load_lane_i64_a1 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i64, i64* %p, align 1
+  %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
+  ret <2 x i64> %v1
+}
+
+define <2 x i64> @load_lane_i64_a2(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_a2:
+; CHECK:         .functype load_lane_i64_a2 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0:p2align=1, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i64, i64* %p, align 2
+  %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
+  ret <2 x i64> %v1
+}
+
+define <2 x i64> @load_lane_i64_a4(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_a4:
+; CHECK:         .functype load_lane_i64_a4 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0:p2align=2, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i64, i64* %p, align 4
+  %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
+  ret <2 x i64> %v1
+}
+
+; 8 is the default alignment for v128.load64_lane so no attribute is needed.
+define <2 x i64> @load_lane_i64_a8(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_a8:
+; CHECK:         .functype load_lane_i64_a8 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i64, i64* %p, align 8
+  %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
+  ret <2 x i64> %v1
+}
+
+; 16 is greater than the default alignment so it is ignored.
+define <2 x i64> @load_lane_i64_a16(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_a16:
+; CHECK:         .functype load_lane_i64_a16 (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %e = load i64, i64* %p, align 16
+  %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
+  ret <2 x i64> %v1
+}
+
 ; ==============================================================================
 ; 4 x float
 ; ==============================================================================