[RISCV] Expand unaligned fixed-length vector memory accesses
RVV vectors must be aligned to their element types, so anything less is unaligned.

For regular loads and stores, our custom lowering of fixed-length vectors meant that we opted out of LegalizeDAG's built-in unaligned expansion. This patch adds that logic into our custom lower function.

For masked intrinsics, we declare that anything unaligned is not legal, leaving the ScalarizeMaskedMemIntrin pass to do the expansion for us.

Note that neither of these methods can handle the expansion of scalable-vector memory ops, so those cases are left alone by this patch. Scalable loads and stores already go through expansion by default but hit an assertion, and scalable masked intrinsics will silently generate incorrect code. It may be prudent to return an error in both of these cases.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D102493
commit be68e4ef95 (parent ce7972786d)
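To make the load/store change concrete, here is a minimal, hypothetical IR example (not taken from the patch's own tests): a fixed-length vector load and store whose alignment is smaller than the element size. With this patch, the custom lowering first checks allowsMemoryAccessForAlignment and, when that check fails, falls back to expandUnalignedLoad/expandUnalignedStore instead of emitting a vector access that assumes element alignment.

; Hypothetical example: i32 elements need 4-byte alignment, but these
; accesses only promise align 1, so the new checks in
; lowerFixedLengthVectorLoadToRVV/lowerFixedLengthVectorStoreToRVV
; expand them into smaller, legally aligned accesses during lowering.
define <4 x i32> @unaligned_load(<4 x i32>* %p) {
  %v = load <4 x i32>, <4 x i32>* %p, align 1
  ret <4 x i32> %v
}

define void @unaligned_store(<4 x i32> %v, <4 x i32>* %p) {
  store <4 x i32> %v, <4 x i32>* %p, align 1
  ret void
}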
@@ -4022,9 +4022,17 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
SDValue
RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  auto *Load = cast<LoadSDNode>(Op);

  SDLoc DL(Op);
  if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                      Load->getMemoryVT(),
                                      *Load->getMemOperand())) {
    SDValue Result, Chain;
    std::tie(Result, Chain) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues({Result, Chain}, DL);
  }

  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = getContainerForFixedLengthVector(VT);
@@ -4043,9 +4051,14 @@ RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
SDValue
RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  auto *Store = cast<StoreSDNode>(Op);

  SDLoc DL(Op);
  if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                      Store->getMemoryVT(),
                                      *Store->getMemOperand()))
    return expandUnalignedStore(Store, DAG);

  SDValue StoreVal = Store->getValue();
  MVT VT = StoreVal.getSimpleValueType();
@@ -104,6 +104,10 @@ public:
    if (isa<FixedVectorType>(DataType) && ST->getMinRVVVectorSizeInBits() == 0)
      return false;

    if (Alignment <
        DL.getTypeStoreSize(DataType->getScalarType()).getFixedSize())
      return false;

    return isLegalElementTypeForRVV(DataType->getScalarType());
  }
@@ -122,6 +126,10 @@ public:
    if (isa<FixedVectorType>(DataType) && ST->getMinRVVVectorSizeInBits() == 0)
      return false;

    if (Alignment <
        DL.getTypeStoreSize(DataType->getScalarType()).getFixedSize())
      return false;

    return isLegalElementTypeForRVV(DataType->getScalarType());
  }
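For the masked-intrinsic side, the checks above report any access whose alignment is below the scalar element's store size as not legal, so the ScalarizeMaskedMemIntrin pass expands it before instruction selection; the cost-model updates below reflect that scalarized lowering. A minimal, hypothetical example (using the same gather intrinsic exercised by the tests, with an alignment chosen for illustration):

; Hypothetical example: f64 elements need 8-byte alignment, but this
; gather only promises align 4, so it is reported as not legal for RVV
; and ScalarizeMaskedMemIntrin rewrites it into per-element
; conditional loads.
declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*>, i32, <2 x i1>, <2 x double>)

define <2 x double> @unaligned_gather(<2 x double*> %ptrs, <2 x i1> %mask) {
  %v = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 4, <2 x i1> %mask, <2 x double> undef)
  ret <2 x double> %v
}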
@@ -42,6 +42,36 @@ define i32 @masked_gather() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4I8 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> undef, i32 1, <4 x i1> undef, <4 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2I8 = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> undef, i32 1, <2 x i1> undef, <2 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1I8 = call <1 x i8> @llvm.masked.gather.v1i8.v1p0i8(<1 x i8*> undef, i32 1, <1 x i1> undef, <1 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8F64.u = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> undef, i32 2, <8 x i1> undef, <8 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F64.u = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> undef, i32 2, <4 x i1> undef, <4 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F64.u = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> undef, i32 2, <2 x i1> undef, <2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F64.u = call <1 x double> @llvm.masked.gather.v1f64.v1p0f64(<1 x double*> undef, i32 2, <1 x i1> undef, <1 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V16F32.u = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> undef, i32 2, <16 x i1> undef, <16 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8F32.u = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> undef, i32 2, <8 x i1> undef, <8 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F32.u = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> undef, i32 2, <4 x i1> undef, <4 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F32.u = call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> undef, i32 2, <2 x i1> undef, <2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F32.u = call <1 x float> @llvm.masked.gather.v1f32.v1p0f32(<1 x float*> undef, i32 2, <1 x i1> undef, <1 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V32F16.u = call <32 x half> @llvm.masked.gather.v32f16.v32p0f16(<32 x half*> undef, i32 1, <32 x i1> undef, <32 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V16F16.u = call <16 x half> @llvm.masked.gather.v16f16.v16p0f16(<16 x half*> undef, i32 1, <16 x i1> undef, <16 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8F16.u = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> undef, i32 1, <8 x i1> undef, <8 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F16.u = call <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*> undef, i32 1, <4 x i1> undef, <4 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F16.u = call <2 x half> @llvm.masked.gather.v2f16.v2p0f16(<2 x half*> undef, i32 1, <2 x i1> undef, <2 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16.u = call <1 x half> @llvm.masked.gather.v1f16.v1p0f16(<1 x half*> undef, i32 1, <1 x i1> undef, <1 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8I64.u = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> undef, i32 4, <8 x i1> undef, <8 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4I64.u = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> undef, i32 4, <4 x i1> undef, <4 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2I64.u = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> undef, i32 4, <2 x i1> undef, <2 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1I64.u = call <1 x i64> @llvm.masked.gather.v1i64.v1p0i64(<1 x i64*> undef, i32 4, <1 x i1> undef, <1 x i64> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V16I32.u = call <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*> undef, i32 1, <16 x i1> undef, <16 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8I32.u = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> undef, i32 1, <8 x i1> undef, <8 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4I32.u = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> undef, i32 1, <4 x i1> undef, <4 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2I32.u = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> undef, i32 1, <2 x i1> undef, <2 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1I32.u = call <1 x i32> @llvm.masked.gather.v1i32.v1p0i32(<1 x i32*> undef, i32 1, <1 x i1> undef, <1 x i32> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V32I16.u = call <32 x i16> @llvm.masked.gather.v32i16.v32p0i16(<32 x i16*> undef, i32 1, <32 x i1> undef, <32 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V16I16.u = call <16 x i16> @llvm.masked.gather.v16i16.v16p0i16(<16 x i16*> undef, i32 1, <16 x i1> undef, <16 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8I16.u = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> undef, i32 1, <8 x i1> undef, <8 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4I16.u = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> undef, i32 1, <4 x i1> undef, <4 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2I16.u = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> undef, i32 1, <2 x i1> undef, <2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1I16.u = call <1 x i16> @llvm.masked.gather.v1i16.v1p0i16(<1 x i16*> undef, i32 1, <1 x i1> undef, <1 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 0
;
  %V8F64 = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> undef, i32 8, <8 x i1> undef, <8 x double> undef)
@@ -88,6 +118,43 @@ define i32 @masked_gather() {
  %V2I8 = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> undef, i32 1, <2 x i1> undef, <2 x i8> undef)
  %V1I8 = call <1 x i8> @llvm.masked.gather.v1i8.v1p0i8(<1 x i8*> undef, i32 1, <1 x i1> undef, <1 x i8> undef)

  ; Test unaligned gathers
  %V8F64.u = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> undef, i32 2, <8 x i1> undef, <8 x double> undef)
  %V4F64.u = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> undef, i32 2, <4 x i1> undef, <4 x double> undef)
  %V2F64.u = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> undef, i32 2, <2 x i1> undef, <2 x double> undef)
  %V1F64.u = call <1 x double> @llvm.masked.gather.v1f64.v1p0f64(<1 x double*> undef, i32 2, <1 x i1> undef, <1 x double> undef)

  %V16F32.u = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> undef, i32 2, <16 x i1> undef, <16 x float> undef)
  %V8F32.u = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> undef, i32 2, <8 x i1> undef, <8 x float> undef)
  %V4F32.u = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> undef, i32 2, <4 x i1> undef, <4 x float> undef)
  %V2F32.u = call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> undef, i32 2, <2 x i1> undef, <2 x float> undef)
  %V1F32.u = call <1 x float> @llvm.masked.gather.v1f32.v1p0f32(<1 x float*> undef, i32 2, <1 x i1> undef, <1 x float> undef)

  %V32F16.u = call <32 x half> @llvm.masked.gather.v32f16.v32p0f16(<32 x half*> undef, i32 1, <32 x i1> undef, <32 x half> undef)
  %V16F16.u = call <16 x half> @llvm.masked.gather.v16f16.v16p0f16(<16 x half*> undef, i32 1, <16 x i1> undef, <16 x half> undef)
  %V8F16.u = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> undef, i32 1, <8 x i1> undef, <8 x half> undef)
  %V4F16.u = call <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*> undef, i32 1, <4 x i1> undef, <4 x half> undef)
  %V2F16.u = call <2 x half> @llvm.masked.gather.v2f16.v2p0f16(<2 x half*> undef, i32 1, <2 x i1> undef, <2 x half> undef)
  %V1F16.u = call <1 x half> @llvm.masked.gather.v1f16.v1p0f16(<1 x half*> undef, i32 1, <1 x i1> undef, <1 x half> undef)

  %V8I64.u = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> undef, i32 4, <8 x i1> undef, <8 x i64> undef)
  %V4I64.u = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> undef, i32 4, <4 x i1> undef, <4 x i64> undef)
  %V2I64.u = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> undef, i32 4, <2 x i1> undef, <2 x i64> undef)
  %V1I64.u = call <1 x i64> @llvm.masked.gather.v1i64.v1p0i64(<1 x i64*> undef, i32 4, <1 x i1> undef, <1 x i64> undef)

  %V16I32.u = call <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*> undef, i32 1, <16 x i1> undef, <16 x i32> undef)
  %V8I32.u = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> undef, i32 1, <8 x i1> undef, <8 x i32> undef)
  %V4I32.u = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> undef, i32 1, <4 x i1> undef, <4 x i32> undef)
  %V2I32.u = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> undef, i32 1, <2 x i1> undef, <2 x i32> undef)
  %V1I32.u = call <1 x i32> @llvm.masked.gather.v1i32.v1p0i32(<1 x i32*> undef, i32 1, <1 x i1> undef, <1 x i32> undef)

  %V32I16.u = call <32 x i16> @llvm.masked.gather.v32i16.v32p0i16(<32 x i16*> undef, i32 1, <32 x i1> undef, <32 x i16> undef)
  %V16I16.u = call <16 x i16> @llvm.masked.gather.v16i16.v16p0i16(<16 x i16*> undef, i32 1, <16 x i1> undef, <16 x i16> undef)
  %V8I16.u = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> undef, i32 1, <8 x i1> undef, <8 x i16> undef)
  %V4I16.u = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> undef, i32 1, <4 x i1> undef, <4 x i16> undef)
  %V2I16.u = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> undef, i32 1, <2 x i1> undef, <2 x i16> undef)
  %V1I16.u = call <1 x i16> @llvm.masked.gather.v1i16.v1p0i16(<1 x i16*> undef, i32 1, <1 x i1> undef, <1 x i16> undef)

  ret i32 0
}
@@ -42,6 +42,36 @@ define i32 @masked_scatter() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> undef, <4 x i8*> undef, i32 1, <4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8> undef, <2 x i8*> undef, i32 1, <2 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.masked.scatter.v1i8.v1p0i8(<1 x i8> undef, <1 x i8*> undef, i32 1, <1 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> undef, <8 x double*> undef, i32 2, <8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double> undef, <4 x double*> undef, i32 2, <4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> undef, <2 x double*> undef, i32 2, <2 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: call void @llvm.masked.scatter.v1f64.v1p0f64(<1 x double> undef, <1 x double*> undef, i32 2, <1 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float> undef, <16 x float*> undef, i32 2, <16 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float> undef, <8 x float*> undef, i32 2, <8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> undef, <4 x float*> undef, i32 2, <4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: call void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> undef, <2 x float*> undef, i32 2, <2 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: call void @llvm.masked.scatter.v1f32.v1p0f32(<1 x float> undef, <1 x float*> undef, i32 2, <1 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.masked.scatter.v32f16.v32p0f16(<32 x half> undef, <32 x half*> undef, i32 1, <32 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.masked.scatter.v16f16.v16p0f16(<16 x half> undef, <16 x half*> undef, i32 1, <16 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> undef, <8 x half*> undef, i32 1, <8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> undef, <4 x half*> undef, i32 1, <4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: call void @llvm.masked.scatter.v2f16.v2p0f16(<2 x half> undef, <2 x half*> undef, i32 1, <2 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: call void @llvm.masked.scatter.v1f16.v1p0f16(<1 x half> undef, <1 x half*> undef, i32 1, <1 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> undef, <8 x i64*> undef, i32 1, <8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.scatter.v4i64.v4p0i64(<4 x i64> undef, <4 x i64*> undef, i32 1, <4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> undef, <2 x i64*> undef, i32 1, <2 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: call void @llvm.masked.scatter.v1i64.v1p0i64(<1 x i64> undef, <1 x i64*> undef, i32 1, <1 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32> undef, <16 x i32*> undef, i32 1, <16 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> undef, <8 x i32*> undef, i32 1, <8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> undef, <4 x i32*> undef, i32 1, <4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> undef, <2 x i32*> undef, i32 1, <2 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: call void @llvm.masked.scatter.v1i32.v1p0i32(<1 x i32> undef, <1 x i32*> undef, i32 1, <1 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: call void @llvm.masked.scatter.v32i16.v32p0i16(<32 x i16> undef, <32 x i16*> undef, i32 1, <32 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: call void @llvm.masked.scatter.v16i16.v16p0i16(<16 x i16> undef, <16 x i16*> undef, i32 1, <16 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> undef, <8 x i16*> undef, i32 1, <8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> undef, <4 x i16*> undef, i32 1, <4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: call void @llvm.masked.scatter.v2i16.v2p0i16(<2 x i16> undef, <2 x i16*> undef, i32 1, <2 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: call void @llvm.masked.scatter.v1i16.v1p0i16(<1 x i16> undef, <1 x i16*> undef, i32 1, <1 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret i32 0
;
  call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> undef, <8 x double*> undef, i32 8, <8 x i1> undef)
@@ -88,6 +118,43 @@ define i32 @masked_scatter() {
  call void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8> undef, <2 x i8*> undef, i32 1, <2 x i1> undef)
  call void @llvm.masked.scatter.v1i8.v1p0i8(<1 x i8> undef, <1 x i8*> undef, i32 1, <1 x i1> undef)

  ; Test unaligned scatters
  call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> undef, <8 x double*> undef, i32 2, <8 x i1> undef)
  call void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double> undef, <4 x double*> undef, i32 2, <4 x i1> undef)
  call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> undef, <2 x double*> undef, i32 2, <2 x i1> undef)
  call void @llvm.masked.scatter.v1f64.v1p0f64(<1 x double> undef, <1 x double*> undef, i32 2, <1 x i1> undef)

  call void @llvm.masked.scatter.v16f32.v16p0f32(<16 x float> undef, <16 x float*> undef, i32 2, <16 x i1> undef)
  call void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float> undef, <8 x float*> undef, i32 2, <8 x i1> undef)
  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> undef, <4 x float*> undef, i32 2, <4 x i1> undef)
  call void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> undef, <2 x float*> undef, i32 2, <2 x i1> undef)
  call void @llvm.masked.scatter.v1f32.v1p0f32(<1 x float> undef, <1 x float*> undef, i32 2, <1 x i1> undef)

  call void @llvm.masked.scatter.v32f16.v32p0f16(<32 x half> undef, <32 x half*> undef, i32 1, <32 x i1> undef)
  call void @llvm.masked.scatter.v16f16.v16p0f16(<16 x half> undef, <16 x half*> undef, i32 1, <16 x i1> undef)
  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> undef, <8 x half*> undef, i32 1, <8 x i1> undef)
  call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> undef, <4 x half*> undef, i32 1, <4 x i1> undef)
  call void @llvm.masked.scatter.v2f16.v2p0f16(<2 x half> undef, <2 x half*> undef, i32 1, <2 x i1> undef)
  call void @llvm.masked.scatter.v1f16.v1p0f16(<1 x half> undef, <1 x half*> undef, i32 1, <1 x i1> undef)

  call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> undef, <8 x i64*> undef, i32 1, <8 x i1> undef)
  call void @llvm.masked.scatter.v4i64.v4p0i64(<4 x i64> undef, <4 x i64*> undef, i32 1, <4 x i1> undef)
  call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> undef, <2 x i64*> undef, i32 1, <2 x i1> undef)
  call void @llvm.masked.scatter.v1i64.v1p0i64(<1 x i64> undef, <1 x i64*> undef, i32 1, <1 x i1> undef)

  call void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32> undef, <16 x i32*> undef, i32 1, <16 x i1> undef)
  call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> undef, <8 x i32*> undef, i32 1, <8 x i1> undef)
  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> undef, <4 x i32*> undef, i32 1, <4 x i1> undef)
  call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> undef, <2 x i32*> undef, i32 1, <2 x i1> undef)
  call void @llvm.masked.scatter.v1i32.v1p0i32(<1 x i32> undef, <1 x i32*> undef, i32 1, <1 x i1> undef)

  call void @llvm.masked.scatter.v32i16.v32p0i16(<32 x i16> undef, <32 x i16*> undef, i32 1, <32 x i1> undef)
  call void @llvm.masked.scatter.v16i16.v16p0i16(<16 x i16> undef, <16 x i16*> undef, i32 1, <16 x i1> undef)
  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> undef, <8 x i16*> undef, i32 1, <8 x i1> undef)
  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> undef, <4 x i16*> undef, i32 1, <4 x i1> undef)
  call void @llvm.masked.scatter.v2i16.v2p0i16(<2 x i16> undef, <2 x i16*> undef, i32 1, <2 x i1> undef)
  call void @llvm.masked.scatter.v1i16.v1p0i16(<1 x i16> undef, <1 x i16*> undef, i32 1, <1 x i1> undef)

  ret i32 0
}
test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll: new file, 1000 lines (diff suppressed because it is too large)