[ARM][MVE] Enable tail predication for loops containing MVE gather/scatters
Widen the scope of memory operations that are allowed to be tail predicated to include gathers and scatters, such that loops that are auto-vectorized with the option -enable-arm-maskedgatscat (and actually end up containing an MVE gather or scatter) can be tail predicated.

Differential Revision: https://reviews.llvm.org/D85138
parent a7f874dd31
commit 7da989b1b5
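For orientation, this is the shape of loop the change targets: an indirect access that MVE auto-vectorization can only express as a masked gather, which previously made the tail-predication pass bail out. The function below is a hypothetical example, not code from the patch or its tests; only the flag name comes from the commit message above.

// Hypothetical C++ source (not from the patch). When built for an MVE target
// with -enable-arm-maskedgatscat, the indirect load Data[Idx[I]] becomes an
// @llvm.masked.gather call in the vector loop; after this change that loop
// can still be converted into a tail-predicated hardware loop (dlstp/letp)
// instead of being rejected.
void gatherMul(int *__restrict Dst, const int *__restrict Data,
               const int *__restrict Idx, int N) {
  for (int I = 0; I < N; ++I)
    Dst[I] = Data[Idx[I]] * 3;
}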
lib/Target/ARM/ARMBaseInstrInfo.h

@@ -838,8 +838,8 @@ inline bool isLegalAddressImm(unsigned Opcode, int Imm,
   }
 }
 
-// Return true if the given intrinsic is a gather or scatter
-inline bool isGatherScatter(IntrinsicInst *IntInst) {
+// Return true if the given intrinsic is a gather
+inline bool isGather(IntrinsicInst *IntInst) {
   if (IntInst == nullptr)
     return false;
   unsigned IntrinsicID = IntInst->getIntrinsicID();
@@ -849,8 +849,15 @@ inline bool isGatherScatter(IntrinsicInst *IntInst) {
           IntrinsicID == Intrinsic::arm_mve_vldr_gather_base_wb ||
           IntrinsicID == Intrinsic::arm_mve_vldr_gather_base_wb_predicated ||
           IntrinsicID == Intrinsic::arm_mve_vldr_gather_offset ||
-          IntrinsicID == Intrinsic::arm_mve_vldr_gather_offset_predicated ||
-          IntrinsicID == Intrinsic::masked_scatter ||
+          IntrinsicID == Intrinsic::arm_mve_vldr_gather_offset_predicated);
+}
+
+// Return true if the given intrinsic is a scatter
+inline bool isScatter(IntrinsicInst *IntInst) {
+  if (IntInst == nullptr)
+    return false;
+  unsigned IntrinsicID = IntInst->getIntrinsicID();
+  return (IntrinsicID == Intrinsic::masked_scatter ||
           IntrinsicID == Intrinsic::arm_mve_vstr_scatter_base ||
           IntrinsicID == Intrinsic::arm_mve_vstr_scatter_base_predicated ||
           IntrinsicID == Intrinsic::arm_mve_vstr_scatter_base_wb ||
@@ -859,6 +866,13 @@ inline bool isGatherScatter(IntrinsicInst *IntInst) {
           IntrinsicID == Intrinsic::arm_mve_vstr_scatter_offset_predicated);
 }
 
+// Return true if the given intrinsic is a gather or scatter
+inline bool isGatherScatter(IntrinsicInst *IntInst) {
+  if (IntInst == nullptr)
+    return false;
+  return isGather(IntInst) || isScatter(IntInst);
+}
+
 } // end namespace llvm
 
 #endif // LLVM_LIB_TARGET_ARM_ARMBASEINSTRINFO_H
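The header now exposes three predicates, with isGatherScatter reduced to a composition of the other two, so callers can ask the direction-specific question where it matters. A minimal sketch of a hypothetical caller (classifyMemIntrinsic is illustrative, not part of the patch):

// Sketch only: ARMBaseInstrInfo.h is the target-internal header that
// declares isGather/isScatter/isGatherScatter (all in namespace llvm).
#include "ARMBaseInstrInfo.h"
#include "llvm/IR/IntrinsicInst.h"

static const char *classifyMemIntrinsic(llvm::IntrinsicInst *Int) {
  if (llvm::isGather(Int))
    return "gather (masked vector load)";   // masked_gather or MVE vldr_gather_*
  if (llvm::isScatter(Int))
    return "scatter (masked vector store)"; // masked_scatter or MVE vstr_scatter_*
  return "neither";                         // equivalent to !isGatherScatter(Int)
}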
lib/Target/ARM/MVETailPredication.cpp

@@ -153,8 +153,8 @@ static bool IsMasked(Instruction *I) {
     return false;
 
   Intrinsic::ID ID = Call->getIntrinsicID();
   // TODO: Support gather/scatter expand/compress operations.
-  return ID == Intrinsic::masked_store || ID == Intrinsic::masked_load;
+  return ID == Intrinsic::masked_store || ID == Intrinsic::masked_load ||
+         isGatherScatter(Call);
 }
 
 bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
@@ -233,9 +233,19 @@ bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
 }
 
 static FixedVectorType *getVectorType(IntrinsicInst *I) {
-  unsigned TypeOp = I->getIntrinsicID() == Intrinsic::masked_load ? 0 : 1;
-  auto *PtrTy = cast<PointerType>(I->getOperand(TypeOp)->getType());
-  auto *VecTy = cast<FixedVectorType>(PtrTy->getElementType());
+  unsigned ID = I->getIntrinsicID();
+  FixedVectorType *VecTy;
+  if (ID == Intrinsic::masked_load || isGather(I)) {
+    if (ID == Intrinsic::arm_mve_vldr_gather_base_wb_predicated)
+      // then the type is a StructType
+      VecTy = dyn_cast<FixedVectorType>(I->getType()->getContainedType(0));
+    else
+      VecTy = dyn_cast<FixedVectorType>(I->getType());
+  } else if (ID == Intrinsic::masked_store) {
+    VecTy = dyn_cast<FixedVectorType>(I->getOperand(0)->getType());
+  } else {
+    VecTy = dyn_cast<FixedVectorType>(I->getOperand(2)->getType());
+  }
+  assert(VecTy && "No scalable vectors expected here");
   return VecTy;
 }
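The branch on arm_mve_vldr_gather_base_wb_predicated exists because the write-back gather produces two results, the loaded data and the updated vector of base addresses, packaged as a struct. A hedged sketch of that assumption (the helper name is hypothetical, not from the patch):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IntrinsicInst.h"

// Assumed shape: a write-back gather's return type is a struct like
// { <4 x i32> loaded data, <4 x i32> updated base }, so the data-vector
// type is contained type 0 -- which is what getVectorType() reads above.
static llvm::FixedVectorType *wbGatherDataType(llvm::IntrinsicInst *Int) {
  auto *STy = llvm::cast<llvm::StructType>(Int->getType());
  return llvm::dyn_cast<llvm::FixedVectorType>(STy->getContainedType(0));
}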
@@ -274,7 +284,6 @@ bool MVETailPredication::IsPredicatedVectorLoop() {
     default:
       break;
     }
-
     if (IsMasked(&I)) {
       auto *VecTy = getVectorType(Int);
       unsigned Lanes = VecTy->getNumElements();
@@ -590,7 +599,8 @@ bool MVETailPredication::TryConvert(Value *TripCount) {
   // Walk through the masked intrinsics and try to find whether the predicate
   // operand is generated by intrinsic @llvm.get.active.lane.mask().
   for (auto *I : MaskedInsts) {
-    unsigned PredOp = I->getIntrinsicID() == Intrinsic::masked_load ? 2 : 3;
+    unsigned PredOp =
+        (I->getIntrinsicID() == Intrinsic::masked_load || isGather(I)) ? 2 : 3;
     auto *Predicate = dyn_cast<Instruction>(I->getArgOperand(PredOp));
     if (!Predicate || Predicates.count(Predicate))
       continue;
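The 2-versus-3 choice for PredOp mirrors the operand layouts of the generic masked intrinsics: loads and gathers have no value operand, so their mask sits one position earlier than in stores and scatters. Restated as a hypothetical helper (not in the patch):

#include "ARMBaseInstrInfo.h" // target-internal: declares isGather()
#include "llvm/IR/IntrinsicInst.h"

// Mask positions per the LangRef declarations of the generic intrinsics:
//   llvm.masked.load   (ptr,   align, mask, passthru) -> mask at index 2
//   llvm.masked.gather (ptrs,  align, mask, passthru) -> mask at index 2
//   llvm.masked.store  (value, ptr,   align, mask)    -> mask at index 3
//   llvm.masked.scatter(value, ptrs,  align, mask)    -> mask at index 3
static unsigned maskOperandIndex(llvm::IntrinsicInst *I) {
  return (I->getIntrinsicID() == llvm::Intrinsic::masked_load ||
          llvm::isGather(I))
             ? 2
             : 3;
}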
test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll (new file, 212 lines)

@@ -0,0 +1,212 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -enable-mem-access-versioning=false -enable-arm-maskedgatscat -tail-predication=force-enabled %s -o - | FileCheck %s

define dso_local void @mve_gather_qi_wb(i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %B, i32* noalias nocapture %C, i32 %n, i32 %m, i32 %l) {
; CHECK-LABEL: mve_gather_qi_wb:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    adr r4, .LCPI0_0
; CHECK-NEXT:    add.w r12, r0, r3, lsl #2
; CHECK-NEXT:    vldrw.u32 q0, [r4]
; CHECK-NEXT:    adds r0, r3, #1
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vadd.i32 q0, q0, r1
; CHECK-NEXT:    adds r1, r3, #4
; CHECK-NEXT:    dlstp.32 lr, r0
; CHECK-NEXT:  .LBB0_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q1, [r12], #16
; CHECK-NEXT:    vldrw.u32 q3, [q0, #80]!
; CHECK-NEXT:    vmul.i32 q1, q3, q1
; CHECK-NEXT:    vadd.i32 q2, q2, q1
; CHECK-NEXT:    letp lr, .LBB0_1
; CHECK-NEXT:  @ %bb.2: @ %middle.block
; CHECK-NEXT:    vmov q0, q2
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    str.w r0, [r2, r1, lsl #2]
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI0_0:
; CHECK-NEXT:    .long 4294967228 @ 0xffffffbc
; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
; CHECK-NEXT:    .long 4294967268 @ 0xffffffe4
; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
entry: ; preds = %middle.
  %add.us.us = add i32 4, %n
  %arrayidx.us.us = getelementptr inbounds i32, i32* %C, i32 %add.us.us
  br label %vector.body

vector.body: ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %7, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %entry ], [ %vec.ind.next, %vector.body ]
  %0 = add i32 %index, %n
  %1 = getelementptr inbounds i32, i32* %A, i32 %0
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
  %2 = bitcast i32* %1 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %3 = mul <4 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5>
  %4 = add <4 x i32> %3, <i32 3, i32 3, i32 3, i32 3>
  %5 = getelementptr inbounds i32, i32* %B, <4 x i32> %4
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %6 = mul nsw <4 x i32> %wide.masked.gather, %wide.masked.load
  %7 = add <4 x i32> %vec.phi, %6
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %8 = icmp eq i32 %index.next, 5000
  br i1 %8, label %middle.block, label %vector.body

middle.block: ; preds = %vector.body
  %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
  %10 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %9)
  store i32 %10, i32* %arrayidx.us.us, align 4
  %inc21.us.us = add nuw i32 4, 1
  %exitcond81.not = icmp eq i32 %inc21.us.us, %n
  br label %end

end: ; preds = %middle.block
  ret void
}

define dso_local void @mve_gatherscatter_offset(i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %B, i32* noalias nocapture %C, i32 %n, i32 %m, i32 %l) {
; CHECK-LABEL: mve_gatherscatter_offset:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    adr r4, .LCPI1_0
; CHECK-NEXT:    add.w r12, r0, r3, lsl #2
; CHECK-NEXT:    adds r0, r3, #1
; CHECK-NEXT:    vldrw.u32 q1, [r4]
; CHECK-NEXT:    adds r3, #4
; CHECK-NEXT:    vmov.i32 q3, #0x0
; CHECK-NEXT:    vmov.i32 q0, #0x14
; CHECK-NEXT:    dlstp.32 lr, r0
; CHECK-NEXT:  .LBB1_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q2, [r1, q1, uxtw #2]
; CHECK-NEXT:    vldrw.u32 q4, [r12], #16
; CHECK-NEXT:    vmul.i32 q2, q2, q4
; CHECK-NEXT:    vstrw.32 q2, [r1, q1, uxtw #2]
; CHECK-NEXT:    vadd.i32 q1, q1, q0
; CHECK-NEXT:    vadd.i32 q3, q3, q2
; CHECK-NEXT:    letp lr, .LBB1_1
; CHECK-NEXT:  @ %bb.2: @ %middle.block
; CHECK-NEXT:    vmov q0, q3
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    str.w r0, [r2, r3, lsl #2]
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI1_0:
; CHECK-NEXT:    .long 3 @ 0x3
; CHECK-NEXT:    .long 8 @ 0x8
; CHECK-NEXT:    .long 13 @ 0xd
; CHECK-NEXT:    .long 18 @ 0x12
entry: ; preds = %middle.
  %add.us.us = add i32 4, %n
  %arrayidx.us.us = getelementptr inbounds i32, i32* %C, i32 %add.us.us
  br label %vector.body

vector.body: ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %7, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %entry ], [ %vec.ind.next, %vector.body ]
  %0 = add i32 %index, %n
  %1 = getelementptr inbounds i32, i32* %A, i32 %0
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
  %2 = bitcast i32* %1 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %3 = mul <4 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5>
  %4 = add <4 x i32> %3, <i32 3, i32 3, i32 3, i32 3>
  %5 = getelementptr inbounds i32, i32* %B, <4 x i32> %4
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %6 = mul nsw <4 x i32> %wide.masked.gather, %wide.masked.load
  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %6, <4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask)
  %7 = add <4 x i32> %vec.phi, %6
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %8 = icmp eq i32 %index.next, 5000
  br i1 %8, label %middle.block, label %vector.body

middle.block: ; preds = %vector.body
  %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
  %10 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %9)
  store i32 %10, i32* %arrayidx.us.us, align 4
  %inc21.us.us = add nuw i32 4, 1
  %exitcond81.not = icmp eq i32 %inc21.us.us, %n
  br label %end

end: ; preds = %middle.block
  ret void
}

define dso_local void @mve_scatter_qi(i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %B, i32* noalias nocapture %C, i32 %n, i32 %m, i32 %l) {
; CHECK-LABEL: mve_scatter_qi:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    adr r4, .LCPI2_0
; CHECK-NEXT:    add.w r12, r0, r3, lsl #2
; CHECK-NEXT:    vldrw.u32 q0, [r4]
; CHECK-NEXT:    adds r0, r3, #1
; CHECK-NEXT:    vmov.i32 q3, #0x0
; CHECK-NEXT:    vadd.i32 q0, q0, r1
; CHECK-NEXT:    adds r1, r3, #4
; CHECK-NEXT:    vmov.i32 q2, #0x3
; CHECK-NEXT:    dlstp.32 lr, r0
; CHECK-NEXT:  .LBB2_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q1, [r12], #16
; CHECK-NEXT:    vmul.i32 q1, q1, q2
; CHECK-NEXT:    vstrw.32 q1, [q0, #80]!
; CHECK-NEXT:    vadd.i32 q3, q3, q1
; CHECK-NEXT:    letp lr, .LBB2_1
; CHECK-NEXT:  @ %bb.2: @ %middle.block
; CHECK-NEXT:    vmov q0, q3
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    str.w r0, [r2, r1, lsl #2]
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI2_0:
; CHECK-NEXT:    .long 4294967228 @ 0xffffffbc
; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
; CHECK-NEXT:    .long 4294967268 @ 0xffffffe4
; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
entry: ; preds = %middle.
  %add.us.us = add i32 4, %n
  %arrayidx.us.us = getelementptr inbounds i32, i32* %C, i32 %add.us.us
  br label %vector.body

vector.body: ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %7, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %entry ], [ %vec.ind.next, %vector.body ]
  %0 = add i32 %index, %n
  %1 = getelementptr inbounds i32, i32* %A, i32 %0
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
  %2 = bitcast i32* %1 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %3 = mul <4 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5>
  %4 = add <4 x i32> %3, <i32 3, i32 3, i32 3, i32 3>
  %5 = getelementptr inbounds i32, i32* %B, <4 x i32> %4
  %6 = mul nsw <4 x i32> <i32 3, i32 3, i32 3, i32 3>, %wide.masked.load
  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %6, <4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask)
  %7 = add <4 x i32> %vec.phi, %6
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %8 = icmp eq i32 %index.next, 5000
  br i1 %8, label %middle.block, label %vector.body

middle.block: ; preds = %vector.body
  %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
  %10 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %9)
  store i32 %10, i32* %arrayidx.us.us, align 4
  %inc21.us.us = add nuw i32 4, 1
  %exitcond81.not = icmp eq i32 %inc21.us.us, %n
  br label %end

end: ; preds = %middle.block
  ret void
}

declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)