
[SVE] Clarify TypeSize comparisons in llvm/lib/Transforms

Use the isKnownXY comparators when one of the operands can be a scalable
vector, and getFixedSize() in all other cases.
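(For context, a minimal sketch of the two idioms this refers to; the helper
functions are hypothetical, only TypeSize and its members from
llvm/Support/TypeSize.h are real LLVM API.)

#include "llvm/Support/TypeSize.h"
using namespace llvm;

// Either operand may describe a scalable vector, whose size is a runtime
// multiple of vscale. A plain '<=' cannot answer that at compile time, so
// the isKnownXY comparators compare what is known statically.
static bool knownToFit(TypeSize Src, TypeSize Dst) {
  return TypeSize::isKnownLE(Src, Dst);
}

// Both operands are known to be fixed-width. getFixedSize() asserts that
// the size is not scalable and returns a plain integer, so an ordinary
// comparison is safe and the fixed-width assumption is explicit.
static bool fitsWhenFixed(TypeSize Src, TypeSize Dst) {
  return Src.getFixedSize() <= Dst.getFixedSize();
}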

This patch also fixes bugs in uses of getPrimitiveSizeInBits() by adding
getFixedSize() at the affected TypeSize comparisons.
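(Illustration of the getPrimitiveSizeInBits() pitfall, modelled on the
InstCombine hunk below; the wrapper function is hypothetical.
getPrimitiveSizeInBits() returns a TypeSize, not an integer, so a bare
comparison used to go through TypeSize's implicit uint64_t conversion,
which is not meaningful for scalable vectors.)

#include "llvm/IR/Type.h"
using namespace llvm;

static bool indexWiderThanPointer(Type *IdxTy, unsigned PtrSizeInBits) {
  // Calling getFixedSize() makes the fixed-width assumption explicit and
  // asserts if a scalable vector type ever reaches this comparison.
  return IdxTy->getPrimitiveSizeInBits().getFixedSize() > PtrSizeInBits;
}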

Differential Revision: https://reviews.llvm.org/D89703
Caroline Concatto 2020-10-16 09:21:28 +01:00
parent 14eb23caae
commit e854d1462d
6 changed files with 19 additions and 15 deletions

llvm/lib/Transforms/IPO/GlobalOpt.cpp

@@ -1880,7 +1880,8 @@ static bool isPointerValueDeadOnEntryToFunction(
           // and the number of bits loaded in L is less than or equal to
           // the number of bits stored in S.
           return DT.dominates(S, L) &&
-                 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
+                 DL.getTypeStoreSize(LTy).getFixedSize() <=
+                     DL.getTypeStoreSize(STy).getFixedSize();
         }))
     return false;
 }

llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp

@@ -314,7 +314,7 @@ InstCombinerImpl::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
   if (!GEP->isInBounds()) {
     Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
     unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
-    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
+    if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize)
       Idx = Builder.CreateTrunc(Idx, IntPtrTy);
   }
@@ -487,7 +487,8 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombinerImpl &IC,
   // Cast to intptrty in case a truncation occurs.  If an extension is needed,
   // we don't need to bother extending: the extension won't affect where the
   // computation crosses zero.
-  if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
+  if (VariableIdx->getType()->getPrimitiveSizeInBits().getFixedSize() >
+      IntPtrWidth) {
     VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
   }
   return VariableIdx;
@@ -942,8 +943,8 @@ Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
     Type *LHSIndexTy = LOffset->getType();
     Type *RHSIndexTy = ROffset->getType();
     if (LHSIndexTy != RHSIndexTy) {
-      if (LHSIndexTy->getPrimitiveSizeInBits() <
-          RHSIndexTy->getPrimitiveSizeInBits()) {
+      if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() <
+          RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) {
         ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
       } else
         LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);

llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp

@@ -896,8 +896,8 @@ static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
   // If we're going to need to zero extend the BE count, check if we can add
   // one to it prior to zero extending without overflow. Provided this is safe,
   // it allows better simplification of the +1.
-  if (DL->getTypeSizeInBits(BECount->getType()) <
-      DL->getTypeSizeInBits(IntPtr) &&
+  if (DL->getTypeSizeInBits(BECount->getType()).getFixedSize() <
+          DL->getTypeSizeInBits(IntPtr).getFixedSize() &&
       SE->isLoopEntryGuardedByCond(
           CurLoop, ICmpInst::ICMP_NE, BECount,
           SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {

llvm/lib/Transforms/Scalar/LoopPredication.cpp

@@ -439,8 +439,8 @@ static bool isSafeToTruncateWideIVType(const DataLayout &DL,
                                        Type *RangeCheckType) {
   if (!EnableIVTruncation)
     return false;
-  assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()) >
-             DL.getTypeSizeInBits(RangeCheckType) &&
+  assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()).getFixedSize() >
+             DL.getTypeSizeInBits(RangeCheckType).getFixedSize() &&
          "Expected latch check IV type to be larger than range check operand "
          "type!");
   // The start and end values of the IV should be known. This is to guarantee
@@ -460,7 +460,8 @@ static bool isSafeToTruncateWideIVType(const DataLayout &DL,
   // The active bits should be less than the bits in the RangeCheckType. This
   // guarantees that truncating the latch check to RangeCheckType is a safe
   // operation.
-  auto RangeCheckTypeBitSize = DL.getTypeSizeInBits(RangeCheckType);
+  auto RangeCheckTypeBitSize =
+      DL.getTypeSizeInBits(RangeCheckType).getFixedSize();
   return Start->getAPInt().getActiveBits() < RangeCheckTypeBitSize &&
          Limit->getAPInt().getActiveBits() < RangeCheckTypeBitSize;
 }
@@ -477,7 +478,8 @@ static Optional<LoopICmp> generateLoopLatchCheck(const DataLayout &DL,
   if (RangeCheckType == LatchType)
     return LatchCheck;
   // For now, bail out if latch type is narrower than range type.
-  if (DL.getTypeSizeInBits(LatchType) < DL.getTypeSizeInBits(RangeCheckType))
+  if (DL.getTypeSizeInBits(LatchType).getFixedSize() <
+      DL.getTypeSizeInBits(RangeCheckType).getFixedSize())
     return None;
   if (!isSafeToTruncateWideIVType(DL, SE, LatchCheck, RangeCheckType))
     return None;

llvm/lib/Transforms/Scalar/NaryReassociate.cpp

@@ -375,8 +375,8 @@ NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
   // Replace the I-th index with LHS.
   IndexExprs[I] = SE->getSCEV(LHS);
   if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
-      DL->getTypeSizeInBits(LHS->getType()) <
-          DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
+      DL->getTypeSizeInBits(LHS->getType()).getFixedSize() <
+          DL->getTypeSizeInBits(GEP->getOperand(I)->getType()).getFixedSize()) {
     // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
     // zext if the source operand is proved non-negative. We should do that
     // consistently so that CandidateExpr more likely appears before. See

llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp

@@ -2020,8 +2020,8 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
     // Put pointers at the back and make sure pointer < pointer = false.
     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
       return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
-    return RHS->getType()->getPrimitiveSizeInBits() <
-           LHS->getType()->getPrimitiveSizeInBits();
+    return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
+           LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
   });
   unsigned NumElim = 0;