diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 9a560a14ca6..f78f0014510 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -633,12 +633,6 @@ public:
   const SCEV *getNotSCEV(const SCEV *V);
 
   /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
-  ///
-  /// If the LHS and RHS are pointers which don't share a common base
-  /// (according to getPointerBase()), this returns a SCEVCouldNotCompute.
-  /// To compute the difference between two unrelated pointers, you can
-  /// explicitly convert the arguments using getPtrToIntExpr(), for pointer
-  /// types that support it.
   const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                            SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                            unsigned Depth = 0);
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 6aedd43fa46..97ea60f93c5 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -4138,15 +4138,6 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
   if (LHS == RHS)
     return getZero(LHS->getType());
 
-  // If we subtract two pointers with different pointer bases, bail.
-  // Eventually, we're going to add an assertion to getMulExpr that we
-  // can't multiply by a pointer.
-  if (RHS->getType()->isPointerTy()) {
-    if (!LHS->getType()->isPointerTy() ||
-        getPointerBase(LHS) != getPointerBase(RHS))
-      return getCouldNotCompute();
-  }
-
   // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
   // makes it so that we cannot make much use of NUW.
   auto AddFlags = SCEV::FlagAnyWrap;
@@ -8038,16 +8029,6 @@ ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
   }
   case ICmpInst::ICMP_EQ: {                     // while (X == Y)
     // Convert to: while (X-Y == 0)
-    if (LHS->getType()->isPointerTy()) {
-      LHS = getLosslessPtrToIntExpr(LHS);
-      if (isa<SCEVCouldNotCompute>(LHS))
-        return LHS;
-    }
-    if (RHS->getType()->isPointerTy()) {
-      RHS = getLosslessPtrToIntExpr(RHS);
-      if (isa<SCEVCouldNotCompute>(RHS))
-        return RHS;
-    }
     ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
     if (EL.hasAnyInfo()) return EL;
     break;
@@ -10085,13 +10066,10 @@ bool ScalarEvolution::isKnownPredicateViaConstantRanges(
   if (Pred == CmpInst::ICMP_EQ)
     return false;
 
-  if (Pred == CmpInst::ICMP_NE) {
-    if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
-        CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
-      return true;
-    auto *Diff = getMinusSCEV(LHS, RHS);
-    return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
-  }
+  if (Pred == CmpInst::ICMP_NE)
+    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
+           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
+           isKnownNonZero(getMinusSCEV(LHS, RHS));
 
   if (CmpInst::isSigned(Pred))
     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
@@ -10612,10 +10590,6 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
     if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS,
                                    Context);
-    // Don't try to getNotSCEV pointers.
-    if (LHS->getType()->isPointerTy() || FoundLHS->getType()->isPointerTy())
-      return false;
-
     // There's no clear preference between forms 3. and 4., try both.
     return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
                                  FoundLHS, FoundRHS, Context) ||
diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 2262fc9d791..f447f5c13b8 100644
--- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -57,8 +57,7 @@ AliasResult SCEVAAResult::alias(const MemoryLocation &LocA,
     // Test whether the difference is known to be great enough that memory of
     // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt
    // are non-zero, which is special-cased above.
-    if (!isa<SCEVCouldNotCompute>(BA) &&
-        ASizeInt.ule(SE.getUnsignedRange(BA).getUnsignedMin()) &&
+    if (ASizeInt.ule(SE.getUnsignedRange(BA).getUnsignedMin()) &&
         (-BSizeInt).uge(SE.getUnsignedRange(BA).getUnsignedMax()))
       return AliasResult::NoAlias;
 
@@ -72,8 +71,7 @@ AliasResult SCEVAAResult::alias(const MemoryLocation &LocA,
     // Test whether the difference is known to be great enough that memory of
     // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt
     // are non-zero, which is special-cased above.
-    if (!isa<SCEVCouldNotCompute>(AB) &&
-        BSizeInt.ule(SE.getUnsignedRange(AB).getUnsignedMin()) &&
+    if (BSizeInt.ule(SE.getUnsignedRange(AB).getUnsignedMin()) &&
         (-ASizeInt).uge(SE.getUnsignedRange(AB).getUnsignedMax()))
       return AliasResult::NoAlias;
   }
diff --git a/lib/Analysis/StackSafetyAnalysis.cpp b/lib/Analysis/StackSafetyAnalysis.cpp
index 76f195fedf3..73096eb4bae 100644
--- a/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/lib/Analysis/StackSafetyAnalysis.cpp
@@ -263,8 +263,6 @@ ConstantRange StackSafetyLocalAnalysis::offsetFrom(Value *Addr, Value *Base) {
   const SCEV *AddrExp = SE.getTruncateOrZeroExtend(SE.getSCEV(Addr), PtrTy);
   const SCEV *BaseExp = SE.getTruncateOrZeroExtend(SE.getSCEV(Base), PtrTy);
   const SCEV *Diff = SE.getMinusSCEV(AddrExp, BaseExp);
-  if (isa<SCEVCouldNotCompute>(Diff))
-    return UnknownRange;
 
   ConstantRange Offset = SE.getSignedRange(Diff);
   if (isUnsafe(Offset))
diff --git a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index be21db9087d..5a8eeed9c4f 100644
--- a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -135,8 +135,6 @@ static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
   PtrSCEV = SE->getTruncateOrZeroExtend(
       PtrSCEV, SE->getEffectiveSCEVType(AASCEV->getType()));
   const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);
-  if (isa<SCEVCouldNotCompute>(DiffSCEV))
-    return Align(1);
 
   // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
   // sign-extended OffSCEV to i64, so make sure they agree again.
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index 98889f9a19d..d455f886ebf 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -911,8 +911,6 @@ bool LoopReroll::DAGRootTracker::validateRootSet(DAGRootSet &DRS) {
   // Check that the first root is evenly spaced.
   unsigned N = DRS.Roots.size() + 1;
   const SCEV *StepSCEV = SE->getMinusSCEV(SE->getSCEV(DRS.Roots[0]), ADR);
-  if (isa<SCEVCouldNotCompute>(StepSCEV))
-    return false;
   const SCEV *ScaleSCEV = SE->getConstant(StepSCEV->getType(), N);
   if (ADR->getStepRecurrence(*SE) != SE->getMulExpr(StepSCEV, ScaleSCEV))
     return false;
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index d49df59a9d6..c573b1a3a77 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2963,7 +2963,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
     // The increment must be loop-invariant so it can be kept in a register.
     const SCEV *PrevExpr = SE.getSCEV(PrevIV);
     const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
-    if (isa<SCEVCouldNotCompute>(IncExpr) || !SE.isLoopInvariant(IncExpr, L))
+    if (!SE.isLoopInvariant(IncExpr, L))
       continue;
 
     if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
@@ -3316,9 +3316,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
 
         // x == y  -->  x - y == 0
         const SCEV *N = SE.getSCEV(NV);
-        if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE) &&
-            (!NV->getType()->isPointerTy() ||
-             SE.getPointerBase(N) == SE.getPointerBase(S))) {
+        if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
           // S is normalized, so normalize N before folding it into S
           // to keep the result normalized.
           N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
diff --git a/test/Analysis/StackSafetyAnalysis/local.ll b/test/Analysis/StackSafetyAnalysis/local.ll
index f6bad471503..a1714bf4364 100644
--- a/test/Analysis/StackSafetyAnalysis/local.ll
+++ b/test/Analysis/StackSafetyAnalysis/local.ll
@@ -71,7 +71,7 @@ define void @StoreInBounds4() {
 ; CHECK-LABEL: @StoreInBounds4 dso_preemptable{{$}}
 ; CHECK-NEXT: args uses:
 ; CHECK-NEXT: allocas uses:
-; CHECK-NEXT: x[4]: full-set{{$}}
+; CHECK-NEXT: x[4]: [-9223372036854775808,9223372036854775807){{$}}
 ; CHECK-EMPTY:
 entry:
   %x = alloca i32, align 4
diff --git a/test/CodeGen/ARM/lsr-undef-in-binop.ll b/test/CodeGen/ARM/lsr-undef-in-binop.ll
new file mode 100644
index 00000000000..564328d9999
--- /dev/null
+++ b/test/CodeGen/ARM/lsr-undef-in-binop.ll
@@ -0,0 +1,251 @@
+; REQUIRES: arm-registered-target
+; RUN: opt -S -loop-reduce %s -o - | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8-unknown-hurd-eabihf"
+
+%"class.std::__1::vector.182" = type { %"class.std::__1::__vector_base.183" }
+%"class.std::__1::__vector_base.183" = type { i8*, i8*, %"class.std::__1::__compressed_pair.184" }
+%"class.std::__1::__compressed_pair.184" = type { %"struct.std::__1::__compressed_pair_elem.185" }
+%"struct.std::__1::__compressed_pair_elem.185" = type { i8* }
+%"class.std::__1::__vector_base_common" = type { i8 }
+
+$vector_insert = comdat any
+
+declare i8* @Allocate(i32) local_unnamed_addr
+declare void @Free(i8*) local_unnamed_addr
+declare void @_ZNKSt3__120__vector_base_commonILb1EE20__throw_length_errorEv(%"class.std::__1::__vector_base_common"*) local_unnamed_addr
+declare i8* @memmove(i8*, i8*, i32) local_unnamed_addr
+
+; Function Attrs: noimplicitfloat nounwind uwtable
+define linkonce_odr i32 @vector_insert(%"class.std::__1::vector.182"*, [1 x i32], i8*, i8*) local_unnamed_addr #1 comdat align 2 {
+; CHECK-LABEL: vector_insert
+  %5 = extractvalue [1 x i32] %1, 0
+  %6 = getelementptr inbounds %"class.std::__1::vector.182", %"class.std::__1::vector.182"* %0, i32 0, i32 0, i32 0
+  %7 = load i8*, i8** %6, align 4
+; CHECK: [[LOAD:%[0-9]+]] = load i8*, i8**
+  %8 = bitcast %"class.std::__1::vector.182"* %0 to i32*
+  %9 = ptrtoint i8* %7 to i32
+; CHECK: [[NEW_CAST:%[0-9]+]] = ptrtoint i8* [[LOAD]] to i32
+; CHECK: [[OLD_CAST:%[0-9]+]] = ptrtoint i8* [[LOAD]] to i32
+  %10 = sub i32 %5, %9
+  %11 = getelementptr inbounds i8, i8* %7, i32 %10
+  %12 = ptrtoint i8* %3 to i32
+  %13 = ptrtoint i8* %2 to i32
+  %14 = sub i32 %12, %13
+  %15 = icmp sgt i32 %14, 0
+  br i1 %15, label %18, label %16
+
+;