
Revert "[BasicAA] Use known lower bounds for index values for size based check."

This is potentially related to https://bugs.llvm.org/show_bug.cgi?id=46335
and causes a slight compile-time regression. Revert while investigating.

This reverts commit d99a1848c4f8ca164c0c0768e10eafc850b2a68a.
Florian Hahn 2020-06-20 09:54:45 +01:00
parent 30f3ecfea1
commit fddd42bd0e
4 changed files with 9 additions and 115 deletions
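For reference while reading the diff below: the reverted change taught BasicAA to fold known non-negative lower bounds of variable GEP indices (obtained via computeConstantRange) into a second offset, MinOtherOffset, and to use that minimum offset in the size-based NoAlias check. The following is a minimal, self-contained sketch of that idea, not LLVM code: it uses plain int64_t arithmetic instead of APInt, and the names SimpleDecomposedGEP, addConstantIndex, addVariableIndex, and isNoAliasBySize are illustrative only.

#include <cstdint>
#include <iostream>
#include <optional>

// Simplified stand-in for BasicAA's DecomposedGEP: a constant byte offset
// plus a separately tracked lower bound on the total offset.
struct SimpleDecomposedGEP {
  int64_t ConstOffset = 0; // counterpart of OtherOffset
  int64_t MinOffset = 0;   // counterpart of MinOtherOffset
};

// A constant index contributes to both the constant offset and the minimum.
void addConstantIndex(SimpleDecomposedGEP &D, int64_t Offset) {
  D.ConstOffset += Offset;
  D.MinOffset += Offset;
}

// A variable index contributes to the minimum only if it has a known
// non-negative lower bound; otherwise nothing can be assumed about it.
void addVariableIndex(SimpleDecomposedGEP &D, int64_t Scale,
                      std::optional<int64_t> LowerBound) {
  if (LowerBound && *LowerBound >= 0)
    D.MinOffset += *LowerBound * Scale;
}

// Size-based check: if the GEP is known to start at least AccessSize bytes
// past the other pointer, the two accesses cannot overlap.
bool isNoAliasBySize(const SimpleDecomposedGEP &D, uint64_t AccessSize) {
  return D.MinOffset > 0 && static_cast<uint64_t>(D.MinOffset) >= AccessSize;
}

int main() {
  // %col.ptr.2 = getelementptr double, double* %ptr, i32 %stride
  // with a known bound %stride >= 6 and 8-byte doubles:
  SimpleDecomposedGEP D;
  addVariableIndex(D, /*Scale=*/8, /*LowerBound=*/6);
  // A 48-byte access (<6 x double>) at %ptr cannot reach %col.ptr.2.
  std::cout << std::boolalpha << isNoAliasBySize(D, 48) << "\n"; // true
  // With only %stride >= 5, the minimum gap is 40 bytes, which is not enough.
  SimpleDecomposedGEP D2;
  addVariableIndex(D2, 8, 5);
  std::cout << isNoAliasBySize(D2, 48) << "\n"; // false
  return 0;
}

The revert below removes exactly this extra bookkeeping, leaving only the comparison against the plain constant offset.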


@@ -140,11 +140,6 @@ private:
     // Total constant offset w.r.t the base from indexing through
     // pointers/arrays/vectors
    APInt OtherOffset;
-    // Constant offset w.r.t the base from indexing through
-    // pointers/arrays/vectors, including the lower bounds of index variables,
-    // if there are any. Currently only known non-negative lower bounds are
-    // added.
-    APInt MinOtherOffset;
     // Scaled variable (non-constant) indices.
     SmallVector<VariableGEPIndex, 4> VarIndices;
     // Is GEP index scale compile-time constant.


@@ -31,7 +31,6 @@
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/Constant.h"
-#include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -565,11 +564,10 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
         if (CIdx->isZero())
           continue;
-        APInt Offset = (DL.getTypeAllocSize(GTI.getIndexedType()) *
-                        CIdx->getValue().sextOrSelf(MaxPointerSize))
-                           .sextOrTrunc(MaxPointerSize);
-        Decomposed.OtherOffset += Offset;
-        Decomposed.MinOtherOffset += Offset;
+        Decomposed.OtherOffset +=
+            (DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
+             CIdx->getValue().sextOrSelf(MaxPointerSize))
+                .sextOrTrunc(MaxPointerSize);
         continue;
       }
@@ -613,18 +611,7 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       if (PointerSize > Width)
         SExtBits += PointerSize - Width;
     } else {
-      APInt Offset = IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
-      Decomposed.OtherOffset += Offset;
-      APInt IndexBound =
-          computeConstantRange(Index, true, AC, dyn_cast<Instruction>(GEPOp))
-              .getLower()
-              .sextOrTrunc(MaxPointerSize);
-      // If we find a non-negative lower bound for the index value, we can
-      // improve the known offset to include it. By just using non-negative
-      // lower bounds, we conveniently skip any index values for which we do
-      // not find a useful lower bound.
-      if (IndexBound.isNonNegative())
-        Decomposed.MinOtherOffset += Offset + IndexBound * Scale;
+      Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
       Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
     }
@@ -1341,8 +1328,6 @@ AliasResult BasicAAResult::aliasGEP(
   DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
   DecompGEP1.HasCompileTimeConstantScale =
       DecompGEP2.HasCompileTimeConstantScale = true;
-  DecompGEP1.MinOtherOffset = APInt(MaxPointerSize, 0);
-  DecompGEP2.MinOtherOffset = APInt(MaxPointerSize, 0);
 
   bool GEP1MaxLookupReached =
       DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
@@ -1357,8 +1342,6 @@ AliasResult BasicAAResult::aliasGEP(
   APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
   APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
-  APInt GEP1BaseOffsetMin = DecompGEP1.StructOffset + DecompGEP1.MinOtherOffset;
-  APInt GEP2BaseOffsetMin = DecompGEP2.StructOffset + DecompGEP2.MinOtherOffset;
 
   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
          "DecomposeGEPExpression returned a result different from "
@@ -1433,7 +1416,6 @@ AliasResult BasicAAResult::aliasGEP(
     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
     // symbolic difference.
     GEP1BaseOffset -= GEP2BaseOffset;
-    GEP1BaseOffsetMin -= GEP2BaseOffsetMin;
     GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
   } else {
@@ -1552,11 +1534,10 @@ AliasResult BasicAAResult::aliasGEP(
     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
-    if (AllPositive && GEP1BaseOffsetMin.sgt(0) &&
+    if (AllPositive && GEP1BaseOffset.sgt(0) &&
         V2Size != LocationSize::unknown() &&
-        GEP1BaseOffsetMin.uge(V2Size.getValue())) {
+        GEP1BaseOffset.uge(V2Size.getValue()))
       return NoAlias;
-    }
 
     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                 GEP1BaseOffset, &AC, DT))
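As a concrete reading of the restored check: when all variable indices are known to be non-negative and GEP1's constant offset past V2 is, say, 24 bytes, a 16-byte access at V2 cannot reach GEP1BasePtr, so the pointers do not alias. The removed lines performed the same comparison against GEP1BaseOffsetMin, which additionally folded in the known non-negative lower bounds of variable indices.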


@@ -113,86 +113,4 @@ define void @test4(double* %ptr, i32 %skip) {
   ret void
 }
 
-define void @test5(double* %ptr, i32 %stride) {
-; CHECK-LABEL: Function: test5: 4 pointers, 1 call sites
-; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.1, double* %ptr
-; CHECK-NEXT: NoAlias: double* %col.ptr.2, double* %ptr
-; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, double* %col.ptr.2
-; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.2.cast, double* %ptr
-; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, <6 x double>* %col.ptr.2.cast
-; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.2.cast, double* %col.ptr.2
-; CHECK-NEXT: NoModRef: Ptr: double* %ptr <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.1 <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: double* %col.ptr.2 <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.2.cast <-> call void @llvm.assume(i1 %gt)
-;
-  %gt = icmp sge i32 %stride, 5
-  call void @llvm.assume(i1 %gt)
-  %col.ptr.1 = bitcast double* %ptr to <6 x double>*
-  %lv.1 = load <6 x double>, <6 x double>* %col.ptr.1, align 8
-  %col.ptr.2 = getelementptr double, double* %ptr, i32 %stride
-  %col.ptr.2.cast = bitcast double* %col.ptr.2 to <6 x double>*
-  %lv.2 = load <6 x double>, <6 x double>* %col.ptr.2.cast, align 8
-  %res.1 = fadd <6 x double> %lv.1, %lv.1
-  %res.2 = fadd <6 x double> %lv.2, %lv.2
-  store <6 x double> %res.1, <6 x double>* %col.ptr.1, align 8
-  store <6 x double> %res.2, <6 x double>* %col.ptr.2.cast, align 8
-  ret void
-}
-
-define void @test6(double* %ptr, i32 %stride) {
-; CHECK-LABEL: Function: test6: 4 pointers, 1 call sites
-; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.1, double* %ptr
-; CHECK-NEXT: NoAlias: double* %col.ptr.2, double* %ptr
-; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.1, double* %col.ptr.2
-; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.2.cast, double* %ptr
-; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.1, <6 x double>* %col.ptr.2.cast
-; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.2.cast, double* %col.ptr.2
-; CHECK-NEXT: NoModRef: Ptr: double* %ptr <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.1 <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: double* %col.ptr.2 <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.2.cast <-> call void @llvm.assume(i1 %gt)
-;
-  %gt = icmp sge i32 %stride, 6
-  call void @llvm.assume(i1 %gt)
-  %col.ptr.1 = bitcast double* %ptr to <6 x double>*
-  %lv.1 = load <6 x double>, <6 x double>* %col.ptr.1, align 8
-  %col.ptr.2 = getelementptr double, double* %ptr, i32 %stride
-  %col.ptr.2.cast = bitcast double* %col.ptr.2 to <6 x double>*
-  %lv.2 = load <6 x double>, <6 x double>* %col.ptr.2.cast, align 8
-  %res.1 = fadd <6 x double> %lv.1, %lv.1
-  %res.2 = fadd <6 x double> %lv.2, %lv.2
-  store <6 x double> %res.1, <6 x double>* %col.ptr.1, align 8
-  store <6 x double> %res.2, <6 x double>* %col.ptr.2.cast, align 8
-  ret void
-}
-
-define void @test7(double* %ptr, i32 %stride) {
-; CHECK-LABEL: Function: test7: 4 pointers, 1 call sites
-; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.1, double* %ptr
-; CHECK-NEXT: MayAlias: double* %col.ptr.2, double* %ptr
-; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, double* %col.ptr.2
-; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.2.cast, double* %ptr
-; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, <6 x double>* %col.ptr.2.cast
-; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.2.cast, double* %col.ptr.2
-; CHECK-NEXT: NoModRef: Ptr: double* %ptr <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.1 <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: double* %col.ptr.2 <-> call void @llvm.assume(i1 %gt)
-; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.2.cast <-> call void @llvm.assume(i1 %gt)
-;
-  %gt = icmp sge i32 %stride, 0
-  call void @llvm.assume(i1 %gt)
-  %col.ptr.1 = bitcast double* %ptr to <6 x double>*
-  %lv.1 = load <6 x double>, <6 x double>* %col.ptr.1, align 8
-  %col.ptr.2 = getelementptr double, double* %ptr, i32 %stride
-  %col.ptr.2.cast = bitcast double* %col.ptr.2 to <6 x double>*
-  %lv.2 = load <6 x double>, <6 x double>* %col.ptr.2.cast, align 8
-  %res.1 = fadd <6 x double> %lv.1, %lv.1
-  %res.2 = fadd <6 x double> %lv.2, %lv.2
-  store <6 x double> %res.1, <6 x double>* %col.ptr.1, align 8
-  store <6 x double> %res.2, <6 x double>* %col.ptr.2.cast, align 8
-  ret void
-}
-
 declare void @llvm.assume(i1 %cond)
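The deleted expectations follow from the lower-bound arithmetic: with 8-byte doubles, %stride >= 6 guarantees a gap of at least 48 bytes, enough to separate the 48-byte <6 x double> accesses (test6: NoAlias throughout); %stride >= 5 guarantees only 40 bytes, which separates the 8-byte access at %ptr but not the vector accesses (test5: mixed NoAlias/MayAlias); %stride >= 0 proves no positive gap at all (test7: MayAlias). Since the plain constant-offset check cannot establish these gaps, the revert removes the three tests.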


@@ -4,9 +4,9 @@ target triple = "x86_64-apple-darwin13.4.0"
 ; CHECK-LABEL: compute1
 ; CHECK: MayAlias: i32* %arrayidx8, i32* %out
-; CHECK: NoAlias: i32* %arrayidx11, i32* %out
+; CHECK: MayAlias: i32* %arrayidx11, i32* %out
 ; CHECK: MayAlias: i32* %arrayidx11, i32* %arrayidx8
-; CHECK: NoAlias: i32* %arrayidx14, i32* %out
+; CHECK: MayAlias: i32* %arrayidx14, i32* %out
 ; CHECK: MayAlias: i32* %arrayidx14, i32* %arrayidx8
 ; CHECK: MayAlias: i32* %arrayidx11, i32* %arrayidx14
 define void @compute1(i32 %num.0.lcssa, i32* %out) {