mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-23 11:13:28 +01:00)
[BasicAA] Use known lower bounds for index values for size based check.
Currently, BasicAA does not exploit information about the value ranges of indices. For example, consider the two pointers %a = %base and %b = %base + %stride below, assuming they are used to access 4 elements. If we know that %stride >= 4, we know the accesses do not alias. If %stride is a constant, BasicAA currently gets that. But if the >= 4 constraint is encoded using an assume, it misses the NoAlias.

This patch extends DecomposedGEP to include an additional MinOtherOffset field, which tracks the constant offset similar to the existing OtherOffset, with the difference that it also includes non-negative lower bounds on the range of the index value. When checking whether the distance between two accesses exceeds the access size, we can use this improved bound.

For now this is limited to using non-negative lower bounds for indices, as this conveniently skips cases where we do not have a useful lower bound (because it is not constrained). We potentially miss out in cases where the lower bound is constrained but negative, but that can be exploited in the future.

Reviewers: sanjoy, hfinkel, reames, asbirlea

Reviewed By: asbirlea

Differential Revision: https://reviews.llvm.org/D76194
parent abf14921b4
commit 1cc7eb0b48
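To illustrate the idea from the commit message, here is a minimal standalone C++ sketch of the size-based check (not part of the patch; the function name and the use of plain 64-bit integers instead of APInt are illustrative):

#include <cassert>
#include <cstdint>
#include <iostream>

// Two accesses at %base and %base + %stride (in elements) cannot overlap if
// the smallest possible byte distance between them is at least the size of
// the first access. Without a lower bound on %stride, the minimum distance
// for a variable index is unknown; with an assume-derived bound such as
// %stride >= 4, it becomes StrideLowerBound * ElemSize.
bool accessesNoAlias(int64_t StrideLowerBound, int64_t ElemSize,
                     int64_t AccessSize) {
  assert(StrideLowerBound >= 0 && "the patch only uses non-negative bounds");
  int64_t MinDistance = StrideLowerBound * ElemSize;
  return MinDistance >= AccessSize;
}

int main() {
  // The commit message's example: 4-element (here 1-byte) accesses with
  // assume(%stride >= 4) imply the accesses do not alias.
  std::cout << accessesNoAlias(4, 1, 4) << '\n'; // 1 -> NoAlias
  std::cout << accessesNoAlias(0, 1, 4) << '\n'; // 0 -> MayAlias
}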
@@ -140,6 +140,11 @@ private:
     // Total constant offset w.r.t the base from indexing through
     // pointers/arrays/vectors
     APInt OtherOffset;
+    // Constant offset w.r.t the base from indexing through
+    // pointers/arrays/vectors, including the lower bounds of index variables,
+    // if there are any. Currently only known non-negative lower bounds are
+    // added.
+    APInt MinOtherOffset;
     // Scaled variable (non-constant) indices.
     SmallVector<VariableGEPIndex, 4> VarIndices;
     // Is GEP index scale compile-time constant.
@@ -31,6 +31,7 @@
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/Constant.h"
+#include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -564,10 +565,11 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
         if (CIdx->isZero())
           continue;
-        Decomposed.OtherOffset +=
-            (DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
-             CIdx->getValue().sextOrSelf(MaxPointerSize))
-                .sextOrTrunc(MaxPointerSize);
+        APInt Offset = (DL.getTypeAllocSize(GTI.getIndexedType()) *
+                        CIdx->getValue().sextOrSelf(MaxPointerSize))
+                           .sextOrTrunc(MaxPointerSize);
+        Decomposed.OtherOffset += Offset;
+        Decomposed.MinOtherOffset += Offset;
         continue;
       }
 
@@ -611,7 +613,18 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       if (PointerSize > Width)
         SExtBits += PointerSize - Width;
     } else {
-      Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
+      APInt Offset = IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
+      Decomposed.OtherOffset += Offset;
+      APInt IndexBound =
+          computeConstantRange(Index, true, AC, dyn_cast<Instruction>(GEPOp))
+              .getLower()
+              .sextOrTrunc(MaxPointerSize);
+      // If we find a non-negative lower bound for the index value, we can
+      // improve the known offset to include it. By just using non-negative
+      // lower bounds, we conveniently skip any index values for which we do
+      // not find a useful lower bound.
+      if (IndexBound.isNonNegative())
+        Decomposed.MinOtherOffset += Offset + IndexBound * Scale;
       Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
     }
 
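As a model of the two DecomposeGEPExpression changes above, the sketch below mirrors the bookkeeping with plain integers: a constant index advances OtherOffset and MinOtherOffset by the same exact amount, while a variable index contributes its constant part to both but folds a known non-negative lower bound (obtained via computeConstantRange in the patch) only into MinOtherOffset. The struct and helper names here are illustrative stand-ins, not the LLVM API:

#include <cstdint>
#include <iostream>
#include <optional>

// Simplified stand-in for the DecomposedGEP state extended by the patch.
struct DecomposedGEP {
  int64_t OtherOffset = 0;    // constant offset only
  int64_t MinOtherOffset = 0; // constant offset plus known index lower bounds
};

// Constant GEP index: the byte offset is exact, so both fields advance.
void addConstantIndex(DecomposedGEP &D, int64_t ElemSize, int64_t CIdx) {
  int64_t Offset = ElemSize * CIdx;
  D.OtherOffset += Offset;
  D.MinOtherOffset += Offset;
}

// Variable GEP index decomposed as Scale * V + IndexOffset: the constant
// part always feeds OtherOffset; a known non-negative lower bound on V
// additionally tightens MinOtherOffset, matching the hunk above.
void addVariableIndex(DecomposedGEP &D, int64_t IndexOffset, int64_t Scale,
                      std::optional<int64_t> LowerBound) {
  int64_t Offset = IndexOffset * Scale;
  D.OtherOffset += Offset;
  if (LowerBound && *LowerBound >= 0)
    D.MinOtherOffset += Offset + *LowerBound * Scale;
}

int main() {
  DecomposedGEP D;
  // getelementptr double, double* %ptr, i32 %stride with assume(%stride >= 5):
  // Scale is sizeof(double) == 8, no constant part, lower bound 5.
  addVariableIndex(D, /*IndexOffset=*/0, /*Scale=*/8, /*LowerBound=*/5);
  std::cout << D.OtherOffset << ' ' << D.MinOtherOffset << '\n'; // 0 40
}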
@@ -1328,6 +1341,8 @@ AliasResult BasicAAResult::aliasGEP(
   DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
   DecompGEP1.HasCompileTimeConstantScale =
       DecompGEP2.HasCompileTimeConstantScale = true;
+  DecompGEP1.MinOtherOffset = APInt(MaxPointerSize, 0);
+  DecompGEP2.MinOtherOffset = APInt(MaxPointerSize, 0);
 
   bool GEP1MaxLookupReached =
       DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
@@ -1342,6 +1357,8 @@ AliasResult BasicAAResult::aliasGEP(
 
   APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
   APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
+  APInt GEP1BaseOffsetMin = DecompGEP1.StructOffset + DecompGEP1.MinOtherOffset;
+  APInt GEP2BaseOffsetMin = DecompGEP2.StructOffset + DecompGEP2.MinOtherOffset;
 
   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
          "DecomposeGEPExpression returned a result different from "
@@ -1416,6 +1433,7 @@ AliasResult BasicAAResult::aliasGEP(
     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
     // symbolic difference.
     GEP1BaseOffset -= GEP2BaseOffset;
+    GEP1BaseOffsetMin -= GEP2BaseOffsetMin;
     GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
 
   } else {
@@ -1534,10 +1552,11 @@ AliasResult BasicAAResult::aliasGEP(
     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
-    if (AllPositive && GEP1BaseOffset.sgt(0) &&
+    if (AllPositive && GEP1BaseOffsetMin.sgt(0) &&
         V2Size != LocationSize::unknown() &&
-        GEP1BaseOffset.uge(V2Size.getValue()))
+        GEP1BaseOffsetMin.uge(V2Size.getValue())) {
       return NoAlias;
+    }
 
     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                 GEP1BaseOffset, &AC, DT))
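To see how the tightened bound decides the final check, here is a compressed integer model of the condition above (the helper name and the plain signed comparisons are illustrative simplifications; the patch uses APInt::sgt and APInt::uge):

#include <cstdint>
#include <iostream>

// Model of the size-based NoAlias decision after the patch: the *minimum*
// possible distance between the two pointers (which now includes
// assume-derived index lower bounds) must be positive and at least as
// large as the access through the second pointer.
bool noAliasBySize(bool AllPositive, int64_t GEP1BaseOffsetMin,
                   int64_t V2Size /* access size in bytes, known */) {
  return AllPositive && GEP1BaseOffsetMin > 0 && GEP1BaseOffsetMin >= V2Size;
}

int main() {
  // Matches @test5 below: %col.ptr.2 = %ptr + %stride * 8 with
  // assume(%stride >= 5), so the minimum distance is 40 bytes.
  std::cout << noAliasBySize(true, 40, 8) << '\n';  // 1: 8-byte double access
  std::cout << noAliasBySize(true, 40, 48) << '\n'; // 0: 48-byte <6 x double>
}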
@@ -113,4 +113,86 @@ define void @test4(double* %ptr, i32 %skip) {
   ret void
 }
 
+
+define void @test5(double* %ptr, i32 %stride) {
+; CHECK-LABEL: Function: test5: 4 pointers, 1 call sites
+; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.1, double* %ptr
+; CHECK-NEXT: NoAlias: double* %col.ptr.2, double* %ptr
+; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, double* %col.ptr.2
+; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.2.cast, double* %ptr
+; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, <6 x double>* %col.ptr.2.cast
+; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.2.cast, double* %col.ptr.2
+; CHECK-NEXT: NoModRef: Ptr: double* %ptr <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.1 <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: double* %col.ptr.2 <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.2.cast <-> call void @llvm.assume(i1 %gt)
+;
+  %gt = icmp sge i32 %stride, 5
+  call void @llvm.assume(i1 %gt)
+  %col.ptr.1 = bitcast double* %ptr to <6 x double>*
+  %lv.1 = load <6 x double>, <6 x double>* %col.ptr.1, align 8
+  %col.ptr.2 = getelementptr double, double* %ptr, i32 %stride
+  %col.ptr.2.cast = bitcast double* %col.ptr.2 to <6 x double>*
+  %lv.2 = load <6 x double>, <6 x double>* %col.ptr.2.cast, align 8
+  %res.1 = fadd <6 x double> %lv.1, %lv.1
+  %res.2 = fadd <6 x double> %lv.2, %lv.2
+  store <6 x double> %res.1, <6 x double>* %col.ptr.1, align 8
+  store <6 x double> %res.2, <6 x double>* %col.ptr.2.cast, align 8
+  ret void
+}
+
+define void @test6(double* %ptr, i32 %stride) {
+; CHECK-LABEL: Function: test6: 4 pointers, 1 call sites
+; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.1, double* %ptr
+; CHECK-NEXT: NoAlias: double* %col.ptr.2, double* %ptr
+; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.1, double* %col.ptr.2
+; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.2.cast, double* %ptr
+; CHECK-NEXT: NoAlias: <6 x double>* %col.ptr.1, <6 x double>* %col.ptr.2.cast
+; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.2.cast, double* %col.ptr.2
+; CHECK-NEXT: NoModRef: Ptr: double* %ptr <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.1 <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: double* %col.ptr.2 <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.2.cast <-> call void @llvm.assume(i1 %gt)
+;
+  %gt = icmp sge i32 %stride, 6
+  call void @llvm.assume(i1 %gt)
+  %col.ptr.1 = bitcast double* %ptr to <6 x double>*
+  %lv.1 = load <6 x double>, <6 x double>* %col.ptr.1, align 8
+  %col.ptr.2 = getelementptr double, double* %ptr, i32 %stride
+  %col.ptr.2.cast = bitcast double* %col.ptr.2 to <6 x double>*
+  %lv.2 = load <6 x double>, <6 x double>* %col.ptr.2.cast, align 8
+  %res.1 = fadd <6 x double> %lv.1, %lv.1
+  %res.2 = fadd <6 x double> %lv.2, %lv.2
+  store <6 x double> %res.1, <6 x double>* %col.ptr.1, align 8
+  store <6 x double> %res.2, <6 x double>* %col.ptr.2.cast, align 8
+  ret void
+}
+
+define void @test7(double* %ptr, i32 %stride) {
+; CHECK-LABEL: Function: test7: 4 pointers, 1 call sites
+; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.1, double* %ptr
+; CHECK-NEXT: MayAlias: double* %col.ptr.2, double* %ptr
+; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, double* %col.ptr.2
+; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.2.cast, double* %ptr
+; CHECK-NEXT: MayAlias: <6 x double>* %col.ptr.1, <6 x double>* %col.ptr.2.cast
+; CHECK-NEXT: MustAlias: <6 x double>* %col.ptr.2.cast, double* %col.ptr.2
+; CHECK-NEXT: NoModRef: Ptr: double* %ptr <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.1 <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: double* %col.ptr.2 <-> call void @llvm.assume(i1 %gt)
+; CHECK-NEXT: NoModRef: Ptr: <6 x double>* %col.ptr.2.cast <-> call void @llvm.assume(i1 %gt)
+;
+  %gt = icmp sge i32 %stride, 0
+  call void @llvm.assume(i1 %gt)
+  %col.ptr.1 = bitcast double* %ptr to <6 x double>*
+  %lv.1 = load <6 x double>, <6 x double>* %col.ptr.1, align 8
+  %col.ptr.2 = getelementptr double, double* %ptr, i32 %stride
+  %col.ptr.2.cast = bitcast double* %col.ptr.2 to <6 x double>*
+  %lv.2 = load <6 x double>, <6 x double>* %col.ptr.2.cast, align 8
+  %res.1 = fadd <6 x double> %lv.1, %lv.1
+  %res.2 = fadd <6 x double> %lv.2, %lv.2
+  store <6 x double> %res.1, <6 x double>* %col.ptr.1, align 8
+  store <6 x double> %res.2, <6 x double>* %col.ptr.2.cast, align 8
+  ret void
+}
+
 declare void @llvm.assume(i1 %cond)
@@ -4,9 +4,9 @@ target triple = "x86_64-apple-darwin13.4.0"
 
 ; CHECK-LABEL: compute1
 ; CHECK: MayAlias: i32* %arrayidx8, i32* %out
-; CHECK: MayAlias: i32* %arrayidx11, i32* %out
+; CHECK: NoAlias: i32* %arrayidx11, i32* %out
 ; CHECK: MayAlias: i32* %arrayidx11, i32* %arrayidx8
-; CHECK: MayAlias: i32* %arrayidx14, i32* %out
+; CHECK: NoAlias: i32* %arrayidx14, i32* %out
 ; CHECK: MayAlias: i32* %arrayidx14, i32* %arrayidx8
 ; CHECK: MayAlias: i32* %arrayidx11, i32* %arrayidx14
 define void @compute1(i32 %num.0.lcssa, i32* %out) {