llvm-mirror/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
commit f1d9b77877 by Silviu Baranga: [LAA] Merge memchecks for accesses separated by a constant offset
Summary:
Filter-like loops will often perform memory accesses that are
separated by constant offsets. In these cases it is
common that we will exceed the threshold for the
allowable number of checks.

However, it should be possible to merge such checks,
since a check of any interval against two other intervals separated
by a constant offset (a,b), (a+c, b+c) will be equivalent to
a check against (a, b+c), as long as (a,b) and (a+c, b+c) overlap.
Assuming the loop will be executed for a sufficient number of
iterations, this will be true. If not, checking against
(a, b+c) is still safe (although not equivalent).
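
To make the merge concrete, here is a minimal, self-contained C++ sketch
(illustrative only; the Interval type and coveringInterval helper below are
hypothetical names, not the code added by this patch):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  // Hypothetical half-open byte range [Low, High) accessed in the loop.
  struct Interval {
    int64_t Low, High;
  };

  // A single run-time check against the covering interval (a, b+c) is
  // equivalent to checking (a, b) and (a+c, b+c) separately when the two
  // ranges overlap, and still conservatively safe when they do not.
  Interval coveringInterval(Interval A, Interval B) {
    return {std::min(A.Low, B.Low), std::max(A.High, B.High)};
  }

  int main() {
    Interval AB{0, 64};          // (a, b)
    Interval ABShifted{16, 80};  // (a+c, b+c) with offset c = 16
    Interval Cover = coveringInterval(AB, ABShifted);
    assert(Cover.Low == 0 && Cover.High == 80); // one check instead of two
    return 0;
  }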

As long as there are no dependencies between two accesses,
we can merge their checks into a single one. We use this
technique to construct groups of accesses, and then check
the intervals associated with the groups instead of
checking the accesses directly.
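
The grouping step can be sketched in the same illustrative style (again,
buildCheckGroups and Interval are hypothetical names, not LAA's actual
run-time pointer checking interface): accesses whose intervals overlap and
that have no dependences between them end up in one group, and only the
merged per-group interval takes part in the pairwise checks.

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>
  #include <vector>

  struct Interval {
    int64_t Low, High; // half-open byte range [Low, High)
  };

  // Sort the per-access intervals and fold overlapping neighbours into a
  // single group interval; checks are then emitted group-vs-group instead
  // of access-vs-access.
  std::vector<Interval> buildCheckGroups(std::vector<Interval> Accesses) {
    std::sort(Accesses.begin(), Accesses.end(),
              [](const Interval &A, const Interval &B) { return A.Low < B.Low; });
    std::vector<Interval> Groups;
    for (const Interval &I : Accesses) {
      if (!Groups.empty() && I.Low <= Groups.back().High)
        Groups.back().High = std::max(Groups.back().High, I.High); // merge
      else
        Groups.push_back(I); // start a new group
    }
    return Groups;
  }

  int main() {
    // A[i], A[i+8], A[i+16]-style accesses collapse into a single group.
    for (const Interval &G : buildCheckGroups({{0, 40}, {8, 48}, {16, 56}}))
      std::printf("group: [%lld, %lld)\n", (long long)G.Low, (long long)G.High);
    return 0;
  }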

Reviewers: anemet

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D10386

llvm-svn: 241673
2015-07-08 09:16:33 +00:00

; RUN: opt -loop-accesses -analyze < %s | FileCheck %s

; We give up analyzing the dependences in this loop due to non-constant
; distance between A[i+offset] and A[i] and add memchecks to prove
; independence. Make sure that no interesting dependences are reported in
; this case.
;
;   for (i = 0; i < n; i++)
;     A[i + offset] = A[i] * B[i] * C[i];

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"

; CHECK: Memory dependences are safe with run-time checks
; CHECK-NEXT: Interesting Dependences:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: 0:
; CHECK-NEXT: Comparing group
; CHECK-NEXT: %arrayidxA2 = getelementptr inbounds i16, i16* %a, i64 %idx
; CHECK-NEXT: Against group
; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %indvar

@B = common global i16* null, align 8
@A = common global i16* null, align 8
@C = common global i16* null, align 8

define void @f(i64 %offset) {
entry:
  %a = load i16*, i16** @A, align 8
  %b = load i16*, i16** @B, align 8
  %c = load i16*, i16** @C, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvar = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %indvar
  %loadA = load i16, i16* %arrayidxA, align 2
  %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %indvar
  %loadB = load i16, i16* %arrayidxB, align 2
  %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %indvar
  %loadC = load i16, i16* %arrayidxC, align 2
  %mul = mul i16 %loadB, %loadA
  %mul1 = mul i16 %mul, %loadC
  %idx = add i64 %indvar, %offset
  %arrayidxA2 = getelementptr inbounds i16, i16* %a, i64 %idx
  store i16 %mul1, i16* %arrayidxA2, align 2
  %add = add nuw nsw i64 %indvar, 1
  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}