Commit 318f4a3446:

The SCEV code for constructing GEP expressions currently assumes that
the addition of the base and all the offsets is nsw if the GEP is
inbounds. While the addition of the offsets is indeed nsw, the addition
to the base address is not, as the base address is interpreted as an
unsigned value.

Fix the GEP expression code to not assume nsw for the base+offset
calculation. However, do assume nuw if we know that the offset is
non-negative. With this, we use the same behavior as the construction
of GEP addrecs does. (Modulo the fact that we disregard SCEV
unification, as the pre-existing FIXME points out.)

Differential Revision: https://reviews.llvm.org/D90648
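For illustration (an editorial sketch with hypothetical names, not text from
the commit): given an inbounds GEP like

  %ptr = getelementptr inbounds i32, i32* %base, i64 %i

SCEV previously treated the whole sum %base + 4 * %i as nsw. After this
change, only the addition among the offsets (here, 4 * %i) is assumed nsw;
the final addition to %base gets nuw when the offset is known non-negative,
and no wrap flag otherwise.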
; RUN: opt -loop-accesses -analyze -enable-new-pm=0 < %s | FileCheck %s
; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output < %s 2>&1 | FileCheck %s

; The runtime memory check code and the access grouping
; algorithm both assume that the start and end values
; for an access range are ordered (start <= stop).
; When generating checks for accesses with negative stride
; we need to take this into account and swap the interval
; ends.
;
;   for (i = 0; i < 10000; i++) {
;     B[i] = A[15000 - i] * 3;
;   }
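;
; A worked sketch of the expected bounds (an editorial note, assuming the IR
; below, where %idx runs from 0 to 10000 inclusive and i32 is 4 bytes):
;   smallest access: A[15000 - 10000] = A[5000]  -> %a + 4 * 5000  = %a + 20000
;   largest access:  A[15000 - 0]     = A[15000] -> %a + 4 * 15000 = %a + 60000
; High is one past the end of the last i32, i.e. %a + 60004, which matches the
; (Low: ... High: ...) CHECK line below.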

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnueabi"

; CHECK: function 'f':
; CHECK: (Low: (20000 + %a) High: (60004 + %a))

@B = common global i32* null, align 8
@A = common global i32* null, align 8

define void @f() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %negidx = sub i64 15000, %idx
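  ; %negidx counts down from 15000 to 5000, so the load below strides
  ; backwards through A.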

  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
  %loadA0 = load i32, i32* %arrayidxA0, align 2

  %res = mul i32 %loadA0, 3

  %add = add nuw nsw i64 %idx, 1

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
  store i32 %res, i32* %arrayidxB, align 2

  %exitcond = icmp eq i64 %idx, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; CHECK: function 'g':

; When the stride is not constant, we are forced to do umin/umax to get
; the interval limits.

;   for (i = 0; i < 10000; i++) {
;     B[i] = A[15000 - step * i] * 3;
;   }

; Here it is not obvious what the limits are, since 'step' could be negative.
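;
; A worked sketch (an editorial note, assuming the IR below): the byte offset
; of the A access is 4 * (15000 - %step * i) = 60000 - 4 * %step * i. Over
; i in [0, 10000] the endpoints are (60000 + %a) at i = 0 and
; ((60000 - 40000 * %step) + %a) at i = 10000. Since %step may be negative,
; either endpoint may be the smaller one, hence the umin/umax in the CHECK
; lines below, with 4 added to High for the size of the final i32 element.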

; CHECK: Low: ((60000 + %a) umin (60000 + (-40000 * %step) + %a))
; CHECK: High: (4 + ((60000 + %a) umax (60000 + (-40000 * %step) + %a)))

define void @g(i64 %step) {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %idx_mul = mul i64 %idx, %step
  %negidx = sub i64 15000, %idx_mul
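  ; With a non-constant %step, the stride direction of the load below is
  ; unknown at compile time.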

  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
  %loadA0 = load i32, i32* %arrayidxA0, align 2

  %res = mul i32 %loadA0, 3

  %add = add nuw nsw i64 %idx, 1

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
  store i32 %res, i32* %arrayidxB, align 2

  %exitcond = icmp eq i64 %idx, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}