llvm-mirror/test/Transforms/PhaseOrdering/scev.ll
Jakob Stoklund Olesen 6c1440cf27 Reapply r155136 after fixing PR12599.
Original commit message:

Defer some shl transforms to DAGCombine.

The shl instruction is used to represent multiplication by a constant
power of two as well as bitwise left shifts. Some InstCombine
transformations would turn an shl instruction into a bit mask operation,
making it difficult for later analysis passes to recognize the
constant multiplication.

Disable those shl transformations, deferring them to DAGCombine time.
An 'shl X, C' instruction is now treated mostly the same way as 'mul X, C'.

These transformations are deferred:

  (X >>? C) << C   --> X & (-1 << C)  (When X >> C has multiple uses)
  (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)   (When C2 > C1)
  (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)  (When C1 > C2)
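
As a concrete illustration (my sketch, not part of the original commit
message), the first deferred rule turns a reused shift pair such as

  %s = lshr i32 %x, 3     ; %s has additional uses
  %r = shl i32 %s, 3

into '%r = and i32 %x, -8', after which %r no longer reads as a multiple
of %s to later analyses.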

The corresponding exact transformations are preserved, just like
div-exact + mul:

  (X >>?,exact C) << C   --> X
  (X >>?,exact C1) << C2 --> X << (C2-C1)
  (X >>?,exact C1) << C2 --> X >>?,exact (C1-C2)
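
In the exact case no bits are discarded, so the round trip cancels
entirely; a minimal sketch (again mine, not from the commit):

  %s = lshr exact i32 %x, 3
  %r = shl i32 %s, 3      ; %r simplifies to %x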

The disabled transformations could also prevent the instruction selector
from recognizing rotate patterns in hash functions and cryptographic
primitives. I have a test case for that, but it is too fragile.

llvm-svn: 155362
2012-04-23 17:39:52 +00:00


; RUN: opt -O3 -S -analyze -scalar-evolution %s | FileCheck %s
;
; This file contains phase ordering tests for scalar evolution.
; Test that the standard passes don't obfuscate the IR to the point where
; scalar evolution can no longer recognize the expressions.
; CHECK: test1
; The loop body contains two increments by %div.
; Make sure that 2*%div is recognizable, and not expressed as a bit mask of %d.
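; (Illustrative note, not in the original test: with the disabled transforms,
; InstCombine could rewrite 2 * (%d /u 4), i.e. (%d lshr 2) shl 1, into
; (%d lshr 1) & -2, a bit mask of %d that SCEV cannot fold back into a clean
; multiple of %div.)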
; CHECK: --> {%p,+,(2 * (%d /u 4) * sizeof(i32))}
define void @test1(i64 %d, i32* %p) nounwind uwtable ssp {
entry:
  %div = udiv i64 %d, 4
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %p.addr.0 = phi i32* [ %p, %entry ], [ %add.ptr1, %for.inc ]
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp ne i32 %i.0, 64
  br i1 %cmp, label %for.body, label %for.end

for.body:                                         ; preds = %for.cond
  store i32 0, i32* %p.addr.0, align 4
  %add.ptr = getelementptr inbounds i32* %p.addr.0, i64 %div
  store i32 1, i32* %add.ptr, align 4
  %add.ptr1 = getelementptr inbounds i32* %add.ptr, i64 %div
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add i32 %i.0, 1
  br label %for.cond

for.end:                                          ; preds = %for.cond
  ret void
}
; CHECK: test1a
; Same as test1, but it is even more tempting to fold 2 * (%d /u 2).
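; (Illustrative note, not in the original test: here 2 * (%d /u 2) is
; (%d lshr 1) shl 1, which the disabled (X >>? C) << C rule would have
; collapsed to %d & -2, again a bit mask of %d.)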
; CHECK: --> {%p,+,(2 * (%d /u 2) * sizeof(i32))}
define void @test1a(i64 %d, i32* %p) nounwind uwtable ssp {
entry:
  %div = udiv i64 %d, 2
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %p.addr.0 = phi i32* [ %p, %entry ], [ %add.ptr1, %for.inc ]
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp ne i32 %i.0, 64
  br i1 %cmp, label %for.body, label %for.end

for.body:                                         ; preds = %for.cond
  store i32 0, i32* %p.addr.0, align 4
  %add.ptr = getelementptr inbounds i32* %p.addr.0, i64 %div
  store i32 1, i32* %add.ptr, align 4
  %add.ptr1 = getelementptr inbounds i32* %add.ptr, i64 %div
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add i32 %i.0, 1
  br label %for.cond

for.end:                                          ; preds = %for.cond
  ret void
}