; RUN: llc < %s -march=x86 | grep inc | count 1
;
; This test was added when IV reuse in loop strength reduction was loosened
; to allow reusing an IV of the same stride but a larger type, when
; truncating from the larger type to the smaller type is free (llvm-svn: 43375).
; That change turns this loop:
;
; LBB1_1:	# entry.bb_crit_edge
; 	xorl	%ecx, %ecx
; 	xorw	%dx, %dx
; 	movw	%dx, %si
; LBB1_2:	# bb
; 	movl	L_X$non_lazy_ptr, %edi
; 	movw	%si, (%edi)
; 	movl	L_Y$non_lazy_ptr, %edi
; 	movw	%dx, (%edi)
; 	addw	$4, %dx
; 	incw	%si
; 	incl	%ecx
; 	cmpl	%eax, %ecx
; 	jne	LBB1_2	# bb
;
; into:
;
; LBB1_1:	# entry.bb_crit_edge
; 	xorl	%ecx, %ecx
; 	xorw	%dx, %dx
; LBB1_2:	# bb
; 	movl	L_X$non_lazy_ptr, %esi
; 	movw	%cx, (%esi)
; 	movl	L_Y$non_lazy_ptr, %esi
; 	movw	%dx, (%esi)
; 	addw	$4, %dx
; 	incl	%ecx
; 	cmpl	%eax, %ecx
; 	jne	LBB1_2	# bb
;
; The separate 16-bit counter in %si is gone, leaving exactly one increment
; in the loop, which is what "grep inc | count 1" in the RUN line checks.

@X = weak global i16 0 ; <i16*> [#uses=1]
@Y = weak global i16 0 ; <i16*> [#uses=1]
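;
; For reference, a minimal C sketch of the loop the IR below encodes
; (hypothetical source, not from the original test; names mirror the IR,
; and the i16 shift is written as a plain multiply by 4):
;
;   volatile short X, Y;          /* the weak i16 globals above */
;
;   void foo(int N) {
;     for (int i = 0; i < N; i++) {
;       X = (short)i;             /* %tmp1  = trunc i32 ... to i16 */
;       Y = (short)(i * 4);       /* %tmp34 = shl i16 %tmp1, 2     */
;     }
;   }
;
; One 32-bit induction variable drives both 16-bit stores, which is the
; pattern the loosened IV reuse is meant to collapse onto a single counter.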
define void @foo(i32 %N) nounwind {
entry:
	%tmp1019 = icmp sgt i32 %N, 0		; <i1> [#uses=1]
	br i1 %tmp1019, label %bb, label %return
bb: ; preds = %bb, %entry
	%i.014.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ]		; <i32> [#uses=2]
	%tmp1 = trunc i32 %i.014.0 to i16		; <i16> [#uses=2]
	store volatile i16 %tmp1, i16* @X, align 2
	%tmp34 = shl i16 %tmp1, 2		; <i16> [#uses=1]
	store volatile i16 %tmp34, i16* @Y, align 2
	%indvar.next = add i32 %i.014.0, 1		; <i32> [#uses=2]
	%exitcond = icmp eq i32 %indvar.next, %N		; <i1> [#uses=1]
	br i1 %exitcond, label %return, label %bb
return: ; preds = %bb, %entry
	ret void
}