; RUN: llc -march=x86 < %s | FileCheck %s
;
; Implement a README entry (llvm-svn: 44656), compiling the code into:
;
;   _foo:
;           movl    $12, %eax
;           andl    4(%esp), %eax
;           movl    _array(%eax), %eax
;           ret
;
; instead of:
;
;   _foo:
;           movl    4(%esp), %eax
;           shrl    $2, %eax
;           andl    $3, %eax
;           movl    _array(,%eax,4), %eax
;           ret
;
; As it turns out, this triggers all the time, in a wide variety of
; situations; for example, I see diffs like this in various programs:
;
;   -       movl    8(%eax), %eax
;   -       shll    $2, %eax
;   -       andl    $1020, %eax
;   -       movl    (%esi,%eax), %eax
;   +       movzbl  8(%eax), %eax
;   +       movl    (%esi,%eax,4), %eax
;
;   -       shll    $2, %edx
;   -       andl    $1020, %edx
;   -       movl    (%edi,%edx), %edx
;   +       andl    $255, %edx
;   +       movl    (%edi,%edx,4), %edx
;
; Unfortunately, I also see stuff like this, which can be fixed in the
; X86 backend:
;
;   -       andl    $85, %ebx
;   -       addl    _bit_count(,%ebx,4), %ebp
;   +       shll    $2, %ebx
;   +       andl    $340, %ebx
;   +       addl    _bit_count(%ebx), %ebp
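;
; A minimal C-level rendering (an illustration, not taken from the commit) of
; @test_lshr_and below. The fold relies on the identity
; 4 * ((x >> 2) & 3) == x & 12: the shift, the mask, and the scale-by-4
; addressing mode all collapse into a single unscaled mask:
;
;   unsigned array[4];
;   unsigned foo(unsigned x) {
;     return array[(x >> 2) & 3];   /* -> andl $12; movl array(%eax) */
;   }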

@array = weak global [4 x i32] zeroinitializer

define i32 @test_lshr_and(i32 %x) {
; CHECK-LABEL: test_lshr_and:
; CHECK-NOT: shrl
; CHECK: andl $12,
; CHECK: movl {{.*}}array{{.*}},
; CHECK: ret
entry:
  %tmp2 = lshr i32 %x, 2
  %tmp3 = and i32 %tmp2, 3
  %tmp4 = getelementptr [4 x i32]* @array, i32 0, i32 %tmp3
  %tmp5 = load i32* %tmp4, align 4
  ret i32 %tmp5
}
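
; To run this test standalone, mirror the RUN line above, substituting this
; file's path for %s (the filename here is an assumption; any name works):
;
;   llc -march=x86 < fold-and-shift.ll | FileCheck fold-and-shift.ll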