llvm-mirror/test/CodeGen/X86/narrow_op-1.ll
Evan Cheng 40810c4d1b Added an optimization that narrows load / op / store sequences where the 'op' is a bit-twiddling instruction and its second operand is an immediate. If the bits touched by the 'op' can be modified with a narrower instruction, reduce the width of the load and store as well. This happens a lot with bitfield manipulation code.
e.g.
orl     $65536, 8(%rax)
=>
orb     $1, 10(%rax)

Since narrowing is not always a win, e.g. i32 -> i16 is a loss on x86, the dag combiner consults the target before performing the optimization.

llvm-svn: 72507
2009-05-28 00:35:15 +00:00
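
The decision itself is plain bit arithmetic on the immediate. Below is a minimal standalone C++ sketch of the byte-narrowing check, not the actual DAG combiner code; the helper name narrowOrToByte and the NarrowedOr struct are hypothetical. If every set bit of the 'or' immediate falls within a single byte of the i32, the operation can become an orb of that byte's value at the original address plus the byte's offset.

    #include <cstdint>
    #include <optional>

    // Result of a successful narrowing: which byte of the i32 to touch
    // and the 8-bit immediate to use. (Hypothetical type, illustration only.)
    struct NarrowedOr {
      unsigned ByteOffset; // added to the original memory operand's displacement
      uint8_t Imm;         // immediate for the narrowed orb
    };

    // Return the narrowed form if all set bits of Imm fit in one byte,
    // otherwise nullopt (keep the 32-bit orl).
    std::optional<NarrowedOr> narrowOrToByte(uint32_t Imm) {
      for (unsigned Byte = 0; Byte < 4; ++Byte) {
        uint32_t Mask = 0xFFu << (Byte * 8);
        if (Imm != 0 && (Imm & ~Mask) == 0)
          return NarrowedOr{Byte, static_cast<uint8_t>(Imm >> (Byte * 8))};
      }
      return std::nullopt;
    }

    // narrowOrToByte(65536) yields {ByteOffset = 2, Imm = 1}, i.e. the commit's
    //   orl $65536, 8(%rax)  ->  orb $1, 10(%rax)
    // narrowOrToByte(16842752) yields nullopt: bits 16 and 24 sit in different
    // bytes, and the intermediate i16 form is rejected on x86 (see above).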

; RUN: llvm-as < %s | llc -march=x86-64 | grep orb | count 1
; RUN: llvm-as < %s | llc -march=x86-64 | grep orb | grep 1
; RUN: llvm-as < %s | llc -march=x86-64 | grep orl | count 1
; RUN: llvm-as < %s | llc -march=x86-64 | grep orl | grep 16842752

%struct.bf = type { i64, i16, i16, i32 }
@bfi = common global %struct.bf zeroinitializer, align 16
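
; t1: the or immediate 65536 = 0x00010000 sets only bit 16, which lies
; entirely in byte 2 of the loaded i32, so the load/or/store narrows to
; a single byte: orb $1 at the original address plus 2.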
define void @t1() nounwind optsize ssp {
entry:
%0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
%1 = or i32 %0, 65536
store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
ret void
}
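
; t2: the or immediate 16842752 = 0x01010000 sets bits 16 and 24, which
; fall in two different bytes. A 16-bit orw could cover both, but
; i32 -> i16 narrowing is a loss on x86, so this one stays a 32-bit orl.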
define void @t2() nounwind optsize ssp {
entry:
%0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
%1 = or i32 %0, 16842752
store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
ret void
}