diff --git a/test/Regression/CodeGen/PowerPC/rlwimi.ll b/test/Regression/CodeGen/PowerPC/rlwimi.ll
index 8c87cd64b23..f5c740bb9dd 100644
--- a/test/Regression/CodeGen/PowerPC/rlwimi.ll
+++ b/test/Regression/CodeGen/PowerPC/rlwimi.ll
@@ -1,5 +1,6 @@
 ; All of these ands and shifts should be folded into rlwimi's
-; RUN: llvm-as < rlwimi.ll | llc -march=ppc32 | not grep and
+; RUN: llvm-as < rlwimi.ll | llc -march=ppc32 | not grep and &&
+; RUN: llvm-as < rlwimi.ll | llc -march=ppc32 | grep rlwimi | wc -l | grep 8
 
 implementation   ; Functions:
 
@@ -53,10 +54,19 @@ entry:
 	ret int %tmp.9
 }
 
-int %test9(int %x, int %y) {
+int %test7(int %x, int %y) {
 entry:
 	%tmp.2 = and int %x, -65536		; [#uses=1]
 	%tmp.5 = and int %y, 65535		; [#uses=1]
 	%tmp.7 = or int %tmp.5, %tmp.2		; [#uses=1]
 	ret int %tmp.7
 }
+
+uint %test8(uint %bar) {
+entry:
+	%tmp.3 = shl uint %bar, ubyte 1		; [#uses=1]
+	%tmp.4 = and uint %tmp.3, 2		; [#uses=1]
+	%tmp.6 = and uint %bar, 4294967293		; [#uses=1]
+	%tmp.7 = or uint %tmp.4, %tmp.6		; [#uses=1]
+	ret uint %tmp.7
+}
diff --git a/test/Regression/CodeGen/PowerPC/rlwimi2.ll b/test/Regression/CodeGen/PowerPC/rlwimi2.ll
new file mode 100644
index 00000000000..9bc53319a28
--- /dev/null
+++ b/test/Regression/CodeGen/PowerPC/rlwimi2.ll
@@ -0,0 +1,30 @@
+; All of these ands and shifts should be folded into rlwimi's
+; RUN: llvm-as < rlwimi2.ll | llc -march=ppc32 | grep rlwimi | wc -l | grep 3 &&
+; RUN: llvm-as < rlwimi2.ll | llc -march=ppc32 | grep srwi | wc -l | grep 1 &&
+; RUN: llvm-as < rlwimi2.ll | llc -march=ppc32 | not grep slwi
+
+implementation   ; Functions:
+
+ushort %test1(uint %srcA, uint %srcB, uint %alpha) {
+entry:
+	%tmp.1 = shl uint %srcA, ubyte 15		; [#uses=1]
+	%tmp.4 = and uint %tmp.1, 32505856		; [#uses=1]
+	%tmp.6 = and uint %srcA, 31775		; [#uses=1]
+	%tmp.7 = or uint %tmp.4, %tmp.6		; [#uses=1]
+	%tmp.9 = shl uint %srcB, ubyte 15		; [#uses=1]
+	%tmp.12 = and uint %tmp.9, 32505856		; [#uses=1]
+	%tmp.14 = and uint %srcB, 31775		; [#uses=1]
+	%tmp.15 = or uint %tmp.12, %tmp.14		; [#uses=1]
+	%tmp.18 = mul uint %tmp.7, %alpha		; [#uses=1]
+	%tmp.20 = sub uint 32, %alpha		; [#uses=1]
+	%tmp.22 = mul uint %tmp.15, %tmp.20		; [#uses=1]
+	%tmp.23 = add uint %tmp.22, %tmp.18		; [#uses=2]
+	%tmp.27 = shr uint %tmp.23, ubyte 5		; [#uses=1]
+	%tmp.28 = cast uint %tmp.27 to ushort		; [#uses=1]
+	%tmp.29 = and ushort %tmp.28, 31775		; [#uses=1]
+	%tmp.33 = shr uint %tmp.23, ubyte 20		; [#uses=1]
+	%tmp.34 = cast uint %tmp.33 to ushort		; [#uses=1]
+	%tmp.35 = and ushort %tmp.34, 992		; [#uses=1]
+	%tmp.36 = or ushort %tmp.29, %tmp.35		; [#uses=1]
+	ret ushort %tmp.36
+}