llvm-mirror/test/CodeGen/ARM/and-cmpz.ll


[Thumb] Teach ISel how to lower compares of AND bitmasks efficiently

This is essentially a recommit of r285893, but with a correctness fix. The problem with the original commit was that this:

    bic r5, r7, #31
    cbz r5, .LBB2_10

got rewritten into:

    lsrs r5, r7, #5
    beq  .LBB2_10

The result in the destination register r5 is not the same, which is incorrect when r5 is not dead. This fix therefore checks the uses of the AND's destination register. With that extra check, some regression tests that the original commit had to modify no longer need changing.

For completeness, this was the original commit message:

For the common pattern (CMPZ (AND x, #bitmask), #0), we can do more efficient instruction selection if the bitmask is one consecutive sequence of set bits (32 - clz(bm) - ctz(bm) == popcount(bm)).

1) If the bitmask touches the LSB, we can remove all the upper bits and set the flags with one LSLS.
2) If the bitmask touches the MSB, we can remove all the lower bits and set the flags with one LSRS.
3) If the bitmask has popcount == 1 (only one set bit), we can shift that bit into the sign bit with one LSLS and change the condition query from NE/EQ to MI/PL (we could also implement this by shifting into the carry bit and branching on BCC/BCS).
4) Otherwise, we can emit a sequence of LSLS+LSRS to remove the upper and lower zero bits of the mask.

Cases 1-3 require only one 16-bit instruction and can elide the CMP. Case 4 requires two 16-bit instructions but still elides the CMP and doesn't require materializing a complex immediate, so it is also a win.

Differential Revision: https://reviews.llvm.org/D27761

llvm-svn: 289794
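To make the mask test concrete, here is a minimal standalone C++20 sketch of how such a bitmask could be classified into the four cases above. The enum and function names are illustrative assumptions, not LLVM's actual ISel code, and the case priority simply follows the enumeration in the commit message.

#include <bit>       // std::popcount, std::countl_zero, std::countr_zero (C++20)
#include <cstdint>

// Which lowering the (CMPZ (AND x, #bm), #0) pattern could use for a given mask.
enum class MaskLowering {
  NotContiguous,  // mask is not one consecutive run of set bits: keep AND+CMP
  SingleLsls,     // case 1: mask touches the LSB  -> one LSLS sets the flags
  SingleLsrs,     // case 2: mask touches the MSB  -> one LSRS sets the flags
  LslsToSignBit,  // case 3: popcount == 1         -> LSLS into the sign bit, test MI/PL
  LslsThenLsrs    // case 4: interior run          -> LSLS then LSRS, two 16-bit insns
};

MaskLowering classifyMask(uint32_t bm) {
  if (bm == 0)
    return MaskLowering::NotContiguous;
  int clz = std::countl_zero(bm);
  int ctz = std::countr_zero(bm);
  int pop = std::popcount(bm);
  // The optimisation only applies when the set bits form one consecutive run.
  if (32 - clz - ctz != pop)
    return MaskLowering::NotContiguous;
  if (ctz == 0)
    return MaskLowering::SingleLsls;     // e.g. 0xff in @multi_bit_lsb_ubfx
  if (clz == 0)
    return MaskLowering::SingleLsrs;     // e.g. 0xff000000 in @multi_bit_msb
  if (pop == 1)
    return MaskLowering::LslsToSignBit;  // e.g. 0x100 in @single_bit
  return MaskLowering::LslsThenLsrs;     // e.g. 0x00ff0000 in @multi_bit_nosb
}

For example, 0x00ff0000 (the @multi_bit_nosb mask below) has clz = 8, ctz = 16 and popcount = 8, so 32 - 8 - 16 == 8 and the interior-run case applies.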
; RUN: llc -mtriple=thumbv7m-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=T2
; RUN: llc -mtriple=thumbv6m-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=T1
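
; 0x100 has a single set bit (case 3): one LSLS moves it into the sign bit,
; so the compare is elided and the branch condition becomes MI/PL.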
; CHECK-LABEL: single_bit:
; CHECK: lsls r0, r0, #23
; T2-NEXT: mov
; T2-NEXT: it
; T1-NEXT: bmi
define i32 @single_bit(i32 %p) {
  %a = and i32 %p, 256
  %b = icmp eq i32 %a, 0
  br i1 %b, label %true, label %false
true:
  ret i32 1
false:
  ret i32 2
}
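
; 0xff touches the LSB (case 1): one LSLS clears the upper bits and sets the
; flags, eliding the compare.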
; CHECK-LABEL: multi_bit_lsb_ubfx:
; CHECK: lsls r0, r0, #24
; T2-NEXT: mov
; T2-NEXT: it
; T1-NEXT: beq
define i32 @multi_bit_lsb_ubfx(i32 %p) {
  %a = and i32 %p, 255
  %b = icmp eq i32 %a, 0
  br i1 %b, label %true, label %false
true:
  ret i32 1
false:
  ret i32 2
}
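
; 0xff000000 touches the MSB (case 2): one LSRS clears the lower bits and sets
; the flags, eliding the compare.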
; CHECK-LABEL: multi_bit_msb:
; CHECK: lsrs r0, r0, #24
; T2-NEXT: mov
; T2-NEXT: it
; T1-NEXT: beq
define i32 @multi_bit_msb(i32 %p) {
  %a = and i32 %p, 4278190080 ; 0xff000000
  %b = icmp eq i32 %a, 0
  br i1 %b, label %true, label %false
true:
  ret i32 1
false:
  ret i32 2
}
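
; 0x00ff0000 is an interior run (case 4): Thumb-1 uses LSLS then LSRS to strip
; the surrounding zero bits; Thumb-2 can use TST with the immediate directly.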
; CHECK-LABEL: multi_bit_nosb:
; T1: lsls r0, r0, #8
; T1-NEXT: lsrs r0, r0, #24
; T2: tst.w
; T2-NEXT: it
; T1-NEXT: beq
define i32 @multi_bit_nosb(i32 %p) {
  %a = and i32 %p, 16711680 ; 0x00ff0000
  %b = icmp eq i32 %a, 0
  br i1 %b, label %true, label %false
true:
  ret i32 1
false:
  ret i32 2
}