
[NFC][CodeGen][X86][AArch64] Add 64-bit constant bit field extract pattern tests

llvm-svn: 343404
Roman Lebedev 2018-09-30 12:42:08 +00:00
parent dd1c35c05a
commit eb90d49956
2 changed files with 174 additions and 0 deletions


@@ -788,3 +788,53 @@ define i32 @c4_i32_bad(i32 %arg) {
%tmp1 = and i32 %tmp0, 16382
ret i32 %tmp1
}
; i64
; The most canonical variant
define i64 @c0_i64(i64 %arg) {
; CHECK-LABEL: c0_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx x0, x0, #51, #10
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
ret i64 %tmp1
}
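The mask 1023 is 0x3FF, ten contiguous low bits, so the lshr+and pair is a clean 10-bit field extract at bit 51 and lowers to a single ubfx. A minimal C sketch of the computation under test (the helper name is illustrative, not from the test file):

#include <stdint.h>

/* Extract the 10-bit field at bit 51; with a low contiguous mask
   this is one AArch64 ubfx (unsigned bit field extract). */
uint64_t extract_c0(uint64_t x) {
    return (x >> 51) & 1023; /* 1023 == 0x3FF: ten one-bits */
}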
; Should still be fine, but the mask is shifted
define i64 @c1_i64(i64 %arg) {
; CHECK-LABEL: c1_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr x8, x0, #51
; CHECK-NEXT: and x0, x8, #0xffc
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 4092
ret i64 %tmp1
}
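Here the mask 4092 is 0xFFC, i.e. 0x3FF shifted left by two: ten one-bits that are not anchored at bit 0, so a single ubfx cannot produce the result and the lsr+and pair survives. A sketch (illustrative name):

#include <stdint.h>

/* Ten one-bits, but starting at bit 2 of the shifted value, so no
   single unsigned bit field extract applies. */
uint64_t extract_c1(uint64_t x) {
    return (x >> 51) & 4092; /* 4092 == 0xFFC == 0x3FF << 2 */
}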
; Should still be fine, but the result is shifted left afterwards
define i64 @c2_i64(i64 %arg) {
; CHECK-LABEL: c2_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx x8, x0, #51, #10
; CHECK-NEXT: lsl x0, x8, #2
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
%tmp2 = shl i64 %tmp1, 2
ret i64 %tmp2
}
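The extra shl leaves the extract itself intact: AArch64 emits ubfx for the field and a separate lsl for the 2-bit shift. A sketch (illustrative name):

#include <stdint.h>

/* Extract the 10-bit field at bit 51, then shift it up by two. */
uint64_t extract_c2(uint64_t x) {
    return ((x >> 51) & 1023) << 2;
}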
; The mask covers a newly shifted-in bit
define i64 @c4_i64_bad(i64 %arg) {
; CHECK-LABEL: c4_i64_bad:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr x8, x0, #51
; CHECK-NEXT: and x0, x8, #0x1ffe
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 16382
ret i64 %tmp1
}
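The mask 16382 is 0x3FFE, bits 1 through 13, but after a 51-bit right shift only bits 0 through 12 of an i64 can be nonzero: bit 13 of the mask covers a known-zero shifted-in bit, so this is not a clean bit field extract and an explicit and remains (shrunk to #0x1ffe via known bits). A sketch (illustrative name):

#include <stdint.h>

/* Bit 13 of the mask lands on a bit the shift already zeroed,
   so the pattern does not match a plain extract. */
uint64_t extract_c4_bad(uint64_t x) {
    return (x >> 51) & 16382; /* 16382 == 0x3FFE: bits 1..13 */
}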


@@ -5756,3 +5756,127 @@ define i32 @c4_i32_bad(i32 %arg) {
%tmp1 = and i32 %tmp0, 16382
ret i32 %tmp1
}
; i64
; The most canonical variant
define i64 @c0_i64(i64 %arg) {
; X86-NOBMI-LABEL: c0_i64:
; X86-NOBMI: # %bb.0:
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOBMI-NEXT: shrl $19, %eax
; X86-NOBMI-NEXT: andl $1023, %eax # imm = 0x3FF
; X86-NOBMI-NEXT: xorl %edx, %edx
; X86-NOBMI-NEXT: retl
;
; X86-BMI1NOTBM-LABEL: c0_i64:
; X86-BMI1NOTBM: # %bb.0:
; X86-BMI1NOTBM-NEXT: movl $2579, %eax # imm = 0xA13
; X86-BMI1NOTBM-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI1NOTBM-NEXT: xorl %edx, %edx
; X86-BMI1NOTBM-NEXT: retl
;
; X86-BMI1TBM-LABEL: c0_i64:
; X86-BMI1TBM: # %bb.0:
; X86-BMI1TBM-NEXT: bextrl $2579, {{[0-9]+}}(%esp), %eax # imm = 0xA13
; X86-BMI1TBM-NEXT: xorl %edx, %edx
; X86-BMI1TBM-NEXT: retl
;
; X86-BMI1NOTBMBMI2-LABEL: c0_i64:
; X86-BMI1NOTBMBMI2: # %bb.0:
; X86-BMI1NOTBMBMI2-NEXT: movl $2579, %eax # imm = 0xA13
; X86-BMI1NOTBMBMI2-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI1NOTBMBMI2-NEXT: xorl %edx, %edx
; X86-BMI1NOTBMBMI2-NEXT: retl
;
; X64-NOBMI-LABEL: c0_i64:
; X64-NOBMI: # %bb.0:
; X64-NOBMI-NEXT: movq %rdi, %rax
; X64-NOBMI-NEXT: shrq $51, %rax
; X64-NOBMI-NEXT: andl $1023, %eax # imm = 0x3FF
; X64-NOBMI-NEXT: retq
;
; X64-BMI1NOTBM-LABEL: c0_i64:
; X64-BMI1NOTBM: # %bb.0:
; X64-BMI1NOTBM-NEXT: movl $2611, %eax # imm = 0xA33
; X64-BMI1NOTBM-NEXT: bextrq %rax, %rdi, %rax
; X64-BMI1NOTBM-NEXT: retq
;
; X64-BMI1TBM-LABEL: c0_i64:
; X64-BMI1TBM: # %bb.0:
; X64-BMI1TBM-NEXT: bextrq $2611, %rdi, %rax # imm = 0xA33
; X64-BMI1TBM-NEXT: retq
;
; X64-BMI1NOTBMBMI2-LABEL: c0_i64:
; X64-BMI1NOTBMBMI2: # %bb.0:
; X64-BMI1NOTBMBMI2-NEXT: movl $2611, %eax # imm = 0xA33
; X64-BMI1NOTBMBMI2-NEXT: bextrq %rax, %rdi, %rax
; X64-BMI1NOTBMBMI2-NEXT: retq
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
ret i64 %tmp1
}
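The magic immediates are BEXTR control words: the start bit goes in bits 7:0 and the field length in bits 15:8. For the 64-bit form that is 51 | (10 << 8) = 0xA33 = 2611; the 32-bit lowering extracts from the high dword, so the start becomes 51 - 32 = 19 and the control word 0xA13 = 2579, with edx zeroed for the upper half of the result. A sketch of the encoding (helper name is illustrative):

#include <stdint.h>

/* BEXTR control word: start in the low byte, length in the next. */
uint32_t bextr_ctrl(uint32_t start, uint32_t len) {
    return start | (len << 8);
}
/* bextr_ctrl(51, 10)      == 0xA33 == 2611 (bextrq)
   bextr_ctrl(51 - 32, 10) == 0xA13 == 2579 (bextrl, high dword) */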
; Should still be fine, but the mask is shifted
define i64 @c1_i64(i64 %arg) {
; X86-LABEL: c1_i64:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shrl $19, %eax
; X86-NEXT: andl $4092, %eax # imm = 0xFFC
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: c1_i64:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shrq $51, %rax
; X64-NEXT: andl $4092, %eax # imm = 0xFFC
; X64-NEXT: retq
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 4092
ret i64 %tmp1
}
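No bextr appears here because BEXTR only yields fields anchored at bit 0; rewriting the shifted mask as a bit-0 extract still needs a trailing shift, so it is no win. A sketch of the equivalence (illustrative name):

#include <stdint.h>

/* (x >> 51) & 0xFFC == ((x >> 53) & 0x3FF) << 2: a bit-0 extract
   of the same field would still require a shift afterwards. */
uint64_t extract_c1_alt(uint64_t x) {
    return ((x >> 53) & 0x3FF) << 2;
}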
; Should still be fine, but the result is shifted left afterwards
define i64 @c2_i64(i64 %arg) {
; X86-LABEL: c2_i64:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shrl $17, %eax
; X86-NEXT: andl $4092, %eax # imm = 0xFFC
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: c2_i64:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shrq $49, %rax
; X64-NEXT: andl $4092, %eax # imm = 0xFFC
; X64-NEXT: retq
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
%tmp2 = shl i64 %tmp1, 2
ret i64 %tmp2
}
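On X86 the trailing shl is folded into the right shift: ((x >> 51) & 1023) << 2 becomes (x >> 49) & 4092, matching the shrq $49 above; on 32-bit x86 the field lives in the high dword, hence shrl $17 (49 - 32). A sketch (illustrative name):

#include <stdint.h>

/* Folded form of extract-then-shift: one shift, one mask. */
uint64_t extract_c2_folded(uint64_t x) {
    return (x >> 49) & 4092; /* == ((x >> 51) & 1023) << 2 */
}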
; The mask covers a newly shifted-in bit
define i64 @c4_i64_bad(i64 %arg) {
; X86-LABEL: c4_i64_bad:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shrl $19, %eax
; X86-NEXT: andl $-2, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: c4_i64_bad:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: shrq $51, %rax
; X64-NEXT: andl $-2, %eax
; X64-NEXT: retq
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 16382
ret i64 %tmp1
}
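The andl $-2 is a known-bits simplification: after shrq $51 at most the low 13 bits are set, so masking with 0x3FFE is the same as just clearing bit 0, and -2 sign-extends from an 8-bit immediate for a shorter encoding. A sketch (illustrative name):

#include <stdint.h>

/* Clearing bit 0 is equivalent to & 0x3FFE once the upper bits are
   known zero from the 51-bit shift. */
uint64_t extract_c4_bad_x86(uint64_t x) {
    return (x >> 51) & ~1ULL;
}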