llvm-mirror/test/CodeGen/X86/andimm8.ll
commit d654e7d40c by Simon Pilgrim
[X86] Handle COPYs of physregs better (regalloc hints)
Enable enableMultipleCopyHints() on X86.

Original Patch by @jonpa:

While enabling the mischeduler for SystemZ, it was discovered that for some reason a test needed one extra seemingly needless COPY (test/CodeGen/SystemZ/call-03.ll). Handling that case resulted in this patch, which improves register coalescing by providing not just one copy hint, but a sorted list of copy hints. On SystemZ, this gives ~12500 fewer register moves on SPEC, as well as marginally less spilling.

Instead of improving just the SystemZ backend, the improvement has been implemented in common code (calculateSpillWeightAndHint()). This causes a lot of test failures, but since this should be a general improvement I hope that the involved targets will help and review the test updates.

Differential Revision: https://reviews.llvm.org/D38128

llvm-svn: 342578
2018-09-19 18:59:08 +00:00
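
The sorted-hint idea is easy to sketch in isolation. The C++ fragment below is a minimal illustration under assumed names (CopyHint and sortedCopyHints are hypothetical), not LLVM's actual implementation; the real logic lives in calculateSpillWeightAndHint():

  // Hypothetical sketch only; not LLVM's real data structures.
  #include <algorithm>
  #include <vector>

  struct CopyHint {
    unsigned Reg;  // register this interval is copy-connected to
    float Weight;  // accumulated frequency of the connecting COPYs
    bool IsPhys;   // prefer physical registers on a weight tie
  };

  // Old behavior: remember only the single best copy-related register.
  // New behavior (sketched): keep every candidate, sorted best-first,
  // so the allocator can fall back to the next hint when one is taken.
  std::vector<unsigned> sortedCopyHints(std::vector<CopyHint> Hints) {
    std::sort(Hints.begin(), Hints.end(),
              [](const CopyHint &A, const CopyHint &B) {
                if (A.Weight != B.Weight)
                  return A.Weight > B.Weight; // heavier copies first
                return A.IsPhys && !B.IsPhys; // tie-break: physregs first
              });
    std::vector<unsigned> Order;
    Order.reserve(Hints.size());
    for (const CopyHint &H : Hints)
      Order.push_back(H.Reg);
    return Order;
  }

Each hint the allocator manages to honor turns the connecting COPY into an identity move that can be removed, which is where the saved register moves on SPEC come from.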


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-pc-linux-gnu -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64
; PR8365
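; These functions check, via -show-mc-encoding, that AND (and OR) with an
; immediate that fits in a sign-extended 8-bit field is emitted with the
; short imm8 opcode 0x83 rather than the 4-byte imm32 form.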
define i64 @bra(i32 %zed) nounwind {
; X86-LABEL: bra:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT:    andl $-64, %eax # encoding: [0x83,0xe0,0xc0]
; X86-NEXT:    xorl %edx, %edx # encoding: [0x31,0xd2]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: bra:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax # encoding: [0x89,0xf8]
; X64-NEXT:    andl $-64, %eax # encoding: [0x83,0xe0,0xc0]
; X64-NEXT:    retq # encoding: [0xc3]
  %t1 = zext i32 %zed to i64
  %t2 = and i64 %t1, 4294967232
  ret i64 %t2
}
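
; On x86-64, 64-bit AND/OR with small immediates keep the REX.W prefix but
; still use the sign-extended imm8 form: 0x48,0x83 /4 for the andq and
; 0x48,0x83 /1 for the orq below.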
define void @foo(i64 %zed, i64* %x) nounwind {
; X86-LABEL: foo:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x0c]
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx # encoding: [0x8b,0x54,0x24,0x08]
; X86-NEXT:    andl $-4, %ecx # encoding: [0x83,0xe1,0xfc]
; X86-NEXT:    orl $2, %ecx # encoding: [0x83,0xc9,0x02]
; X86-NEXT:    movl %edx, 4(%eax) # encoding: [0x89,0x50,0x04]
; X86-NEXT:    movl %ecx, (%eax) # encoding: [0x89,0x08]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: foo:
; X64:       # %bb.0:
; X64-NEXT:    andq $-4, %rdi # encoding: [0x48,0x83,0xe7,0xfc]
; X64-NEXT:    orq $2, %rdi # encoding: [0x48,0x83,0xcf,0x02]
; X64-NEXT:    movq %rdi, (%rsi) # encoding: [0x48,0x89,0x3e]
; X64-NEXT:    retq # encoding: [0xc3]
  %t1 = and i64 %zed, -4
  %t2 = or i64 %t1, 2
  store i64 %t2, i64* %x, align 8
  ret void
}
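
; Masking with 42 clears the high 32 bits, so a 32-bit andl (still imm8)
; suffices on x86-64; writing %eax implicitly zeroes the upper half of %rax.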
define i64 @bar(i64 %zed) nounwind {
; X86-LABEL: bar:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT:    andl $42, %eax # encoding: [0x83,0xe0,0x2a]
; X86-NEXT:    xorl %edx, %edx # encoding: [0x31,0xd2]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: bar:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax # encoding: [0x48,0x89,0xf8]
; X64-NEXT:    andl $42, %eax # encoding: [0x83,0xe0,0x2a]
; X64-NEXT:    retq # encoding: [0xc3]
  %t1 = and i64 %zed, 42
  ret i64 %t1
}
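
; 0x7FFFFFFF does not fit in a sign-extended 8-bit immediate, so the full
; imm32 forms are used instead: 0x25 (and eax, imm32) on x86-64 and 0x23
; (and with a memory operand) on x86-32.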
define i64 @baz(i64 %zed) nounwind {
; X86-LABEL: baz:
; X86:       # %bb.0:
; X86-NEXT:    movl $2147483647, %eax # encoding: [0xb8,0xff,0xff,0xff,0x7f]
; X86-NEXT:    # imm = 0x7FFFFFFF
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax # encoding: [0x23,0x44,0x24,0x04]
; X86-NEXT:    xorl %edx, %edx # encoding: [0x31,0xd2]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: baz:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax # encoding: [0x48,0x89,0xf8]
; X64-NEXT:    andl $2147483647, %eax # encoding: [0x25,0xff,0xff,0xff,0x7f]
; X64-NEXT:    # imm = 0x7FFFFFFF
; X64-NEXT:    retq # encoding: [0xc3]
  %t1 = and i64 %zed, 2147483647
  ret i64 %t1
}