llvm-mirror/test/CodeGen/X86/fixup-bw-copy.ll
Ahmed Bougacha 0eb872067d [X86] Teach X86FixupBWInsts to promote MOV8rr/MOV16rr to MOV32rr.
This re-applies r268760, reverted in r268794.
Fixes http://llvm.org/PR27670

The original imp-defs assertion was way overzealous. Instead, forward
all implicit operands, except imp-defs of the new super-reg def
(r268787 for GR64, but also possible for GR16->GR32) and imp-uses of
the new super-reg use.
While there, mark the source use as Undef, and add an imp-use of the
old source reg: that should cover any case of dead super-regs.
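
For illustration, a sketch of the rewrite on a byte-register copy
(hypothetical MIR following the rules above, not taken from the tests):

  Before:  %al = MOV8rr killed %dil
  After:   %eax = MOV32rr undef %edi, implicit killed %dil

The widened source %edi is marked undef, and the imp-use of the old
%dil keeps the real dependency and liveness visible.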

At the stage the pass runs, flags are unlikely to matter anyway;
still, let's be as correct as possible.

Also add MIR tests for the various interesting cases.

Original commit message:
Code size is smaller (for 16-bit copies) or equal (for 8-bit copies),
and we avoid partial-register dependencies.
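
As a sketch of the size claim (standard x86 encodings; the specific
registers are illustrative):

  movw %cx, %ax    # 66 89 c8 -> 3 bytes (operand-size prefix)
  movl %ecx, %eax  # 89 c8    -> 2 bytes
  movb %cl, %al    # 88 c8    -> 2 bytes, same size as the 32-bit copy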

Differential Revision: http://reviews.llvm.org/D19999

llvm-svn: 268831
2016-05-07 01:11:17 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 --check-prefix=BWON64 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 --check-prefix=BWOFF64 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=i386-- < %s | FileCheck --check-prefix=X32 --check-prefix=BWON32 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=i386-- < %s | FileCheck --check-prefix=X32 --check-prefix=BWOFF32 %s
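; X64/X32 gather the checks shared by both fixup modes on each target;
; BWON*/BWOFF* check the runs with the byte/word fixup enabled or disabled.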
target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"

define i8 @test_movb(i8 %a0) {
; BWON64-LABEL: test_movb:
; BWON64:       # BB#0:
; BWON64-NEXT:    movl %edi, %eax
; BWON64-NEXT:    retq
;
; BWOFF64-LABEL: test_movb:
; BWOFF64:       # BB#0:
; BWOFF64-NEXT:    movb %dil, %al
; BWOFF64-NEXT:    retq
;
; X32-LABEL: test_movb:
; X32:       # BB#0:
; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
; X32-NEXT:    retl
  ret i8 %a0
}
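
; Same checks for word-sized copies: with the fixup enabled, the movw copy is
; promoted to movl (64-bit) and the 16-bit argument load becomes movzwl (32-bit).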
define i16 @test_movw(i16 %a0) {
; BWON64-LABEL: test_movw:
; BWON64:       # BB#0:
; BWON64-NEXT:    movl %edi, %eax
; BWON64-NEXT:    retq
;
; BWOFF64-LABEL: test_movw:
; BWOFF64:       # BB#0:
; BWOFF64-NEXT:    movw %di, %ax
; BWOFF64-NEXT:    retq
;
; BWON32-LABEL: test_movw:
; BWON32:       # BB#0:
; BWON32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; BWON32-NEXT:    retl
;
; BWOFF32-LABEL: test_movw:
; BWOFF32:       # BB#0:
; BWOFF32-NEXT:    movw {{[0-9]+}}(%esp), %ax
; BWOFF32-NEXT:    retl
  ret i16 %a0
}

; Verify we don't mess with H-reg copies (only generated in 32-bit mode).
define i8 @test_movb_hreg(i16 %a0) {
; X64-LABEL: test_movb_hreg:
; X64:       # BB#0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    shrl $8, %eax
; X64-NEXT:    addb %dil, %al
; X64-NEXT:    retq
;
; X32-LABEL: test_movb_hreg:
; X32:       # BB#0:
; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    addb %al, %ah
; X32-NEXT:    movb %ah, %al
; X32-NEXT:    retl
  %tmp0 = trunc i16 %a0 to i8
  %tmp1 = lshr i16 %a0, 8
  %tmp2 = trunc i16 %tmp1 to i8
  %tmp3 = add i8 %tmp0, %tmp2
  ret i8 %tmp3
}