llvm-mirror/test/CodeGen/PowerPC/fast-isel-binary.ll
Bill Schmidt b3f46e50b4 [PowerPC] Add loads, stores, and related things to fast-isel.
This is the next big chunk of fast-isel code.  The primary purpose is
to implement selection of loads and stores, but there is a lot of
drag-along to support this.  The common code to analyze addresses for
both loads and stores is substantial.  It's also necessary to add the
materialization code for global values.
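
As a rough illustration (a hypothetical snippet, not part of the test
below; the global @g and the function name are invented for this
example), fast-isel can now select IR along these lines without
falling back to SelectionDAG:

  @g = global i32 0, align 4

  define i32 @load_store_global(i32 %v) nounwind {
  entry:
    ; store through a materialized global address
    store i32 %v, i32* @g, align 4
    ; load the value back from the same global
    %0 = load i32* @g, align 4
    ret i32 %0
  }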

Related to load-store processing is the code to fold loads into
integer extends, since otherwise we generate lots of redundant
instructions.  We also need to add some overrides to some FastEmit
routines to ensure we don't assign GPR 0 to a virtual register when
this would change the meaning of an instruction.
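
For instance (again a hypothetical fragment; %p is assumed to be an
i16 pointer already in a register), a load feeding a sign extension:

  %x = load i16* %p, align 2
  %ext = sext i16 %x to i32

can now be selected as a single sign-extending load (lha) rather than
a load followed by a separate extend instruction.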

I added handling selection of a few binary arithmetic instructions, to
enable committing some test cases I wrote a while back.

Finally, a couple of miscellaneous changes:
 * I cleaned up some poor style from a previous patch in
   PPCISelLowering.cpp, pointed out by David Blaikie.
 * I enlarged the Addr.Offset field to avoid sign problems with 32-bit
   offsets. 

llvm-svn: 189636
2013-08-30 02:29:45 +00:00

; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
; Test add with non-legal types
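; i8 and i16 are not legal types on PPC64, so these cases exercise
; fast-isel's handling of the promoted operands.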
define void @add_i8(i8 %a, i8 %b) nounwind ssp {
entry:
; ELF64: add_i8
%a.addr = alloca i8, align 4
%0 = add i8 %a, %b
; ELF64: add
store i8 %0, i8* %a.addr, align 4
ret void
}
define void @add_i8_imm(i8 %a) nounwind ssp {
entry:
; ELF64: add_i8_imm
%a.addr = alloca i8, align 4
%0 = add i8 %a, 22;
; ELF64: addi
store i8 %0, i8* %a.addr, align 4
ret void
}
define void @add_i16(i16 %a, i16 %b) nounwind ssp {
entry:
; ELF64: add_i16
%a.addr = alloca i16, align 4
%0 = add i16 %a, %b
; ELF64: add
store i16 %0, i16* %a.addr, align 4
ret void
}
define void @add_i16_imm(i16 %a, i16 %b) nounwind ssp {
entry:
; ELF64: add_i16_imm
%a.addr = alloca i16, align 4
%0 = add i16 %a, 243;
; ELF64: addi
store i16 %0, i16* %a.addr, align 4
ret void
}

; Test or with non-legal types
define void @or_i8(i8 %a, i8 %b) nounwind ssp {
entry:
; ELF64: or_i8
%a.addr = alloca i8, align 4
%0 = or i8 %a, %b
; ELF64: or
store i8 %0, i8* %a.addr, align 4
ret void
}
define void @or_i8_imm(i8 %a) nounwind ssp {
entry:
; ELF64: or_i8_imm
%a.addr = alloca i8, align 4
%0 = or i8 %a, -13;
; ELF64: ori
store i8 %0, i8* %a.addr, align 4
ret void
}
define void @or_i16(i16 %a, i16 %b) nounwind ssp {
entry:
; ELF64: or_i16
%a.addr = alloca i16, align 4
%0 = or i16 %a, %b
; ELF64: or
store i16 %0, i16* %a.addr, align 4
ret void
}
define void @or_i16_imm(i16 %a) nounwind ssp {
entry:
; ELF64: or_i16_imm
%a.addr = alloca i16, align 4
%0 = or i16 %a, 273;
; ELF64: ori
store i16 %0, i16* %a.addr, align 4
ret void
}

; Test sub with non-legal types
define void @sub_i8(i8 %a, i8 %b) nounwind ssp {
entry:
; ELF64: sub_i8
%a.addr = alloca i8, align 4
%0 = sub i8 %a, %b
; ELF64: subf
store i8 %0, i8* %a.addr, align 4
ret void
}
define void @sub_i8_imm(i8 %a) nounwind ssp {
entry:
; ELF64: sub_i8_imm
%a.addr = alloca i8, align 4
%0 = sub i8 %a, 22;
; ELF64: addi
store i8 %0, i8* %a.addr, align 4
ret void
}
define void @sub_i16(i16 %a, i16 %b) nounwind ssp {
entry:
; ELF64: sub_i16
%a.addr = alloca i16, align 4
%0 = sub i16 %a, %b
; ELF64: subf
store i16 %0, i16* %a.addr, align 4
ret void
}
define void @sub_i16_imm(i16 %a) nounwind ssp {
entry:
; ELF64: sub_i16_imm
%a.addr = alloca i16, align 4
%0 = sub i16 %a, 247;
; ELF64: addi
store i16 %0, i16* %a.addr, align 4
ret void
}
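; -32768 cannot be negated into a signed 16-bit immediate for addi, so this
; subtraction must use subf with the constant materialized into a register.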
define void @sub_i16_badimm(i16 %a) nounwind ssp {
entry:
; ELF64: sub_i16_badimm
%a.addr = alloca i16, align 4
%0 = sub i16 %a, -32768;
; ELF64: subf
store i16 %0, i16* %a.addr, align 4
ret void
}