llvm-mirror/test/CodeGen/SystemZ/addr-02.ll
Ulrich Weigand 1431b3c2f5 [SystemZ] Add CodeGen test cases
This adds all CodeGen tests for the SystemZ target.

This version of the patch incorporates feedback from a review by
Sean Silva.  Thanks to all reviewers!

Patch by Richard Sandiford.

llvm-svn: 181204
2013-05-06 16:17:29 +00:00

; Like addr-01.ll, but with the address also used in a non-address context.
; The assumption here is that we should match complex addresses where
; possible, but this might well need to change in future.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
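
; Each test computes an address, loads a byte through it and also stores the
; pointer itself to %dst, so the full sum must end up in a register even when
; the load folds it into a base+index+displacement operand.
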
; A simple index address.
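; %addr arrives in %r2 and %index in %r3 (s390x ELF ABI), so the add should
; fold directly into the address operand as 0(%r3,%r2).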
define void @f1(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f1:
; CHECK: lb %r0, 0(%r3,%r2)
; CHECK: br %r14
%add = add i64 %addr, %index
%ptr = inttoptr i64 %add to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}

; An address with an index and a displacement (order 1).
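; The constant 100 fits in LB's signed 20-bit displacement field, so it
; should become part of the address as 100(%r3,%r2).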
define void @f2(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f2:
; CHECK: lb %r0, 100(%r3,%r2)
; CHECK: br %r14
%add1 = add i64 %addr, %index
%add2 = add i64 %add1, 100
%ptr = inttoptr i64 %add2 to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}

; An address with an index and a displacement (order 2).
define void @f3(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f3:
; CHECK: lb %r0, 100(%r3,%r2)
; CHECK: br %r14
%add1 = add i64 %addr, 100
%add2 = add i64 %add1, %index
%ptr = inttoptr i64 %add2 to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}

; An address with an index and a subtracted displacement (order 1).
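; The displacement field is signed, so -100 is still in range and should be
; folded as -100(%r3,%r2) rather than subtracted separately.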
define void @f4(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f4:
; CHECK: lb %r0, -100(%r3,%r2)
; CHECK: br %r14
%add1 = add i64 %addr, %index
%add2 = sub i64 %add1, 100
%ptr = inttoptr i64 %add2 to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}

; An address with an index and a subtracted displacement (order 2).
define void @f5(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f5:
; CHECK: lb %r0, -100(%r3,%r2)
; CHECK: br %r14
%add1 = sub i64 %addr, 100
%add2 = add i64 %add1, %index
%ptr = inttoptr i64 %add2 to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}

; An address with an index and a displacement added using OR.
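; The AND clears the low three bits of %addr, so ORing in 6 is equivalent to
; adding 6 and the constant can be used as the displacement.  The masking
; itself should become nill %r2, 65528 (0xfff8, i.e. -8 in the low 16 bits).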
define void @f6(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f6:
; CHECK: nill %r2, 65528
; CHECK: lb %r0, 6(%r3,%r2)
; CHECK: br %r14
%aligned = and i64 %addr, -8
%or = or i64 %aligned, 6
%add = add i64 %or, %index
%ptr = inttoptr i64 %add to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}

; Like f6, but without the masking. This OR doesn't count as a displacement.
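; Without the mask nothing is known about the low bits of %addr, so the OR is
; not equivalent to an add and must stay a separate oill before the load.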
define void @f7(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f7:
; CHECK: oill %r2, 6
; CHECK: lb %r0, 0(%r3,%r2)
; CHECK: br %r14
%or = or i64 %addr, 6
%add = add i64 %or, %index
%ptr = inttoptr i64 %add to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}

; Like f6, but with the OR applied after the index. We don't know anything
; about the alignment of %add here.
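; Adding %index can set any of the low bits, so the OR has to be applied to
; the final sum with oill and the load uses that register as a plain base
; address with no displacement.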
define void @f8(i64 %addr, i64 %index, i8 **%dst) {
; CHECK: f8:
; CHECK: nill %r2, 65528
; CHECK: agr %r2, %r3
; CHECK: oill %r2, 6
; CHECK: lb %r0, 0(%r2)
; CHECK: br %r14
%aligned = and i64 %addr, -8
%add = add i64 %aligned, %index
%or = or i64 %add, 6
%ptr = inttoptr i64 %or to i8 *
%a = load volatile i8 *%ptr
store volatile i8 *%ptr, i8 **%dst
ret void
}