mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 18:54:02 +01:00)
commit 13a750213e: We already have reassociation code for Adds and Ors separately in the
DAG combiner; this adds it for the combination of the two where Ors act like Adds. It
reassociates (add (or (x, c), y)) -> (add (add (x, y), c)) where we know that the Or's
operands have no common bits set, and the Or has one use.
Differential Revision: https://reviews.llvm.org/D104765
141 lines, 4.2 KiB, LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Like addr-01.ll, except that the address is also used in a non-address context.
; The assumption here is that we should match complex addresses where
; possible, but this might well need to change in future.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
|
; A simple index address.
; NOTE(review): %ptr is used both as a load address and as a stored value
; (non-address use). The CHECK lines show the indexed form 0(%r3,%r2) is still
; folded into the load, with la rematerializing the same address for the store.
define void @f1(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: lb %r0, 0(%r3,%r2)
; CHECK-NEXT: la %r0, 0(%r3,%r2)
; CHECK-NEXT: stg %r0, 0(%r4)
; CHECK-NEXT: br %r14
  %add = add i64 %addr, %index
  %ptr = inttoptr i64 %add to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|
|
|
|
; An address with an index and a displacement (order 1).
; NOTE(review): base + index first, then + 100; both the load and the la
; fold the constant into the 12-bit displacement field: 100(%r3,%r2).
define void @f2(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: lb %r0, 100(%r3,%r2)
; CHECK-NEXT: la %r0, 100(%r3,%r2)
; CHECK-NEXT: stg %r0, 0(%r4)
; CHECK-NEXT: br %r14
  %add1 = add i64 %addr, %index
  %add2 = add i64 %add1, 100
  %ptr = inttoptr i64 %add2 to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|
|
|
|
; An address with an index and a displacement (order 2).
; NOTE(review): same as f2 but with the constant added before the index;
; expected codegen is identical, proving the match is order-insensitive.
define void @f3(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: lb %r0, 100(%r3,%r2)
; CHECK-NEXT: la %r0, 100(%r3,%r2)
; CHECK-NEXT: stg %r0, 0(%r4)
; CHECK-NEXT: br %r14
  %add1 = add i64 %addr, 100
  %add2 = add i64 %add1, %index
  %ptr = inttoptr i64 %add2 to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|
|
|
|
; An address with an index and a subtracted displacement (order 1).
; NOTE(review): the displacement is negative, so the address materialization
; uses lay (long/signed 20-bit displacement form) rather than la — presumably
; because la's displacement field is unsigned; confirm against the z/Arch ISA.
define void @f4(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f4:
; CHECK: # %bb.0:
; CHECK-NEXT: lb %r0, -100(%r3,%r2)
; CHECK-NEXT: lay %r0, -100(%r3,%r2)
; CHECK-NEXT: stg %r0, 0(%r4)
; CHECK-NEXT: br %r14
  %add1 = add i64 %addr, %index
  %add2 = sub i64 %add1, 100
  %ptr = inttoptr i64 %add2 to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|
|
|
|
; An address with an index and a subtracted displacement (order 2).
; NOTE(review): same as f4 but subtracting before adding the index;
; expected codegen is identical (lb/lay with -100 displacement).
define void @f5(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
; CHECK-NEXT: lb %r0, -100(%r3,%r2)
; CHECK-NEXT: lay %r0, -100(%r3,%r2)
; CHECK-NEXT: stg %r0, 0(%r4)
; CHECK-NEXT: br %r14
  %add1 = sub i64 %addr, 100
  %add2 = add i64 %add1, %index
  %ptr = inttoptr i64 %add2 to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|
|
|
|
; An address with an index and a displacement added using OR.
; NOTE(review): the `and -8` clears the low three bits, so `or 6` touches only
; known-zero bits and behaves like an add. The CHECK lines show the 6 is folded
; into the displacement field (6(%r2,%r3)) after the nill masking.
define void @f6(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f6:
; CHECK: # %bb.0:
; CHECK-NEXT: nill %r2, 65528
; CHECK-NEXT: lb %r0, 6(%r2,%r3)
; CHECK-NEXT: la %r0, 6(%r2,%r3)
; CHECK-NEXT: stg %r0, 0(%r4)
; CHECK-NEXT: br %r14
  %aligned = and i64 %addr, -8
  %or = or i64 %aligned, 6
  %add = add i64 %or, %index
  %ptr = inttoptr i64 %add to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|
|
|
|
; Like f6, but without the masking. This OR doesn't count as a displacement.
; NOTE(review): with no prior `and`, the low bits of %addr are unknown, so the
; OR cannot be treated as an add; it is executed explicitly (oill) and the
; address reverts to the plain indexed form 0(%r3,%r2).
define void @f7(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f7:
; CHECK: # %bb.0:
; CHECK-NEXT: oill %r2, 6
; CHECK-NEXT: lb %r0, 0(%r3,%r2)
; CHECK-NEXT: la %r0, 0(%r3,%r2)
; CHECK-NEXT: stg %r0, 0(%r4)
; CHECK-NEXT: br %r14
  %or = or i64 %addr, 6
  %add = add i64 %or, %index
  %ptr = inttoptr i64 %add to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|
|
|
|
; Like f6, but with the OR applied after the index. We don't know anything
; about the alignment of %add here.
; NOTE(review): because %index may have set any low bits, the OR after the add
; cannot be folded into a displacement; the full address is computed in %r2
; (nill/agr/oill) and used unindexed for both the load and the stored value.
define void @f8(i64 %addr, i64 %index, i8 **%dst) {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
; CHECK-NEXT: nill %r2, 65528
; CHECK-NEXT: agr %r2, %r3
; CHECK-NEXT: oill %r2, 6
; CHECK-NEXT: lb %r0, 0(%r2)
; CHECK-NEXT: stg %r2, 0(%r4)
; CHECK-NEXT: br %r14
  %aligned = and i64 %addr, -8
  %add = add i64 %aligned, %index
  %or = or i64 %add, 6
  %ptr = inttoptr i64 %or to i8 *
  %a = load volatile i8, i8 *%ptr
  store volatile i8 *%ptr, i8 **%dst
  ret void
}
|