mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-10-30 15:32:52 +01:00
4b6cfd7cec
This patch adds support for the CRJ and CGRJ instructions. Support for the immediate forms will be a separate patch. The architecture has a large number of comparison instructions. I think it's generally better to concentrate on using the "best" comparison instruction first and foremost, then only use something like CRJ if CR really was the natural choice of comparison instruction. The patch therefore opportunistically converts separate CR and BRC instructions into a single CRJ while emitting instructions in ISelLowering. llvm-svn: 182764
118 lines
3.1 KiB
LLVM
118 lines
3.1 KiB
LLVM
; Test 64-bit signed comparison in which the second operand is a variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Check CGR. Register-register compare + branch should fuse into a single
; CGRJ (compare and branch on condition, 64-bit register forms).
define double @f1(double %a, double %b, i64 %i1, i64 %i2) {
; CHECK: f1:
; CHECK: cgrjl %r2, %r3
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check CG with no displacement. A register-memory compare cannot fuse into
; CGRJ, so we expect separate CG and conditional-branch instructions.
define double @f2(double %a, double %b, i64 %i1, i64 *%ptr) {
; CHECK: f2:
; CHECK: cg %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %i2 = load i64 *%ptr
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the aligned CG range (offset 524280 = 8 * 65535,
; the largest doubleword-aligned value that fits the signed 20-bit field).
define double @f3(double %a, double %b, i64 %i1, i64 *%base) {
; CHECK: f3:
; CHECK: cg %r2, 524280(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i64 *%base, i64 65535
  %i2 = load i64 *%ptr
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(double %a, double %b, i64 %i1, i64 *%base) {
; CHECK: f4:
; CHECK: agfi %r3, 524288
; CHECK: cg %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i64 *%base, i64 65536
  %i2 = load i64 *%ptr
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the negative aligned CG range (offset -8).
define double @f5(double %a, double %b, i64 %i1, i64 *%base) {
; CHECK: f5:
; CHECK: cg %r2, -8(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i64 *%base, i64 -1
  %i2 = load i64 *%ptr
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the low end of the CG range (offset -524288 = 8 * -65536,
; the most negative value that fits the signed 20-bit displacement).
define double @f6(double %a, double %b, i64 %i1, i64 *%base) {
; CHECK: f6:
; CHECK: cg %r2, -524288(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i64 *%base, i64 -65536
  %i2 = load i64 *%ptr
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f7(double %a, double %b, i64 %i1, i64 *%base) {
; CHECK: f7:
; CHECK: agfi %r3, -524296
; CHECK: cg %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i64 *%base, i64 -65537
  %i2 = load i64 *%ptr
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check that CG allows an index register (RXY-form base + index addressing).
; Either register order in the address is acceptable.
define double @f8(double %a, double %b, i64 %i1, i64 %base, i64 %index) {
; CHECK: f8:
; CHECK: cg %r2, 524280({{%r4,%r3|%r3,%r4}})
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %add1 = add i64 %base, %index
  %add2 = add i64 %add1, 524280
  %ptr = inttoptr i64 %add2 to i64 *
  %i2 = load i64 *%ptr
  %cond = icmp slt i64 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}