llvm-mirror/test/CodeGen/SystemZ/fp-strict-div-02.ll
Ulrich Weigand fba10ebb96 Allow target to handle STRICT floating-point nodes
The ISD::STRICT_ nodes used to implement the constrained floating-point
intrinsics are currently never passed to the target back-end, which makes
it impossible to handle them correctly (e.g. mark instructions as depending
on a floating-point status and control register, or mark instructions as
possibly trapping).

This patch allows the target to use setOperationAction to switch the action
on ISD::STRICT_ nodes to Legal. If this is done, the SelectionDAG common code
will stop converting the STRICT nodes to regular floating-point nodes, but
instead pass the STRICT nodes to the target using normal SelectionDAG
matching rules.

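For illustration, a minimal sketch of what a target's TargetLowering
constructor might do to opt in (setOperationAction and the ISD opcodes
are the real interfaces; which nodes and types a target marks Legal is
of course target-specific):

  // Keeping the STRICT_ opcodes Legal tells the SelectionDAG common
  // code not to mutate them into plain FP nodes, so they reach
  // instruction selection unchanged.
  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
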
To avoid having the back-end duplicate all the floating-point instruction
patterns to handle both strict and non-strict variants, we make the MI
codegen explicitly aware of the floating-point exceptions by introducing
two new concepts:

- A new MCID flag "mayRaiseFPException" that the target should set on any
  instruction that can possibly raise an FP exception according to the
  architecture definition.
- A new MI flag FPExcept that CodeGen/SelectionDAG will set on any MI
  instruction resulting from expansion of any constrained FP intrinsic.

Any MI instruction that is *both* marked as mayRaiseFPException *and*
FPExcept then needs to be considered as raising exceptions by MI-level
codegen (e.g. scheduling).

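As a sketch, the combined MI-level test could look like the following
(raisesFPExceptions is a hypothetical helper; mayRaiseFPException and
the FPExcept MI flag are the names introduced by this patch):

  #include "llvm/CodeGen/MachineInstr.h"
  using namespace llvm;

  // Hypothetical helper: an instruction counts as exception-raising
  // only when its opcode may raise an FP exception architecturally
  // *and* this particular instance came from a constrained intrinsic.
  static bool raisesFPExceptions(const MachineInstr &MI) {
    return MI.getDesc().mayRaiseFPException() &&
           MI.getFlag(MachineInstr::FPExcept);
  }

MI-level passes such as the scheduler can then key their conservative
handling off this predicate rather than off either flag alone.
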
Setting those two new flags is straightforward. The mayRaiseFPException
flag is simply set via TableGen by marking all relevant instruction
patterns in the .td files.

The FPExcept flag is set in SDNodeFlags when creating the STRICT_ nodes
in the SelectionDAG, and gets inherited in the MachineSDNode nodes created
from it during instruction selection. The flag is then transferred to an
MIFlag when creating the MI from the MachineSDNode. This is handled just
like fast-math flags such as no-nans are handled today.

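A rough sketch of that hand-off, assuming the SDNodeFlags::hasFPExcept()
accessor and the MachineInstr::FPExcept flag added by this patch (this
is illustrative, not the exact InstrEmitter code):

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  // Copy the relevant SDNode flags onto the newly created MachineInstr,
  // handling FPExcept the same way as a fast-math flag such as no-nans.
  static void transferNodeFlags(const SDNode *Node, MachineInstr &MI) {
    SDNodeFlags Flags = Node->getFlags();
    if (Flags.hasNoNaNs())
      MI.setFlag(MachineInstr::FmNoNans);
    if (Flags.hasFPExcept())
      MI.setFlag(MachineInstr::FPExcept);
  }
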
This patch includes both common code changes required to implement the
new features, and the SystemZ implementation.

Reviewed By: andrew.w.kaylor

Differential Revision: https://reviews.llvm.org/D55506

llvm-svn: 362663
2019-06-05 22:33:10 +00:00

; Test strict 64-bit floating-point division.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s

declare double @foo()
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)

; Check register division.
define double @f1(double %f1, double %f2) {
; CHECK-LABEL: f1:
; CHECK: ddbr %f0, %f2
; CHECK: br %r14
  %res = call double @llvm.experimental.constrained.fdiv.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check the low end of the DDB range.
define double @f2(double %f1, double *%ptr) {
; CHECK-LABEL: f2:
; CHECK: ddb %f0, 0(%r2)
; CHECK: br %r14
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fdiv.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check the high end of the aligned DDB range.
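; 511 * 8 = 4088 is the largest doubleword-aligned offset that still fits
; DDB's 12-bit unsigned displacement (maximum 4095).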
define double @f3(double %f1, double *%base) {
; CHECK-LABEL: f3:
; CHECK: ddb %f0, 4088(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 511
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fdiv.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
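; 512 * 8 = 4096 no longer fits in the displacement field, so the base
; register is adjusted first.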
define double @f4(double %f1, double *%base) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: ddb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 512
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fdiv.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check negative displacements, which also need separate address logic.
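; The displacement field is unsigned, so an offset of -8 has to be formed
; in a register instead.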
define double @f5(double %f1, double *%base) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: ddb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 -1
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fdiv.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check that DDB allows indices.
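; The index is scaled by 8 (sllg by 3) and the constant part, 100 * 8 = 800,
; folds into the displacement.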
define double @f6(double %f1, double *%base, i64 %index) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: ddb %f0, 800(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%base, i64 %index
  %ptr2 = getelementptr double, double *%ptr1, i64 100
  %f2 = load double, double *%ptr2
  %res = call double @llvm.experimental.constrained.fdiv.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check that divisions of spilled values can use DDB rather than DDBR.
define double @f7(double *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: ddb %f0, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%ptr0, i64 2
  %ptr2 = getelementptr double, double *%ptr0, i64 4
  %ptr3 = getelementptr double, double *%ptr0, i64 6
  %ptr4 = getelementptr double, double *%ptr0, i64 8
  %ptr5 = getelementptr double, double *%ptr0, i64 10
  %ptr6 = getelementptr double, double *%ptr0, i64 12
  %ptr7 = getelementptr double, double *%ptr0, i64 14
  %ptr8 = getelementptr double, double *%ptr0, i64 16
  %ptr9 = getelementptr double, double *%ptr0, i64 18
  %ptr10 = getelementptr double, double *%ptr0, i64 20
  %val0 = load double, double *%ptr0
  %val1 = load double, double *%ptr1
  %val2 = load double, double *%ptr2
  %val3 = load double, double *%ptr3
  %val4 = load double, double *%ptr4
  %val5 = load double, double *%ptr5
  %val6 = load double, double *%ptr6
  %val7 = load double, double *%ptr7
  %val8 = load double, double *%ptr8
  %val9 = load double, double *%ptr9
  %val10 = load double, double *%ptr10
  %ret = call double @foo()
  %div0 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %ret, double %val0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div1 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div0, double %val1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div2 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div1, double %val2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div3 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div2, double %val3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div4 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div3, double %val4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div5 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div4, double %val5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div6 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div5, double %val6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div7 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div6, double %val7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div8 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div7, double %val8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div9 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div8, double %val9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %div10 = call double @llvm.experimental.constrained.fdiv.f64(
                        double %div9, double %val10,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %div10
}