mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2025-02-01 05:01:59 +01:00
2ce7528398
The AMDGPU handling of f16 vectors is terrible still since it gets scalarized even when the vector operation is legal. The code is essentially duplicated between the non-strict and strict case. Apparently no other expansions are currently trying to do this. This is mostly because I found the behavior of getStrictFPOperationAction to be confusing. In the ARM case, it would expand strict_fsub even though it shouldn't due to the later check. At that point, the logic required to check for legality was more complex than just duplicating the 2 instruction expansion.
97 lines
4.7 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s
; Scalar f64, strictest exception mode: even with fpexcept.strict, the
; constrained fsub should select to a single v_add_f64 with a negated
; source modifier on gfx900 (no libcall / scalarization fallback).
define double @v_constained_fsub_f64_fpexcept_strict(double %x, double %y) #0 {
; GCN-LABEL: v_constained_fsub_f64_fpexcept_strict:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], -v[2:3]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %sub = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret double %sub
}
; Scalar f64 with exceptions ignored: expected codegen is identical to the
; strict variant above — one v_add_f64 with a negated operand.
define double @v_constained_fsub_f64_fpexcept_ignore(double %x, double %y) #0 {
; GCN-LABEL: v_constained_fsub_f64_fpexcept_ignore:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], -v[2:3]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %sub = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret double %sub
}
; Scalar f64 with fpexcept.maytrap: same single-instruction selection as the
; strict and ignore variants.
define double @v_constained_fsub_f64_fpexcept_maytrap(double %x, double %y) #0 {
; GCN-LABEL: v_constained_fsub_f64_fpexcept_maytrap:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], -v[2:3]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %sub = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  ret double %sub
}
; <2 x double> strict case: the vector constrained fsub is split into two
; per-element v_add_f64 instructions with negated source modifiers.
define <2 x double> @v_constained_fsub_v2f64_fpexcept_strict(<2 x double> %x, <2 x double> %y) #0 {
; GCN-LABEL: v_constained_fsub_v2f64_fpexcept_strict:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
; GCN-NEXT:    v_add_f64 v[2:3], v[2:3], -v[6:7]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret <2 x double> %sub
}
; <2 x double> with exceptions ignored: same two-instruction split as the
; strict vector case.
define <2 x double> @v_constained_fsub_v2f64_fpexcept_ignore(<2 x double> %x, <2 x double> %y) #0 {
; GCN-LABEL: v_constained_fsub_v2f64_fpexcept_ignore:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
; GCN-NEXT:    v_add_f64 v[2:3], v[2:3], -v[6:7]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
  ret <2 x double> %sub
}
; <2 x double> with fpexcept.maytrap: same two-instruction split as the other
; vector exception modes.
define <2 x double> @v_constained_fsub_v2f64_fpexcept_maytrap(<2 x double> %x, <2 x double> %y) #0 {
; GCN-LABEL: v_constained_fsub_v2f64_fpexcept_maytrap:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
; GCN-NEXT:    v_add_f64 v[2:3], v[2:3], -v[6:7]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  ret <2 x double> %sub
}
; Odd-width vector (<3 x double>), strict case: splits into three per-element
; v_add_f64 instructions, covering the non-power-of-two legalization path.
define <3 x double> @v_constained_fsub_v3f64_fpexcept_strict(<3 x double> %x, <3 x double> %y) #0 {
; GCN-LABEL: v_constained_fsub_v3f64_fpexcept_strict:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], -v[6:7]
; GCN-NEXT:    v_add_f64 v[2:3], v[2:3], -v[8:9]
; GCN-NEXT:    v_add_f64 v[4:5], v[4:5], -v[10:11]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %sub = call <3 x double> @llvm.experimental.constrained.fsub.v3f64(<3 x double> %x, <3 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret <3 x double> %sub
}
; Scalar (SGPR) inputs via amdgpu_ps inreg arguments: one operand must be
; copied to VGPRs before the VALU v_add_f64; the double result is returned
; bitcast to <2 x float> through the shader return convention.
define amdgpu_ps <2 x float> @s_constained_fsub_f64_fpexcept_strict(double inreg %x, double inreg %y) #0 {
; GCN-LABEL: s_constained_fsub_f64_fpexcept_strict:
; GCN:       ; %bb.0:
; GCN-NEXT:    v_mov_b32_e32 v0, s4
; GCN-NEXT:    v_mov_b32_e32 v1, s5
; GCN-NEXT:    v_add_f64 v[0:1], s[2:3], -v[0:1]
; GCN-NEXT:    ; return to shader part epilog
  %diff = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
  %bits = bitcast double %diff to <2 x float>
  ret <2 x float> %bits
}
; Constrained-fsub intrinsic declarations used by the tests above.
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata) #1
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata) #1
declare <3 x double> @llvm.experimental.constrained.fsub.v3f64(<3 x double>, <3 x double>, metadata, metadata) #1

; #0: test functions are strictfp so constrained FP semantics apply throughout.
attributes #0 = { strictfp }
; #1: standard attributes for the constrained FP intrinsics.
attributes #1 = { inaccessiblememonly nounwind willreturn }