1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 19:23:23 +01:00

[SelectionDAG] fold FP binops with 2 undef operands to undef

llvm-svn: 348016
This commit is contained in:
Sanjay Patel 2018-11-30 18:38:52 +00:00
parent d3e19dcb49
commit 39d631d21b
3 changed files with 10 additions and 23 deletions

View File

@@ -4945,14 +4945,16 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
}
}
// Any FP binop with an undef operand is folded to NaN. This matches the
// behavior of the IR optimizer.
switch (Opcode) {
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
// If both operands are undef, the result is undef. If 1 operand is undef,
// the result is NaN. This should match the behavior of the IR optimizer.
if (N1.isUndef() && N2.isUndef())
return getUNDEF(VT);
if (N1.isUndef() || N2.isUndef())
return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
}

View File

@@ -192,7 +192,6 @@ define float @frem_undef_op1_fast(float %x) {
define double @fadd_undef_undef(double %x) {
; ANY-LABEL: fadd_undef_undef:
; ANY: # %bb.0:
; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; ANY-NEXT: retq
%r = fadd double undef, undef
ret double %r
@@ -201,7 +200,6 @@ define double @fadd_undef_undef(double %x) {
define double @fsub_undef_undef(double %x) {
; ANY-LABEL: fsub_undef_undef:
; ANY: # %bb.0:
; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; ANY-NEXT: retq
%r = fsub double undef, undef
ret double %r
@@ -210,7 +208,6 @@ define double @fsub_undef_undef(double %x) {
define double @fmul_undef_undef(double %x) {
; ANY-LABEL: fmul_undef_undef:
; ANY: # %bb.0:
; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; ANY-NEXT: retq
%r = fmul double undef, undef
ret double %r
@@ -219,7 +216,6 @@ define double @fmul_undef_undef(double %x) {
define double @fdiv_undef_undef(double %x) {
; ANY-LABEL: fdiv_undef_undef:
; ANY: # %bb.0:
; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; ANY-NEXT: retq
%r = fdiv double undef, undef
ret double %r
@@ -228,7 +224,6 @@ define double @fdiv_undef_undef(double %x) {
define double @frem_undef_undef(double %x) {
; ANY-LABEL: frem_undef_undef:
; ANY: # %bb.0:
; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; ANY-NEXT: retq
%r = frem double undef, undef
ret double %r

View File

@@ -194,7 +194,6 @@ define <4 x double> @fadd_op1_constant_v4f64(double %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: fadd_op1_constant_v4f64:
@@ -214,7 +213,6 @@ define <4 x double> @load_fadd_op1_constant_v4f64(double* %p) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: load_fadd_op1_constant_v4f64:
@@ -232,10 +230,9 @@ define <4 x double> @load_fadd_op1_constant_v4f64(double* %p) nounwind {
define <4 x double> @fsub_op0_constant_v4f64(double %x) nounwind {
; SSE-LABEL: fsub_op0_constant_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: subpd %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: subpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsub_op0_constant_v4f64:
@@ -255,7 +252,6 @@ define <4 x double> @load_fsub_op0_constant_v4f64(double* %p) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: subpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: load_fsub_op0_constant_v4f64:
@@ -275,7 +271,6 @@ define <4 x double> @fmul_op1_constant_v4f64(double %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: mulpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: fmul_op1_constant_v4f64:
@@ -295,7 +290,6 @@ define <4 x double> @load_fmul_op1_constant_v4f64(double* %p) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: mulpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: load_fmul_op1_constant_v4f64:
@@ -315,7 +309,6 @@ define <4 x double> @fdiv_op1_constant_v4f64(double %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: divpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: fdiv_op1_constant_v4f64:
@@ -335,7 +328,6 @@ define <4 x double> @load_fdiv_op1_constant_v4f64(double* %p) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: divpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: load_fdiv_op1_constant_v4f64:
@@ -353,10 +345,9 @@ define <4 x double> @load_fdiv_op1_constant_v4f64(double* %p) nounwind {
define <4 x double> @fdiv_op0_constant_v4f64(double %x) nounwind {
; SSE-LABEL: fdiv_op0_constant_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: divpd %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: divpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fdiv_op0_constant_v4f64:
@@ -376,7 +367,6 @@ define <4 x double> @load_fdiv_op0_constant_v4f64(double* %p) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: divpd %xmm1, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN]
; SSE-NEXT: retq
;
; AVX-LABEL: load_fdiv_op0_constant_v4f64: