; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instcombine -S | FileCheck %s
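
; Negating a truncated sign-bit extraction should flip the shift:
; -(trunc(lshr X, 63)) -> trunc(ashr X, 63), and -(trunc(ashr X, 63)) -> trunc(lshr X, 63).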
define i32 @t0(i64 %x) {
; CHECK-LABEL: @t0(
; CHECK-NEXT: [[T0_NEG:%.*]] = ashr i64 [[X:%.*]], 63
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
; CHECK-NEXT: ret i32 [[T1_NEG]]
;
%t0 = lshr i64 %x, 63
%t1 = trunc i64 %t0 to i32
%r = sub i32 0, %t1
ret i32 %r
}
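
; Same fold as @t0, with the exact flag on the shift preserved.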
define i32 @t1_exact(i64 %x) {
; CHECK-LABEL: @t1_exact(
; CHECK-NEXT: [[T0_NEG:%.*]] = ashr exact i64 [[X:%.*]], 63
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
; CHECK-NEXT: ret i32 [[T1_NEG]]
;
%t0 = lshr exact i64 %x, 63
%t1 = trunc i64 %t0 to i32
%r = sub i32 0, %t1
ret i32 %r
}
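
; The inverse direction: -(trunc(ashr %x, 63)) becomes trunc(lshr %x, 63).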
define i32 @t2(i64 %x) {
; CHECK-LABEL: @t2(
; CHECK-NEXT: [[T0_NEG:%.*]] = lshr i64 [[X:%.*]], 63
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
; CHECK-NEXT: ret i32 [[T1_NEG]]
;
%t0 = ashr i64 %x, 63
%t1 = trunc i64 %t0 to i32
%r = sub i32 0, %t1
ret i32 %r
}
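
; Same fold as @t2, with the exact flag on the shift preserved.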
define i32 @t3_exact(i64 %x) {
; CHECK-LABEL: @t3_exact(
; CHECK-NEXT: [[T0_NEG:%.*]] = lshr exact i64 [[X:%.*]], 63
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
; CHECK-NEXT: ret i32 [[T1_NEG]]
;
%t0 = ashr exact i64 %x, 63
%t1 = trunc i64 %t0 to i32
%r = sub i32 0, %t1
ret i32 %r
}
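
; Splat-vector version of @t0.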
define <2 x i32> @t4(<2 x i64> %x) {
; CHECK-LABEL: @t4(
; CHECK-NEXT: [[T0_NEG:%.*]] = ashr <2 x i64> [[X:%.*]], <i64 63, i64 63>
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc <2 x i64> [[T0_NEG]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T1_NEG]]
;
%t0 = lshr <2 x i64> %x, <i64 63, i64 63>
%t1 = trunc <2 x i64> %t0 to <2 x i32>
%r = sub <2 x i32> zeroinitializer, %t1
ret <2 x i32> %r
}
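
; Undef lanes in the shift amount and in the negation constant; not folded.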
define <2 x i32> @t5(<2 x i64> %x) {
; CHECK-LABEL: @t5(
; CHECK-NEXT: [[T0:%.*]] = lshr <2 x i64> [[X:%.*]], <i64 63, i64 undef>
; CHECK-NEXT: [[T1:%.*]] = trunc <2 x i64> [[T0]] to <2 x i32>
; CHECK-NEXT: [[R:%.*]] = sub <2 x i32> <i32 0, i32 undef>, [[T1]]
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%t0 = lshr <2 x i64> %x, <i64 63, i64 undef>
%t1 = trunc <2 x i64> %t0 to <2 x i32>
%r = sub <2 x i32> <i32 0, i32 undef>, %t1
ret <2 x i32> %r
}
declare void @use64(i64)
declare void @use32(i32)
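
; An extra use of %t0 does not block the fold: the lshr is kept for the call
; and an ashr is created for the negated value.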
define i32 @t6(i64 %x) {
; CHECK-LABEL: @t6(
; CHECK-NEXT: [[T0_NEG:%.*]] = ashr i64 [[X:%.*]], 63
; CHECK-NEXT: [[T0:%.*]] = lshr i64 [[X]], 63
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
; CHECK-NEXT: ret i32 [[T1_NEG]]
;
%t0 = lshr i64 %x, 63
call void @use64(i64 %t0)
%t1 = trunc i64 %t0 to i32
%r = sub i32 0, %t1
ret i32 %r
}
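
; Negative test: %t1 has an extra use, so the negation is not sunk.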
define i32 @n7(i64 %x) {
; CHECK-LABEL: @n7(
; CHECK-NEXT: [[T0:%.*]] = lshr i64 [[X:%.*]], 63
; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[T0]] to i32
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[R:%.*]] = sub nsw i32 0, [[T1]]
; CHECK-NEXT: ret i32 [[R]]
;
%t0 = lshr i64 %x, 63
%t1 = trunc i64 %t0 to i32
call void @use32(i32 %t1)
%r = sub i32 0, %t1
ret i32 %r
}
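
; Negative test: both %t0 and %t1 have extra uses.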
define i32 @n8(i64 %x) {
; CHECK-LABEL: @n8(
; CHECK-NEXT: [[T0:%.*]] = lshr i64 [[X:%.*]], 63
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[T0]] to i32
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[R:%.*]] = sub nsw i32 0, [[T1]]
; CHECK-NEXT: ret i32 [[R]]
;
%t0 = lshr i64 %x, 63
call void @use64(i64 %t0)
%t1 = trunc i64 %t0 to i32
call void @use32(i32 %t1)
%r = sub i32 0, %t1
ret i32 %r
}
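
; Negative test: shifting by 62 does not extract just the sign bit.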
define i32 @n9(i64 %x) {
; CHECK-LABEL: @n9(
; CHECK-NEXT: [[T0:%.*]] = lshr i64 [[X:%.*]], 62
; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[T0]] to i32
; CHECK-NEXT: [[R:%.*]] = sub nsw i32 0, [[T1]]
; CHECK-NEXT: ret i32 [[R]]
;
%t0 = lshr i64 %x, 62
%t1 = trunc i64 %t0 to i32
%r = sub i32 0, %t1
ret i32 %r
}
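
; The outer constant is 1 rather than 0; the negation is still sunk and the
; remaining +1 becomes an add.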
define i32 @n10(i64 %x) {
; CHECK-LABEL: @n10(
; CHECK-NEXT: [[T0_NEG:%.*]] = ashr i64 [[X:%.*]], 63
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
; CHECK-NEXT: [[R:%.*]] = add i32 [[T1_NEG]], 1
; CHECK-NEXT: ret i32 [[R]]
;
%t0 = lshr i64 %x, 63
%t1 = trunc i64 %t0 to i32
%r = sub i32 1, %t1
ret i32 %r
}