[InstCombine] reassociate sub+add to increase adds and throughput
The -reassociate pass tends to transform this kind of pattern into something that is worse for vectorization and codegen. See PR43953: https://bugs.llvm.org/show_bug.cgi?id=43953

This follows up the FP version of the same transform: rGa0ce2338a083
parent 52cc60a2c2
commit f1cc72d892
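To make the throughput argument concrete, here is a minimal standalone C++ sketch (not part of the patch; the function names chained and reassociated are invented for illustration). The original form is a serial chain of three dependent operations (sub -> add -> sub), while the reassociated form has two independent adds feeding one sub, so the critical path drops from three operations to two. Unsigned wrap-around arithmetic is used so the two forms agree for all inputs, mirroring LLVM's two's-complement add/sub.

#include <cassert>
#include <cstdint>

// ((x - y) + z) - w : each operation waits on the previous one.
static uint8_t chained(uint8_t x, uint8_t y, uint8_t z, uint8_t w) {
  uint8_t s1 = x - y;
  uint8_t a  = s1 + z;
  return a - w;
}

// (x + z) - (y + w) : the two adds have no dependency on each other,
// so a superscalar core can issue them in the same cycle.
static uint8_t reassociated(uint8_t x, uint8_t y, uint8_t z, uint8_t w) {
  uint8_t xz = x + z;
  uint8_t yw = y + w;
  return xz - yw;
}

int main() {
  // Spot-check that the two forms are equivalent under modular arithmetic.
  for (unsigned x = 0; x < 256; x += 7)
    for (unsigned y = 0; y < 256; y += 11)
      assert(chained(x, y, 100, 200) == reassociated(x, y, 100, 200));
  return 0;
}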
@@ -1765,6 +1765,17 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
   if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
     return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
 
+  // Reassociate sub/add sequences to create more add instructions and
+  // reduce dependency chains:
+  // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1)
+  Value *Z;
+  if (match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(X), m_Value(Y))),
+                                  m_Value(Z))))) {
+    Value *XZ = Builder.CreateAdd(X, Z);
+    Value *YW = Builder.CreateAdd(Y, Op1);
+    return BinaryOperator::CreateSub(XZ, YW);
+  }
+
   if (Constant *C = dyn_cast<Constant>(Op0)) {
     Value *X;
     if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))

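The matcher above only fires when both the inner sub and the add producing Op0 have a single use (the m_OneUse guards), presumably so the fold does not create extra instructions when those intermediates have other users. Below is a rough, hypothetical harness (not from the patch; it assumes an LLVM development tree to compile and link against, and the module and function names are arbitrary) that builds the ((X - Y) + Z) - W shape with IRBuilder and evaluates the same matcher expression on it.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  Type *I8 = Type::getInt8Ty(Ctx);
  FunctionType *FT = FunctionType::get(I8, {I8, I8, I8, I8}, false);
  Function *F = Function::Create(FT, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);

  auto AI = F->arg_begin();
  Value *X = &*AI++, *Y = &*AI++, *Z = &*AI++, *W = &*AI++;

  // Build the shape the new fold looks for: ((X - Y) + Z) - W.
  Value *S1 = B.CreateSub(X, Y, "s1");
  Value *A  = B.CreateAdd(S1, Z, "a");
  Value *S2 = B.CreateSub(A, W, "s2");
  B.CreateRet(S2);

  // Evaluate the same matcher expression the patch uses on the outer sub.
  auto *Sub = cast<BinaryOperator>(S2);
  Value *Op0 = Sub->getOperand(0);
  Value *MX = nullptr, *MY = nullptr, *MZ = nullptr;
  bool Fires = match(Op0, m_OneUse(m_c_Add(m_OneUse(m_Sub(m_Value(MX), m_Value(MY))),
                                           m_Value(MZ))));
  // Both one-use guards are satisfied here (s1 is only used by a,
  // a is only used by s2), so the match succeeds.
  outs() << "fold applies: " << (Fires ? "yes" : "no") << "\n";
  return 0;
}

If either s1 or a picked up a second user, the corresponding m_OneUse guard would reject the match and visitSub would leave the expression alone.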
@@ -1543,9 +1543,9 @@ define i8 @test75(i8 %x) {
 
 define i8 @sub_add_sub_reassoc(i8 %w, i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @sub_add_sub_reassoc(
-; CHECK-NEXT:    [[S1:%.*]] = sub i8 [[W:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[A:%.*]] = add i8 [[S1]], [[Y:%.*]]
-; CHECK-NEXT:    [[S2:%.*]] = sub i8 [[A]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[W:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = add i8 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[S2:%.*]] = sub i8 [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    ret i8 [[S2]]
 ;
   %s1 = sub i8 %w, %x

@@ -1559,9 +1559,9 @@ define i8 @sub_add_sub_reassoc(i8 %w, i8 %x, i8 %y, i8 %z) {
 define <2 x i8> @sub_add_sub_reassoc_commute(<2 x i8> %w, <2 x i8> %x, <2 x i8> %y, <2 x i8> %z) {
 ; CHECK-LABEL: @sub_add_sub_reassoc_commute(
 ; CHECK-NEXT:    [[D:%.*]] = sdiv <2 x i8> [[Y:%.*]], <i8 42, i8 -42>
-; CHECK-NEXT:    [[S1:%.*]] = sub <2 x i8> [[W:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[A:%.*]] = add <2 x i8> [[D]], [[S1]]
-; CHECK-NEXT:    [[S2:%.*]] = sub <2 x i8> [[A]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i8> [[D]], [[W:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = add <2 x i8> [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[S2:%.*]] = sub <2 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    ret <2 x i8> [[S2]]
 ;
   %d = sdiv <2 x i8> %y, <i8 42, i8 -42> ; thwart complexity-based canonicalization

@@ -1575,10 +1575,10 @@ define <2 x i8> @sub_add_sub_reassoc_commute(<2 x i8> %w, <2 x i8> %x, <2 x i8>
 
 define i8 @sub_add_sub_reassoc_twice(i8 %v, i8 %w, i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @sub_add_sub_reassoc_twice(
-; CHECK-NEXT:    [[S1:%.*]] = sub i8 [[V:%.*]], [[W:%.*]]
-; CHECK-NEXT:    [[S2:%.*]] = sub i8 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[A:%.*]] = add i8 [[S1]], [[S2]]
-; CHECK-NEXT:    [[S3:%.*]] = sub i8 [[A]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[W:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = add i8 [[X:%.*]], [[V:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT:    [[S3:%.*]] = sub i8 [[TMP2]], [[TMP3]]
 ; CHECK-NEXT:    ret i8 [[S3]]
 ;
   %s1 = sub i8 %v, %w

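The sub_add_sub_reassoc_twice diff above shows the fold composing with itself: ((v - w) + (x - y)) - z ends up as three adds feeding a single sub, (x + v) - ((w + z) + y). A small illustrative equivalence check under 8-bit wrap-around arithmetic (invented for this write-up, not part of the test suite; function names are arbitrary):

#include <cassert>
#include <cstdint>

// Mirrors the test's %s1/%s2/%a/%s3 chain.
static uint8_t chained_twice(uint8_t v, uint8_t w, uint8_t x, uint8_t y, uint8_t z) {
  uint8_t s1 = v - w;
  uint8_t s2 = x - y;
  uint8_t a  = s1 + s2;
  return a - z;
}

// Mirrors the new CHECK lines: (x + v) - ((w + z) + y).
static uint8_t reassoc_twice(uint8_t v, uint8_t w, uint8_t x, uint8_t y, uint8_t z) {
  uint8_t t1 = w + z;
  uint8_t t2 = x + v;
  uint8_t t3 = t1 + y;
  return t2 - t3;
}

int main() {
  for (unsigned v = 0; v < 256; v += 13)
    for (unsigned z = 0; z < 256; z += 19)
      assert(chained_twice(v, 3, 7, 9, z) == reassoc_twice(v, 3, 7, 9, z));
  return 0;
}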
@@ -152,9 +152,9 @@ define i32 @test_struct_load4(%struct.ST4* nocapture readonly %S) {
 ; CHECK-NEXT:    [[STRIDED_VEC2:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
 ; CHECK-NEXT:    [[STRIDED_VEC3:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
 ; CHECK-NEXT:    [[TMP2:%.*]] = add <4 x i32> [[STRIDED_VEC]], [[VEC_PHI]]
-; CHECK-NEXT:    [[TMP3:%.*]] = sub <4 x i32> [[TMP2]], [[STRIDED_VEC1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = add <4 x i32> [[TMP3]], [[STRIDED_VEC2]]
-; CHECK-NEXT:    [[TMP5]] = sub <4 x i32> [[TMP4]], [[STRIDED_VEC3]]
+; CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[STRIDED_VEC2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = add <4 x i32> [[STRIDED_VEC1]], [[STRIDED_VEC3]]
+; CHECK-NEXT:    [[TMP5]] = sub <4 x i32> [[TMP3]], [[TMP4]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
 ; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !6