Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-26 04:32:44 +01:00)
[InstCombine] Eliminate casts to optimize ctlz operation
If a value is zero-extended, ctlz is performed on the wider type, and the result is truncated back to the original type, the casts can be eliminated: perform ctlz directly on the narrower type and add the bit-width difference to its result, which yields the same value: https://alive2.llvm.org/ce/z/8uup9M

The original problem is shown in https://llvm.org/PR50173

Differential Revision: https://reviews.llvm.org/D103788
commit 9ca79457ad (parent c4a535aaff)
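To make the rewrite concrete, here is a before/after sketch in the shape of the first test case below. The function and value names (@before, @after, %c) are illustrative and not from the commit; the exact expected output is what the updated CHECK lines in the test diff verify.

  declare i32 @llvm.ctlz.i32(i32, i1)
  declare i16 @llvm.ctlz.i16(i16, i1)

  ; before: ctlz on the zero-extended value, then truncate back
  define i16 @before(i16 %x) {
    %z  = zext i16 %x to i32
    %p  = call i32 @llvm.ctlz.i32(i32 %z, i1 false)
    %zz = trunc i32 %p to i16
    ret i16 %zz
  }

  ; after: ctlz on the narrow type plus the bit-width difference (32 - 16 = 16)
  define i16 @after(i16 %x) {
    %c  = call i16 @llvm.ctlz.i16(i16 %x, i1 false)
    %zz = add i16 %c, 16
    ret i16 %zz
  }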
@@ -836,7 +836,7 @@ Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
     }
   }
 
-  Value *A;
+  Value *A, *B;
   Constant *C;
   if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
     unsigned AWidth = A->getType()->getScalarSizeInBits();
@@ -950,6 +950,17 @@ Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
     }
   }
 
+  // trunc (ctlz_i32(zext(A), B)) --> add(ctlz_i16(A, B), C)
+  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
+                                                       m_Value(B))))) {
+    unsigned AWidth = A->getType()->getScalarSizeInBits();
+    if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
+      Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
+      Value *NarrowCtlz =
+          Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
+      return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
+    }
+  }
   return nullptr;
 }
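A note on the AWidth > Log2_32(SrcWidth) guard above (reasoning sketched here for context; the commit itself only leaves the TODO in the test below): it requires the narrow destination type to be able to represent SrcWidth, the largest value ctlz on the wide type can produce, so the emitted add never has to wrap. Checking the two extremes exercised by the tests:

  i16 from i32:  ctlz result range is [16, 32], and 32 < 2^16;  guard 16 > Log2_32(32) = 5 holds, so the transform fires
  i3  from i34:  ctlz result range is [31, 34], but 34 >= 2^3;  guard  3 > Log2_32(34) = 5 fails, so the transform is skipped

The i3/i34 case is the TODO noted in the test file: the narrowing might still be provable there, but it stays disabled for now.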
@@ -1,7 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
+declare i3 @llvm.ctlz.i3 (i3 , i1)
 declare i32 @llvm.ctlz.i32 (i32, i1)
+declare i34 @llvm.ctlz.i34 (i34, i1)
 declare <2 x i33> @llvm.ctlz.v2i33 (<2 x i33>, i1)
 declare <2 x i32> @llvm.ctlz.v2i32 (<2 x i32>, i1)
 declare <vscale x 2 x i64> @llvm.ctlz.nxv2i64 (<vscale x 2 x i64>, i1)
@@ -11,9 +13,8 @@ declare void @use1(<vscale x 2 x i63>)
 
 define i16 @trunc_ctlz_zext_i16_i32(i16 %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_i16_i32(
-; CHECK-NEXT:    [[Z:%.*]] = zext i16 [[X:%.*]] to i32
-; CHECK-NEXT:    [[P:%.*]] = call i32 @llvm.ctlz.i32(i32 [[Z]], i1 false), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc i32 [[P]] to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0:![0-9]+]]
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw i16 [[TMP1]], 16
 ; CHECK-NEXT:    ret i16 [[ZZ]]
 ;
   %z = zext i16 %x to i32
@@ -26,9 +27,8 @@ define i16 @trunc_ctlz_zext_i16_i32(i16 %x) {
 
 define <2 x i8> @trunc_ctlz_zext_v2i8_v2i33(<2 x i8> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_v2i8_v2i33(
-; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i33>
-; CHECK-NEXT:    [[P:%.*]] = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> [[Z]], i1 true)
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc <2 x i33> [[P]] to <2 x i8>
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[X:%.*]], i1 true)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <2 x i8> [[TMP1]], <i8 25, i8 25>
 ; CHECK-NEXT:    ret <2 x i8> [[ZZ]]
 ;
   %z = zext <2 x i8> %x to <2 x i33>
@@ -41,9 +41,8 @@ define <2 x i8> @trunc_ctlz_zext_v2i8_v2i33(<2 x i8> %x) {
 
 define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i64(<vscale x 2 x i16> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_nxv2i16_nxv2i64(
-; CHECK-NEXT:    [[Z:%.*]] = zext <vscale x 2 x i16> [[X:%.*]] to <vscale x 2 x i64>
-; CHECK-NEXT:    [[P:%.*]] = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> [[Z]], i1 false)
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc <vscale x 2 x i64> [[P]] to <vscale x 2 x i16>
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> [[X:%.*]], i1 false)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i16> [[TMP1]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> undef, i16 48, i32 0), <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    ret <vscale x 2 x i16> [[ZZ]]
 ;
   %z = zext <vscale x 2 x i16> %x to <vscale x 2 x i64>
@@ -52,6 +51,8 @@ define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i64(<vscale x 2 x i16> %x
   ret <vscale x 2 x i16> %zz
 }
 
+; Multiple uses of ctlz for which the opt is disabled
+
 define <2 x i17> @trunc_ctlz_zext_v2i17_v2i32_multiple_uses(<2 x i17> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_v2i17_v2i32_multiple_uses(
 ; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i17> [[X:%.*]] to <2 x i32>
@@ -67,11 +68,13 @@ define <2 x i17> @trunc_ctlz_zext_v2i17_v2i32_multiple_uses(<2 x i17> %x) {
   ret <2 x i17> %zz
 }
 
+; Multiple uses of zext
+
 define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i63_multiple_uses(<vscale x 2 x i16> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_nxv2i16_nxv2i63_multiple_uses(
 ; CHECK-NEXT:    [[Z:%.*]] = zext <vscale x 2 x i16> [[X:%.*]] to <vscale x 2 x i63>
-; CHECK-NEXT:    [[P:%.*]] = call <vscale x 2 x i63> @llvm.ctlz.nxv2i63(<vscale x 2 x i63> [[Z]], i1 true)
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc <vscale x 2 x i63> [[P]] to <vscale x 2 x i16>
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> [[X]], i1 true)
+; CHECK-NEXT:    [[ZZ:%.*]] = add nuw nsw <vscale x 2 x i16> [[TMP1]], shufflevector (<vscale x 2 x i16> insertelement (<vscale x 2 x i16> undef, i16 47, i32 0), <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    call void @use1(<vscale x 2 x i63> [[Z]])
 ; CHECK-NEXT:    ret <vscale x 2 x i16> [[ZZ]]
 ;
@@ -81,3 +84,36 @@ define <vscale x 2 x i16> @trunc_ctlz_zext_nxv2i16_nxv2i63_multiple_uses(<vscale
   call void @use1(<vscale x 2 x i63> %z)
   ret <vscale x 2 x i16> %zz
 }
+
+; Negative case where types of x and zz don't match
+
+define i16 @trunc_ctlz_zext_i10_i32(i10 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i10_i32(
+; CHECK-NEXT:    [[Z:%.*]] = zext i10 [[X:%.*]] to i32
+; CHECK-NEXT:    [[P:%.*]] = call i32 @llvm.ctlz.i32(i32 [[Z]], i1 false), !range [[RNG1:![0-9]+]]
+; CHECK-NEXT:    [[ZZ:%.*]] = trunc i32 [[P]] to i16
+; CHECK-NEXT:    ret i16 [[ZZ]]
+;
+  %z = zext i10 %x to i32
+  %p = call i32 @llvm.ctlz.i32(i32 %z, i1 false)
+  %zz = trunc i32 %p to i16
+  ret i16 %zz
+}
+
+; Test width difference of more than log2 between x and t
+; TODO: Enable the opt for this case if it is proved that the
+; opt works for all combinations of bitwidth of zext src and dst.
+; Refer : https://reviews.llvm.org/D103788
+
+define i3 @trunc_ctlz_zext_i3_i34(i3 %x) {
+; CHECK-LABEL: @trunc_ctlz_zext_i3_i34(
+; CHECK-NEXT:    [[Z:%.*]] = zext i3 [[X:%.*]] to i34
+; CHECK-NEXT:    [[P:%.*]] = call i34 @llvm.ctlz.i34(i34 [[Z]], i1 false), !range [[RNG2:![0-9]+]]
+; CHECK-NEXT:    [[T:%.*]] = trunc i34 [[P]] to i3
+; CHECK-NEXT:    ret i3 [[T]]
+;
+  %z = zext i3 %x to i34
+  %p = call i34 @llvm.ctlz.i34(i34 %z, i1 false)
+  %t = trunc i34 %p to i3
+  ret i3 %t
+}