
Revert rGb97093e520036f8 - "[InstCombine] matchFunnelShift - fold or(shl(a,x),lshr(b,sub(bw,x))) -> fshl(a,b,x) iff x < bw"

This reverts commit b97093e520036f88c5b39e572966f1c8c387661e.

Funnel shift argument commutation isn't working correctly
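
For context, this is the shape of IR the reverted fold matched, sketched from the fshl_sub_mask test updated below (the function name @src is made up). Masking the shift amount with 63 lets known-bits analysis prove it is smaller than the bit width of 64:

    define i64 @src(i64 %x, i64 %y, i64 %a) {
      %mask = and i64 %a, 63            ; shift amount provably < 64
      %shl = shl i64 %x, %mask
      %sub = sub nuw nsw i64 64, %mask
      %shr = lshr i64 %y, %sub
      %r = or i64 %shl, %shr            ; == fshl(%x, %y, %mask)
      ret i64 %r
    }

Before this revert, InstCombine collapsed the whole sequence into a single call to @llvm.fshl.i64, as the old CHECK lines in the test diffs below show; with the revert, the shl/lshr/or sequence is left in place until the commutation bug in the matcher is fixed.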
Simon Pilgrim  2020-10-12 11:38:52 +01:00
commit 030fbbba7c (parent 7f874dd3f0)
3 changed files with 35 additions and 21 deletions

lib/Transforms/InstCombine/InstCombineAndOrXor.cpp

@@ -2053,7 +2053,7 @@ Instruction *InstCombinerImpl::matchBSwap(BinaryOperator &Or) {
 }
 
 /// Match UB-safe variants of the funnel shift intrinsic.
-static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
+static Instruction *matchFunnelShift(Instruction &Or) {
   // TODO: Can we reduce the code duplication between this and the related
   // rotate matching code under visitSelect and visitTrunc?
   unsigned Width = Or.getType()->getScalarSizeInBits();
@@ -2094,16 +2094,6 @@ static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
       return L;
     }
 
-    // (shl ShVal, X) | (lshr ShVal, (Width - x)) iff X < Width.
-    // We limit this to X < Width in case the backend re-expands the intrinsic,
-    // and has to reintroduce a shift modulo operation (InstCombine might remove
-    // it after this fold). This still doesn't guarantee that the final codegen
-    // will match this original pattern.
-    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) {
-      KnownBits KnownL = IC.computeKnownBits(L, /*Depth*/ 0, &Or);
-      return KnownL.getMaxValue().ult(Width) ? L : nullptr;
-    }
-
     // For non-constant cases, the following patterns currently only work for
     // rotation patterns.
     // TODO: Add general funnel-shift compatible patterns.
@@ -2600,7 +2590,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
   if (Instruction *BSwap = matchBSwap(I))
     return BSwap;
 
-  if (Instruction *Funnel = matchFunnelShift(I, *this))
+  if (Instruction *Funnel = matchFunnelShift(I))
     return Funnel;
 
   if (Instruction *Concat = matchOrConcat(I, Builder))
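
A sketch of why the deleted guard required X < Width, restating the deleted comment together with LangRef shift semantics (the values below are hypothetical): at X == 0 the unfused pattern shifts right by the full bit width, which is poison, while the intrinsic is defined for every amount because it interprets the amount modulo the bit width. The KnownBits query, the only use of the IC parameter being removed here, admitted the fold only when the amount was provably in range, e.g. after an 'and' with 63:

    %shl = shl i64 %a, 0                                  ; just %a
    %shr = lshr i64 %b, 64                                ; amount >= bit width: poison
    %or  = or i64 %shl, %shr                              ; poison propagates
    %f   = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 0) ; well defined: returns %a

The deleted comment's concern was the round trip: if a backend re-expands the intrinsic with a modulo shift amount, the boundary cases only stay value-preserving when X < Width holds.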

test/Transforms/InstCombine/funnel.ll

@@ -168,7 +168,11 @@ define <3 x i36> @fshl_v3i36_constant_nonsplat_undef0(<3 x i36> %x, <3 x i36> %y
 
 define i64 @fshl_sub_mask(i64 %x, i64 %y, i64 %a) {
 ; CHECK-LABEL: @fshl_sub_mask(
-; CHECK-NEXT:    [[R:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[Y:%.*]], i64 [[A:%.*]])
+; CHECK-NEXT:    [[MASK:%.*]] = and i64 [[A:%.*]], 63
+; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[X:%.*]], [[MASK]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i64 64, [[MASK]]
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 [[Y:%.*]], [[SUB]]
+; CHECK-NEXT:    [[R:%.*]] = or i64 [[SHL]], [[SHR]]
 ; CHECK-NEXT:    ret i64 [[R]]
 ;
   %mask = and i64 %a, 63
@@ -183,7 +187,11 @@ define i64 @fshl_sub_mask(i64 %x, i64 %y, i64 %a) {
 
 define i64 @fshr_sub_mask(i64 %x, i64 %y, i64 %a) {
 ; CHECK-LABEL: @fshr_sub_mask(
-; CHECK-NEXT:    [[R:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[Y:%.*]], i64 [[A:%.*]])
+; CHECK-NEXT:    [[MASK:%.*]] = and i64 [[A:%.*]], 63
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], [[MASK]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i64 64, [[MASK]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[Y:%.*]], [[SUB]]
+; CHECK-NEXT:    [[R:%.*]] = or i64 [[SHL]], [[SHR]]
 ; CHECK-NEXT:    ret i64 [[R]]
 ;
   %mask = and i64 %a, 63
@@ -196,7 +204,11 @@ define i64 @fshr_sub_mask(i64 %x, i64 %y, i64 %a) {
 
 define <2 x i64> @fshr_sub_mask_vector(<2 x i64> %x, <2 x i64> %y, <2 x i64> %a) {
 ; CHECK-LABEL: @fshr_sub_mask_vector(
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> [[X:%.*]], <2 x i64> [[Y:%.*]], <2 x i64> [[A:%.*]])
+; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i64> [[A:%.*]], <i64 63, i64 63>
+; CHECK-NEXT:    [[SHR:%.*]] = lshr <2 x i64> [[X:%.*]], [[MASK]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw <2 x i64> <i64 64, i64 64>, [[MASK]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i64> [[Y:%.*]], [[SUB]]
+; CHECK-NEXT:    [[R:%.*]] = or <2 x i64> [[SHL]], [[SHR]]
 ; CHECK-NEXT:    ret <2 x i64> [[R]]
 ;
   %mask = and <2 x i64> %a, <i64 63, i64 63>

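An aside on the check syntax in these autogenerated tests, for readers unfamiliar with FileCheck: [[MASK:%.*]] defines a FileCheck variable named MASK bound to whatever the regex %.* matches, and a later bare [[MASK]] must match the exact same text, which keeps the checks stable under SSA value renaming. A minimal illustration (the instructions shown are placeholders):

    ; CHECK:      [[V:%.*]] = and i64 %a, 63    ; binds V to the result name
    ; CHECK-NEXT: shl i64 %x, [[V]]             ; must reuse the captured name

Such check lines are normally regenerated with utils/update_test_checks.py rather than edited by hand, which is why the whole block changes at once in these diffs.
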
test/Transforms/InstCombine/rotate.ll

@@ -676,8 +676,12 @@ define i9 @rotateleft_9_neg_mask_wide_amount_commute(i9 %v, i33 %shamt) {
 
 define i64 @rotl_sub_mask(i64 %0, i64 %1) {
 ; CHECK-LABEL: @rotl_sub_mask(
-; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP0:%.*]], i64 [[TMP0]], i64 [[TMP1:%.*]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP1:%.*]], 63
+; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[TMP0:%.*]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = sub nuw nsw i64 64, [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP0]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i64 [[TMP6]], [[TMP4]]
+; CHECK-NEXT:    ret i64 [[TMP7]]
 ;
   %3 = and i64 %1, 63
   %4 = shl i64 %0, %3
@@ -691,8 +695,12 @@ define i64 @rotl_sub_mask(i64 %0, i64 %1) {
 
 define i64 @rotr_sub_mask(i64 %0, i64 %1) {
 ; CHECK-LABEL: @rotr_sub_mask(
-; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.fshr.i64(i64 [[TMP0:%.*]], i64 [[TMP0]], i64 [[TMP1:%.*]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP1:%.*]], 63
+; CHECK-NEXT:    [[TMP4:%.*]] = lshr i64 [[TMP0:%.*]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = sub nuw nsw i64 64, [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP0]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i64 [[TMP6]], [[TMP4]]
+; CHECK-NEXT:    ret i64 [[TMP7]]
 ;
   %3 = and i64 %1, 63
   %4 = lshr i64 %0, %3
@@ -704,8 +712,12 @@ define i64 @rotr_sub_mask(i64 %0, i64 %1) {
 
 define <2 x i64> @rotr_sub_mask_vector(<2 x i64> %0, <2 x i64> %1) {
 ; CHECK-LABEL: @rotr_sub_mask_vector(
-; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> [[TMP0:%.*]], <2 x i64> [[TMP0]], <2 x i64> [[TMP1:%.*]])
-; CHECK-NEXT:    ret <2 x i64> [[TMP3]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and <2 x i64> [[TMP1:%.*]], <i64 63, i64 63>
+; CHECK-NEXT:    [[TMP4:%.*]] = lshr <2 x i64> [[TMP0:%.*]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = sub nuw nsw <2 x i64> <i64 64, i64 64>, [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = shl <2 x i64> [[TMP0]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or <2 x i64> [[TMP6]], [[TMP4]]
+; CHECK-NEXT:    ret <2 x i64> [[TMP7]]
 ;
   %3 = and <2 x i64> %1, <i64 63, i64 63>
   %4 = lshr <2 x i64> %0, %3
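
The rotate tests regress in the same way because a rotate is the special case of a funnel shift whose two value operands are equal, i.e. rotl(x, s) == fshl(x, x, s) per the LangRef, so they exercised the same matching path that is being reverted. A minimal sketch of the equivalence:

    define i64 @rotl(i64 %x, i64 %s) {
      ; rotate-left: bits shifted out on the left re-enter on the right;
      ; the shift amount is interpreted modulo 64
      %r = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %s)
      ret i64 %r
    }

    declare i64 @llvm.fshl.i64(i64, i64, i64)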