From cdaf6001ac374da63dbcffd5b76e2468462145a9 Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Wed, 18 Sep 2019 18:38:32 +0000
Subject: [PATCH] [NFC][InstCombine] More tests for PR42563 "Dropping pointless
 masking before left shift"

For patterns c/d/e, too, we can deal with the pattern even if we can't
just drop the mask: we can simply apply it afterwards:
https://rise4fun.com/Alive/gslRa

llvm-svn: 372244
---
 ...dant-left-shift-input-masking-variant-c.ll |  98 +++++++++++++++
 ...dant-left-shift-input-masking-variant-d.ll | 114 ++++++++++++++++++
 ...dant-left-shift-input-masking-variant-e.ll |  98 +++++++++++++++
 ...dant-left-shift-input-masking-variant-c.ll |  20 ---
 ...dant-left-shift-input-masking-variant-d.ll |  23 ----
 ...dant-left-shift-input-masking-variant-e.ll |  20 ---
 ...dant-left-shift-input-masking-variant-f.ll |   7 +-
 7 files changed, 313 insertions(+), 67 deletions(-)
 create mode 100644 test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
 create mode 100644 test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
 create mode 100644 test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll

diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
new file mode 100644
index 00000000000..eb59f8c0a0d
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have a pattern that leaves only some low bits set, and then performs
+; a left shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   c)  (x & (-1 >> maskNbits)) << shiftNbits
+; simplify to:
+;   (x << shiftNbits) & (-1 >> (maskNbits-shiftNbits))
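+
+; As an illustrative sketch only (hypothetical values, not one of the tests
+; in this file): with i32, maskNbits = 8 and shiftNbits = 3, pattern (c)
+;   %mask = lshr i32 -1, 8        ; 0x00ffffff
+;   %lo   = and i32 %mask, %x     ; keep the low 24 bits of %x
+;   %r    = shl i32 %lo, 3        ; result bits 3..26
+; would become
+;   %s = shl i32 %x, 3
+;   %r = and i32 %s, 134217727    ; -1 u>> (8-3) = 0x07ffffff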
+
+; Simple tests.
+
+declare void @use32(i32)
+
+define i32 @t0_basic(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: call void @use32(i32 [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT: ret i32 [[T3]]
+;
+  %t0 = lshr i32 -1, %nbits
+  %t1 = and i32 %t0, %x
+  %t2 = add i32 %nbits, -1
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t2)
+  %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
+  ret i32 %t3
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+
+define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT: [[T0:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = and <8 x i32> [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT: ret <8 x i32> [[T3]]
+;
+  %t0 = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
+  %t1 = and <8 x i32> %t0, %x
+  %t2 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t2)
+  %t3 = shl <8 x i32> %t1, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t3
+}
+
+define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_nonsplat(
+; CHECK-NEXT: [[T0:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = and <8 x i32> [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]],
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT: ret <8 x i32> [[T3]]
+;
+  %t0 = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
+  %t1 = and <8 x i32> %t0, %x
+  %t2 = add <8 x i32> %nbits,
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t2)
+  %t3 = shl <8 x i32> %t1, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t3
+}
+
+; Extra uses.
+
+define i32 @n3_extrause(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @n3_extrause(
+; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: call void @use32(i32 [[T1]])
+; CHECK-NEXT: call void @use32(i32 [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT: ret i32 [[T3]]
+;
+  %t0 = lshr i32 -1, %nbits
+  %t1 = and i32 %t0, %x
+  %t2 = add i32 %nbits, -1
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1) ; BAD
+  call void @use32(i32 %t2)
+  %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
+  ret i32 %t3
+}
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
new file mode 100644
index 00000000000..de80f764771
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have a pattern that leaves only some low bits set, and then performs
+; a left shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   d)  (x & ((-1 << maskNbits) >> maskNbits)) << shiftNbits
+; simplify to:
+;   (x << shiftNbits) & (-1 >> (maskNbits-shiftNbits))
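+
+; A purely illustrative sketch (hypothetical values, not one of the tests
+; in this file): with i32, maskNbits = 8 and shiftNbits = 3, pattern (d)
+;   %t0 = shl i32 -1, 8           ; 0xffffff00
+;   %t1 = lshr i32 %t0, 8         ; 0x00ffffff
+;   %t2 = and i32 %t1, %x         ; keep the low 24 bits of %x
+;   %r  = shl i32 %t2, 3
+; would become
+;   %s = shl i32 %x, 3
+;   %r = and i32 %s, 134217727    ; -1 u>> (8-3) = 0x07ffffff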
+
+; Simple tests.
+
+declare void @use32(i32)
+
+define i32 @t0_basic(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT: [[T0:%.*]] = shl i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: call void @use32(i32 [[T1]])
+; CHECK-NEXT: call void @use32(i32 [[T3]])
+; CHECK-NEXT: [[T4:%.*]] = shl i32 [[T2]], [[T3]]
+; CHECK-NEXT: ret i32 [[T4]]
+;
+  %t0 = shl i32 -1, %nbits
+  %t1 = lshr i32 %t0, %nbits
+  %t2 = and i32 %t1, %x
+  %t3 = add i32 %nbits, -1
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t3)
+  %t4 = shl i32 %t2, %t3 ; shift is smaller than mask
+  ret i32 %t4
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+
+define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat(
+; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = and <8 x i32> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT: [[T4:%.*]] = shl <8 x i32> [[T2]], [[T3]]
+; CHECK-NEXT: ret <8 x i32> [[T4]]
+;
+  %t0 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
+  %t1 = lshr <8 x i32> %t0, %nbits
+  %t2 = and <8 x i32> %t1, %x
+  %t3 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t3)
+  %t4 = shl <8 x i32> %t2, %t3 ; shift is smaller than mask
+  ret <8 x i32> %t4
+}
+
+define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_nonsplat(
+; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = and <8 x i32> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]],
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT: [[T4:%.*]] = shl <8 x i32> [[T2]], [[T3]]
+; CHECK-NEXT: ret <8 x i32> [[T4]]
+;
+  %t0 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
+  %t1 = lshr <8 x i32> %t0, %nbits
+  %t2 = and <8 x i32> %t1, %x
+  %t3 = add <8 x i32> %nbits,
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t3)
+  %t4 = shl <8 x i32> %t2, %t3 ; shift is smaller than mask
+  ret <8 x i32> %t4
+}
+
+; Extra uses.
+
+define i32 @n3_extrause(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @n3_extrause(
+; CHECK-NEXT: [[T0:%.*]] = shl i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: call void @use32(i32 [[T1]])
+; CHECK-NEXT: call void @use32(i32 [[T2]])
+; CHECK-NEXT: call void @use32(i32 [[T3]])
+; CHECK-NEXT: [[T4:%.*]] = shl i32 [[T2]], [[T3]]
+; CHECK-NEXT: ret i32 [[T4]]
+;
+  %t0 = shl i32 -1, %nbits
+  %t1 = lshr i32 %t0, %nbits
+  %t2 = and i32 %t1, %x
+  %t3 = add i32 %nbits, -1
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2) ; BAD
+  call void @use32(i32 %t3)
+  %t4 = shl i32 %t2, %t3 ; shift is smaller than mask
+  ret i32 %t4
+}
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
new file mode 100644
index 00000000000..609b3b94adb
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have a pattern that leaves only some low bits set, and then performs
+; a left shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   e)  ((x << maskNbits) l>> maskNbits) << shiftNbits
+; simplify to:
+;   (x << shiftNbits) & (-1 >> (maskNbits-shiftNbits))
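+
+; A purely illustrative sketch (hypothetical values, not one of the tests
+; in this file): with i32, maskNbits = 8 and shiftNbits = 3, pattern (e)
+;   %t0 = shl i32 %x, 8
+;   %t1 = lshr i32 %t0, 8         ; keep the low 24 bits of %x
+;   %r  = shl i32 %t1, 3
+; would become
+;   %s = shl i32 %x, 3
+;   %r = and i32 %s, 134217727    ; -1 u>> (8-3) = 0x07ffffff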
+
+; Simple tests.
+
+declare void @use32(i32)
+
+define i32 @t0_basic(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT: [[T0:%.*]] = shl i32 [[X:%.*]], [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: call void @use32(i32 [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT: ret i32 [[T3]]
+;
+  %t0 = shl i32 %x, %nbits
+  %t1 = lshr i32 %t0, %nbits
+  %t2 = add i32 %nbits, -1
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t2)
+  %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
+  ret i32 %t3
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+
+define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT: ret <8 x i32> [[T3]]
+;
+  %t0 = shl <8 x i32> %x, %nbits
+  %t1 = lshr <8 x i32> %t0, %nbits
+  %t2 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t2)
+  %t3 = shl <8 x i32> %t1, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t3
+}
+
+define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_nonsplat(
+; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]],
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT: ret <8 x i32> [[T3]]
+;
+  %t0 = shl <8 x i32> %x, %nbits
+  %t1 = lshr <8 x i32> %t0, %nbits
+  %t2 = add <8 x i32> %nbits,
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t2)
+  %t3 = shl <8 x i32> %t1, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t3
+}
+
+; Extra uses.
+
+define i32 @n3_extrause(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @n3_extrause(
+; CHECK-NEXT: [[T0:%.*]] = shl i32 [[X:%.*]], [[NBITS:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: call void @use32(i32 [[T1]])
+; CHECK-NEXT: call void @use32(i32 [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT: ret i32 [[T3]]
+;
+  %t0 = shl i32 %x, %nbits
+  %t1 = lshr i32 %t0, %nbits
+  %t2 = add i32 %nbits, -1
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1) ; BAD
+  call void @use32(i32 %t2)
+  %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
+  ret i32 %t3
+}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll
index 79a540d3452..fb480e3f3ec 100644
--- a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll
@@ -281,23 +281,3 @@ define i32 @n12_not_minus_one(i32 %x, i32 %nbits) {
   %t2 = shl i32 %t1, %nbits
   ret i32 %t2
 }
-
-define i32 @n13_shamt_is_smaller(i32 %x, i32 %nbits) {
-; CHECK-LABEL: @n13_shamt_is_smaller(
-; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
-; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: call void @use32(i32 [[T2]])
-; CHECK-NEXT: ret i32 [[T2]]
-;
-  %t0 = lshr i32 -1, %nbits
-  %t1 = and i32 %t0, %x
-  %t2 = add i32 %nbits, -1
-  call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
-  call void @use32(i32 %t2)
-  %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
-  ret i32 %t2
-}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll
index e01ac41055f..0871e82dd28 100644
--- a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll
@@ -361,26 +361,3 @@ define i32 @n13_different_shamts1(i32 %x, i32 %nbits0, i32 %nbits1) {
   %t3 = shl i32 %t2, %nbits1
   ret i32 %t3
 }
-
-define i32 @n14_shamt_is_smaller(i32 %x, i32 %nbits) {
-; CHECK-LABEL: @n14_shamt_is_smaller(
-; CHECK-NEXT: [[T0:%.*]] = shl i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
-; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1
-; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: call void @use32(i32 [[T2]])
-; CHECK-NEXT: [[T4:%.*]] = shl i32 [[T2]], [[T3]]
-; CHECK-NEXT: ret i32 [[T4]]
-;
-  %t0 = shl i32 -1, %nbits
-  %t1 = lshr i32 %t0, %nbits
-  %t2 = and i32 %t1, %x
-  %t3 = add i32 %nbits, -1 ; shift is smaller than mask
-  call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
-  call void @use32(i32 %t2)
-  %t4 = shl i32 %t2, %t3
-  ret i32 %t4
-}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-e.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-e.ll
index 104340b08d5..f3682ef8cdd 100644
--- a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-e.ll
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-e.ll
@@ -234,23 +234,3 @@ define i32 @n10_different_shamts1(i32 %x, i32 %nbits0, i32 %nbits1) {
   %t2 = shl i32 %t1, %nbits1
   ret i32 %t2
 }
-
-define i32 @n11_shamt_is_smaller(i32 %x, i32 %nbits) {
-; CHECK-LABEL: @n11_shamt_is_smaller(
-; CHECK-NEXT: [[T0:%.*]] = shl i32 [[X:%.*]], [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
-; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
-; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: call void @use32(i32 [[T2]])
-; CHECK-NEXT: ret i32 [[T2]]
-;
-  %t0 = shl i32 %x, %nbits
-  %t1 = lshr i32 %t0, %nbits
-  %t2 = add i32 %nbits, -1
-  call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
-  call void @use32(i32 %t2)
-  %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
-  ret i32 %t2
-}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
index 331d85edb73..3e2bb330ab6 100644
--- a/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
@@ -241,16 +241,15 @@ define i32 @n11_shamt_is_smaller(i32 %x, i32 %nbits) {
 ; CHECK-NEXT: [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: call void @use32(i32 [[T1]])
 ; CHECK-NEXT: call void @use32(i32 [[T2]])
-; CHECK-NEXT: ret i32 [[T2]]
+; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT: ret i32 [[T3]]
 ;
   %t0 = shl i32 %x, %nbits
   %t1 = ashr i32 %t0, %nbits
   %t2 = add i32 %nbits, -1
   call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
   call void @use32(i32 %t2)
   %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
-  ret i32 %t2
+  ret i32 %t3
 }