; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; X udiv C, where C is a power of 2 --> X >> log2(C)

define <4 x i32> @test_v4i32_splatconst_pow2(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_splatconst_pow2(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> [[A0:%.*]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = udiv <4 x i32> %a0, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}

define <4 x i32> @test_v4i32_const_pow2(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_const_pow2(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> [[A0:%.*]], <i32 2, i32 3, i32 4, i32 5>
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %1 = udiv <4 x i32> %a0, <i32 4, i32 8, i32 16, i32 32>
  ret <4 x i32> %1
}

; X udiv C, where C >= signbit
define <4 x i32> @test_v4i32_negconstsplat(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_negconstsplat(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt <4 x i32> [[A0:%.*]], <i32 -4, i32 -4, i32 -4, i32 -4>
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = udiv <4 x i32> %a0, <i32 -3, i32 -3, i32 -3, i32 -3>
  ret <4 x i32> %1
}

define <4 x i32> @test_v4i32_negconst(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_negconst(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt <4 x i32> [[A0:%.*]], <i32 -4, i32 -6, i32 -8, i32 -10>
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = udiv <4 x i32> %a0, <i32 -3, i32 -5, i32 -7, i32 -9>
  ret <4 x i32> %1
}

define <4 x i32> @test_v4i32_negconst_undef(<4 x i32> %a0) {
; CHECK-LABEL: @test_v4i32_negconst_undef(
; CHECK-NEXT:    ret <4 x i32> undef
;
  %1 = udiv <4 x i32> %a0, <i32 -3, i32 -5, i32 -7, i32 undef>
  ret <4 x i32> %1
}

; X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
define <4 x i32> @test_v4i32_shl_splatconst_pow2(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: @test_v4i32_shl_splatconst_pow2(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A1:%.*]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP2:%.*]] = lshr <4 x i32> [[A0:%.*]], [[TMP1]]
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %a1
  %2 = udiv <4 x i32> %a0, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_v4i32_shl_const_pow2(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: @test_v4i32_shl_const_pow2(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i32> [[A1:%.*]], <i32 2, i32 3, i32 4, i32 5>
; CHECK-NEXT:    [[TMP2:%.*]] = lshr <4 x i32> [[A0:%.*]], [[TMP1]]
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = shl <4 x i32> <i32 4, i32 8, i32 16, i32 32>, %a1
  %2 = udiv <4 x i32> %a0, %1
  ret <4 x i32> %2
}

; X udiv (zext (C1 << N)), where C1 is "1<<C2"  -->  X >> (N+C2)
define <4 x i32> @test_v4i32_zext_shl_splatconst_pow2(<4 x i32> %a0, <4 x i16> %a1) {
; CHECK-LABEL: @test_v4i32_zext_shl_splatconst_pow2(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i16> [[A1:%.*]], <i16 2, i16 2, i16 2, i16 2>
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[A0:%.*]], [[TMP2]]
; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
;
  %1 = shl <4 x i16> <i16 4, i16 4, i16 4, i16 4>, %a1
  %2 = zext <4 x i16> %1 to <4 x i32>
  %3 = udiv <4 x i32> %a0, %2
  ret <4 x i32> %3
}

define <4 x i32> @test_v4i32_zext_shl_const_pow2(<4 x i32> %a0, <4 x i16> %a1) {
; CHECK-LABEL: @test_v4i32_zext_shl_const_pow2(
; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i16> [[A1:%.*]], <i16 2, i16 3, i16 4, i16 5>
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[A0:%.*]], [[TMP2]]
; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
;
  %1 = shl <4 x i16> <i16 4, i16 8, i16 16, i16 32>, %a1
  %2 = zext <4 x i16> %1 to <4 x i32>
  %3 = udiv <4 x i32> %a0, %2
  ret <4 x i32> %3
}