diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 654de3ee017..bd548eefdd7 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3308,6 +3308,40 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     }
     break;
   }
+  case Intrinsic::arm_mve_pred_i2v: {
+    Value *Arg = II->getArgOperand(0);
+    Value *ArgArg;
+    // i2v(v2i(x)) folds to x, provided the predicate vector types match.
+    if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg))) &&
+        II->getType() == ArgArg->getType())
+      return replaceInstUsesWith(*II, ArgArg);
+    // Only the low 16 bits of the scalar operand matter to i2v, so let
+    // demanded-bits analysis simplify whatever feeds the high bits.
+    KnownBits ScalarKnown(32);
+    if (SimplifyDemandedBits(II, 0, APInt::getLowBitsSet(32, 16),
+                             ScalarKnown, 0))
+      return II;
+    break;
+  }
+  case Intrinsic::arm_mve_pred_v2i: {
+    Value *Arg = II->getArgOperand(0);
+    Value *ArgArg;
+    // v2i(i2v(x)) folds to x.
+    if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(m_Value(ArgArg))))
+      return replaceInstUsesWith(*II, ArgArg);
+    // v2i only produces values in [0, 0xFFFF]; record that as half-open
+    // !range metadata [0, 0x10000) so later passes can exploit it.
+    if (!II->getMetadata(LLVMContext::MD_range)) {
+      Type *IntTy32 = Type::getInt32Ty(II->getContext());
+      Metadata *M[] = {
+        ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
+        ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000))
+      };
+      II->setMetadata(LLVMContext::MD_range, MDNode::get(II->getContext(), M));
+      return II;
+    }
+    break;
+  }
   case Intrinsic::arm_mve_vadc:
   case Intrinsic::arm_mve_vadc_predicated: {
     unsigned CarryOp =
diff --git a/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll b/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
new file mode 100644
index 00000000000..c7533503fa7
--- /dev/null
+++ b/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
@@ -0,0 +1,22 @@
+; RUN: opt -instcombine %s | llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i16> @test_vpt_block(<8 x i16> %v_inactive, <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
+; CHECK-LABEL: test_vpt_block:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.i16 eq, q1, q2
+; CHECK-NEXT:    vaddt.i16 q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = icmp eq <8 x i16> %v1, %v2
+  %1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0)
+  %2 = trunc i32 %1 to i16
+  %3 = zext i16 %2 to i32
+  %4 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %3)
+  %5 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %v3, <8 x i16> %v2, <8 x i1> %4, <8 x i16> %v_inactive)
+  ret <8 x i16> %5
+}
+
+declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
+declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
+declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)
+
diff --git a/test/Transforms/InstCombine/ARM/mve-v2i2v.ll b/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
new file mode 100644
index 00000000000..4594102a468
--- /dev/null
+++ b/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
@@ -0,0 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -S -o - %s | FileCheck %s
+
+declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>)
+declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
+declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>)
+
+declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
+declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
+declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)
+
+; Round-trip conversions from predicate vector to i32 back to the same
+; size of vector should be eliminated.
+
+define <4 x i1> @v2i2v_4(<4 x i1> %vin) {
+; CHECK-LABEL: @v2i2v_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret <4 x i1> [[VIN:%.*]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %int)
+  ret <4 x i1> %vout
+}
+
+define <8 x i1> @v2i2v_8(<8 x i1> %vin) {
+; CHECK-LABEL: @v2i2v_8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret <8 x i1> [[VIN:%.*]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %int)
+  ret <8 x i1> %vout
+}
+
+define <16 x i1> @v2i2v_16(<16 x i1> %vin) {
+; CHECK-LABEL: @v2i2v_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret <16 x i1> [[VIN:%.*]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %int)
+  ret <16 x i1> %vout
+}
+
+; Conversions from a predicate vector to i32 and then to a _different_
+; size of predicate vector should be left alone.
+
+define <16 x i1> @v2i2v_4_16(<4 x i1> %vin) {
+; CHECK-LABEL: @v2i2v_4_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
+; CHECK-NEXT:    [[VOUT:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[INT]])
+; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %int)
+  ret <16 x i1> %vout
+}
+
+define <4 x i1> @v2i2v_8_4(<8 x i1> %vin) {
+; CHECK-LABEL: @v2i2v_8_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[VIN:%.*]]), !range !0
+; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[INT]])
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %int)
+  ret <4 x i1> %vout
+}
+
+define <8 x i1> @v2i2v_16_8(<16 x i1> %vin) {
+; CHECK-LABEL: @v2i2v_16_8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[VIN:%.*]]), !range !0
+; CHECK-NEXT:    [[VOUT:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[INT]])
+; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %int)
+  ret <8 x i1> %vout
+}
+
+; Round-trip conversions from i32 to predicate vector back to i32
+; should be eliminated.
+
+define i32 @i2v2i_4(i32 %iin) {
+; CHECK-LABEL: @i2v2i_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i32 [[IIN:%.*]]
+;
+entry:
+  %vec = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %iin)
+  %iout = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vec)
+  ret i32 %iout
+}
+
+define i32 @i2v2i_8(i32 %iin) {
+; CHECK-LABEL: @i2v2i_8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i32 [[IIN:%.*]]
+;
+entry:
+  %vec = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %iin)
+  %iout = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vec)
+  ret i32 %iout
+}
+
+define i32 @i2v2i_16(i32 %iin) {
+; CHECK-LABEL: @i2v2i_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret i32 [[IIN:%.*]]
+;
+entry:
+  %vec = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %iin)
+  %iout = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vec)
+  ret i32 %iout
+}
+
+; v2i leaves the top 16 bits clear. So a trunc/zext pair applied to
+; its output, going via i16, can be completely eliminated - but not
+; one going via i8. Similarly with other methods of clearing the top
+; bits, like bitwise and.
+
+define i32 @v2i_truncext_i16(<4 x i1> %vin) {
+; CHECK-LABEL: @v2i_truncext_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
+; CHECK-NEXT:    ret i32 [[WIDE1]]
+;
+entry:
+  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %narrow = trunc i32 %wide1 to i16
+  %wide2 = zext i16 %narrow to i32
+  ret i32 %wide2
+}
+
+define i32 @v2i_truncext_i8(<4 x i1> %vin) {
+; CHECK-LABEL: @v2i_truncext_i8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
+; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1]], 255
+; CHECK-NEXT:    ret i32 [[WIDE2]]
+;
+entry:
+  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %narrow = trunc i32 %wide1 to i8
+  %wide2 = zext i8 %narrow to i32
+  ret i32 %wide2
+}
+
+define i32 @v2i_and_16(<4 x i1> %vin) {
+; CHECK-LABEL: @v2i_and_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
+; CHECK-NEXT:    ret i32 [[WIDE1]]
+;
+entry:
+  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %wide2 = and i32 %wide1, 65535
+  ret i32 %wide2
+}
+
+define i32 @v2i_and_15(<4 x i1> %vin) {
+; CHECK-LABEL: @v2i_and_15(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
+; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1]], 32767
+; CHECK-NEXT:    ret i32 [[WIDE2]]
+;
+entry:
+  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %wide2 = and i32 %wide1, 32767
+  ret i32 %wide2
+}
+
+; i2v doesn't use the top bits of its input. So the same operations
+; on a value that's about to be passed to i2v can be eliminated.
+
+define <4 x i1> @i2v_truncext_i16(i32 %wide1) {
+; CHECK-LABEL: @i2v_truncext_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE1:%.*]])
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %narrow = trunc i32 %wide1 to i16
+  %wide2 = zext i16 %narrow to i32
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
+  ret <4 x i1> %vout
+}
+
+define <4 x i1> @i2v_truncext_i8(i32 %wide1) {
+; CHECK-LABEL: @i2v_truncext_i8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1:%.*]], 255
+; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE2]])
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %narrow = trunc i32 %wide1 to i8
+  %wide2 = zext i8 %narrow to i32
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
+  ret <4 x i1> %vout
+}
+
+define <4 x i1> @i2v_and_16(i32 %wide1) {
+; CHECK-LABEL: @i2v_and_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE1:%.*]])
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %wide2 = and i32 %wide1, 65535
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
+  ret <4 x i1> %vout
+}
+
+define <4 x i1> @i2v_and_15(i32 %wide1) {
+; CHECK-LABEL: @i2v_and_15(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1:%.*]], 32767
+; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE2]])
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %wide2 = and i32 %wide1, 32767
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
+  ret <4 x i1> %vout
+}
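+
+; An extra composite test (hand-written, unlike the autogenerated
+; assertions above): combining the rules tested above, a same-size
+; round trip through a 16-bit trunc/zext should fold away entirely.
+; This mirrors, at the IR level, the CodeGen test in
+; test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll.
+
+define <8 x i1> @v2i2v_8_truncext(<8 x i1> %vin) {
+; CHECK-LABEL: @v2i2v_8_truncext(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    ret <8 x i1> [[VIN:%.*]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+  %narrow = trunc i32 %int to i16
+  %wide = zext i16 %narrow to i32
+  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %wide)
+  ret <8 x i1> %vout
+}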