diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index b4c0740cca3..66540d20fdf 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -417,12 +417,12 @@ public:
   }
 
   // Equivalent non-predicated opcode
-  unsigned getFunctionalOpcode() const {
+  Optional<unsigned> getFunctionalOpcode() const {
     return GetFunctionalOpcodeForVP(getIntrinsicID());
   }
 
   // Equivalent non-predicated opcode
-  static unsigned GetFunctionalOpcodeForVP(Intrinsic::ID ID);
+  static Optional<unsigned> GetFunctionalOpcodeForVP(Intrinsic::ID ID);
 };
 
 /// This is the common base class for constrained floating point intrinsics.
diff --git a/lib/CodeGen/ExpandVectorPredication.cpp b/lib/CodeGen/ExpandVectorPredication.cpp
index 9f34eb08510..a8d4d4ebe8b 100644
--- a/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/lib/CodeGen/ExpandVectorPredication.cpp
@@ -217,7 +217,7 @@ CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
           VPI.canIgnoreVectorLengthParam()) &&
          "Implicitly dropping %evl in non-speculatable operator!");
 
-  auto OC = static_cast<Instruction::BinaryOps>(VPI.getFunctionalOpcode());
+  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
   assert(Instruction::isBinaryOp(OC));
 
   Value *Op0 = VPI.getOperand(0);
@@ -316,9 +316,9 @@ Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
   IRBuilder<> Builder(&VPI);
 
   // Try lowering to a LLVM instruction first.
-  unsigned OC = VPI.getFunctionalOpcode();
+  auto OC = VPI.getFunctionalOpcode();
 
-  if (Instruction::isBinaryOp(OC))
+  if (OC && Instruction::isBinaryOp(*OC))
     return expandPredicationInBinaryOperator(Builder, VPI);
 
   return &VPI;
diff --git a/lib/IR/IntrinsicInst.cpp b/lib/IR/IntrinsicInst.cpp
index 2dd8c98a413..a78b05109f8 100644
--- a/lib/IR/IntrinsicInst.cpp
+++ b/lib/IR/IntrinsicInst.cpp
@@ -317,8 +317,8 @@ bool VPIntrinsic::IsVPIntrinsic(Intrinsic::ID ID) {
 }
 
 // Equivalent non-predicated opcode
-unsigned VPIntrinsic::GetFunctionalOpcodeForVP(Intrinsic::ID ID) {
-  unsigned FunctionalOC = Instruction::Call;
+Optional<unsigned> VPIntrinsic::GetFunctionalOpcodeForVP(Intrinsic::ID ID) {
+  Optional<unsigned> FunctionalOC;
   switch (ID) {
   default:
     break;
diff --git a/unittests/IR/VPIntrinsicTest.cpp b/unittests/IR/VPIntrinsicTest.cpp
index c04ebf35fe6..cfa68c8c8e3 100644
--- a/unittests/IR/VPIntrinsicTest.cpp
+++ b/unittests/IR/VPIntrinsicTest.cpp
@@ -183,16 +183,17 @@ TEST_F(VPIntrinsicTest, OpcodeRoundTrip) {
   unsigned FullTripCounts = 0;
   for (unsigned OC : Opcodes) {
     Intrinsic::ID VPID = VPIntrinsic::GetForOpcode(OC);
-    // no equivalent VP intrinsic available
+    // No equivalent VP intrinsic available.
     if (VPID == Intrinsic::not_intrinsic)
       continue;
 
-    unsigned RoundTripOC = VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
-    // no equivalent Opcode available
-    if (RoundTripOC == Instruction::Call)
+    Optional<unsigned> RoundTripOC =
+        VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
+    // No equivalent Opcode available.
+    if (!RoundTripOC)
       continue;
 
-    ASSERT_EQ(RoundTripOC, OC);
+    ASSERT_EQ(*RoundTripOC, OC);
     ++FullTripCounts;
   }
   ASSERT_NE(FullTripCounts, 0u);
@@ -207,13 +208,13 @@ TEST_F(VPIntrinsicTest, IntrinsicIDRoundTrip) {
   unsigned FullTripCounts = 0;
   for (const auto &VPDecl : *M) {
     auto VPID = VPDecl.getIntrinsicID();
-    unsigned OC = VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
+    Optional<unsigned> OC = VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
     // no equivalent Opcode available
-    if (OC == Instruction::Call)
+    if (!OC)
      continue;
 
-    Intrinsic::ID RoundTripVPID = VPIntrinsic::GetForOpcode(OC);
+    Intrinsic::ID RoundTripVPID = VPIntrinsic::GetForOpcode(*OC);
     ASSERT_EQ(RoundTripVPID, VPID);
     ++FullTripCounts;
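
Note (not part of the patch): a minimal caller-side sketch of how the new Optional-returning API is expected to be consumed, mirroring the check in ExpandVectorPredication.cpp above; the helper name below is hypothetical.

// Sketch only: callers must test the Optional before dereferencing it,
// since a VP intrinsic may have no equivalent plain IR opcode.
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Hypothetical helper: true if the VP intrinsic maps onto a plain binary
// IR instruction.
static bool hasBinaryFunctionalOpcode(const VPIntrinsic &VPI) {
  Optional<unsigned> OC = VPI.getFunctionalOpcode();
  return OC && Instruction::isBinaryOp(*OC);
}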