From 29ffba4b56cf6fa26b705e9f323c6458aabc920b Mon Sep 17 00:00:00 2001 From: Bjorn Pettersson Date: Fri, 26 Mar 2021 21:02:26 +0100 Subject: [PATCH] Update @llvm.powi to handle different int sizes for the exponent This can be seen as a follow-up to commit 0ee439b705e82a4fe20e2, that changed the second argument of __powidf2, __powisf2 and __powitf2 in compiler-rt from si_int to int. That was to align with how those runtimes are defined in libgcc. One thing that seems to have been missing in that patch was to make sure that the rest of LLVM also handles that the argument now depends on the size of int (not using the si_int machine mode for 32-bit). When using __builtin_powi for a target with 16-bit int, clang crashed. And when emitting libcalls to those rtlib functions, typically when lowering @llvm.powi, the backend would always prepare the exponent argument as an i32, which caused miscompiles when the rtlib was compiled with 16-bit int. The solution used here is to use an overloaded type for the second argument in @llvm.powi. This way clang can use the "correct" type when lowering __builtin_powi, and then later when emitting the libcall it is assumed that the type used in @llvm.powi matches the rtlib function. One thing that needed some extra attention was that when vectorizing calls several passes did not support that several arguments could be overloaded in the intrinsics. This patch allows overload of a scalar operand by adding hasVectorInstrinsicOverloadedScalarOpd, with an entry for powi. 
Differential Revision: https://reviews.llvm.org/D99439 --- docs/LangRef.rst | 13 +-- include/llvm/Analysis/VectorUtils.h | 5 ++ include/llvm/CodeGen/ISDOpcodes.h | 4 +- include/llvm/IR/Intrinsics.td | 2 +- lib/Analysis/VectorUtils.cpp | 10 +++ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 11 +++ .../SelectionDAG/LegalizeFloatTypes.cpp | 12 ++- lib/Target/Mips/Mips16HardFloat.cpp | 2 +- .../InstCombine/InstCombineMulDivRem.cpp | 7 +- lib/Transforms/Scalar/Scalarizer.cpp | 11 ++- lib/Transforms/Utils/SimplifyLibCalls.cpp | 14 ++-- lib/Transforms/Vectorize/LoopVectorize.cpp | 7 +- lib/Transforms/Vectorize/SLPVectorizer.cpp | 7 +- .../CostModel/AArch64/sve-intrinsics.ll | 6 +- test/Bitcode/upgrade-powi-intrinsics.ll | 24 ++++++ test/Bitcode/upgrade-powi-intrinsics.ll.bc | Bin 0 -> 1260 bytes .../AArch64/GlobalISel/arm64-irtranslator.ll | 4 +- .../CodeGen/AArch64/arm64-vfloatintrinsics.ll | 20 ++--- test/CodeGen/AArch64/f16-instructions.ll | 4 +- test/CodeGen/AArch64/illegal-float-ops.ll | 12 +-- test/CodeGen/AArch64/powi-windows.ll | 12 +-- test/CodeGen/AMDGPU/GlobalISel/llvm.powi.ll | 32 ++++---- test/CodeGen/AMDGPU/llvm.powi.ll | 32 ++++---- .../ARM/2011-11-29-128bitArithmetics.ll | 4 +- test/CodeGen/ARM/Windows/powi.ll | 12 +-- test/CodeGen/ARM/fp16-fullfp16.ll | 4 +- test/CodeGen/ARM/fp16-promote.ll | 4 +- test/CodeGen/ARM/fpowi.ll | 4 +- test/CodeGen/ARM/vfloatintrinsics.ll | 12 +-- test/CodeGen/Generic/fpowi-promote.ll | 4 +- .../Generic/replace-intrinsics-with-veclib.ll | 6 +- test/CodeGen/Mips/mips64-f128.ll | 4 +- test/CodeGen/Mips/msa/f16-llvm-ir.ll | 6 +- test/CodeGen/Mips/powif64_16.ll | 8 +- test/CodeGen/Mips/pr36061.ll | 8 +- test/CodeGen/NVPTX/f16-instructions.ll | 4 +- test/CodeGen/NVPTX/f16x2-instructions.ll | 4 +- test/CodeGen/NVPTX/libcall-intrinsic.ll | 4 +- test/CodeGen/PowerPC/f128-arith.ll | 4 +- test/CodeGen/RISCV/double-intrinsics.ll | 4 +- test/CodeGen/RISCV/float-intrinsics.ll | 4 +- test/CodeGen/RISCV/rv64i-single-softfloat.ll | 4 +- 
test/CodeGen/SystemZ/fp-libcall.ll | 12 +-- .../CodeGen/Thumb2/float-intrinsics-double.ll | 4 +- test/CodeGen/Thumb2/float-intrinsics-float.ll | 4 +- test/CodeGen/Thumb2/intrinsics-cc.ll | 8 +- test/CodeGen/WebAssembly/libcalls.ll | 8 +- test/CodeGen/WebAssembly/simd-unsupported.ll | 8 +- test/CodeGen/X86/2007-09-27-LDIntrinsics.ll | 4 +- test/CodeGen/X86/2010-05-07-ldconvert.ll | 4 +- test/CodeGen/X86/powi-windows.ll | 12 +-- test/CodeGen/X86/powi.ll | 10 +-- test/CodeGen/X86/tailcall-multiret.ll | 8 +- test/CodeGen/X86/vector-intrinsics.ll | 8 +- test/CodeGen/XCore/float-intrinsics.ll | 8 +- test/Transforms/InstCombine/fdiv.ll | 24 +++--- test/Transforms/InstCombine/intrinsics.ll | 17 +++- test/Transforms/InstCombine/pow-4.ll | 27 ++++--- test/Transforms/InstCombine/pow_fp_int.ll | 30 +++---- test/Transforms/InstCombine/pow_fp_int16.ll | 75 ++++++++---------- .../InstSimplify/ConstProp/math-2.ll | 4 +- test/Transforms/InstSimplify/call.ll | 26 +++++- .../InstSimplify/floating-point-compare.ll | 6 +- .../InstSimplify/fold-intrinsics.ll | 20 ++++- test/Transforms/LICM/hoist-round.ll | 4 +- test/Transforms/LoopVectorize/intrinsic.ll | 6 +- .../SLPVectorizer/X86/extract_in_tree_user.ll | 12 +-- .../Transforms/SLPVectorizer/X86/intrinsic.ll | 28 +++---- test/Transforms/Scalarizer/intrinsics.ll | 8 +- unittests/Analysis/ValueTrackingTest.cpp | 4 +- 70 files changed, 427 insertions(+), 317 deletions(-) create mode 100644 test/Bitcode/upgrade-powi-intrinsics.ll create mode 100644 test/Bitcode/upgrade-powi-intrinsics.ll.bc diff --git a/docs/LangRef.rst b/docs/LangRef.rst index f6a01040aee..3f1492e67a3 100644 --- a/docs/LangRef.rst +++ b/docs/LangRef.rst @@ -13647,13 +13647,16 @@ This is an overloaded intrinsic. You can use ``llvm.powi`` on any floating-point or vector of floating-point type. Not all targets support all types however. +Generally, the only supported type for the exponent is the one matching +with the C type ``int``. 
+ :: - declare float @llvm.powi.f32(float %Val, i32 %power) - declare double @llvm.powi.f64(double %Val, i32 %power) - declare x86_fp80 @llvm.powi.f80(x86_fp80 %Val, i32 %power) - declare fp128 @llvm.powi.f128(fp128 %Val, i32 %power) - declare ppc_fp128 @llvm.powi.ppcf128(ppc_fp128 %Val, i32 %power) + declare float @llvm.powi.f32.i32(float %Val, i32 %power) + declare double @llvm.powi.f64.i16(double %Val, i16 %power) + declare x86_fp80 @llvm.powi.f80.i32(x86_fp80 %Val, i32 %power) + declare fp128 @llvm.powi.f128.i32(fp128 %Val, i32 %power) + declare ppc_fp128 @llvm.powi.ppcf128.i32(ppc_fp128 %Val, i32 %power) Overview: """"""""" diff --git a/include/llvm/Analysis/VectorUtils.h b/include/llvm/Analysis/VectorUtils.h index 7f53354af16..fc4203effcf 100644 --- a/include/llvm/Analysis/VectorUtils.h +++ b/include/llvm/Analysis/VectorUtils.h @@ -317,6 +317,11 @@ bool isTriviallyVectorizable(Intrinsic::ID ID); /// Identifies if the vector form of the intrinsic has a scalar operand. bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx); +/// Identifies if the vector form of the intrinsic has a scalar operand that has +/// an overloaded type. +bool hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID, + unsigned ScalarOpdIdx); + /// Returns intrinsic ID for call. /// For the input call instruction it finds mapping intrinsic and returns /// its intrinsic ID, in case it does not found it return not_intrinsic. diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h index ef927648c9e..adad8c18e55 100644 --- a/include/llvm/CodeGen/ISDOpcodes.h +++ b/include/llvm/CodeGen/ISDOpcodes.h @@ -851,8 +851,8 @@ enum NodeType { STRICT_FP_TO_FP16, /// Perform various unary floating-point operations inspired by libm. For - /// FPOWI, the result is undefined if if the integer operand doesn't fit - /// into 32 bits. + /// FPOWI, the result is undefined if the integer operand doesn't fit into + /// sizeof(int). 
FNEG, FABS, FSQRT, diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td index ea4f965780f..58483ff47ba 100644 --- a/include/llvm/IR/Intrinsics.td +++ b/include/llvm/IR/Intrinsics.td @@ -652,7 +652,7 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in { // rounding mode. LLVM purposely does not model changes to the FP // environment so they can be treated as readnone. def int_sqrt : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; - def int_powi : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty]>; + def int_powi : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_anyint_ty]>; def int_sin : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; def int_cos : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; def int_pow : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], diff --git a/lib/Analysis/VectorUtils.cpp b/lib/Analysis/VectorUtils.cpp index 800cd3be46e..884a9fa213c 100644 --- a/lib/Analysis/VectorUtils.cpp +++ b/lib/Analysis/VectorUtils.cpp @@ -114,6 +114,16 @@ bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, } } +bool llvm::hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID, + unsigned ScalarOpdIdx) { + switch (ID) { + case Intrinsic::powi: + return (ScalarOpdIdx == 1); + default: + return false; + } +} + /// Returns intrinsic ID for call. /// For the input call instruction it finds mapping intrinsic and returns /// its ID, in case it does not found it return not_intrinsic. diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index fc510b5291b..8392a5def80 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -4044,6 +4044,17 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) { Exponent)); break; } + unsigned Offset = Node->isStrictFPOpcode() ? 
1 : 0; + bool ExponentHasSizeOfInt = + DAG.getLibInfo().getIntSize() == + Node->getOperand(1 + Offset).getValueType().getSizeInBits(); + if (!ExponentHasSizeOfInt) { + // If the exponent does not match with sizeof(int) a libcall to + // RTLIB::POWI would use the wrong type for the argument. + DAG.getContext()->emitError("POWI exponent does not match sizeof(int)"); + Results.push_back(DAG.getUNDEF(Node->getValueType(0))); + break; + } ExpandFPLibCall(Node, LC, Results); break; } diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index b0af5e5ee02..3553f9ec16c 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -19,6 +19,7 @@ //===----------------------------------------------------------------------===// #include "LegalizeTypes.h" +#include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -572,7 +573,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FPOW(SDNode *N) { SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) { bool IsStrict = N->isStrictFPOpcode(); unsigned Offset = IsStrict ? 1 : 0; - assert(N->getOperand(1 + Offset).getValueType() == MVT::i32 && + assert((N->getOperand(1 + Offset).getValueType() == MVT::i16 || + N->getOperand(1 + Offset).getValueType() == MVT::i32) && "Unsupported power type!"); RTLIB::Libcall LC = RTLIB::getPOWI(N->getValueType(0)); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fpowi."); @@ -583,6 +585,14 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) { return DAG.getUNDEF(N->getValueType(0)); } + if (DAG.getLibInfo().getIntSize() != + N->getOperand(1 + Offset).getValueType().getSizeInBits()) { + // If the exponent does not match with sizeof(int) a libcall to RTLIB::POWI + // would use the wrong type for the argument. 
+ DAG.getContext()->emitError("POWI exponent does not match sizeof(int)"); + return DAG.getUNDEF(N->getValueType(0)); + } + EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0 + Offset)), N->getOperand(1 + Offset) }; diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp index cc1f72c0363..6c5f63804d1 100644 --- a/lib/Target/Mips/Mips16HardFloat.cpp +++ b/lib/Target/Mips/Mips16HardFloat.cpp @@ -359,7 +359,7 @@ static const char *const IntrinsicInline[] = { "llvm.log10.f32", "llvm.log10.f64", "llvm.nearbyint.f32", "llvm.nearbyint.f64", "llvm.pow.f32", "llvm.pow.f64", - "llvm.powi.f32", "llvm.powi.f64", + "llvm.powi.f32.i32", "llvm.powi.f64.i32", "llvm.rint.f32", "llvm.rint.f64", "llvm.round.f32", "llvm.round.f64", "llvm.sin.f32", "llvm.sin.f64", diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp index 24796c3ccc1..4e7b8d0ddb3 100644 --- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -1300,7 +1300,7 @@ static Instruction *foldFDivPowDivisor(BinaryOperator &I, Args.push_back(II->getArgOperand(0)); Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I)); break; - case Intrinsic::powi: + case Intrinsic::powi: { // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable. // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so // dividing by that is INF, ~1.0, or 0.0. 
Code that uses powi allows @@ -1310,7 +1310,10 @@ static Instruction *foldFDivPowDivisor(BinaryOperator &I, return nullptr; Args.push_back(II->getArgOperand(0)); Args.push_back(Builder.CreateNeg(II->getArgOperand(1))); - break; + Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()}; + Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I); + return BinaryOperator::CreateFMulFMF(Op0, Pow, &I); + } case Intrinsic::exp: case Intrinsic::exp2: Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I)); diff --git a/lib/Transforms/Scalar/Scalarizer.cpp b/lib/Transforms/Scalar/Scalarizer.cpp index c95984fe198..8ef6b69673b 100644 --- a/lib/Transforms/Scalar/Scalarizer.cpp +++ b/lib/Transforms/Scalar/Scalarizer.cpp @@ -510,8 +510,8 @@ static bool isTriviallyScalariable(Intrinsic::ID ID) { // All of the current scalarizable intrinsics only have one mangled type. static Function *getScalarIntrinsicDeclaration(Module *M, Intrinsic::ID ID, - VectorType *Ty) { - return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() }); + ArrayRef Tys) { + return Intrinsic::getDeclaration(M, ID, Tys); } /// If a call to a vector typed intrinsic function, split into a scalar call per @@ -537,6 +537,9 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) { Scattered.resize(NumArgs); + SmallVector Tys; + Tys.push_back(VT->getScalarType()); + // Assumes that any vector type has the same number of elements as the return // vector type, which is true for all current intrinsics. 
for (unsigned I = 0; I != NumArgs; ++I) { @@ -546,13 +549,15 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) { assert(Scattered[I].size() == NumElems && "mismatched call operands"); } else { ScalarOperands[I] = OpI; + if (hasVectorInstrinsicOverloadedScalarOpd(ID, I)) + Tys.push_back(OpI->getType()); } } ValueVector Res(NumElems); ValueVector ScalarCallOps(NumArgs); - Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT); + Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, Tys); IRBuilder<> Builder(&CI); // Perform actual scalarization, taking care to preserve any scalar operands. diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp index 5b650f8c5dd..1589bb5d5fd 100644 --- a/lib/Transforms/Utils/SimplifyLibCalls.cpp +++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -1664,7 +1664,8 @@ Value *LibCallSimplifier::replacePowWithSqrt(CallInst *Pow, IRBuilderBase &B) { static Value *createPowWithIntegerExponent(Value *Base, Value *Expo, Module *M, IRBuilderBase &B) { Value *Args[] = {Base, Expo}; - Function *F = Intrinsic::getDeclaration(M, Intrinsic::powi, Base->getType()); + Type *Types[] = {Base->getType(), Expo->getType()}; + Function *F = Intrinsic::getDeclaration(M, Intrinsic::powi, Types); return B.CreateCall(F, Args); } @@ -1765,24 +1766,19 @@ Value *LibCallSimplifier::optimizePow(CallInst *Pow, IRBuilderBase &B) { return FMul; } - APSInt IntExpo(32, /*isUnsigned=*/false); + APSInt IntExpo(TLI->getIntSize(), /*isUnsigned=*/false); // powf(x, n) -> powi(x, n) if n is a constant signed integer value if (ExpoF->isInteger() && ExpoF->convertToInteger(IntExpo, APFloat::rmTowardZero, &Ignored) == APFloat::opOK) { return createPowWithIntegerExponent( - Base, ConstantInt::get(B.getInt32Ty(), IntExpo), M, B); + Base, ConstantInt::get(B.getIntNTy(TLI->getIntSize()), IntExpo), M, B); } } // powf(x, itofp(y)) -> powi(x, y) if (AllowApprox && (isa(Expo) || isa(Expo))) { - // FIXME: 
Currently we always use 32 bits for the exponent in llvm.powi. In - // the future we want to use the target dependent "size of int", or - // otherwise we could end up using the wrong type for the exponent when - // mapping llvm.powi back to an rtlib call. See - // https://reviews.llvm.org/D99439 for such a fix. - if (Value *ExpoI = getIntToFPVal(Expo, B, 32)) + if (Value *ExpoI = getIntToFPVal(Expo, B, TLI->getIntSize())) return createPowWithIntegerExponent(Base, ExpoI, M, B); } diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index 06b415e548b..ea227a72b7d 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -5098,6 +5098,7 @@ void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, "Either the intrinsic cost or vector call cost must be valid"); for (unsigned Part = 0; Part < UF; ++Part) { + SmallVector TysForDecl = {CI->getType()}; SmallVector Args; for (auto &I : enumerate(ArgOperands.operands())) { // Some intrinsics have a scalar argument - don't replace it with a @@ -5105,15 +5106,17 @@ void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, Value *Arg; if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) Arg = State.get(I.value(), Part); - else + else { Arg = State.get(I.value(), VPIteration(0, 0)); + if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) + TysForDecl.push_back(Arg->getType()); + } Args.push_back(Arg); } Function *VectorF; if (UseVectorIntrinsic) { // Use vector version of the intrinsic. 
- Type *TysForDecl[] = {CI->getType()}; if (VF.isVector()) TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index 41b88f1caa4..8d907dfbc34 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -5499,6 +5499,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { Value *ScalarArg = nullptr; std::vector OpVecs; + SmallVector TysForDecl = + {FixedVectorType::get(CI->getType(), E->Scalars.size())}; for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { ValueList OpVL; // Some intrinsics have scalar arguments. This argument should not be @@ -5507,6 +5509,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { CallInst *CEI = cast(VL0); ScalarArg = CEI->getArgOperand(j); OpVecs.push_back(CEI->getArgOperand(j)); + if (hasVectorInstrinsicOverloadedScalarOpd(IID, j)) + TysForDecl.push_back(ScalarArg->getType()); continue; } @@ -5523,8 +5527,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { false /*HasGlobalPred*/); CF = VFDatabase(*CI).getVectorizedFunction(Shape); } else { - Type *Tys[] = {FixedVectorType::get(CI->getType(), E->Scalars.size())}; - CF = Intrinsic::getDeclaration(F->getParent(), ID, Tys); + CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); } SmallVector OpBundles; diff --git a/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/test/Analysis/CostModel/AArch64/sve-intrinsics.ll index 1877b190ae7..dbaecf34534 100644 --- a/test/Analysis/CostModel/AArch64/sve-intrinsics.ll +++ b/test/Analysis/CostModel/AArch64/sve-intrinsics.ll @@ -202,7 +202,7 @@ define void @unsupported_fp_ops( %vec) { ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %sin = call @llvm.sin.nxv4f32( %vec) ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %cos = call @llvm.cos.nxv4f32( %vec) ; CHECK-NEXT: Cost Model: Invalid cost for instruction: 
%pow = call @llvm.pow.nxv4f32( %vec, %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %powi = call @llvm.powi.nxv4f32( %vec, i32 42) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 42) ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %exp = call @llvm.exp.nxv4f32( %vec) ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %exp2 = call @llvm.exp2.nxv4f32( %vec) ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %log = call @llvm.log.nxv4f32( %vec) @@ -212,7 +212,7 @@ define void @unsupported_fp_ops( %vec) { %sin = call @llvm.sin.nxv4f32( %vec) %cos = call @llvm.cos.nxv4f32( %vec) %pow = call @llvm.pow.nxv4f32( %vec, %vec) - %powi = call @llvm.powi.nxv4f32( %vec, i32 42) + %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 42) %exp = call @llvm.exp.nxv4f32( %vec) %exp2 = call @llvm.exp2.nxv4f32( %vec) %log = call @llvm.log.nxv4f32( %vec) @@ -224,7 +224,7 @@ define void @unsupported_fp_ops( %vec) { declare @llvm.sin.nxv4f32() declare @llvm.cos.nxv4f32() declare @llvm.pow.nxv4f32(, ) -declare @llvm.powi.nxv4f32(, i32) +declare @llvm.powi.nxv4f32.i32(, i32) declare @llvm.exp.nxv4f32() declare @llvm.exp2.nxv4f32() declare @llvm.log.nxv4f32() diff --git a/test/Bitcode/upgrade-powi-intrinsics.ll b/test/Bitcode/upgrade-powi-intrinsics.ll new file mode 100644 index 00000000000..2ad8adc097a --- /dev/null +++ b/test/Bitcode/upgrade-powi-intrinsics.ll @@ -0,0 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S < %s | FileCheck %s +; RUN: llvm-dis < %s.bc | FileCheck %s + +; Verifying auto-upgrade for the change related to llvm.powi with the exponent +; now being an overloaded operand. 
+define void @foo(double %a, float %b, i32 %c) { +; CHECK-LABEL: @foo( +; CHECK-NEXT: [[T1:%.*]] = call double @llvm.powi.f64.i32(double [[A:%.*]], i32 [[C:%.*]]) +; CHECK-NEXT: [[T2:%.*]] = call float @llvm.powi.f32.i32(float [[B:%.*]], i32 [[C]]) +; CHECK-NEXT: ret void +; + %t1 = call double @llvm.powi.f64(double %a, i32 %c) + %t2 = call float @llvm.powi.f32(float %b, i32 %c) + ret void +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.powi.f64(double, i32) #2 + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare float @llvm.powi.f32(float, i32) #2 + +attributes #2 = { nofree nosync nounwind readnone speculatable willreturn } diff --git a/test/Bitcode/upgrade-powi-intrinsics.ll.bc b/test/Bitcode/upgrade-powi-intrinsics.ll.bc new file mode 100644 index 0000000000000000000000000000000000000000..ca2a33428be7521a48477650e21c646a19bf6610 GIT binary patch literal 1260 zcmYjRe@Gj56#pg{b2p9OyJ~1-z02;@L6G8NbX~GbwkFpWWv+{C3pJz6%`vNq7@n&j>dd-NdL`iBLA9oiM#xY4b!o#1z+fu||$j?pbYL zn)u4d3YVko4;q^%VrsTD$6G&KzIvNYrBU1&766D=TLbY7+jmP>+prWtYA>=FH`a7T zV^xtz7j;emqdK2gFR@=}ELgYqQ((L=O^fI#=r#eMM{E6j?r$XcIw`#Znk4N*aEKu@ z=tSR9DWG;5fU{f|BzNbpT>0jv(7bR;0)*;Omb1Q&pW7>_vTNCJ0W;iL(Woc?lYTH$ zt-aKV0O@T7;B#bdBP2SNrvpZfD=OF{O3WkSlRE*h~i!D$0YscfOUpn3-jB8V+C`rg`E+M&yM{$js8g0v)yaar`?;}(4UNd*c! zA<6F&@~}xBNyw9l(7B@$Jj%g8)?v|#^`;l;!Xlk1>#U#N;^`{!LQ=UgZeLBtmgz?LY;rjtu?2$*r@K`?{^vPo;d2B(hyZZ)t1jygLun>oP z7sxydi>M0L!elqhNw@hPM`uIkY>qDEnTHy4z|?JD7G9bU^BHf`zlHeZi69GWPZ-wm{gT18@iNmO~t? 
zsvNPP-r-X{VgmUq2qnk1GJQaAE><79RcfKICzzaW-}Au-UIaB+Pyr_&c;WG~6v8Jh zWhMk=RE3H|k2tT`S2X-m*fpnUKR~3Svw1qp(gnY&4@2gppWa@i(T* %X) nounwind { L.entry: %0 = load <4 x float>, <4 x float>* @A, align 16 - %1 = call <4 x float> @llvm.powi.v4f32(<4 x float> %0, i32 2) + %1 = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %0, i32 2) store <4 x float> %1, <4 x float>* %X, align 16 ret void } -declare <4 x float> @llvm.powi.v4f32(<4 x float>, i32) nounwind readonly +declare <4 x float> @llvm.powi.v4f32.i32(<4 x float>, i32) nounwind readonly define void @test_sin(<4 x float>* %X) nounwind { diff --git a/test/CodeGen/ARM/Windows/powi.ll b/test/CodeGen/ARM/Windows/powi.ll index 4ec4b0abbdf..7db6327cf53 100644 --- a/test/CodeGen/ARM/Windows/powi.ll +++ b/test/CodeGen/ARM/Windows/powi.ll @@ -1,11 +1,11 @@ ; RUN: llc -mtriple thumbv7--windows-itanium -filetype asm -o - %s | FileCheck %s -declare double @llvm.powi.f64(double, i32) -declare float @llvm.powi.f32(float, i32) +declare double @llvm.powi.f64.i32(double, i32) +declare float @llvm.powi.f32.i32(float, i32) define arm_aapcs_vfpcc double @d(double %d, i32 %i) { entry: - %0 = tail call double @llvm.powi.f64(double %d, i32 %i) + %0 = tail call double @llvm.powi.f64.i32(double %d, i32 %i) ret double %0 } @@ -17,7 +17,7 @@ entry: define arm_aapcs_vfpcc float @f(float %f, i32 %i) { entry: - %0 = tail call float @llvm.powi.f32(float %f, i32 %i) + %0 = tail call float @llvm.powi.f32.i32(float %f, i32 %i) ret float %0 } @@ -29,7 +29,7 @@ entry: define arm_aapcs_vfpcc float @g(double %d, i32 %i) { entry: - %0 = tail call double @llvm.powi.f64(double %d, i32 %i) + %0 = tail call double @llvm.powi.f64.i32(double %d, i32 %i) %conv = fptrunc double %0 to float ret float %conv } @@ -43,7 +43,7 @@ entry: define arm_aapcs_vfpcc double @h(float %f, i32 %i) { entry: - %0 = tail call float @llvm.powi.f32(float %f, i32 %i) + %0 = tail call float @llvm.powi.f32.i32(float %f, i32 %i) %conv = fpext float %0 to double ret double %conv } diff --git 
a/test/CodeGen/ARM/fp16-fullfp16.ll b/test/CodeGen/ARM/fp16-fullfp16.ll index 86a8caa76aa..0ff7e063545 100644 --- a/test/CodeGen/ARM/fp16-fullfp16.ll +++ b/test/CodeGen/ARM/fp16-fullfp16.ll @@ -242,7 +242,7 @@ define void @test_fpowi(half* %p, i32 %b) { ; CHECK-NEXT: vstr.16 s0, [r4] ; CHECK-NEXT: pop {r4, pc} %a = load half, half* %p, align 2 - %r = call half @llvm.powi.f16(half %a, i32 %b) + %r = call half @llvm.powi.f16.i32(half %a, i32 %b) store half %r, half* %p ret void } @@ -587,7 +587,7 @@ define void @test_fmuladd(half* %p, half* %q, half* %r) { } declare half @llvm.sqrt.f16(half %a) -declare half @llvm.powi.f16(half %a, i32 %b) +declare half @llvm.powi.f16.i32(half %a, i32 %b) declare half @llvm.sin.f16(half %a) declare half @llvm.cos.f16(half %a) declare half @llvm.pow.f16(half %a, half %b) diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll index 65b8217ecfe..b8c73c5f831 100644 --- a/test/CodeGen/ARM/fp16-promote.ll +++ b/test/CodeGen/ARM/fp16-promote.ll @@ -397,7 +397,7 @@ define void @test_bitcast_i16tohalf(i16 %a, half* %p) #0 { } declare half @llvm.sqrt.f16(half %a) #0 -declare half @llvm.powi.f16(half %a, i32 %b) #0 +declare half @llvm.powi.f16.i32(half %a, i32 %b) #0 declare half @llvm.sin.f16(half %a) #0 declare half @llvm.cos.f16(half %a) #0 declare half @llvm.pow.f16(half %a, half %b) #0 @@ -444,7 +444,7 @@ define void @test_sqrt(half* %p) #0 { ; CHECK-LIBCALL: bl __aeabi_f2h define void @test_fpowi(half* %p, i32 %b) #0 { %a = load half, half* %p, align 2 - %r = call half @llvm.powi.f16(half %a, i32 %b) + %r = call half @llvm.powi.f16.i32(half %a, i32 %b) store half %r, half* %p ret void } diff --git a/test/CodeGen/ARM/fpowi.ll b/test/CodeGen/ARM/fpowi.ll index 8eface8dae3..f5422bed345 100644 --- a/test/CodeGen/ARM/fpowi.ll +++ b/test/CodeGen/ARM/fpowi.ll @@ -5,11 +5,11 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64" target triple = 
"arm-unknown-linux-gnueabi" -declare double @llvm.powi.f64(double, i32) +declare double @llvm.powi.f64.i32(double, i32) define double @_ZSt3powdi(double %__x, i32 %__i) { entry: - %tmp3 = call double @llvm.powi.f64(double %__x, i32 %__i) + %tmp3 = call double @llvm.powi.f64.i32(double %__x, i32 %__i) ret double %tmp3 } diff --git a/test/CodeGen/ARM/vfloatintrinsics.ll b/test/CodeGen/ARM/vfloatintrinsics.ll index b4c5061fa18..d6dc3509be4 100644 --- a/test/CodeGen/ARM/vfloatintrinsics.ll +++ b/test/CodeGen/ARM/vfloatintrinsics.ll @@ -14,7 +14,7 @@ define %v2f32 @test_v2f32.sqrt(%v2f32 %a) { ; CHECK-LABEL: test_v2f32.powi:{{.*}} define %v2f32 @test_v2f32.powi(%v2f32 %a, i32 %b) { ; CHECK: pow - %1 = call %v2f32 @llvm.powi.v2f32(%v2f32 %a, i32 %b) + %1 = call %v2f32 @llvm.powi.v2f32.i32(%v2f32 %a, i32 %b) ret %v2f32 %1 } ; CHECK-LABEL: test_v2f32.sin:{{.*}} @@ -109,7 +109,7 @@ define %v2f32 @test_v2f32.nearbyint(%v2f32 %a) { } declare %v2f32 @llvm.sqrt.v2f32(%v2f32) #0 -declare %v2f32 @llvm.powi.v2f32(%v2f32, i32) #0 +declare %v2f32 @llvm.powi.v2f32.i32(%v2f32, i32) #0 declare %v2f32 @llvm.sin.v2f32(%v2f32) #0 declare %v2f32 @llvm.cos.v2f32(%v2f32) #0 declare %v2f32 @llvm.pow.v2f32(%v2f32, %v2f32) #0 @@ -138,7 +138,7 @@ define %v4f32 @test_v4f32.sqrt(%v4f32 %a) { ; CHECK-LABEL: test_v4f32.powi:{{.*}} define %v4f32 @test_v4f32.powi(%v4f32 %a, i32 %b) { ; CHECK: pow - %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b) + %1 = call %v4f32 @llvm.powi.v4f32.i32(%v4f32 %a, i32 %b) ret %v4f32 %1 } ; CHECK-LABEL: test_v4f32.sin:{{.*}} @@ -233,7 +233,7 @@ define %v4f32 @test_v4f32.nearbyint(%v4f32 %a) { } declare %v4f32 @llvm.sqrt.v4f32(%v4f32) #0 -declare %v4f32 @llvm.powi.v4f32(%v4f32, i32) #0 +declare %v4f32 @llvm.powi.v4f32.i32(%v4f32, i32) #0 declare %v4f32 @llvm.sin.v4f32(%v4f32) #0 declare %v4f32 @llvm.cos.v4f32(%v4f32) #0 declare %v4f32 @llvm.pow.v4f32(%v4f32, %v4f32) #0 @@ -262,7 +262,7 @@ define %v2f64 @test_v2f64.sqrt(%v2f64 %a) { ; CHECK-LABEL: 
test_v2f64.powi:{{.*}} define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) { ; CHECK: pow - %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b) + %1 = call %v2f64 @llvm.powi.v2f64.i32(%v2f64 %a, i32 %b) ret %v2f64 %1 } ; CHECK-LABEL: test_v2f64.sin:{{.*}} @@ -358,7 +358,7 @@ define %v2f64 @test_v2f64.nearbyint(%v2f64 %a) { } declare %v2f64 @llvm.sqrt.v2f64(%v2f64) #0 -declare %v2f64 @llvm.powi.v2f64(%v2f64, i32) #0 +declare %v2f64 @llvm.powi.v2f64.i32(%v2f64, i32) #0 declare %v2f64 @llvm.sin.v2f64(%v2f64) #0 declare %v2f64 @llvm.cos.v2f64(%v2f64) #0 declare %v2f64 @llvm.pow.v2f64(%v2f64, %v2f64) #0 diff --git a/test/CodeGen/Generic/fpowi-promote.ll b/test/CodeGen/Generic/fpowi-promote.ll index 8dacebed737..dbdc4238799 100644 --- a/test/CodeGen/Generic/fpowi-promote.ll +++ b/test/CodeGen/Generic/fpowi-promote.ll @@ -4,8 +4,8 @@ define float @test(float %tmp23302331, i32 %tmp23282329 ) { -%tmp2339 = call float @llvm.powi.f32( float %tmp23302331, i32 %tmp23282329 ) +%tmp2339 = call float @llvm.powi.f32.i32( float %tmp23302331, i32 %tmp23282329 ) ret float %tmp2339 } -declare float @llvm.powi.f32(float,i32) +declare float @llvm.powi.f32.i32(float,i32) diff --git a/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll b/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll index f6e64a40f4e..c286eda76e6 100644 --- a/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll +++ b/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll @@ -79,14 +79,14 @@ declare double @llvm.exp.f64(double) #0 define <4 x double> @powi_v4(<4 x double> %in){ ; COMMON-LABEL: define {{[^@]+}}@powi_v4 ; COMMON-SAME: (<4 x double> [[IN:%.*]]) { -; COMMON-NEXT: [[CALL:%.*]] = call <4 x double> @llvm.powi.v4f64(<4 x double> [[IN]], i32 3) +; COMMON-NEXT: [[CALL:%.*]] = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> [[IN]], i32 3) ; COMMON-NEXT: ret <4 x double> [[CALL]] ; - %call = call <4 x double> @llvm.powi.v4f64(<4 x double> %in, i32 3) + %call = call <4 x double> @llvm.powi.v4f64.i32(<4 
x double> %in, i32 3) ret <4 x double> %call } -declare <4 x double> @llvm.powi.v4f64(<4 x double>, i32) #0 +declare <4 x double> @llvm.powi.v4f64.i32(<4 x double>, i32) #0 ; Replacement should not take place if the vector length ; does not match exactly. diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll index 33a65070c7f..ad2da0d3d5a 100644 --- a/test/CodeGen/Mips/mips64-f128.ll +++ b/test/CodeGen/Mips/mips64-f128.ll @@ -411,11 +411,11 @@ declare fp128 @rintl(fp128) #1 define fp128 @libcall_powil(fp128 %a, i32 %b) { entry: - %0 = tail call fp128 @llvm.powi.f128(fp128 %a, i32 %b) + %0 = tail call fp128 @llvm.powi.f128.i32(fp128 %a, i32 %b) ret fp128 %0 } -declare fp128 @llvm.powi.f128(fp128, i32) #3 +declare fp128 @llvm.powi.f128.i32(fp128, i32) #3 ; ALL-LABEL: libcall2_copysignl: ; NOT-R2R6-DAG: daddiu $[[R2:[0-9]+]], $zero, 1 diff --git a/test/CodeGen/Mips/msa/f16-llvm-ir.ll b/test/CodeGen/Mips/msa/f16-llvm-ir.ll index f1f1ba69296..513e108407b 100644 --- a/test/CodeGen/Mips/msa/f16-llvm-ir.ll +++ b/test/CodeGen/Mips/msa/f16-llvm-ir.ll @@ -1009,7 +1009,7 @@ entry: ret void } -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) define void @fpowi() { ; MIPS32-LABEL: fpowi: @@ -1073,7 +1073,7 @@ entry: %1 = call float @llvm.convert.from.fp16.f32(i16 %0) - %powi = call float @llvm.powi.f32(float %1, i32 2) + %powi = call float @llvm.powi.f32.i32(float %1, i32 2) %2 = call i16 @llvm.convert.to.fp16.f32(float %powi) @@ -1184,7 +1184,7 @@ entry: %1 = call float @llvm.convert.from.fp16.f32(i16 %0) - %powi = call float @llvm.powi.f32(float %1, i32 %var) + %powi = call float @llvm.powi.f32.i32(float %1, i32 %var) %2 = call i16 @llvm.convert.to.fp16.f32(float %powi) diff --git a/test/CodeGen/Mips/powif64_16.ll b/test/CodeGen/Mips/powif64_16.ll index d6dbc52d9da..465f71f3d6a 100644 --- a/test/CodeGen/Mips/powif64_16.ll +++ b/test/CodeGen/Mips/powif64_16.ll @@ -1,17 +1,17 @@ ; RUN: llc -mtriple=mipsel-linux-gnu 
-march=mipsel -mattr=mips16 -relocation-model=static < %s | FileCheck %s -declare float @llvm.powi.f32(float %Val, i32 %power) -declare double @llvm.powi.f64(double %Val, i32 %power) +declare float @llvm.powi.f32.i32(float %Val, i32 %power) +declare double @llvm.powi.f64.i32(double %Val, i32 %power) define float @foo_pow_f32(float %y, i32 %p) { - %1 = tail call float @llvm.powi.f32(float %y, i32 %p) + %1 = tail call float @llvm.powi.f32.i32(float %y, i32 %p) ; CHECK-NOT: .ent __call_stub_fp_llvm.powi.f32 ; CHECK-NOT: {{.*}} jal llvm.powi.f32 ret float %1 } define double @foo_pow_f64(double %y, i32 %p) { - %1 = tail call double @llvm.powi.f64(double %y, i32 %p) + %1 = tail call double @llvm.powi.f64.i32(double %y, i32 %p) ; CHECK-NOT: .ent __call_stub_fp_llvm.powi.f64 ; CHECK-NOT: {{.*}} jal llvm.powi.f64 ret double %1 diff --git a/test/CodeGen/Mips/pr36061.ll b/test/CodeGen/Mips/pr36061.ll index 6a9aa72aae0..b7f4fe99af3 100644 --- a/test/CodeGen/Mips/pr36061.ll +++ b/test/CodeGen/Mips/pr36061.ll @@ -4,7 +4,7 @@ ; Test that powi has its integer argument sign extended on mips64. 
-declare double @llvm.powi.f64(double, i32) +declare double @llvm.powi.f64.i32(double, i32) define double @powi(double %value, i32 %power) { ; MIPSN64-LABEL: powi: @@ -30,11 +30,11 @@ define double @powi(double %value, i32 %power) { ; MIPSN32-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload ; MIPSN32-NEXT: jr $ra ; MIPSN32-NEXT: addiu $sp, $sp, 16 - %1 = tail call double @llvm.powi.f64(double %value, i32 %power) + %1 = tail call double @llvm.powi.f64.i32(double %value, i32 %power) ret double %1 } -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) define float @powfi(float %value, i32 %power) { ; MIPSN64-LABEL: powfi: @@ -60,6 +60,6 @@ define float @powfi(float %value, i32 %power) { ; MIPSN32-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload ; MIPSN32-NEXT: jr $ra ; MIPSN32-NEXT: addiu $sp, $sp, 16 - %1 = tail call float @llvm.powi.f32(float %value, i32 %power) + %1 = tail call float @llvm.powi.f32.i32(float %value, i32 %power) ret float %1 } diff --git a/test/CodeGen/NVPTX/f16-instructions.ll b/test/CodeGen/NVPTX/f16-instructions.ll index 314cc686f79..3601929e00e 100644 --- a/test/CodeGen/NVPTX/f16-instructions.ll +++ b/test/CodeGen/NVPTX/f16-instructions.ll @@ -806,7 +806,7 @@ define half @test_bitcast_i16tohalf(i16 %a) #0 { declare half @llvm.sqrt.f16(half %a) #0 -declare half @llvm.powi.f16(half %a, i32 %b) #0 +declare half @llvm.powi.f16.i32(half %a, i32 %b) #0 declare half @llvm.sin.f16(half %a) #0 declare half @llvm.cos.f16(half %a) #0 declare half @llvm.pow.f16(half %a, half %b) #0 @@ -845,7 +845,7 @@ define half @test_sqrt(half %a) #0 { ;;; Can't do this yet: requires libcall. 
; XCHECK-LABEL: test_powi( ;define half @test_powi(half %a, i32 %b) #0 { -; %r = call half @llvm.powi.f16(half %a, i32 %b) +; %r = call half @llvm.powi.f16.i32(half %a, i32 %b) ; ret half %r ;} diff --git a/test/CodeGen/NVPTX/f16x2-instructions.ll b/test/CodeGen/NVPTX/f16x2-instructions.ll index 903cdcc5ed0..4ca11358c34 100644 --- a/test/CodeGen/NVPTX/f16x2-instructions.ll +++ b/test/CodeGen/NVPTX/f16x2-instructions.ll @@ -990,7 +990,7 @@ define <2 x half> @test_bitcast_2xi16_to_2xhalf(<2 x i16> %a) #0 { declare <2 x half> @llvm.sqrt.f16(<2 x half> %a) #0 -declare <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b) #0 +declare <2 x half> @llvm.powi.f16.i32(<2 x half> %a, <2 x i32> %b) #0 declare <2 x half> @llvm.sin.f16(<2 x half> %a) #0 declare <2 x half> @llvm.cos.f16(<2 x half> %a) #0 declare <2 x half> @llvm.pow.f16(<2 x half> %a, <2 x half> %b) #0 @@ -1032,7 +1032,7 @@ define <2 x half> @test_sqrt(<2 x half> %a) #0 { ;;; Can't do this yet: requires libcall. ; XCHECK-LABEL: test_powi( ;define <2 x half> @test_powi(<2 x half> %a, <2 x i32> %b) #0 { -; %r = call <2 x half> @llvm.powi.f16(<2 x half> %a, <2 x i32> %b) +; %r = call <2 x half> @llvm.powi.f16.i32(<2 x half> %a, <2 x i32> %b) ; ret <2 x half> %r ;} diff --git a/test/CodeGen/NVPTX/libcall-intrinsic.ll b/test/CodeGen/NVPTX/libcall-intrinsic.ll index 4778667002c..f5f465cbc99 100644 --- a/test/CodeGen/NVPTX/libcall-intrinsic.ll +++ b/test/CodeGen/NVPTX/libcall-intrinsic.ll @@ -3,8 +3,8 @@ ; CHECK: LLVM ERROR: Undefined external symbol "__powidf2" define double @powi(double, i32) { - %a = call double @llvm.powi.f64(double %0, i32 %1) + %a = call double @llvm.powi.f64.i32(double %0, i32 %1) ret double %a } -declare double @llvm.powi.f64(double, i32) nounwind readnone +declare double @llvm.powi.f64.i32(double, i32) nounwind readnone diff --git a/test/CodeGen/PowerPC/f128-arith.ll b/test/CodeGen/PowerPC/f128-arith.ll index be6c1ea585d..95922dab0cc 100644 --- a/test/CodeGen/PowerPC/f128-arith.ll +++ 
b/test/CodeGen/PowerPC/f128-arith.ll @@ -765,11 +765,11 @@ define dso_local void @qp_powi(fp128* nocapture readonly %a, i32* nocapture read entry: %0 = load fp128, fp128* %a, align 16 %1 = load i32, i32* %b, align 8 - %2 = tail call fp128 @llvm.powi.f128(fp128 %0, i32 %1) + %2 = tail call fp128 @llvm.powi.f128.i32(fp128 %0, i32 %1) store fp128 %2, fp128* %res, align 16 ret void } -declare fp128 @llvm.powi.f128(fp128 %Val, i32 %power) +declare fp128 @llvm.powi.f128.i32(fp128 %Val, i32 %power) @a = common dso_local global fp128 0xL00000000000000000000000000000000, align 16 @b = common dso_local global fp128 0xL00000000000000000000000000000000, align 16 diff --git a/test/CodeGen/RISCV/double-intrinsics.ll b/test/CodeGen/RISCV/double-intrinsics.ll index 0f39935a439..79dcd37eccf 100644 --- a/test/CodeGen/RISCV/double-intrinsics.ll +++ b/test/CodeGen/RISCV/double-intrinsics.ll @@ -30,7 +30,7 @@ define double @sqrt_f64(double %a) nounwind { ret double %1 } -declare double @llvm.powi.f64(double, i32) +declare double @llvm.powi.f64.i32(double, i32) define double @powi_f64(double %a, i32 %b) nounwind { ; RV32IFD-LABEL: powi_f64: @@ -51,7 +51,7 @@ define double @powi_f64(double %a, i32 %b) nounwind { ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret - %1 = call double @llvm.powi.f64(double %a, i32 %b) + %1 = call double @llvm.powi.f64.i32(double %a, i32 %b) ret double %1 } diff --git a/test/CodeGen/RISCV/float-intrinsics.ll b/test/CodeGen/RISCV/float-intrinsics.ll index eb0d77ed042..cda32b390af 100644 --- a/test/CodeGen/RISCV/float-intrinsics.ll +++ b/test/CodeGen/RISCV/float-intrinsics.ll @@ -28,7 +28,7 @@ define float @sqrt_f32(float %a) nounwind { ret float %1 } -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) define float @powi_f32(float %a, i32 %b) nounwind { ; RV32IF-LABEL: powi_f32: @@ -49,7 +49,7 @@ define float @powi_f32(float %a, i32 %b) nounwind { ; RV64IF-NEXT: ld ra, 
8(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret - %1 = call float @llvm.powi.f32(float %a, i32 %b) + %1 = call float @llvm.powi.f32.i32(float %a, i32 %b) ret float %1 } diff --git a/test/CodeGen/RISCV/rv64i-single-softfloat.ll b/test/CodeGen/RISCV/rv64i-single-softfloat.ll index 31897d02e4c..e250cff27a9 100644 --- a/test/CodeGen/RISCV/rv64i-single-softfloat.ll +++ b/test/CodeGen/RISCV/rv64i-single-softfloat.ll @@ -663,7 +663,7 @@ define float @fpow_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.powi.f32(float %Val, i32 %power) +declare float @llvm.powi.f32.i32(float %Val, i32 %power) define float @fpowi_s(float %a, i32 %b) nounwind { ; RV64I-LABEL: fpowi_s: @@ -675,7 +675,7 @@ define float @fpowi_s(float %a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret - %1 = call float @llvm.powi.f32(float %a, i32 %b) + %1 = call float @llvm.powi.f32.i32(float %a, i32 %b) ret float %1 } diff --git a/test/CodeGen/SystemZ/fp-libcall.ll b/test/CodeGen/SystemZ/fp-libcall.ll index 2df25aaf814..60b698e34fc 100644 --- a/test/CodeGen/SystemZ/fp-libcall.ll +++ b/test/CodeGen/SystemZ/fp-libcall.ll @@ -5,21 +5,21 @@ define float @f1(float %x, i32 %y) { ; CHECK-LABEL: f1: ; CHECK: brasl %r14, __powisf2@PLT - %tmp = call float @llvm.powi.f32(float %x, i32 %y) + %tmp = call float @llvm.powi.f32.i32(float %x, i32 %y) ret float %tmp } define double @f2(double %x, i32 %y) { ; CHECK-LABEL: f2: ; CHECK: brasl %r14, __powidf2@PLT - %tmp = call double @llvm.powi.f64(double %x, i32 %y) + %tmp = call double @llvm.powi.f64.i32(double %x, i32 %y) ret double %tmp } define fp128 @f3(fp128 %x, i32 %y) { ; CHECK-LABEL: f3: ; CHECK: brasl %r14, __powitf2@PLT - %tmp = call fp128 @llvm.powi.f128(fp128 %x, i32 %y) + %tmp = call fp128 @llvm.powi.f128.i32(fp128 %x, i32 %y) ret fp128 %tmp } @@ -295,9 +295,9 @@ define fp128 @f39(fp128 %x, fp128 %y) { ret fp128 %tmp } -declare float 
@llvm.powi.f32(float, i32) -declare double @llvm.powi.f64(double, i32) -declare fp128 @llvm.powi.f128(fp128, i32) +declare float @llvm.powi.f32.i32(float, i32) +declare double @llvm.powi.f64.i32(double, i32) +declare fp128 @llvm.powi.f128.i32(fp128, i32) declare float @llvm.pow.f32(float, float) declare double @llvm.pow.f64(double, double) declare fp128 @llvm.pow.f128(fp128, fp128) diff --git a/test/CodeGen/Thumb2/float-intrinsics-double.ll b/test/CodeGen/Thumb2/float-intrinsics-double.ll index 29843a51fc2..70a5939865b 100644 --- a/test/CodeGen/Thumb2/float-intrinsics-double.ll +++ b/test/CodeGen/Thumb2/float-intrinsics-double.ll @@ -14,12 +14,12 @@ define double @sqrt_d(double %a) { ret double %1 } -declare double @llvm.powi.f64(double %Val, i32 %power) +declare double @llvm.powi.f64.i32(double %Val, i32 %power) define double @powi_d(double %a, i32 %b) { ; CHECK-LABEL: powi_d: ; SOFT: {{(bl|b)}} __powidf2 ; HARD: b __powidf2 - %1 = call double @llvm.powi.f64(double %a, i32 %b) + %1 = call double @llvm.powi.f64.i32(double %a, i32 %b) ret double %1 } diff --git a/test/CodeGen/Thumb2/float-intrinsics-float.ll b/test/CodeGen/Thumb2/float-intrinsics-float.ll index 3682edc3f93..b6b891edd04 100644 --- a/test/CodeGen/Thumb2/float-intrinsics-float.ll +++ b/test/CodeGen/Thumb2/float-intrinsics-float.ll @@ -15,12 +15,12 @@ define float @sqrt_f(float %a) { ret float %1 } -declare float @llvm.powi.f32(float %Val, i32 %power) +declare float @llvm.powi.f32.i32(float %Val, i32 %power) define float @powi_f(float %a, i32 %b) { ; CHECK-LABEL: powi_f: ; SOFT: bl __powisf2 ; HARD: b __powisf2 - %1 = call float @llvm.powi.f32(float %a, i32 %b) + %1 = call float @llvm.powi.f32.i32(float %a, i32 %b) ret float %1 } diff --git a/test/CodeGen/Thumb2/intrinsics-cc.ll b/test/CodeGen/Thumb2/intrinsics-cc.ll index 0eb0af9bdc1..d45f0969aa6 100644 --- a/test/CodeGen/Thumb2/intrinsics-cc.ll +++ b/test/CodeGen/Thumb2/intrinsics-cc.ll @@ -13,21 +13,21 @@ ; RUN: llc -mtriple 
thumbv7-unknown-none-musleabihf -float-abi soft -filetype asm -o - %s | FileCheck %s ; RUN: llc -mtriple thumbv7-unknown-none-musleabihf -float-abi hard -filetype asm -o - %s | FileCheck %s -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) define float @powi_f32(float %f, i32 %i) { entry: - %0 = call float @llvm.powi.f32(float %f, i32 %i) + %0 = call float @llvm.powi.f32.i32(float %f, i32 %i) ret float %0 } ; CHECK: b __powisf2 -declare double @llvm.powi.f64(double, i32) +declare double @llvm.powi.f64.i32(double, i32) define double @powi_f64(double %d, i32 %i) { entry: - %0 = call double @llvm.powi.f64(double %d, i32 %i) + %0 = call double @llvm.powi.f64.i32(double %d, i32 %i) ret double %0 } diff --git a/test/CodeGen/WebAssembly/libcalls.ll b/test/CodeGen/WebAssembly/libcalls.ll index 7eaee5a7e0b..d3de90432bb 100644 --- a/test/CodeGen/WebAssembly/libcalls.ll +++ b/test/CodeGen/WebAssembly/libcalls.ll @@ -10,12 +10,12 @@ declare fp128 @llvm.floor.f128(fp128) declare fp128 @llvm.trunc.f128(fp128) declare fp128 @llvm.nearbyint.f128(fp128) declare fp128 @llvm.pow.f128(fp128, fp128) -declare fp128 @llvm.powi.f128(fp128, i32) +declare fp128 @llvm.powi.f128.i32(fp128, i32) declare double @llvm.cos.f64(double) declare double @llvm.log10.f64(double) declare double @llvm.pow.f64(double, double) -declare double @llvm.powi.f64(double, i32) +declare double @llvm.powi.f64.i32(double, i32) declare double @llvm.log.f64(double) declare double @llvm.exp.f64(double) declare i32 @llvm.lround(double) @@ -39,7 +39,7 @@ define fp128 @fp128libcalls(fp128 %x, fp128 %y, i32 %z) { ; CHECK: call powl %f = call fp128 @llvm.pow.f128(fp128 %e, fp128 %y) ; CHECK: call __powitf2 - %g = call fp128 @llvm.powi.f128(fp128 %f, i32 %z) + %g = call fp128 @llvm.powi.f128.i32(fp128 %f, i32 %z) ; CHECK: call truncl %h = call fp128 @llvm.trunc.f128(fp128 %g) ; CHECK: call nearbyintl @@ -68,7 +68,7 @@ define i32 @f64libcalls(double %x, double %y, i32 %z) { ; CHECK: call 
$push{{[0-9]}}=, pow %c = call double @llvm.pow.f64(double %b, double %y) ; CHECK: call $push{{[0-9]}}=, __powidf2 - %d = call double @llvm.powi.f64(double %c, i32 %z) + %d = call double @llvm.powi.f64.i32(double %c, i32 %z) ; CHECK: call $push{{[0-9]}}=, log %e = call double @llvm.log.f64(double %d) ; CHECK: call $push{{[0-9]}}=, exp diff --git a/test/CodeGen/WebAssembly/simd-unsupported.ll b/test/CodeGen/WebAssembly/simd-unsupported.ll index 9332f51c7a9..f8b026453ce 100644 --- a/test/CodeGen/WebAssembly/simd-unsupported.ll +++ b/test/CodeGen/WebAssembly/simd-unsupported.ll @@ -392,9 +392,9 @@ define <4 x float> @cos_v4f32(<4 x float> %x) { ; CHECK-LABEL: powi_v4f32: ; CHECK: call $push[[L:[0-9]+]]=, __powisf2 -declare <4 x float> @llvm.powi.v4f32(<4 x float>, i32) +declare <4 x float> @llvm.powi.v4f32.i32(<4 x float>, i32) define <4 x float> @powi_v4f32(<4 x float> %x, i32 %y) { - %v = call <4 x float> @llvm.powi.v4f32(<4 x float> %x, i32 %y) + %v = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %x, i32 %y) ret <4 x float> %v } @@ -492,9 +492,9 @@ define <2 x double> @cos_v2f64(<2 x double> %x) { ; CHECK-LABEL: powi_v2f64: ; CHECK: call $push[[L:[0-9]+]]=, __powidf2 -declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32) +declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32) define <2 x double> @powi_v2f64(<2 x double> %x, i32 %y) { - %v = call <2 x double> @llvm.powi.v2f64(<2 x double> %x, i32 %y) + %v = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> %x, i32 %y) ret <2 x double> %v } diff --git a/test/CodeGen/X86/2007-09-27-LDIntrinsics.ll b/test/CodeGen/X86/2007-09-27-LDIntrinsics.ll index 88057c86fd9..3ad64929784 100644 --- a/test/CodeGen/X86/2007-09-27-LDIntrinsics.ll +++ b/test/CodeGen/X86/2007-09-27-LDIntrinsics.ll @@ -17,7 +17,7 @@ declare x86_fp80 @llvm.sqrt.f80(x86_fp80) define x86_fp80 @bar(x86_fp80 %x) nounwind { entry: - %tmp2 = call x86_fp80 @llvm.powi.f80( x86_fp80 %x, i32 3 ) + %tmp2 = call x86_fp80 @llvm.powi.f80.i32( x86_fp80 %x, 
i32 3 ) ret x86_fp80 %tmp2 ; CHECK-LABEL: bar: ; CHECK: fldt 4(%esp) @@ -27,4 +27,4 @@ entry: ; CHECK-NEXT: ret } -declare x86_fp80 @llvm.powi.f80(x86_fp80, i32) +declare x86_fp80 @llvm.powi.f80.i32(x86_fp80, i32) diff --git a/test/CodeGen/X86/2010-05-07-ldconvert.ll b/test/CodeGen/X86/2010-05-07-ldconvert.ll index a0c3c95ef60..000b44739f4 100644 --- a/test/CodeGen/X86/2010-05-07-ldconvert.ll +++ b/test/CodeGen/X86/2010-05-07-ldconvert.ll @@ -6,7 +6,7 @@ entry: %retval = alloca i32, align 4 ; [#uses=2] %r = alloca i32, align 4 ; [#uses=2] store i32 0, i32* %retval - %tmp = call x86_fp80 @llvm.powi.f80(x86_fp80 0xK3FFF8000000000000000, i32 -64) ; [#uses=1] + %tmp = call x86_fp80 @llvm.powi.f80.i32(x86_fp80 0xK3FFF8000000000000000, i32 -64) ; [#uses=1] %conv = fptosi x86_fp80 %tmp to i32 ; [#uses=1] store i32 %conv, i32* %r %tmp1 = load i32, i32* %r ; [#uses=1] @@ -22,6 +22,6 @@ if.end: ; preds = %if.then, %entry ret i32 %0 } -declare x86_fp80 @llvm.powi.f80(x86_fp80, i32) nounwind readonly +declare x86_fp80 @llvm.powi.f80.i32(x86_fp80, i32) nounwind readonly declare void @_Z1fv() diff --git a/test/CodeGen/X86/powi-windows.ll b/test/CodeGen/X86/powi-windows.ll index 804071ba123..bc5a8719ae3 100644 --- a/test/CodeGen/X86/powi-windows.ll +++ b/test/CodeGen/X86/powi-windows.ll @@ -1,11 +1,11 @@ ; RUN: llc -mtriple x86_64-windows < %s | FileCheck %s -declare double @llvm.powi.f64(double, i32) -declare float @llvm.powi.f32(float, i32) +declare double @llvm.powi.f64.i32(double, i32) +declare float @llvm.powi.f32.i32(float, i32) define double @d(double %d, i32 %i) { entry: - %0 = tail call double @llvm.powi.f64(double %d, i32 %i) + %0 = tail call double @llvm.powi.f64.i32(double %d, i32 %i) ret double %0 } @@ -15,7 +15,7 @@ entry: define float @f(float %f, i32 %i) { entry: - %0 = tail call float @llvm.powi.f32(float %f, i32 %i) + %0 = tail call float @llvm.powi.f32.i32(float %f, i32 %i) ret float %0 } @@ -25,7 +25,7 @@ entry: define float @g(double %d, i32 %i) { entry: - %0 
= tail call double @llvm.powi.f64(double %d, i32 %i) + %0 = tail call double @llvm.powi.f64.i32(double %d, i32 %i) %conv = fptrunc double %0 to float ret float %conv } @@ -36,7 +36,7 @@ entry: define double @h(float %f, i32 %i) { entry: - %0 = tail call float @llvm.powi.f32(float %f, i32 %i) + %0 = tail call float @llvm.powi.f32.i32(float %f, i32 %i) %conv = fpext float %0 to double ret double %conv } diff --git a/test/CodeGen/X86/powi.ll b/test/CodeGen/X86/powi.ll index 80779cd7f70..933bb40ca98 100644 --- a/test/CodeGen/X86/powi.ll +++ b/test/CodeGen/X86/powi.ll @@ -49,7 +49,7 @@ define double @pow_wrapper(double %a) nounwind readonly ssp noredzone { ; X64-NEXT: mulsd %xmm0, %xmm1 ; X64-NEXT: movapd %xmm1, %xmm0 ; X64-NEXT: retq - %ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; [#uses=1] + %ret = tail call double @llvm.powi.f64.i32(double %a, i32 15) nounwind ; [#uses=1] ret double %ret } @@ -82,7 +82,7 @@ define double @pow_wrapper_optsize(double %a) optsize { ; X64: # %bb.0: ; X64-NEXT: movl $15, %edi ; X64-NEXT: jmp __powidf2@PLT # TAILCALL - %ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; [#uses=1] + %ret = tail call double @llvm.powi.f64.i32(double %a, i32 15) nounwind ; [#uses=1] ret double %ret } @@ -115,7 +115,7 @@ define double @pow_wrapper_pgso(double %a) !prof !14 { ; X64: # %bb.0: ; X64-NEXT: movl $15, %edi ; X64-NEXT: jmp __powidf2@PLT # TAILCALL - %ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; [#uses=1] + %ret = tail call double @llvm.powi.f64.i32(double %a, i32 15) nounwind ; [#uses=1] ret double %ret } @@ -151,11 +151,11 @@ define double @pow_wrapper_minsize(double %a) minsize { ; X64-NEXT: popq %rdi ; X64-NEXT: .cfi_adjust_cfa_offset -8 ; X64-NEXT: jmp __powidf2@PLT # TAILCALL - %ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; [#uses=1] + %ret = tail call double @llvm.powi.f64.i32(double %a, i32 15) nounwind ; [#uses=1] ret double %ret } -declare double 
@llvm.powi.f64(double, i32) nounwind readonly +declare double @llvm.powi.f64.i32(double, i32) nounwind readonly !llvm.module.flags = !{!0} !0 = !{i32 1, !"ProfileSummary", !1} diff --git a/test/CodeGen/X86/tailcall-multiret.ll b/test/CodeGen/X86/tailcall-multiret.ll index a77a59cd70b..bf573703fc2 100644 --- a/test/CodeGen/X86/tailcall-multiret.ll +++ b/test/CodeGen/X86/tailcall-multiret.ll @@ -1,14 +1,14 @@ ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mcpu=core2 | FileCheck %s ; See PR19530 -declare double @llvm.powi.f64(double %Val, i32 %power) +declare double @llvm.powi.f64.i32(double %Val, i32 %power) define <3 x double> @julia_foo17589(i32 %arg) { - %tmp1 = call double @llvm.powi.f64(double 1.000000e+00, i32 %arg) + %tmp1 = call double @llvm.powi.f64.i32(double 1.000000e+00, i32 %arg) ; CHECK: callq __powidf2 %tmp2 = insertelement <3 x double> undef, double %tmp1, i32 0 - %tmp3 = call double @llvm.powi.f64(double 2.000000e+00, i32 %arg) + %tmp3 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %arg) ; CHECK: callq __powidf2 %tmp4 = insertelement <3 x double> %tmp2, double %tmp3, i32 1 - %tmp5 = call double @llvm.powi.f64(double 3.000000e+00, i32 %arg) + %tmp5 = call double @llvm.powi.f64.i32(double 3.000000e+00, i32 %arg) ; CHECK: callq __powidf2 %tmp6 = insertelement <3 x double> %tmp4, double %tmp5, i32 2 ; CHECK-NOT: TAILCALL diff --git a/test/CodeGen/X86/vector-intrinsics.ll b/test/CodeGen/X86/vector-intrinsics.ll index 8bbcbdec967..18ae22ec9ae 100644 --- a/test/CodeGen/X86/vector-intrinsics.ll +++ b/test/CodeGen/X86/vector-intrinsics.ll @@ -4,7 +4,7 @@ declare <4 x double> @llvm.sin.v4f64(<4 x double> %p) declare <4 x double> @llvm.cos.v4f64(<4 x double> %p) declare <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q) -declare <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32) +declare <4 x double> @llvm.powi.v4f64.i32(<4 x double> %p, i32) define <4 x double> @foo(<4 x double> %p) ; CHECK-LABEL: foo: @@ -144,14 +144,14 @@ define <4 x 
double> @zoo(<4 x double> %p, i32 %q) ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq { - %t = call <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32 %q) + %t = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %p, i32 %q) ret <4 x double> %t } declare <9 x double> @llvm.exp.v9f64(<9 x double> %a) declare <9 x double> @llvm.pow.v9f64(<9 x double> %a, <9 x double> %b) -declare <9 x double> @llvm.powi.v9f64(<9 x double> %a, i32) +declare <9 x double> @llvm.powi.v9f64.i32(<9 x double> %a, i32) define void @a(<9 x double>* %p) nounwind { ; CHECK-LABEL: a: @@ -399,7 +399,7 @@ define void @c(<9 x double>* %p, i32 %n) nounwind { ; CHECK-NEXT: popq %rbp ; CHECK-NEXT: retq %a = load <9 x double>, <9 x double>* %p - %r = call <9 x double> @llvm.powi.v9f64(<9 x double> %a, i32 %n) + %r = call <9 x double> @llvm.powi.v9f64.i32(<9 x double> %a, i32 %n) store <9 x double> %r, <9 x double>* %p ret void } diff --git a/test/CodeGen/XCore/float-intrinsics.ll b/test/CodeGen/XCore/float-intrinsics.ll index 588203655ff..81350497a4f 100644 --- a/test/CodeGen/XCore/float-intrinsics.ll +++ b/test/CodeGen/XCore/float-intrinsics.ll @@ -6,7 +6,7 @@ declare double @llvm.log.f64(double) declare double @llvm.log10.f64(double) declare double @llvm.log2.f64(double) declare double @llvm.pow.f64(double, double) -declare double @llvm.powi.f64(double, i32) +declare double @llvm.powi.f64.i32(double, i32) declare double @llvm.sin.f64(double) declare double @llvm.sqrt.f64(double) @@ -125,16 +125,16 @@ define float @powf(float %F, float %power) { define double @powi(double %F, i32 %power) { ; CHECK-LABEL: powi: ; CHECK: bl __powidf2 - %result = call double @llvm.powi.f64(double %F, i32 %power) + %result = call double @llvm.powi.f64.i32(double %F, i32 %power) ret double %result } -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) define float @powif(float %F, i32 %power) { ; CHECK-LABEL: powif: ; CHECK: bl __powisf2 - %result = call float @llvm.powi.f32(float 
%F, i32 %power) + %result = call float @llvm.powi.f32.i32(float %F, i32 %power) ret float %result } diff --git a/test/Transforms/InstCombine/fdiv.ll b/test/Transforms/InstCombine/fdiv.ll index 72325f630b2..1da6a113f5e 100644 --- a/test/Transforms/InstCombine/fdiv.ll +++ b/test/Transforms/InstCombine/fdiv.ll @@ -8,8 +8,8 @@ declare float @llvm.exp.f32(float) nounwind readnone declare <2 x half> @llvm.exp.v2f16(<2 x half>) nounwind readnone declare float @llvm.exp2.f32(float) nounwind readnone declare <2 x half> @llvm.exp2.v2f16(<2 x half>) nounwind readnone -declare float @llvm.powi.f32(float, i32) nounwind readnone -declare <2 x half> @llvm.powi.v2f16(<2 x half>, i32) nounwind readnone +declare float @llvm.powi.f32.i32(float, i32) nounwind readnone +declare <2 x half> @llvm.powi.v2f16.i32(<2 x half>, i32) nounwind readnone define float @exact_inverse(float %x) { ; CHECK-LABEL: @exact_inverse( @@ -875,11 +875,11 @@ define <2 x half> @exp2_recip(<2 x half> %x, <2 x half> %y) { define float @powi_divisor(float %x, i32 %y, float %z) { ; CHECK-LABEL: @powi_divisor( ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = call reassoc ninf arcp float @llvm.powi.f32(float [[X:%.*]], i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call reassoc ninf arcp float @llvm.powi.f32.i32(float [[X:%.*]], i32 [[TMP1]]) ; CHECK-NEXT: [[R:%.*]] = fmul reassoc ninf arcp float [[TMP2]], [[Z:%.*]] ; CHECK-NEXT: ret float [[R]] ; - %p = call float @llvm.powi.f32(float %x, i32 %y) + %p = call float @llvm.powi.f32.i32(float %x, i32 %y) %r = fdiv reassoc arcp ninf float %z, %p ret float %r } @@ -888,12 +888,12 @@ define float @powi_divisor(float %x, i32 %y, float %z) { define float @powi_divisor_extra_use(float %x, i32 %y, float %z) { ; CHECK-LABEL: @powi_divisor_extra_use( -; CHECK-NEXT: [[P:%.*]] = call float @llvm.powi.f32(float [[X:%.*]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[P:%.*]] = call float @llvm.powi.f32.i32(float [[X:%.*]], i32 [[Y:%.*]]) ; CHECK-NEXT: call void 
@use_f32(float [[P]]) ; CHECK-NEXT: [[R:%.*]] = fdiv reassoc ninf arcp float [[Z:%.*]], [[P]] ; CHECK-NEXT: ret float [[R]] ; - %p = call float @llvm.powi.f32(float %x, i32 %y) + %p = call float @llvm.powi.f32.i32(float %x, i32 %y) call void @use_f32(float %p) %r = fdiv reassoc arcp ninf float %z, %p ret float %r @@ -903,11 +903,11 @@ define float @powi_divisor_extra_use(float %x, i32 %y, float %z) { define float @powi_divisor_not_enough_fmf(float %x, i32 %y, float %z) { ; CHECK-LABEL: @powi_divisor_not_enough_fmf( -; CHECK-NEXT: [[P:%.*]] = call fast float @llvm.powi.f32(float [[X:%.*]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[P:%.*]] = call fast float @llvm.powi.f32.i32(float [[X:%.*]], i32 [[Y:%.*]]) ; CHECK-NEXT: [[R:%.*]] = fdiv reassoc ninf float [[Z:%.*]], [[P]] ; CHECK-NEXT: ret float [[R]] ; - %p = call fast float @llvm.powi.f32(float %x, i32 %y) + %p = call fast float @llvm.powi.f32.i32(float %x, i32 %y) %r = fdiv reassoc ninf float %z, %p ret float %r } @@ -916,11 +916,11 @@ define float @powi_divisor_not_enough_fmf(float %x, i32 %y, float %z) { define float @powi_divisor_not_enough_fmf2(float %x, i32 %y, float %z) { ; CHECK-LABEL: @powi_divisor_not_enough_fmf2( -; CHECK-NEXT: [[P:%.*]] = call fast float @llvm.powi.f32(float [[X:%.*]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[P:%.*]] = call fast float @llvm.powi.f32.i32(float [[X:%.*]], i32 [[Y:%.*]]) ; CHECK-NEXT: [[R:%.*]] = fdiv ninf arcp float [[Z:%.*]], [[P]] ; CHECK-NEXT: ret float [[R]] ; - %p = call fast float @llvm.powi.f32(float %x, i32 %y) + %p = call fast float @llvm.powi.f32.i32(float %x, i32 %y) %r = fdiv arcp ninf float %z, %p ret float %r } @@ -930,10 +930,10 @@ define float @powi_divisor_not_enough_fmf2(float %x, i32 %y, float %z) { define <2 x half> @powi_recip(<2 x half> %x, i32 %y) { ; CHECK-LABEL: @powi_recip( ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = call reassoc nnan ninf arcp <2 x half> @llvm.powi.v2f16(<2 x half> [[X:%.*]], i32 [[TMP1]]) +; CHECK-NEXT: 
[[TMP2:%.*]] = call reassoc nnan ninf arcp <2 x half> @llvm.powi.v2f16.i32(<2 x half> [[X:%.*]], i32 [[TMP1]]) ; CHECK-NEXT: ret <2 x half> [[TMP2]] ; - %p = call <2 x half> @llvm.powi.v2f16(<2 x half> %x, i32 %y) + %p = call <2 x half> @llvm.powi.v2f16.i32(<2 x half> %x, i32 %y) %r = fdiv reassoc arcp nnan ninf <2 x half> , %p ret <2 x half> %r } diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll index 1039ed2c2f2..cabd82c3044 100644 --- a/test/Transforms/InstCombine/intrinsics.ll +++ b/test/Transforms/InstCombine/intrinsics.ll @@ -1,7 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -instcombine -S < %s | FileCheck %s -declare double @llvm.powi.f64(double, i32) nounwind readonly +declare double @llvm.powi.f64.i16(double, i16) nounwind readonly +declare double @llvm.powi.f64.i32(double, i32) nounwind readonly declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone declare i1 @llvm.cttz.i1(i1, i1) nounwind readnone @@ -28,13 +29,23 @@ define void @powi(double %V, double *%P) { ; CHECK-NEXT: store volatile double [[A]], double* [[P:%.*]], align 8 ; CHECK-NEXT: [[D:%.*]] = fmul nnan double [[V]], [[V]] ; CHECK-NEXT: store volatile double [[D]], double* [[P]], align 8 +; CHECK-NEXT: [[A2:%.*]] = fdiv fast double 1.000000e+00, [[V]] +; CHECK-NEXT: store volatile double [[A2]], double* [[P]], align 8 +; CHECK-NEXT: [[D2:%.*]] = fmul nnan double [[V]], [[V]] +; CHECK-NEXT: store volatile double [[D2]], double* [[P]], align 8 ; CHECK-NEXT: ret void ; - %A = tail call fast double @llvm.powi.f64(double %V, i32 -1) nounwind + %A = tail call fast double @llvm.powi.f64.i32(double %V, i32 -1) nounwind store volatile double %A, double* %P - %D = tail call nnan double @llvm.powi.f64(double %V, i32 2) nounwind + %D = tail call nnan double @llvm.powi.f64.i32(double %V, i32 2) nounwind store volatile double %D, double* %P + + %A2 = tail call 
fast double @llvm.powi.f64.i16(double %V, i16 -1) nounwind + store volatile double %A2, double* %P + + %D2 = tail call nnan double @llvm.powi.f64.i16(double %V, i16 2) nounwind + store volatile double %D2, double* %P ret void } diff --git a/test/Transforms/InstCombine/pow-4.ll b/test/Transforms/InstCombine/pow-4.ll index 29fb034fbe3..a92e4c36926 100644 --- a/test/Transforms/InstCombine/pow-4.ll +++ b/test/Transforms/InstCombine/pow-4.ll @@ -1,6 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -instcombine -S < %s | FileCheck %s --check-prefixes=CHECK,SQRT -; RUN: opt -instcombine -S < %s -disable-builtin sqrt | FileCheck %s --check-prefixes=CHECK,NOSQRT +; RUN: opt -instcombine -S < %s -mtriple unknown | FileCheck %s --check-prefixes=CHECK,CHECKI32,SQRT +; RUN: opt -instcombine -S < %s -mtriple unknown -disable-builtin sqrt | FileCheck %s --check-prefixes=CHECK,CHECKI32,NOSQRT +; RUN: opt -instcombine -S < %s -mtriple msp430 | FileCheck %s --check-prefixes=CHECK,CHECKI16,SQRT +; RUN: opt -instcombine -S < %s -mtriple msp430 -disable-builtin sqrt | FileCheck %s --check-prefixes=CHECK,CHECKI16,NOSQRT + declare double @llvm.pow.f64(double, double) declare float @llvm.pow.f32(float, float) declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>) @@ -111,9 +114,13 @@ define float @test_simplify_32(float %x) { ; pow(x, 33.0) define double @test_simplify_33(double %x) { -; CHECK-LABEL: @test_simplify_33( -; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64(double [[X:%.*]], i32 33) -; CHECK-NEXT: ret double [[TMP1]] +; CHECKI32-LABEL: @test_simplify_33( +; CHECKI32-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64.i32(double [[X:%.*]], i32 33) +; CHECKI32-NEXT: ret double [[TMP1]] +; +; CHECKI16-LABEL: @test_simplify_33( +; CHECKI16-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64.i16(double [[X:%.*]], i16 33) +; CHECKI16-NEXT: ret double [[TMP1]] ; %1 = call fast double @llvm.pow.f64(double %x, double 
3.300000e+01) ret double %1 @@ -235,13 +242,9 @@ define <4 x float> @test_simplify_3_5(<4 x float> %x) { ; (float)pow((double)(float)x, 0.5) define float @shrink_pow_libcall_half(float %x) { -; SQRT-LABEL: @shrink_pow_libcall_half( -; SQRT-NEXT: [[SQRTF:%.*]] = call fast float @sqrtf(float [[X]]) -; SQRT-NEXT: ret float [[SQRTF]] -; -; NOSQRT-LABEL: @shrink_pow_libcall_half( -; NOSQRT-NEXT: [[SQRTF:%.*]] = call fast float @sqrtf(float [[X:%.*]]) -; NOSQRT-NEXT: ret float [[SQRTF]] +; CHECK-LABEL: @shrink_pow_libcall_half( +; CHECK-NEXT: [[SQRTF:%.*]] = call fast float @sqrtf(float [[X:%.*]]) +; CHECK-NEXT: ret float [[SQRTF]] ; %dx = fpext float %x to double %call = call fast double @pow(double %dx, double 0.5) diff --git a/test/Transforms/InstCombine/pow_fp_int.ll b/test/Transforms/InstCombine/pow_fp_int.ll index e0e13c642ba..292f56f226d 100644 --- a/test/Transforms/InstCombine/pow_fp_int.ll +++ b/test/Transforms/InstCombine/pow_fp_int.ll @@ -1,11 +1,11 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -mtriple unknown -instcombine -S < %s | FileCheck %s ; PR42190 +; Can't generate test checks due to PR42740. 
define double @pow_sitofp_const_base_fast(i32 %x) { ; CHECK-LABEL: @pow_sitofp_const_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[X:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[X:%.*]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP1]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -18,7 +18,7 @@ define double @pow_sitofp_const_base_fast(i32 %x) { define double @pow_uitofp_const_base_fast(i31 %x) { ; CHECK-LABEL: @pow_uitofp_const_base_fast( ; CHECK-NEXT: [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -30,7 +30,7 @@ define double @pow_uitofp_const_base_fast(i31 %x) { define double @pow_sitofp_double_const_base_fast(i32 %x) { ; CHECK-LABEL: @pow_sitofp_double_const_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call afn double @llvm.powi.f64(double 7.000000e+00, i32 [[X:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call afn double @llvm.powi.f64.i32(double 7.000000e+00, i32 [[X:%.*]]) ; CHECK-NEXT: ret double [[TMP1]] ; %subfp = sitofp i32 %x to double @@ -41,7 +41,7 @@ define double @pow_sitofp_double_const_base_fast(i32 %x) { define double @pow_uitofp_double_const_base_fast(i31 %x) { ; CHECK-LABEL: @pow_uitofp_double_const_base_fast( ; CHECK-NEXT: [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64(double 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64.i32(double 7.000000e+00, i32 [[TMP1]]) ; CHECK-NEXT: ret double [[TMP2]] ; %subfp = uitofp i31 %x to double @@ -104,7 +104,7 @@ define double @pow_uitofp_const_base_power_of_2_fast(i31 %x) { define double @pow_sitofp_float_base_fast(float %base, i32 %x) { ; 
CHECK-LABEL: @pow_sitofp_float_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call afn float @llvm.powi.f32(float [[BASE:%.*]], i32 [[X:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call afn float @llvm.powi.f32.i32(float [[BASE:%.*]], i32 [[X:%.*]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP1]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -117,7 +117,7 @@ define double @pow_sitofp_float_base_fast(float %base, i32 %x) { define double @pow_uitofp_float_base_fast(float %base, i31 %x) { ; CHECK-LABEL: @pow_uitofp_float_base_fast( ; CHECK-NEXT: [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float [[BASE:%.*]], i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i32(float [[BASE:%.*]], i32 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -129,7 +129,7 @@ define double @pow_uitofp_float_base_fast(float %base, i31 %x) { define double @pow_sitofp_double_base_fast(double %base, i32 %x) { ; CHECK-LABEL: @pow_sitofp_double_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call afn double @llvm.powi.f64(double [[BASE:%.*]], i32 [[X:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call afn double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 [[X:%.*]]) ; CHECK-NEXT: ret double [[TMP1]] ; %subfp = sitofp i32 %x to double @@ -140,7 +140,7 @@ define double @pow_sitofp_double_base_fast(double %base, i32 %x) { define double @pow_uitofp_double_base_fast(double %base, i31 %x) { ; CHECK-LABEL: @pow_uitofp_double_base_fast( ; CHECK-NEXT: [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64(double [[BASE:%.*]], i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 [[TMP1]]) ; CHECK-NEXT: ret double [[TMP2]] ; %subfp = uitofp i31 %x to double @@ -151,7 +151,7 @@ define double @pow_uitofp_double_base_fast(double %base, i31 %x) { define double @pow_sitofp_const_base_fast_i8(i8 %x) { ; 
CHECK-LABEL: @pow_sitofp_const_base_fast_i8( ; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -164,7 +164,7 @@ define double @pow_sitofp_const_base_fast_i8(i8 %x) { define double @pow_sitofp_const_base_fast_i16(i16 %x) { ; CHECK-LABEL: @pow_sitofp_const_base_fast_i16( ; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -178,7 +178,7 @@ define double @pow_sitofp_const_base_fast_i16(i16 %x) { define double @pow_uitofp_const_base_fast_i8(i8 %x) { ; CHECK-LABEL: @pow_uitofp_const_base_fast_i8( ; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -191,7 +191,7 @@ define double @pow_uitofp_const_base_fast_i8(i8 %x) { define double @pow_uitofp_const_base_fast_i16(i16 %x) { ; CHECK-LABEL: @pow_uitofp_const_base_fast_i16( ; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -203,7 +203,7 @@ define double 
@pow_uitofp_const_base_fast_i16(i16 %x) { define double @powf_exp_const_int_fast(double %base) { ; CHECK-LABEL: @powf_exp_const_int_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64(double [[BASE:%.*]], i32 40) +; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 40) ; CHECK-NEXT: ret double [[TMP1]] ; %res = tail call fast double @llvm.pow.f64(double %base, double 4.000000e+01) @@ -212,7 +212,7 @@ define double @powf_exp_const_int_fast(double %base) { define double @powf_exp_const2_int_fast(double %base) { ; CHECK-LABEL: @powf_exp_const2_int_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64(double [[BASE:%.*]], i32 -40) +; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 -40) ; CHECK-NEXT: ret double [[TMP1]] ; %res = tail call fast double @llvm.pow.f64(double %base, double -4.000000e+01) diff --git a/test/Transforms/InstCombine/pow_fp_int16.ll b/test/Transforms/InstCombine/pow_fp_int16.ll index 94e6ef2885d..b901ab99c53 100644 --- a/test/Transforms/InstCombine/pow_fp_int16.ll +++ b/test/Transforms/InstCombine/pow_fp_int16.ll @@ -2,16 +2,11 @@ ; Test case was copied from pow_fp_int.ll but adjusted for 16-bit int. ; Assuming that we can't generate test checks for the same reason (PR42740). -; -; FIXME: All calls to powi.f32 using i32 for the exponent is faulty. The -; RT lib functions expects an "C type int" which maps to i16 for -; some targets such as msp430. 
define double @pow_sitofp_const_base_fast(i16 %x) { ; CHECK-LABEL: @pow_sitofp_const_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) -; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double +; CHECK-NEXT: [[TMP1:%.*]] = call afn float @llvm.powi.f32.i16(float 7.000000e+00, i16 [[X:%.*]]) +; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP1]] to double ; CHECK-NEXT: ret double [[RES]] ; %subfp = sitofp i16 %x to float @@ -22,8 +17,8 @@ define double @pow_sitofp_const_base_fast(i16 %x) { define double @pow_uitofp_const_base_fast(i15 %x) { ; CHECK-LABEL: @pow_uitofp_const_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i16 +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i16(float 7.000000e+00, i16 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -35,9 +30,8 @@ define double @pow_uitofp_const_base_fast(i15 %x) { define double @pow_sitofp_double_const_base_fast(i16 %x) { ; CHECK-LABEL: @pow_sitofp_double_const_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64(double 7.000000e+00, i32 [[TMP1]]) -; CHECK-NEXT: ret double [[TMP2]] +; CHECK-NEXT: [[TMP1:%.*]] = call afn double @llvm.powi.f64.i16(double 7.000000e+00, i16 [[X:%.*]]) +; CHECK-NEXT: ret double [[TMP1]] ; %subfp = sitofp i16 %x to double %pow = tail call afn double @llvm.pow.f64(double 7.000000e+00, double %subfp) @@ -46,8 +40,8 @@ define double @pow_sitofp_double_const_base_fast(i16 %x) { define double @pow_uitofp_double_const_base_fast(i15 %x) { ; CHECK-LABEL: @pow_uitofp_double_const_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn 
double @llvm.powi.f64(double 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i16 +; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64.i16(double 7.000000e+00, i16 [[TMP1]]) ; CHECK-NEXT: ret double [[TMP2]] ; %subfp = uitofp i15 %x to double @@ -110,9 +104,8 @@ define double @pow_uitofp_const_base_power_of_2_fast(i15 %x) { define double @pow_sitofp_float_base_fast(float %base, i16 %x) { ; CHECK-LABEL: @pow_sitofp_float_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float [[BASE:%.*]], i32 [[TMP1]]) -; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double +; CHECK-NEXT: [[TMP1:%.*]] = call afn float @llvm.powi.f32.i16(float [[BASE:%.*]], i16 [[X:%.*]]) +; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP1]] to double ; CHECK-NEXT: ret double [[RES]] ; %subfp = sitofp i16 %x to float @@ -123,8 +116,8 @@ define double @pow_sitofp_float_base_fast(float %base, i16 %x) { define double @pow_uitofp_float_base_fast(float %base, i15 %x) { ; CHECK-LABEL: @pow_uitofp_float_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float [[BASE:%.*]], i32 [[TMP1]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i16 +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i16(float [[BASE:%.*]], i16 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -136,9 +129,8 @@ define double @pow_uitofp_float_base_fast(float %base, i15 %x) { define double @pow_sitofp_double_base_fast(double %base, i16 %x) { ; CHECK-LABEL: @pow_sitofp_double_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64(double [[BASE:%.*]], i32 [[TMP1]]) -; CHECK-NEXT: ret double [[TMP2]] +; CHECK-NEXT: [[TMP1:%.*]] = call afn double @llvm.powi.f64.i16(double [[BASE:%.*]], i16 [[X:%.*]]) +; 
CHECK-NEXT: ret double [[TMP1]] ; %subfp = sitofp i16 %x to double %res = tail call afn double @llvm.pow.f64(double %base, double %subfp) @@ -147,8 +139,8 @@ define double @pow_sitofp_double_base_fast(double %base, i16 %x) { define double @pow_uitofp_double_base_fast(double %base, i15 %x) { ; CHECK-LABEL: @pow_uitofp_double_base_fast( -; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64(double [[BASE:%.*]], i32 [[TMP1]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i15 [[X:%.*]] to i16 +; CHECK-NEXT: [[TMP2:%.*]] = call afn double @llvm.powi.f64.i16(double [[BASE:%.*]], i16 [[TMP1]]) ; CHECK-NEXT: ret double [[TMP2]] ; %subfp = uitofp i15 %x to double @@ -158,8 +150,8 @@ define double @pow_uitofp_double_base_fast(double %base, i15 %x) { define double @pow_sitofp_const_base_fast_i8(i8 %x) { ; CHECK-LABEL: @pow_sitofp_const_base_fast_i8( -; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[X:%.*]] to i16 +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i16(float 7.000000e+00, i16 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -171,9 +163,8 @@ define double @pow_sitofp_const_base_fast_i8(i8 %x) { define double @pow_sitofp_const_base_fast_i16(i16 %x) { ; CHECK-LABEL: @pow_sitofp_const_base_fast_i16( -; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) -; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double +; CHECK-NEXT: [[TMP1:%.*]] = call afn float @llvm.powi.f32.i16(float 7.000000e+00, i16 [[X:%.*]]) +; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP1]] to double ; CHECK-NEXT: ret double [[RES]] ; %subfp = sitofp i16 %x to float @@ -185,8 +176,8 @@ define double @pow_sitofp_const_base_fast_i16(i16 %x) { define 
double @pow_uitofp_const_base_fast_i8(i8 %x) { ; CHECK-LABEL: @pow_uitofp_const_base_fast_i8( -; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) +; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X:%.*]] to i16 +; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32.i16(float 7.000000e+00, i16 [[TMP1]]) ; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double ; CHECK-NEXT: ret double [[RES]] ; @@ -198,9 +189,9 @@ define double @pow_uitofp_const_base_fast_i8(i8 %x) { define double @pow_uitofp_const_base_afn_i16(i16 %x) { ; CHECK-LABEL: @pow_uitofp_const_base_afn_i16( -; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call afn float @llvm.powi.f32(float 7.000000e+00, i32 [[TMP1]]) -; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double +; CHECK-NEXT: [[SUBFP:%.*]] = uitofp i16 [[X:%.*]] to float +; CHECK-NEXT: [[POW:%.*]] = tail call afn float @llvm.pow.f32(float 7.000000e+00, float [[SUBFP]]) +; CHECK-NEXT: [[RES:%.*]] = fpext float [[POW]] to double ; CHECK-NEXT: ret double [[RES]] ; %subfp = uitofp i16 %x to float @@ -211,7 +202,7 @@ define double @pow_uitofp_const_base_afn_i16(i16 %x) { define double @powf_exp_const_int_fast(double %base) { ; CHECK-LABEL: @powf_exp_const_int_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64(double [[BASE:%.*]], i32 40) +; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64.i16(double [[BASE:%.*]], i16 40) ; CHECK-NEXT: ret double [[TMP1]] ; %res = tail call fast double @llvm.pow.f64(double %base, double 4.000000e+01) @@ -220,7 +211,7 @@ define double @powf_exp_const_int_fast(double %base) { define double @powf_exp_const2_int_fast(double %base) { ; CHECK-LABEL: @powf_exp_const2_int_fast( -; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64(double [[BASE:%.*]], i32 -40) +; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.powi.f64.i16(double [[BASE:%.*]], i16 -40) ; 
CHECK-NEXT: ret double [[TMP1]] ; %res = tail call fast double @llvm.pow.f64(double %base, double -4.000000e+01) @@ -272,9 +263,9 @@ define double @pow_uitofp_const_base_power_of_2_fast_i16(i16 %x) { define double @pow_uitofp_float_base_fast_i16(float %base, i16 %x) { ; CHECK-LABEL: @pow_uitofp_float_base_fast_i16( -; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call fast float @llvm.powi.f32(float [[BASE:%.*]], i32 [[TMP1]]) -; CHECK-NEXT: [[RES:%.*]] = fpext float [[TMP2]] to double +; CHECK-NEXT: [[SUBFP:%.*]] = uitofp i16 [[X:%.*]] to float +; CHECK-NEXT: [[POW:%.*]] = tail call fast float @llvm.pow.f32(float [[BASE:%.*]], float [[SUBFP]]) +; CHECK-NEXT: [[RES:%.*]] = fpext float [[POW]] to double ; CHECK-NEXT: ret double [[RES]] ; %subfp = uitofp i16 %x to float @@ -285,9 +276,9 @@ define double @pow_uitofp_float_base_fast_i16(float %base, i16 %x) { define double @pow_uitofp_double_base_fast_i16(double %base, i16 %x) { ; CHECK-LABEL: @pow_uitofp_double_base_fast_i16( -; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = call fast double @llvm.powi.f64(double [[BASE:%.*]], i32 [[TMP1]]) -; CHECK-NEXT: ret double [[TMP2]] +; CHECK-NEXT: [[SUBFP:%.*]] = uitofp i16 [[X:%.*]] to double +; CHECK-NEXT: [[RES:%.*]] = tail call fast double @llvm.pow.f64(double [[BASE:%.*]], double [[SUBFP]]) +; CHECK-NEXT: ret double [[RES]] ; %subfp = uitofp i16 %x to double %res = tail call fast double @llvm.pow.f64(double %base, double %subfp) diff --git a/test/Transforms/InstSimplify/ConstProp/math-2.ll b/test/Transforms/InstSimplify/ConstProp/math-2.ll index 3d8f9c34335..0b39725470b 100644 --- a/test/Transforms/InstSimplify/ConstProp/math-2.ll +++ b/test/Transforms/InstSimplify/ConstProp/math-2.ll @@ -90,12 +90,12 @@ define float @i_powf() { ret float %res } -declare double @llvm.powi.f64(double, i32) +declare double @llvm.powi.f64.i32(double, i32) define double @i_powi() { ; CHECK-LABEL: @i_powi( ; CHECK-NEXT: 
ret double 1.000000e+00 ; - %res = tail call fast double @llvm.powi.f64(double 1.0, i32 2) + %res = tail call fast double @llvm.powi.f64.i32(double 1.0, i32 2) ret double %res } diff --git a/test/Transforms/InstSimplify/call.ll b/test/Transforms/InstSimplify/call.ll index 721f4941e51..db3a336c135 100644 --- a/test/Transforms/InstSimplify/call.ll +++ b/test/Transforms/InstSimplify/call.ll @@ -494,14 +494,24 @@ declare noalias i8* @malloc(i64) declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>) -declare double @llvm.powi.f64(double, i32) -declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32) +declare double @llvm.powi.f64.i16(double, i16) +declare <2 x double> @llvm.powi.v2f64.i16(<2 x double>, i16) +declare double @llvm.powi.f64.i32(double, i32) +declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32) define double @constant_fold_powi() { ; CHECK-LABEL: @constant_fold_powi( ; CHECK-NEXT: ret double 9.000000e+00 ; - %t0 = call double @llvm.powi.f64(double 3.00000e+00, i32 2) + %t0 = call double @llvm.powi.f64.i32(double 3.00000e+00, i32 2) + ret double %t0 +} + +define double @constant_fold_powi_i16() { +; CHECK-LABEL: @constant_fold_powi_i16( +; CHECK-NEXT: ret double 9.000000e+00 +; + %t0 = call double @llvm.powi.f64.i16(double 3.00000e+00, i16 2) ret double %t0 } @@ -509,7 +519,15 @@ define <2 x double> @constant_fold_powi_vec() { ; CHECK-LABEL: @constant_fold_powi_vec( ; CHECK-NEXT: ret <2 x double> ; - %t0 = call <2 x double> @llvm.powi.v2f64(<2 x double> , i32 2) + %t0 = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> , i32 2) + ret <2 x double> %t0 +} + +define <2 x double> @constant_fold_powi_vec_i16() { +; CHECK-LABEL: @constant_fold_powi_vec_i16( +; CHECK-NEXT: ret <2 x double> +; + %t0 = call <2 x double> @llvm.powi.v2f64.i16(<2 x double> , i16 2) ret <2 x double> %t0 } diff --git a/test/Transforms/InstSimplify/floating-point-compare.ll b/test/Transforms/InstSimplify/floating-point-compare.ll index 
e5184ce5c46..1d2be21e949 100644 --- a/test/Transforms/InstSimplify/floating-point-compare.ll +++ b/test/Transforms/InstSimplify/floating-point-compare.ll @@ -176,7 +176,7 @@ declare <2 x float> @llvm.fabs.v2f32(<2 x float>) declare <3 x float> @llvm.fabs.v3f32(<3 x float>) declare <2 x double> @llvm.fabs.v2f64(<2 x double>) declare float @llvm.sqrt.f32(float) -declare double @llvm.powi.f64(double,i32) +declare double @llvm.powi.f64.i32(double,i32) declare float @llvm.exp.f32(float) declare float @llvm.minnum.f32(float, float) declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) @@ -248,10 +248,10 @@ define i1 @orderedLessZeroPowi(double,double) { ; CHECK-NEXT: ret i1 false ; ; Even constant exponent - %a = call double @llvm.powi.f64(double %0, i32 2) + %a = call double @llvm.powi.f64.i32(double %0, i32 2) %square = fmul double %1, %1 ; Odd constant exponent with provably non-negative base - %b = call double @llvm.powi.f64(double %square, i32 3) + %b = call double @llvm.powi.f64.i32(double %square, i32 3) %c = fadd double %a, %b %olt = fcmp olt double %b, 0.000000e+00 ret i1 %olt diff --git a/test/Transforms/InstSimplify/fold-intrinsics.ll b/test/Transforms/InstSimplify/fold-intrinsics.ll index e484704e8a7..a9b4997d3f5 100644 --- a/test/Transforms/InstSimplify/fold-intrinsics.ll +++ b/test/Transforms/InstSimplify/fold-intrinsics.ll @@ -1,6 +1,7 @@ ; RUN: opt < %s -instsimplify -S | FileCheck %s -declare double @llvm.powi.f64(double, i32) nounwind readonly +declare float @llvm.powi.f32.i16(float, i16) nounwind readonly +declare double @llvm.powi.f64.i32(double, i32) nounwind readonly declare i32 @llvm.bswap.i32(i32) ; A @@ -14,10 +15,10 @@ define i32 @test_bswap(i32 %a) nounwind { } define void @powi(double %V, double *%P) { - %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind + %B = tail call double @llvm.powi.f64.i32(double %V, i32 0) nounwind store volatile double %B, double* %P - %C = tail call double @llvm.powi.f64(double %V, i32 1) 
nounwind + %C = tail call double @llvm.powi.f64.i32(double %V, i32 1) nounwind store volatile double %C, double* %P ret void @@ -25,3 +26,16 @@ define void @powi(double %V, double *%P) { ; CHECK: store volatile double 1.0 ; CHECK: store volatile double %V } + +define void @powi_i16(float %V, float *%P) { + %B = tail call float @llvm.powi.f32.i16(float %V, i16 0) nounwind + store volatile float %B, float* %P + + %C = tail call float @llvm.powi.f32.i16(float %V, i16 1) nounwind + store volatile float %C, float* %P + + ret void +; CHECK-LABEL: @powi_i16( +; CHECK: store volatile float 1.0 +; CHECK: store volatile float %V +} diff --git a/test/Transforms/LICM/hoist-round.ll b/test/Transforms/LICM/hoist-round.ll index c48847b40db..f76919eac81 100644 --- a/test/Transforms/LICM/hoist-round.ll +++ b/test/Transforms/LICM/hoist-round.ll @@ -45,7 +45,7 @@ for.body: %tmp.10 = call float @llvm.maxnum.f32(float %tmp.9, float %arg2) %tmp.11 = call float @llvm.minimum.f32(float %tmp.10, float %arg2) %tmp.12 = call float @llvm.maximum.f32(float %tmp.11, float %arg2) - %tmp.13 = call float @llvm.powi.f32(float %tmp.12, i32 4) + %tmp.13 = call float @llvm.powi.f32.i32(float %tmp.12, i32 4) %tmp.14 = call float @llvm.roundeven.f32(float %tmp.13) call void @consume(float %tmp.14) %IND.new = add i32 %IND, 1 @@ -69,5 +69,5 @@ declare float @llvm.minnum.f32(float, float) declare float @llvm.maxnum.f32(float, float) declare float @llvm.minimum.f32(float, float) declare float @llvm.maximum.f32(float, float) -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) declare float @llvm.roundeven.f32(float) diff --git a/test/Transforms/LoopVectorize/intrinsic.ll b/test/Transforms/LoopVectorize/intrinsic.ll index 4639579d920..a4c90077221 100644 --- a/test/Transforms/LoopVectorize/intrinsic.ll +++ b/test/Transforms/LoopVectorize/intrinsic.ll @@ -1141,7 +1141,7 @@ for.end: ; preds = %for.body ret void } -declare double @llvm.powi.f64(double %Val, i32 %power) nounwind 
readnone +declare double @llvm.powi.f64.i32(double %Val, i32 %power) nounwind readnone ;CHECK-LABEL: @powi_f64( ;CHECK: llvm.powi.v4f64 @@ -1155,7 +1155,7 @@ for.body: ; preds = %entry, %for.body %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ] %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv %0 = load double, double* %arrayidx, align 8 - %call = tail call double @llvm.powi.f64(double %0, i32 %P) nounwind readnone + %call = tail call double @llvm.powi.f64.i32(double %0, i32 %P) nounwind readnone %arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv store double %call, double* %arrayidx4, align 8 %indvars.iv.next = add i64 %indvars.iv, 1 @@ -1180,7 +1180,7 @@ for.body: ; preds = %entry, %for.body %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv %0 = load double, double* %arrayidx, align 8 %1 = trunc i64 %indvars.iv to i32 - %call = tail call double @llvm.powi.f64(double %0, i32 %1) nounwind readnone + %call = tail call double @llvm.powi.f64.i32(double %0, i32 %1) nounwind readnone %arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv store double %call, double* %arrayidx4, align 8 %indvars.iv.next = add i64 %indvars.iv, 1 diff --git a/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll b/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll index 16321c4d22a..efbdb14ddb8 100644 --- a/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll +++ b/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll @@ -33,7 +33,7 @@ entry: } -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) define void @fn2(i32* %a, i32* %b, float* %c) { ; CHECK-LABEL: @fn2( ; CHECK-NEXT: entry: @@ -50,7 +50,7 @@ define void @fn2(i32* %a, i32* %b, float* %c) { ; CHECK-NEXT: [[TMP4:%.*]] = add <4 x i32> [[TMP1]], [[TMP3]] ; CHECK-NEXT: [[TMP5:%.*]] = sitofp <4 x i32> [[TMP4]] to <4 x float> ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> 
[[TMP4]], i32 0 -; CHECK-NEXT: [[TMP7:%.*]] = call <4 x float> @llvm.powi.v4f32(<4 x float> [[TMP5]], i32 [[TMP6]]) +; CHECK-NEXT: [[TMP7:%.*]] = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> [[TMP5]], i32 [[TMP6]]) ; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[C:%.*]], i32 1 ; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[C]], i32 2 ; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[C]], i32 3 @@ -63,7 +63,7 @@ entry: %i1 = load i32, i32* %b, align 4 %add1 = add i32 %i0, %i1 %fp1 = sitofp i32 %add1 to float - %call1 = tail call float @llvm.powi.f32(float %fp1,i32 %add1) nounwind readnone + %call1 = tail call float @llvm.powi.f32.i32(float %fp1,i32 %add1) nounwind readnone %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1 %i2 = load i32, i32* %arrayidx2, align 4 @@ -71,7 +71,7 @@ entry: %i3 = load i32, i32* %arrayidx3, align 4 %add2 = add i32 %i2, %i3 %fp2 = sitofp i32 %add2 to float - %call2 = tail call float @llvm.powi.f32(float %fp2,i32 %add1) nounwind readnone + %call2 = tail call float @llvm.powi.f32.i32(float %fp2,i32 %add1) nounwind readnone %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2 %i4 = load i32, i32* %arrayidx4, align 4 @@ -79,7 +79,7 @@ entry: %i5 = load i32, i32* %arrayidx5, align 4 %add3 = add i32 %i4, %i5 %fp3 = sitofp i32 %add3 to float - %call3 = tail call float @llvm.powi.f32(float %fp3,i32 %add1) nounwind readnone + %call3 = tail call float @llvm.powi.f32.i32(float %fp3,i32 %add1) nounwind readnone %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3 %i6 = load i32, i32* %arrayidx6, align 4 @@ -87,7 +87,7 @@ entry: %i7 = load i32, i32* %arrayidx7, align 4 %add4 = add i32 %i6, %i7 %fp4 = sitofp i32 %add4 to float - %call4 = tail call float @llvm.powi.f32(float %fp4,i32 %add1) nounwind readnone + %call4 = tail call float @llvm.powi.f32.i32(float %fp4,i32 %add1) nounwind readnone store float %call1, float* %c, align 4 %arrayidx8 = getelementptr 
inbounds float, float* %c, i32 1 diff --git a/test/Transforms/SLPVectorizer/X86/intrinsic.ll b/test/Transforms/SLPVectorizer/X86/intrinsic.ll index ae60c165d7c..0fbafe0f97d 100644 --- a/test/Transforms/SLPVectorizer/X86/intrinsic.ll +++ b/test/Transforms/SLPVectorizer/X86/intrinsic.ll @@ -389,7 +389,7 @@ entry: } -declare float @llvm.powi.f32(float, i32) +declare float @llvm.powi.f32.i32(float, i32) define void @vec_powi_f32(float* %a, float* %b, float* %c, i32 %P) { ; CHECK-LABEL: @vec_powi_f32( ; CHECK-NEXT: entry: @@ -398,7 +398,7 @@ define void @vec_powi_f32(float* %a, float* %b, float* %c, i32 %P) { ; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[B:%.*]] to <4 x float>* ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]] -; CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.powi.v4f32(<4 x float> [[TMP4]], i32 [[P:%.*]]) +; CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> [[TMP4]], i32 [[P:%.*]]) ; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[C:%.*]] to <4 x float>* ; CHECK-NEXT: store <4 x float> [[TMP5]], <4 x float>* [[TMP6]], align 4 ; CHECK-NEXT: ret void @@ -407,28 +407,28 @@ entry: %i0 = load float, float* %a, align 4 %i1 = load float, float* %b, align 4 %add1 = fadd float %i0, %i1 - %call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone + %call1 = tail call float @llvm.powi.f32.i32(float %add1,i32 %P) nounwind readnone %arrayidx2 = getelementptr inbounds float, float* %a, i32 1 %i2 = load float, float* %arrayidx2, align 4 %arrayidx3 = getelementptr inbounds float, float* %b, i32 1 %i3 = load float, float* %arrayidx3, align 4 %add2 = fadd float %i2, %i3 - %call2 = tail call float @llvm.powi.f32(float %add2,i32 %P) nounwind readnone + %call2 = tail call float @llvm.powi.f32.i32(float %add2,i32 %P) nounwind readnone %arrayidx4 = getelementptr inbounds float, float* %a, i32 2 %i4 = load float, float* %arrayidx4, align 4 
%arrayidx5 = getelementptr inbounds float, float* %b, i32 2 %i5 = load float, float* %arrayidx5, align 4 %add3 = fadd float %i4, %i5 - %call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone + %call3 = tail call float @llvm.powi.f32.i32(float %add3,i32 %P) nounwind readnone %arrayidx6 = getelementptr inbounds float, float* %a, i32 3 %i6 = load float, float* %arrayidx6, align 4 %arrayidx7 = getelementptr inbounds float, float* %b, i32 3 %i7 = load float, float* %arrayidx7, align 4 %add4 = fadd float %i6, %i7 - %call4 = tail call float @llvm.powi.f32(float %add4,i32 %P) nounwind readnone + %call4 = tail call float @llvm.powi.f32.i32(float %add4,i32 %P) nounwind readnone store float %call1, float* %c, align 4 %arrayidx8 = getelementptr inbounds float, float* %c, i32 1 @@ -448,25 +448,25 @@ define void @vec_powi_f32_neg(float* %a, float* %b, float* %c, i32 %P, i32 %Q) { ; CHECK-NEXT: [[I0:%.*]] = load float, float* [[A:%.*]], align 4 ; CHECK-NEXT: [[I1:%.*]] = load float, float* [[B:%.*]], align 4 ; CHECK-NEXT: [[ADD1:%.*]] = fadd float [[I0]], [[I1]] -; CHECK-NEXT: [[CALL1:%.*]] = tail call float @llvm.powi.f32(float [[ADD1]], i32 [[P:%.*]]) #3 +; CHECK-NEXT: [[CALL1:%.*]] = tail call float @llvm.powi.f32.i32(float [[ADD1]], i32 [[P:%.*]]) #3 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[A]], i32 1 ; CHECK-NEXT: [[I2:%.*]] = load float, float* [[ARRAYIDX2]], align 4 ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[B]], i32 1 ; CHECK-NEXT: [[I3:%.*]] = load float, float* [[ARRAYIDX3]], align 4 ; CHECK-NEXT: [[ADD2:%.*]] = fadd float [[I2]], [[I3]] -; CHECK-NEXT: [[CALL2:%.*]] = tail call float @llvm.powi.f32(float [[ADD2]], i32 [[Q:%.*]]) #3 +; CHECK-NEXT: [[CALL2:%.*]] = tail call float @llvm.powi.f32.i32(float [[ADD2]], i32 [[Q:%.*]]) #3 ; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[A]], i32 2 ; CHECK-NEXT: [[I4:%.*]] = load float, float* [[ARRAYIDX4]], align 4 ; 
CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[B]], i32 2 ; CHECK-NEXT: [[I5:%.*]] = load float, float* [[ARRAYIDX5]], align 4 ; CHECK-NEXT: [[ADD3:%.*]] = fadd float [[I4]], [[I5]] -; CHECK-NEXT: [[CALL3:%.*]] = tail call float @llvm.powi.f32(float [[ADD3]], i32 [[P]]) #3 +; CHECK-NEXT: [[CALL3:%.*]] = tail call float @llvm.powi.f32.i32(float [[ADD3]], i32 [[P]]) #3 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[A]], i32 3 ; CHECK-NEXT: [[I6:%.*]] = load float, float* [[ARRAYIDX6]], align 4 ; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[B]], i32 3 ; CHECK-NEXT: [[I7:%.*]] = load float, float* [[ARRAYIDX7]], align 4 ; CHECK-NEXT: [[ADD4:%.*]] = fadd float [[I6]], [[I7]] -; CHECK-NEXT: [[CALL4:%.*]] = tail call float @llvm.powi.f32(float [[ADD4]], i32 [[Q]]) #3 +; CHECK-NEXT: [[CALL4:%.*]] = tail call float @llvm.powi.f32.i32(float [[ADD4]], i32 [[Q]]) #3 ; CHECK-NEXT: store float [[CALL1]], float* [[C:%.*]], align 4 ; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[C]], i32 1 ; CHECK-NEXT: store float [[CALL2]], float* [[ARRAYIDX8]], align 4 @@ -480,28 +480,28 @@ entry: %i0 = load float, float* %a, align 4 %i1 = load float, float* %b, align 4 %add1 = fadd float %i0, %i1 - %call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone + %call1 = tail call float @llvm.powi.f32.i32(float %add1,i32 %P) nounwind readnone %arrayidx2 = getelementptr inbounds float, float* %a, i32 1 %i2 = load float, float* %arrayidx2, align 4 %arrayidx3 = getelementptr inbounds float, float* %b, i32 1 %i3 = load float, float* %arrayidx3, align 4 %add2 = fadd float %i2, %i3 - %call2 = tail call float @llvm.powi.f32(float %add2,i32 %Q) nounwind readnone + %call2 = tail call float @llvm.powi.f32.i32(float %add2,i32 %Q) nounwind readnone %arrayidx4 = getelementptr inbounds float, float* %a, i32 2 %i4 = load float, float* %arrayidx4, align 4 %arrayidx5 = getelementptr inbounds 
float, float* %b, i32 2 %i5 = load float, float* %arrayidx5, align 4 %add3 = fadd float %i4, %i5 - %call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone + %call3 = tail call float @llvm.powi.f32.i32(float %add3,i32 %P) nounwind readnone %arrayidx6 = getelementptr inbounds float, float* %a, i32 3 %i6 = load float, float* %arrayidx6, align 4 %arrayidx7 = getelementptr inbounds float, float* %b, i32 3 %i7 = load float, float* %arrayidx7, align 4 %add4 = fadd float %i6, %i7 - %call4 = tail call float @llvm.powi.f32(float %add4,i32 %Q) nounwind readnone + %call4 = tail call float @llvm.powi.f32.i32(float %add4,i32 %Q) nounwind readnone store float %call1, float* %c, align 4 %arrayidx8 = getelementptr inbounds float, float* %c, i32 1 diff --git a/test/Transforms/Scalarizer/intrinsics.ll b/test/Transforms/Scalarizer/intrinsics.ll index c42eb676100..ece3f5a11b9 100644 --- a/test/Transforms/Scalarizer/intrinsics.ll +++ b/test/Transforms/Scalarizer/intrinsics.ll @@ -19,7 +19,7 @@ declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>) declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) ; Unary fp plus any scalar operand -declare <2 x float> @llvm.powi.v2f32(<2 x float>, i32) +declare <2 x float> @llvm.powi.v2f32.i32(<2 x float>, i32) ; Binary int plus constant scalar operand declare <2 x i32> @llvm.smul.fix.sat.v2i32(<2 x i32>, <2 x i32>, i32) @@ -104,13 +104,13 @@ define <2 x i32> @scalarize_ctlz_v2i32(<2 x i32> %x) #0 { } ; CHECK-LABEL: @scalarize_powi_v2f32( -; CHECK: %powi.i0 = call float @llvm.powi.f32(float %x.i0, i32 %y) -; CHECK: %powi.i1 = call float @llvm.powi.f32(float %x.i1, i32 %y) +; CHECK: %powi.i0 = call float @llvm.powi.f32.i32(float %x.i0, i32 %y) +; CHECK: %powi.i1 = call float @llvm.powi.f32.i32(float %x.i1, i32 %y) ; CHECK: %powi.upto0 = insertelement <2 x float> poison, float %powi.i0, i32 0 ; CHECK: %powi = insertelement <2 x float> %powi.upto0, float %powi.i1, i32 1 ; CHECK: ret <2 x float> %powi define <2 x float> 
@scalarize_powi_v2f32(<2 x float> %x, i32 %y) #0 { - %powi = call <2 x float> @llvm.powi.v2f32(<2 x float> %x, i32 %y) + %powi = call <2 x float> @llvm.powi.v2f32.i32(<2 x float> %x, i32 %y) ret <2 x float> %powi } diff --git a/unittests/Analysis/ValueTrackingTest.cpp b/unittests/Analysis/ValueTrackingTest.cpp index 582cfe754bd..e8b1a189531 100644 --- a/unittests/Analysis/ValueTrackingTest.cpp +++ b/unittests/Analysis/ValueTrackingTest.cpp @@ -814,7 +814,7 @@ TEST(ValueTracking, propagatesPoison) { "declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)\n" "declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)\n" "declare float @llvm.sqrt.f32(float)\n" - "declare float @llvm.powi.f32(float, i32)\n" + "declare float @llvm.powi.f32.i32(float, i32)\n" "declare float @llvm.sin.f32(float)\n" "declare float @llvm.cos.f32(float)\n" "declare float @llvm.pow.f32(float, float)\n" @@ -876,7 +876,7 @@ TEST(ValueTracking, propagatesPoison) { {true, "call {i32, i1} @llvm.usub.with.overflow.i32(i32 %x, i32 %y)"}, {true, "call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 %y)"}, {false, "call float @llvm.sqrt.f32(float %fx)"}, - {false, "call float @llvm.powi.f32(float %fx, i32 %x)"}, + {false, "call float @llvm.powi.f32.i32(float %fx, i32 %x)"}, {false, "call float @llvm.sin.f32(float %fx)"}, {false, "call float @llvm.cos.f32(float %fx)"}, {false, "call float @llvm.pow.f32(float %fx, float %fy)"},