From d6bb2bcfabaa503145c7ce9fa47503fe48caa825 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 31 Jan 2019 17:23:29 +0000
Subject: [PATCH] [CallSite removal] Remove CallSite uses from InstCombine.

Reviewers: chandlerc

Reviewed By: chandlerc

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D57494

llvm-svn: 352771
---
 include/llvm/IR/InstrTypes.h                  |  20 +++
 lib/IR/Instructions.cpp                       |  15 ++
 .../InstCombine/InstCombineCalls.cpp          | 168 +++++++++---------
 .../InstCombine/InstCombineInternal.h         |   9 +-
 .../InstCombineLoadStoreAlloca.cpp            |  16 +-
 .../InstCombine/InstCombineMulDivRem.cpp      |   3 +-
 6 files changed, 130 insertions(+), 101 deletions(-)

diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index f0d52fdecdb..68c82eb135b 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -1095,6 +1095,19 @@ public:
     return isDataOperand(&UI.getUse());
   }
 
+  /// Given a value use iterator, return the data operand corresponding to it.
+  /// Iterator must actually correspond to a data operand.
+  unsigned getDataOperandNo(Value::const_user_iterator UI) const {
+    return getDataOperandNo(&UI.getUse());
+  }
+
+  /// Given a use for a data operand, get the data operand number that
+  /// corresponds to it.
+  unsigned getDataOperandNo(const Use *U) const {
+    assert(isDataOperand(U) && "Data operand # out of range!");
+    return U - data_operands_begin();
+  }
+
   /// Return the iterator pointing to the beginning of the argument list.
   User::op_iterator arg_begin() { return op_begin(); }
   User::const_op_iterator arg_begin() const {
@@ -1198,6 +1211,13 @@ public:
     return const_cast<CallBase *>(this)->getCaller();
   }
 
+  /// Tests if this call site must be tail call optimized. Only a CallInst can
+  /// be tail call optimized.
+  bool isMustTailCall() const;
+
+  /// Tests if this call site is marked as a tail call.
+  bool isTailCall() const;
+
   /// Returns the intrinsic ID of the intrinsic called or
   /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
   /// this is an indirect call.
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index f48a970fd40..6c7817adc6a 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -266,6 +266,21 @@ bool CallBase::isIndirectCall() const {
   return true;
 }
 
+/// Tests if this call site must be tail call optimized. Only a CallInst can
+/// be tail call optimized.
+bool CallBase::isMustTailCall() const {
+  if (auto *CI = dyn_cast<CallInst>(this))
+    return CI->isMustTailCall();
+  return false;
+}
+
+/// Tests if this call site is marked as a tail call.
+bool CallBase::isTailCall() const {
+  if (auto *CI = dyn_cast<CallInst>(this))
+    return CI->isTailCall();
+  return false;
+}
+
 Intrinsic::ID CallBase::getIntrinsicID() const {
   if (auto *F = getCalledFunction())
     return F->getIntrinsicID();
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index a705d7ac75f..7b09fe615f1 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -27,7 +27,6 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
@@ -1788,7 +1787,7 @@ static Instruction *canonicalizeConstantArg0ToArg1(CallInst &Call) {
 }
 
 /// CallInst simplification. This mostly only handles folding of intrinsic
-/// instructions. For normal calls, it allows visitCallSite to do the heavy
+/// instructions. For normal calls, it allows visitCallBase to do the heavy
 /// lifting.
 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
@@ -1805,10 +1804,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   }
 
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
-  if (!II) return visitCallSite(&CI);
+  if (!II) return visitCallBase(CI);
 
   // Intrinsics cannot occur in an invoke, so handle them here instead of in
-  // visitCallSite.
+  // visitCallBase.
   if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
     bool Changed = false;
 
@@ -3962,7 +3961,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       break;
     }
   }
-  return visitCallSite(II);
+  return visitCallBase(*II);
 }
 
 // Fence instruction simplification
@@ -3977,12 +3976,12 @@ Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
 
 // InvokeInst simplification
 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
-  return visitCallSite(&II);
+  return visitCallBase(II);
 }
 
 /// If this cast does not affect the value passed through the varargs area, we
 /// can eliminate the use of the cast.
-static bool isSafeToEliminateVarargsCast(const CallSite CS,
+static bool isSafeToEliminateVarargsCast(const CallBase &Call,
                                          const DataLayout &DL,
                                          const CastInst *const CI,
                                          const int ix) {
@@ -3994,13 +3993,13 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
   // TODO: This is probably something which should be expanded to all
   // intrinsics since the entire point of intrinsics is that
   // they are understandable by the optimizer.
-  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
+  if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
     return false;
 
   // The size of ByVal or InAlloca arguments is derived from the type, so we
   // can't change to a type with a different size. If the size were
   // passed explicitly we could avoid this check.
-  if (!CS.isByValOrInAllocaArgument(ix))
+  if (!Call.isByValOrInAllocaArgument(ix))
     return true;
 
   Type* SrcTy =
@@ -4109,9 +4108,9 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) {
 }
 
 /// Improvements for call and invoke instructions.
-Instruction *InstCombiner::visitCallSite(CallSite CS) {
-  if (isAllocLikeFn(CS.getInstruction(), &TLI))
-    return visitAllocSite(*CS.getInstruction());
+Instruction *InstCombiner::visitCallBase(CallBase &Call) {
+  if (isAllocLikeFn(&Call, &TLI))
+    return visitAllocSite(Call);
 
   bool Changed = false;
 
@@ -4121,49 +4120,49 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
   SmallVector<unsigned, 4> ArgNos;
   unsigned ArgNo = 0;
 
-  for (Value *V : CS.args()) {
+  for (Value *V : Call.args()) {
     if (V->getType()->isPointerTy() &&
-        !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
-        isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
+        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
+        isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
       ArgNos.push_back(ArgNo);
     ArgNo++;
   }
 
-  assert(ArgNo == CS.arg_size() && "sanity check");
+  assert(ArgNo == Call.arg_size() && "sanity check");
 
   if (!ArgNos.empty()) {
-    AttributeList AS = CS.getAttributes();
-    LLVMContext &Ctx = CS.getInstruction()->getContext();
+    AttributeList AS = Call.getAttributes();
+    LLVMContext &Ctx = Call.getContext();
     AS = AS.addParamAttribute(Ctx, ArgNos,
                               Attribute::get(Ctx, Attribute::NonNull));
-    CS.setAttributes(AS);
+    Call.setAttributes(AS);
     Changed = true;
   }
 
   // If the callee is a pointer to a function, attempt to move any casts to the
   // arguments of the call/invoke.
-  Value *Callee = CS.getCalledValue();
-  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
+  Value *Callee = Call.getCalledValue();
+  if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
     return nullptr;
 
   if (Function *CalleeF = dyn_cast<Function>(Callee)) {
     // Remove the convergent attr on calls when the callee is not convergent.
-    if (CS.isConvergent() && !CalleeF->isConvergent() &&
+    if (Call.isConvergent() && !CalleeF->isConvergent() &&
         !CalleeF->isIntrinsic()) {
-      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr "
-                        << CS.getInstruction() << "\n");
-      CS.setNotConvergent();
-      return CS.getInstruction();
+      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
+                        << "\n");
+      Call.setNotConvergent();
+      return &Call;
     }
 
     // If the call and callee calling conventions don't match, this call must
     // be unreachable, as the call is undefined.
-    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
+    if (CalleeF->getCallingConv() != Call.getCallingConv() &&
         // Only do this for calls to a function with a body. A prototype may
         // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
-      Instruction *OldCall = CS.getInstruction();
+      Instruction *OldCall = &Call;
       new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                     UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                     OldCall);
@@ -4183,15 +4182,14 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
   }
 
   if ((isa<ConstantPointerNull>(Callee) &&
-       !NullPointerIsDefined(CS.getInstruction()->getFunction())) ||
+       !NullPointerIsDefined(Call.getFunction())) ||
       isa<UndefValue>(Callee)) {
-    // If CS does not return void then replaceAllUsesWith undef.
+    // If Call does not return void then replaceAllUsesWith undef.
     // This allows ValueHandlers and custom metadata to adjust itself.
-    if (!CS.getInstruction()->getType()->isVoidTy())
-      replaceInstUsesWith(*CS.getInstruction(),
-                          UndefValue::get(CS.getInstruction()->getType()));
+    if (!Call.getType()->isVoidTy())
+      replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
 
-    if (isa<InvokeInst>(CS.getInstruction())) {
+    if (isa<InvokeInst>(Call)) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }
@@ -4201,13 +4199,13 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
     // that we can't modify the CFG here.
     new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                   UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
-                  CS.getInstruction());
+                  &Call);
 
-    return eraseInstFromFunction(*CS.getInstruction());
+    return eraseInstFromFunction(Call);
   }
 
   if (IntrinsicInst *II = findInitTrampoline(Callee))
-    return transformCallThroughTrampoline(CS, II);
+    return transformCallThroughTrampoline(Call, *II);
 
   PointerType *PTy = cast<PointerType>(Callee->getType());
   FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
@@ -4215,39 +4213,39 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
     int ix = FTy->getNumParams();
     // See if we can optimize any arguments passed through the varargs area of
     // the call.
-    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
-           E = CS.arg_end(); I != E; ++I, ++ix) {
+    for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
+         I != E; ++I, ++ix) {
       CastInst *CI = dyn_cast<CastInst>(*I);
-      if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
+      if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
         *I = CI->getOperand(0);
         Changed = true;
       }
     }
   }
 
-  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
+  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
     // Inline asm calls cannot throw - mark them 'nounwind'.
-    CS.setDoesNotThrow();
+    Call.setDoesNotThrow();
     Changed = true;
   }
 
   // Try to optimize the call if possible, we require DataLayout for most of
   // this. None of these calls are seen as possibly dead so go ahead and
   // delete the instruction now.
-  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
+  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
     Instruction *I = tryOptimizeCall(CI);
     // If we changed something return the result, etc. Otherwise let
     // the fallthrough check.
     if (I) return eraseInstFromFunction(*I);
   }
 
-  return Changed ? CS.getInstruction() : nullptr;
+  return Changed ? &Call : nullptr;
 }
 
 /// If the callee is a constexpr cast of a function, attempt to move the cast to
 /// the arguments of the call/invoke.
-bool InstCombiner::transformConstExprCastCall(CallSite CS) {
-  auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
+  auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
   if (!Callee)
     return false;
 
@@ -4261,11 +4259,11 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
   // prototype with the exception of pointee types. The code below doesn't
   // implement that, so we can't do this transform.
   // TODO: Do the transform if it only requires adding pointer casts.
-  if (CS.isMustTailCall())
+  if (Call.isMustTailCall())
     return false;
 
-  Instruction *Caller = CS.getInstruction();
-  const AttributeList &CallerPAL = CS.getAttributes();
+  Instruction *Caller = &Call;
+  const AttributeList &CallerPAL = Call.getAttributes();
 
   // Okay, this is a cast from a function to a different type. Unless doing so
   // would cause a type conversion of one of our arguments, change this call to
@@ -4296,7 +4294,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
       return false; // Attribute not compatible with transformed value.
   }
 
-  // If the callsite is an invoke instruction, and the return value is used by
+  // If the callbase is an invoke instruction, and the return value is used by
   // a PHI node in a successor, we cannot change the return type of the call
   // because there is no place to put the cast instruction (without breaking
   // the critical edge). Bail out in this case.
@@ -4309,7 +4307,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
             return false;
   }
 
-  unsigned NumActualArgs = CS.arg_size();
+  unsigned NumActualArgs = Call.arg_size();
   unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
 
   // Prevent us turning:
@@ -4324,7 +4322,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
       Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
     return false;
 
-  CallSite::arg_iterator AI = CS.arg_begin();
+  auto AI = Call.arg_begin();
   for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
     Type *ParamTy = FT->getParamType(i);
     Type *ActTy = (*AI)->getType();
@@ -4336,7 +4334,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
             .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
       return false; // Attribute not compatible with transformed value.
 
-    if (CS.isInAllocaArgument(i))
+    if (Call.isInAllocaArgument(i))
      return false; // Cannot transform to and from inalloca.
 
    // If the parameter is passed as a byval argument, then we have to have a
@@ -4361,7 +4359,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
     // If the callee is just a declaration, don't change the varargsness of the
     // call. We don't want to introduce a varargs call where one doesn't
     // already exist.
-    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
+    PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
     if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
       return false;
 
@@ -4400,7 +4398,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
     // with the existing attributes. Wipe out any problematic attributes.
     RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
 
-  AI = CS.arg_begin();
+  AI = Call.arg_begin();
   for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
     Type *ParamTy = FT->getParamType(i);
 
@@ -4454,29 +4452,29 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
       Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
 
   SmallVector<OperandBundleDef, 1> OpBundles;
-  CS.getOperandBundlesAsDefs(OpBundles);
+  Call.getOperandBundlesAsDefs(OpBundles);
 
-  CallSite NewCS;
+  CallBase *NewCall;
   if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
-    NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
-                                 II->getUnwindDest(), Args, OpBundles);
+    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
+                                   II->getUnwindDest(), Args, OpBundles);
  } else {
-    NewCS = Builder.CreateCall(Callee, Args, OpBundles);
-    cast<CallInst>(NewCS.getInstruction())
-        ->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
+    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
+    cast<CallInst>(NewCall)->setTailCallKind(
+        cast<CallInst>(Caller)->getTailCallKind());
  }
-  NewCS->takeName(Caller);
-  NewCS.setCallingConv(CS.getCallingConv());
-  NewCS.setAttributes(NewCallerPAL);
+  NewCall->takeName(Caller);
+  NewCall->setCallingConv(Call.getCallingConv());
+  NewCall->setAttributes(NewCallerPAL);
 
   // Preserve the weight metadata for the new call instruction. The metadata
   // is used by SamplePGO to check callsite's hotness.
   uint64_t W;
   if (Caller->extractProfTotalWeight(W))
-    NewCS->setProfWeight(W);
+    NewCall->setProfWeight(W);
 
   // Insert a cast of the return type as necessary.
-  Instruction *NC = NewCS.getInstruction();
+  Instruction *NC = NewCall;
   Value *NV = NC;
   if (OldRetTy != NV->getType() && !Caller->use_empty()) {
     if (!NV->getType()->isVoidTy()) {
@@ -4516,22 +4514,19 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
 /// Turn a call to a function created by init_trampoline / adjust_trampoline
 /// intrinsic pair into a direct call to the underlying function.
 Instruction *
-InstCombiner::transformCallThroughTrampoline(CallSite CS,
-                                             IntrinsicInst *Tramp) {
-  Value *Callee = CS.getCalledValue();
+InstCombiner::transformCallThroughTrampoline(CallBase &Call,
+                                             IntrinsicInst &Tramp) {
+  Value *Callee = Call.getCalledValue();
   PointerType *PTy = cast<PointerType>(Callee->getType());
   FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
-  AttributeList Attrs = CS.getAttributes();
+  AttributeList Attrs = Call.getAttributes();
 
   // If the call already has the 'nest' attribute somewhere then give up -
   // otherwise 'nest' would occur twice after splicing in the chain.
   if (Attrs.hasAttrSomewhere(Attribute::Nest))
     return nullptr;
 
-  assert(Tramp &&
-         "transformCallThroughTrampoline called with incorrect CallSite.");
-
-  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
+  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
   FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());
 
   AttributeList NestAttrs = NestF->getAttributes();
@@ -4554,22 +4549,21 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
   }
 
   if (NestTy) {
-    Instruction *Caller = CS.getInstruction();
     std::vector<Value*> NewArgs;
     std::vector<AttributeSet> NewArgAttrs;
-    NewArgs.reserve(CS.arg_size() + 1);
-    NewArgAttrs.reserve(CS.arg_size());
+    NewArgs.reserve(Call.arg_size() + 1);
+    NewArgAttrs.reserve(Call.arg_size());
 
     // Insert the nest argument into the call argument list, which may
     // mean appending it. Likewise for attributes.
     {
       unsigned ArgNo = 0;
-      CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
+      auto I = Call.arg_begin(), E = Call.arg_end();
       do {
         if (ArgNo == NestArgNo) {
           // Add the chain argument and attributes.
-          Value *NestVal = Tramp->getArgOperand(2);
+          Value *NestVal = Tramp.getArgOperand(2);
          if (NestVal->getType() != NestTy)
            NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
          NewArgs.push_back(NestVal);
          NewArgAttrs.push_back(NestAttr);
@@ -4631,10 +4625,10 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
                            Attrs.getRetAttributes(), NewArgAttrs);
 
     SmallVector<OperandBundleDef, 1> OpBundles;
-    CS.getOperandBundlesAsDefs(OpBundles);
+    Call.getOperandBundlesAsDefs(OpBundles);
 
     Instruction *NewCaller;
-    if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
+    if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
       NewCaller = InvokeInst::Create(NewCallee,
                                      II->getNormalDest(), II->getUnwindDest(),
                                      NewArgs, OpBundles);
@@ -4643,12 +4637,12 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
     } else {
       NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
       cast<CallInst>(NewCaller)->setTailCallKind(
-          cast<CallInst>(Caller)->getTailCallKind());
+          cast<CallInst>(Call).getTailCallKind());
       cast<CallInst>(NewCaller)->setCallingConv(
-          cast<CallInst>(Caller)->getCallingConv());
+          cast<CallInst>(Call).getCallingConv());
       cast<CallInst>(NewCaller)->setAttributes(NewPAL);
     }
-    NewCaller->setDebugLoc(Caller->getDebugLoc());
+    NewCaller->setDebugLoc(Call.getDebugLoc());
 
     return NewCaller;
   }
@@ -4660,6 +4654,6 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
 
   Constant *NewCallee = NestF->getType() == PTy ?
     NestF : ConstantExpr::getBitCast(NestF, PTy);
-  CS.setCalledFunction(NewCallee);
-  return CS.getInstruction();
+  Call.setCalledFunction(NewCallee);
+  return &Call;
 }
diff --git a/lib/Transforms/InstCombine/InstCombineInternal.h b/lib/Transforms/InstCombine/InstCombineInternal.h
index 51e182ef187..a1c666d48f4 100644
--- a/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -52,7 +52,6 @@ namespace llvm {
 
 class APInt;
 class AssumptionCache;
-class CallSite;
 class DataLayout;
 class DominatorTree;
 class GEPOperator;
@@ -467,11 +466,11 @@ private:
                                  Instruction &CtxI, Value *&OperationResult,
                                  Constant *&OverflowResult);
 
-  Instruction *visitCallSite(CallSite CS);
+  Instruction *visitCallBase(CallBase &Call);
   Instruction *tryOptimizeCall(CallInst *CI);
-  bool transformConstExprCastCall(CallSite CS);
-  Instruction *transformCallThroughTrampoline(CallSite CS,
-                                              IntrinsicInst *Tramp);
+  bool transformConstExprCastCall(CallBase &Call);
+  Instruction *transformCallThroughTrampoline(CallBase &Call,
+                                              IntrinsicInst &Tramp);
 
   /// Transform (zext icmp) to bitwise / integer operations in order to
   /// eliminate it.
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index f2cb4775f47..a7d86080451 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -88,29 +88,29 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
       continue;
    }
 
-    if (auto CS = CallSite(I)) {
+    if (auto *Call = dyn_cast<CallBase>(I)) {
       // If this is the function being called then we treat it like a load and
       // ignore it.
-      if (CS.isCallee(&U))
+      if (Call->isCallee(&U))
         continue;
 
-      unsigned DataOpNo = CS.getDataOperandNo(&U);
-      bool IsArgOperand = CS.isArgOperand(&U);
+      unsigned DataOpNo = Call->getDataOperandNo(&U);
+      bool IsArgOperand = Call->isArgOperand(&U);
 
       // Inalloca arguments are clobbered by the call.
-      if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
+      if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
         return false;
 
       // If this is a readonly/readnone call site, then we know it is just a
       // load (but one that potentially returns the value itself), so we can
       // ignore it if we know that the value isn't captured.
-      if (CS.onlyReadsMemory() &&
-          (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
+      if (Call->onlyReadsMemory() &&
+          (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
         continue;
 
       // If this is being passed as a byval argument, the caller is making a
       // copy, so it is only a read of the alloca.
-      if (IsArgOperand && CS.isByValArgument(DataOpNo))
+      if (IsArgOperand && Call->isByValArgument(DataOpNo))
         continue;
     }
 
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index ae3f081f508..03855f6bac4 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -1180,7 +1180,8 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
     IRBuilder<> B(&I);
     IRBuilder<>::FastMathFlagGuard FMFGuard(B);
     B.setFastMathFlags(I.getFastMathFlags());
-    AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes();
+    AttributeList Attrs =
+        cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
     Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                       LibFunc_tanl, B, Attrs);
     if (IsCot)
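
Usage note - an illustrative sketch only, not part of the patch above. It shows
how a transform might query a call site through the CallBase interface
(including the getDataOperandNo and isMustTailCall helpers added here) instead
of wrapping the instruction in a CallSite. Both helper function names below are
hypothetical.

  #include "llvm/IR/InstrTypes.h"   // CallBase
  #include "llvm/IR/Instructions.h" // CallInst, InvokeInst

  using namespace llvm;

  // Hypothetical helper: true if U is an argument of a read-only call that does
  // not capture that argument, mirroring the checks now done directly on
  // CallBase in InstCombineLoadStoreAlloca.cpp.
  static bool isReadOnlyNonCapturingUse(const Use &U) {
    const auto *Call = dyn_cast<CallBase>(U.getUser());
    if (!Call || !Call->isArgOperand(&U))
      return false;
    unsigned ArgNo = Call->getDataOperandNo(&U);
    return Call->onlyReadsMemory() && Call->doesNotCapture(ArgNo);
  }

  // Hypothetical helper: true if Call can simply be erased - not an invoke (so
  // the CFG is unchanged), not a musttail call, and its result is unused.
  static bool isTriviallyErasableCall(const CallBase &Call) {
    return !isa<InvokeInst>(Call) && !Call.isMustTailCall() && Call.use_empty();
  }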