
[CallSite removal] Remove CallSite uses from InstCombine.

Reviewers: chandlerc

Reviewed By: chandlerc

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D57494

llvm-svn: 352771
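
The pattern is mechanical throughout the patch: code that took a CallSite wrapper by value now takes a CallBase reference, and the CS.getInstruction() unwrapping disappears. A minimal before/after sketch on a hypothetical helper (not code from this patch):

// Before: CallSite wraps a CallInst or InvokeInst and must be unwrapped.
static bool callerIsNoUnwind(CallSite CS) {
  return CS.getInstruction()->getFunction()->hasFnAttribute(Attribute::NoUnwind);
}

// After: CallBase is the common base class of CallInst and InvokeInst,
// so the instruction is used directly.
static bool callerIsNoUnwind(const CallBase &Call) {
  return Call.getFunction()->hasFnAttribute(Attribute::NoUnwind);
}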
Author: Craig Topper
Date: 2019-01-31 17:23:29 +00:00
Parent: 2b3eb5adcc
Commit: d6bb2bcfab
6 changed files with 130 additions and 101 deletions

include/llvm/IR/InstrTypes.h

@@ -1095,6 +1095,19 @@ public:
return isDataOperand(&UI.getUse());
}
+/// Given a value use iterator, return the data operand corresponding to it.
+/// Iterator must actually correspond to a data operand.
+unsigned getDataOperandNo(Value::const_user_iterator UI) const {
+return getDataOperandNo(&UI.getUse());
+}
+/// Given a use for a data operand, get the data operand number that
+/// corresponds to it.
+unsigned getDataOperandNo(const Use *U) const {
+assert(isDataOperand(U) && "Data operand # out of range!");
+return U - data_operands_begin();
+}
/// Return the iterator pointing to the beginning of the argument list.
User::op_iterator arg_begin() { return op_begin(); }
User::const_op_iterator arg_begin() const {
@@ -1198,6 +1211,13 @@ public:
return const_cast<CallBase *>(this)->getCaller();
}
+/// Tests if this call site must be tail call optimized. Only a CallInst can
+/// be tail call optimized.
+bool isMustTailCall() const;
+/// Tests if this call site is marked as a tail call.
+bool isTailCall() const;
/// Returns the intrinsic ID of the intrinsic called or
/// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
/// this is an indirect call.
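
A minimal sketch of the new getDataOperandNo overloads in use (helper name hypothetical; it mirrors the user scan in the InstCombine change further down):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Record the data operand positions at which V is passed to a call or
// invoke. The callee operand is not a data operand, so isDataOperand
// filters it out before the number is computed.
static void collectDataOperandNos(Value *V, SmallVectorImpl<unsigned> &Out) {
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ++UI)
    if (auto *Call = dyn_cast<CallBase>(*UI))
      if (Call->isDataOperand(&UI.getUse()))
        Out.push_back(Call->getDataOperandNo(UI)); // new iterator overload
}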

lib/IR/Instructions.cpp

@@ -266,6 +266,21 @@ bool CallBase::isIndirectCall() const {
return true;
}
+/// Tests if this call site must be tail call optimized. Only a CallInst can
+/// be tail call optimized.
+bool CallBase::isMustTailCall() const {
+if (auto *CI = dyn_cast<CallInst>(this))
+return CI->isMustTailCall();
+return false;
+}
+/// Tests if this call site is marked as a tail call.
+bool CallBase::isTailCall() const {
+if (auto *CI = dyn_cast<CallInst>(this))
+return CI->isTailCall();
+return false;
+}
Intrinsic::ID CallBase::getIntrinsicID() const {
if (auto *F = getCalledFunction())
return F->getIntrinsicID();
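
With these forwarders, CallBase-generic code can ask tail-call questions without first testing for CallInst; an InvokeInst simply answers false. A small hypothetical guard built on them:

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Transforms that reshape a call's argument list must leave musttail
// calls alone; the check is now valid on any CallBase.
static bool mayReshapeArguments(const CallBase &Call) {
  return !Call.isMustTailCall(); // trivially true for invokes
}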

lib/Transforms/InstCombine/InstCombineCalls.cpp

@@ -27,7 +27,6 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -1788,7 +1787,7 @@ static Instruction *canonicalizeConstantArg0ToArg1(CallInst &Call) {
}
/// CallInst simplification. This mostly only handles folding of intrinsic
-/// instructions. For normal calls, it allows visitCallSite to do the heavy
+/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
@@ -1805,10 +1804,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
-if (!II) return visitCallSite(&CI);
+if (!II) return visitCallBase(CI);
// Intrinsics cannot occur in an invoke, so handle them here instead of in
-// visitCallSite.
+// visitCallBase.
if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
bool Changed = false;
@@ -3962,7 +3961,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
}
}
-return visitCallSite(II);
+return visitCallBase(*II);
}
// Fence instruction simplification
@@ -3977,12 +3976,12 @@ Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
// InvokeInst simplification
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
-return visitCallSite(&II);
+return visitCallBase(II);
}
/// If this cast does not affect the value passed through the varargs area, we
/// can eliminate the use of the cast.
-static bool isSafeToEliminateVarargsCast(const CallSite CS,
+static bool isSafeToEliminateVarargsCast(const CallBase &Call,
const DataLayout &DL,
const CastInst *const CI,
const int ix) {
@@ -3994,13 +3993,13 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
// TODO: This is probably something which should be expanded to all
// intrinsics since the entire point of intrinsics is that
// they are understandable by the optimizer.
-if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
+if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
return false;
// The size of ByVal or InAlloca arguments is derived from the type, so we
// can't change to a type with a different size. If the size were
// passed explicitly we could avoid this check.
-if (!CS.isByValOrInAllocaArgument(ix))
+if (!Call.isByValOrInAllocaArgument(ix))
return true;
Type* SrcTy =
@@ -4109,9 +4108,9 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) {
}
/// Improvements for call and invoke instructions.
-Instruction *InstCombiner::visitCallSite(CallSite CS) {
-if (isAllocLikeFn(CS.getInstruction(), &TLI))
-return visitAllocSite(*CS.getInstruction());
+Instruction *InstCombiner::visitCallBase(CallBase &Call) {
+if (isAllocLikeFn(&Call, &TLI))
+return visitAllocSite(Call);
bool Changed = false;
@@ -4121,49 +4120,49 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
SmallVector<unsigned, 4> ArgNos;
unsigned ArgNo = 0;
-for (Value *V : CS.args()) {
+for (Value *V : Call.args()) {
if (V->getType()->isPointerTy() &&
-!CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
-isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
+!Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
+isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
ArgNos.push_back(ArgNo);
ArgNo++;
}
-assert(ArgNo == CS.arg_size() && "sanity check");
+assert(ArgNo == Call.arg_size() && "sanity check");
if (!ArgNos.empty()) {
-AttributeList AS = CS.getAttributes();
-LLVMContext &Ctx = CS.getInstruction()->getContext();
+AttributeList AS = Call.getAttributes();
+LLVMContext &Ctx = Call.getContext();
AS = AS.addParamAttribute(Ctx, ArgNos,
Attribute::get(Ctx, Attribute::NonNull));
-CS.setAttributes(AS);
+Call.setAttributes(AS);
Changed = true;
}
// If the callee is a pointer to a function, attempt to move any casts to the
// arguments of the call/invoke.
-Value *Callee = CS.getCalledValue();
-if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
+Value *Callee = Call.getCalledValue();
+if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
return nullptr;
if (Function *CalleeF = dyn_cast<Function>(Callee)) {
// Remove the convergent attr on calls when the callee is not convergent.
-if (CS.isConvergent() && !CalleeF->isConvergent() &&
+if (Call.isConvergent() && !CalleeF->isConvergent() &&
!CalleeF->isIntrinsic()) {
-LLVM_DEBUG(dbgs() << "Removing convergent attr from instr "
-<< CS.getInstruction() << "\n");
-CS.setNotConvergent();
-return CS.getInstruction();
+LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
+<< "\n");
+Call.setNotConvergent();
+return &Call;
}
// If the call and callee calling conventions don't match, this call must
// be unreachable, as the call is undefined.
-if (CalleeF->getCallingConv() != CS.getCallingConv() &&
+if (CalleeF->getCallingConv() != Call.getCallingConv() &&
// Only do this for calls to a function with a body. A prototype may
// not actually end up matching the implementation's calling conv for a
// variety of reasons (e.g. it may be written in assembly).
!CalleeF->isDeclaration()) {
-Instruction *OldCall = CS.getInstruction();
+Instruction *OldCall = &Call;
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
OldCall);
@@ -4183,15 +4182,14 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
}
if ((isa<ConstantPointerNull>(Callee) &&
-!NullPointerIsDefined(CS.getInstruction()->getFunction())) ||
+!NullPointerIsDefined(Call.getFunction())) ||
isa<UndefValue>(Callee)) {
-// If CS does not return void then replaceAllUsesWith undef.
+// If Call does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust itself.
-if (!CS.getInstruction()->getType()->isVoidTy())
-replaceInstUsesWith(*CS.getInstruction(),
-UndefValue::get(CS.getInstruction()->getType()));
+if (!Call.getType()->isVoidTy())
+replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
-if (isa<InvokeInst>(CS.getInstruction())) {
+if (isa<InvokeInst>(Call)) {
// Can't remove an invoke because we cannot change the CFG.
return nullptr;
}
@@ -4201,13 +4199,13 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// that we can't modify the CFG here.
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
-CS.getInstruction());
+&Call);
-return eraseInstFromFunction(*CS.getInstruction());
+return eraseInstFromFunction(Call);
}
if (IntrinsicInst *II = findInitTrampoline(Callee))
-return transformCallThroughTrampoline(CS, II);
+return transformCallThroughTrampoline(Call, *II);
PointerType *PTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
@@ -4215,39 +4213,39 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
int ix = FTy->getNumParams();
// See if we can optimize any arguments passed through the varargs area of
// the call.
-for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
-E = CS.arg_end(); I != E; ++I, ++ix) {
+for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
+I != E; ++I, ++ix) {
CastInst *CI = dyn_cast<CastInst>(*I);
-if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
+if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
*I = CI->getOperand(0);
Changed = true;
}
}
}
-if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
+if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
// Inline asm calls cannot throw - mark them 'nounwind'.
-CS.setDoesNotThrow();
+Call.setDoesNotThrow();
Changed = true;
}
// Try to optimize the call if possible, we require DataLayout for most of
// this. None of these calls are seen as possibly dead so go ahead and
// delete the instruction now.
-if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
+if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
Instruction *I = tryOptimizeCall(CI);
// If we changed something return the result, etc. Otherwise let
// the fallthrough check.
if (I) return eraseInstFromFunction(*I);
}
-return Changed ? CS.getInstruction() : nullptr;
+return Changed ? &Call : nullptr;
}
/// If the callee is a constexpr cast of a function, attempt to move the cast to
/// the arguments of the call/invoke.
-bool InstCombiner::transformConstExprCastCall(CallSite CS) {
-auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
+auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
if (!Callee)
return false;
@@ -4261,11 +4259,11 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// prototype with the exception of pointee types. The code below doesn't
// implement that, so we can't do this transform.
// TODO: Do the transform if it only requires adding pointer casts.
-if (CS.isMustTailCall())
+if (Call.isMustTailCall())
return false;
-Instruction *Caller = CS.getInstruction();
-const AttributeList &CallerPAL = CS.getAttributes();
+Instruction *Caller = &Call;
+const AttributeList &CallerPAL = Call.getAttributes();
// Okay, this is a cast from a function to a different type. Unless doing so
// would cause a type conversion of one of our arguments, change this call to
@@ -4296,7 +4294,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
return false; // Attribute not compatible with transformed value.
}
-// If the callsite is an invoke instruction, and the return value is used by
+// If the callbase is an invoke instruction, and the return value is used by
// a PHI node in a successor, we cannot change the return type of the call
// because there is no place to put the cast instruction (without breaking
// the critical edge). Bail out in this case.
@@ -4309,7 +4307,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
return false;
}
-unsigned NumActualArgs = CS.arg_size();
+unsigned NumActualArgs = Call.arg_size();
unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
// Prevent us turning:
@@ -4324,7 +4322,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
return false;
-CallSite::arg_iterator AI = CS.arg_begin();
+auto AI = Call.arg_begin();
for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
Type *ParamTy = FT->getParamType(i);
Type *ActTy = (*AI)->getType();
@@ -4336,7 +4334,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
.overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
return false; // Attribute not compatible with transformed value.
-if (CS.isInAllocaArgument(i))
+if (Call.isInAllocaArgument(i))
return false; // Cannot transform to and from inalloca.
// If the parameter is passed as a byval argument, then we have to have a
@@ -4361,7 +4359,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// If the callee is just a declaration, don't change the varargsness of the
// call. We don't want to introduce a varargs call where one doesn't
// already exist.
-PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
+PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
return false;
@@ -4400,7 +4398,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// with the existing attributes. Wipe out any problematic attributes.
RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
-AI = CS.arg_begin();
+AI = Call.arg_begin();
for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
Type *ParamTy = FT->getParamType(i);
@@ -4454,29 +4452,29 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
SmallVector<OperandBundleDef, 1> OpBundles;
-CS.getOperandBundlesAsDefs(OpBundles);
+Call.getOperandBundlesAsDefs(OpBundles);
-CallSite NewCS;
+CallBase *NewCall;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
-NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
-II->getUnwindDest(), Args, OpBundles);
+NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
+II->getUnwindDest(), Args, OpBundles);
} else {
-NewCS = Builder.CreateCall(Callee, Args, OpBundles);
-cast<CallInst>(NewCS.getInstruction())
-->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
+NewCall = Builder.CreateCall(Callee, Args, OpBundles);
+cast<CallInst>(NewCall)->setTailCallKind(
+cast<CallInst>(Caller)->getTailCallKind());
}
-NewCS->takeName(Caller);
-NewCS.setCallingConv(CS.getCallingConv());
-NewCS.setAttributes(NewCallerPAL);
+NewCall->takeName(Caller);
+NewCall->setCallingConv(Call.getCallingConv());
+NewCall->setAttributes(NewCallerPAL);
// Preserve the weight metadata for the new call instruction. The metadata
// is used by SamplePGO to check callsite's hotness.
uint64_t W;
if (Caller->extractProfTotalWeight(W))
-NewCS->setProfWeight(W);
+NewCall->setProfWeight(W);
// Insert a cast of the return type as necessary.
-Instruction *NC = NewCS.getInstruction();
+Instruction *NC = NewCall;
Value *NV = NC;
if (OldRetTy != NV->getType() && !Caller->use_empty()) {
if (!NV->getType()->isVoidTy()) {
@@ -4516,22 +4514,19 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
/// Turn a call to a function created by init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
Instruction *
-InstCombiner::transformCallThroughTrampoline(CallSite CS,
-IntrinsicInst *Tramp) {
-Value *Callee = CS.getCalledValue();
+InstCombiner::transformCallThroughTrampoline(CallBase &Call,
+IntrinsicInst &Tramp) {
+Value *Callee = Call.getCalledValue();
PointerType *PTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
-AttributeList Attrs = CS.getAttributes();
+AttributeList Attrs = Call.getAttributes();
// If the call already has the 'nest' attribute somewhere then give up -
// otherwise 'nest' would occur twice after splicing in the chain.
if (Attrs.hasAttrSomewhere(Attribute::Nest))
return nullptr;
-assert(Tramp &&
-"transformCallThroughTrampoline called with incorrect CallSite.");
-Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
+Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());
AttributeList NestAttrs = NestF->getAttributes();
@@ -4554,22 +4549,21 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
}
if (NestTy) {
-Instruction *Caller = CS.getInstruction();
std::vector<Value*> NewArgs;
std::vector<AttributeSet> NewArgAttrs;
-NewArgs.reserve(CS.arg_size() + 1);
-NewArgAttrs.reserve(CS.arg_size());
+NewArgs.reserve(Call.arg_size() + 1);
+NewArgAttrs.reserve(Call.arg_size());
// Insert the nest argument into the call argument list, which may
// mean appending it. Likewise for attributes.
{
unsigned ArgNo = 0;
-CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
+auto I = Call.arg_begin(), E = Call.arg_end();
do {
if (ArgNo == NestArgNo) {
// Add the chain argument and attributes.
-Value *NestVal = Tramp->getArgOperand(2);
+Value *NestVal = Tramp.getArgOperand(2);
if (NestVal->getType() != NestTy)
NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
NewArgs.push_back(NestVal);
@@ -4631,10 +4625,10 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
Attrs.getRetAttributes(), NewArgAttrs);
SmallVector<OperandBundleDef, 1> OpBundles;
-CS.getOperandBundlesAsDefs(OpBundles);
+Call.getOperandBundlesAsDefs(OpBundles);
Instruction *NewCaller;
-if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
+if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
NewCaller = InvokeInst::Create(NewCallee,
II->getNormalDest(), II->getUnwindDest(),
NewArgs, OpBundles);
@@ -4643,12 +4637,12 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
} else {
NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
cast<CallInst>(NewCaller)->setTailCallKind(
-cast<CallInst>(Caller)->getTailCallKind());
+cast<CallInst>(Call).getTailCallKind());
cast<CallInst>(NewCaller)->setCallingConv(
-cast<CallInst>(Caller)->getCallingConv());
+cast<CallInst>(Call).getCallingConv());
cast<CallInst>(NewCaller)->setAttributes(NewPAL);
}
-NewCaller->setDebugLoc(Caller->getDebugLoc());
+NewCaller->setDebugLoc(Call.getDebugLoc());
return NewCaller;
}
@@ -4660,6 +4654,6 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
Constant *NewCallee =
NestF->getType() == PTy ? NestF :
ConstantExpr::getBitCast(NestF, PTy);
-CS.setCalledFunction(NewCallee);
-return CS.getInstruction();
+Call.setCalledFunction(NewCallee);
+return &Call;
}
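
In condensed form, the visitor flow in this file after the change looks as follows (a sketch that elides all the intrinsic folds, not the full code): both concrete visitors funnel into visitCallBase, which now takes the instruction by reference instead of through a CallSite.

Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  // ... intrinsic-specific folds elided ...
  return visitCallBase(CI); // CallInst is-a CallBase
}

Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallBase(II); // InvokeInst is-a CallBase
}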

lib/Transforms/InstCombine/InstCombineInternal.h

@@ -52,7 +52,6 @@ namespace llvm {
class APInt;
class AssumptionCache;
-class CallSite;
class DataLayout;
class DominatorTree;
class GEPOperator;
@@ -467,11 +466,11 @@ private:
Instruction &CtxI, Value *&OperationResult,
Constant *&OverflowResult);
-Instruction *visitCallSite(CallSite CS);
+Instruction *visitCallBase(CallBase &Call);
Instruction *tryOptimizeCall(CallInst *CI);
-bool transformConstExprCastCall(CallSite CS);
-Instruction *transformCallThroughTrampoline(CallSite CS,
-IntrinsicInst *Tramp);
+bool transformConstExprCastCall(CallBase &Call);
+Instruction *transformCallThroughTrampoline(CallBase &Call,
+IntrinsicInst &Tramp);
/// Transform (zext icmp) to bitwise / integer operations in order to
/// eliminate it.

lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp

@@ -88,29 +88,29 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
continue;
}
-if (auto CS = CallSite(I)) {
+if (auto *Call = dyn_cast<CallBase>(I)) {
// If this is the function being called then we treat it like a load and
// ignore it.
-if (CS.isCallee(&U))
+if (Call->isCallee(&U))
continue;
-unsigned DataOpNo = CS.getDataOperandNo(&U);
-bool IsArgOperand = CS.isArgOperand(&U);
+unsigned DataOpNo = Call->getDataOperandNo(&U);
+bool IsArgOperand = Call->isArgOperand(&U);
// Inalloca arguments are clobbered by the call.
-if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
+if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
return false;
// If this is a readonly/readnone call site, then we know it is just a
// load (but one that potentially returns the value itself), so we can
// ignore it if we know that the value isn't captured.
-if (CS.onlyReadsMemory() &&
-(CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
+if (Call->onlyReadsMemory() &&
+(Call->use_empty() || Call->doesNotCapture(DataOpNo)))
continue;
// If this is being passed as a byval argument, the caller is making a
// copy, so it is only a read of the alloca.
-if (IsArgOperand && CS.isByValArgument(DataOpNo))
+if (IsArgOperand && Call->isByValArgument(DataOpNo))
continue;
continue;
}
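
This hunk also shows the replacement for the old nullable-wrapper idiom: "if (auto CS = CallSite(I))" relied on CallSite converting to false for non-call instructions, whereas call-ness is now established explicitly with dyn_cast. Condensed into a hypothetical predicate (U is a use of the scanned value, I is U's user):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Does instruction I use the value behind U only as a benign, non-captured
// call operand? (Condensed from the alloca scan above.)
static bool isBenignCallUse(Instruction *I, Use &U) {
  auto *Call = dyn_cast<CallBase>(I); // was: if (auto CS = CallSite(I))
  if (!Call)
    return false;
  if (Call->isCallee(&U))
    return true; // calling through the pointer reads it like a load
  unsigned DataOpNo = Call->getDataOperandNo(&U);
  return Call->onlyReadsMemory() &&
         (Call->use_empty() || Call->doesNotCapture(DataOpNo));
}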

lib/Transforms/InstCombine/InstCombineMulDivRem.cpp

@@ -1180,7 +1180,8 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
IRBuilder<> B(&I);
IRBuilder<>::FastMathFlagGuard FMFGuard(B);
B.setFastMathFlags(I.getFastMathFlags());
-AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes();
+AttributeList Attrs =
+cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
LibFunc_tanl, B, Attrs);
if (IsCot)
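
The final hunk swaps a one-off CallSite wrapper for cast<CallBase>; cast rather than dyn_cast is appropriate because the surrounding pattern match has already established that Op0 is a direct library call. Where that guarantee is absent, a guarded form would look like this (a sketch with a hypothetical helper name, not code from the patch):

#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Fetch the callee's attributes, or an empty list when V is not a
// direct call.
static AttributeList calledFnAttrs(Value *V) {
  if (auto *CB = dyn_cast<CallBase>(V))
    if (Function *F = CB->getCalledFunction())
      return F->getAttributes();
  return AttributeList();
}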