
llvm/ObjCARC: Split OptimizeIndividualCallImpl out of OptimizeIndividualCalls, NFC

Split out a helper function for the individual call optimizations and
skip useless calls to it (where the instruction is not an ARC
intrinsic).  Besides reducing indentation (and possibly speeding up
compile time in some small way), this prepares for an upcoming patch
that will add additional calls and expand out the `switch`.
Duncan P. N. Exon Smith 2019-11-17 16:51:34 -08:00
parent 9fd7df73f8
commit a7245c607c

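The refactoring is easiest to see in miniature. Below is a minimal, self-contained C++ sketch of the pattern the message describes, using hypothetical stand-in names (Kind, Inst, optimizeOne, optimizeAll) rather than the real pass types: the per-instruction work moves into a flat helper, and the driver loop filters out non-ARC-intrinsic instructions before calling it.

#include <vector>

// Hypothetical stand-ins for the pass's types; the real code dispatches on
// GetBasicARCInstKind(Inst) and operates on llvm::Instruction.
enum class Kind { Retain, Release, Autorelease, CallOrUser, User, None };
struct Inst { Kind K; };

// Analogue of OptimizeIndividualCallImpl: one flat function per instruction,
// which may assume the caller already filtered out non-ARC instructions.
static void optimizeOne(Inst &I) {
  switch (I.K) {
  default:
    break;
  case Kind::Retain:
    // ... the individual-call optimizations live here, one indent level deep.
    break;
  }
}

// Analogue of OptimizeIndividualCalls: skip useless calls to the helper when
// the instruction isn't an ARC intrinsic, then delegate.
static void optimizeAll(std::vector<Inst> &Insts) {
  for (Inst &I : Insts) {
    switch (I.K) {
    default:
      break;
    case Kind::CallOrUser:
    case Kind::User:
    case Kind::None:
      continue; // not an ARC intrinsic; don't call the helper
    }
    optimizeOne(I);
  }
}

In the real patch the filter is the same shape, and the helper uses `return` where the old inlined body used `continue`, which is most of what the diff below consists of.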

@@ -507,6 +507,10 @@ namespace {
void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
ARCInstKind &Class);
void OptimizeIndividualCalls(Function &F);
void
OptimizeIndividualCallImpl(Function &F,
DenseMap<BasicBlock *, ColorVector> &BlockColors,
Instruction *Inst, ARCInstKind Class);
void CheckForCFGHazards(const BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
@@ -789,280 +793,295 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");
// Some of the ARC calls can be deleted if their arguments are global
// variables that are inert in ARC.
if (IsNoopOnGlobal(Class)) {
Value *Opnd = Inst->getOperand(0);
if (auto *GV = dyn_cast<GlobalVariable>(Opnd->stripPointerCasts()))
if (GV->hasAttribute("objc_arc_inert")) {
if (!Inst->getType()->isVoidTy())
Inst->replaceAllUsesWith(Opnd);
Inst->eraseFromParent();
continue;
}
}
// Skip this loop if this instruction isn't itself an ARC intrinsic.
switch (Class) {
default: break;
// Delete no-op casts. These function calls have special semantics, but
// the semantics are entirely implemented via lowering in the front-end,
// so by the time they reach the optimizer, they are just no-op calls
// which return their argument.
//
// There are gray areas here, as the ability to cast reference-counted
// pointers to raw void* and back allows code to break ARC assumptions,
// however these are currently considered to be unimportant.
case ARCInstKind::NoopCast:
Changed = true;
++NumNoops;
LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
EraseInstruction(Inst);
continue;
// If the pointer-to-weak-pointer is null, it's undefined behavior.
case ARCInstKind::StoreWeak:
case ARCInstKind::LoadWeak:
case ARCInstKind::LoadWeakRetained:
case ARCInstKind::InitWeak:
case ARCInstKind::DestroyWeak: {
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(0))) {
Changed = true;
Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty),
CI);
Value *NewValue = UndefValue::get(CI->getType());
LLVM_DEBUG(
dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
"\nOld = "
<< *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
continue;
}
break;
}
case ARCInstKind::CopyWeak:
case ARCInstKind::MoveWeak: {
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(0)) ||
IsNullOrUndef(CI->getArgOperand(1))) {
Changed = true;
Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty),
CI);
Value *NewValue = UndefValue::get(CI->getType());
LLVM_DEBUG(
dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
"\nOld = "
<< *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
continue;
}
break;
}
case ARCInstKind::RetainRV:
if (OptimizeRetainRVCall(F, Inst))
continue;
break;
case ARCInstKind::AutoreleaseRV:
OptimizeAutoreleaseRVCall(F, Inst, Class);
break;
}
// objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
if (IsAutorelease(Class) && Inst->use_empty()) {
CallInst *Call = cast<CallInst>(Inst);
const Value *Arg = Call->getArgOperand(0);
Arg = FindSingleUseIdentifiedObject(Arg);
if (Arg) {
Changed = true;
++NumAutoreleases;
// Create the declaration lazily.
LLVMContext &C = Inst->getContext();
Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
Call);
NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
MDNode::get(C, None));
LLVM_DEBUG(
dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
"since x is otherwise unused.\nOld: "
<< *Call << "\nNew: " << *NewCall << "\n");
EraseInstruction(Call);
Inst = NewCall;
Class = ARCInstKind::Release;
}
}
// For functions which can never be passed stack arguments, add
// a tail keyword.
if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
Changed = true;
LLVM_DEBUG(
dbgs() << "Adding tail keyword to function since it can never be "
"passed stack args: "
<< *Inst << "\n");
cast<CallInst>(Inst)->setTailCall();
}
// Ensure that functions that can never have a "tail" keyword due to the
// semantics of ARC truly do not do so.
if (IsNeverTail(Class)) {
Changed = true;
LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
<< "\n");
cast<CallInst>(Inst)->setTailCall(false);
}
// Set nounwind as needed.
if (IsNoThrow(Class)) {
Changed = true;
LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: "
<< *Inst << "\n");
cast<CallInst>(Inst)->setDoesNotThrow();
}
if (!IsNoopOnNull(Class)) {
UsedInThisFunction |= 1 << unsigned(Class);
continue;
}
const Value *Arg = GetArgRCIdentityRoot(Inst);
// ARC calls with null are no-ops. Delete them.
if (IsNullOrUndef(Arg)) {
Changed = true;
++NumNoops;
LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
<< "\n");
EraseInstruction(Inst);
continue;
}
// Keep track of which of retain, release, autorelease, and retain_block
// are actually present in this function.
UsedInThisFunction |= 1 << unsigned(Class);
// If Arg is a PHI, and one or more incoming values to the
// PHI are null, and the call is control-equivalent to the PHI, and there
// are no relevant side effects between the PHI and the call, and the call
// is not a release that doesn't have the clang.imprecise_release tag, the
// call could be pushed up to just those paths with non-null incoming
// values. For now, don't bother splitting critical edges for this.
if (Class == ARCInstKind::Release &&
!Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
continue;
SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
Worklist.push_back(std::make_pair(Inst, Arg));
do {
std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
Inst = Pair.first;
Arg = Pair.second;
const PHINode *PN = dyn_cast<PHINode>(Arg);
if (!PN) continue;
// Determine if the PHI has any null operands, or any incoming
// critical edges.
bool HasNull = false;
bool HasCriticalEdges = false;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming =
GetRCIdentityRoot(PN->getIncomingValue(i));
if (IsNullOrUndef(Incoming))
HasNull = true;
else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
1) {
HasCriticalEdges = true;
break;
}
}
// If we have null operands and no critical edges, optimize.
if (HasCriticalEdges)
continue;
if (!HasNull)
continue;
SmallPtrSet<Instruction *, 4> DependingInstructions;
SmallPtrSet<const BasicBlock *, 4> Visited;
// Check that there is nothing that cares about the reference
// count between the call and the phi.
switch (Class) {
case ARCInstKind::Retain:
case ARCInstKind::RetainBlock:
// These can always be moved up.
break;
case ARCInstKind::Release:
// These can't be moved across things that care about the retain
// count.
FindDependencies(NeedsPositiveRetainCount, Arg, Inst->getParent(), Inst,
DependingInstructions, Visited, PA);
break;
case ARCInstKind::Autorelease:
// These can't be moved across autorelease pool scope boundaries.
FindDependencies(AutoreleasePoolBoundary, Arg, Inst->getParent(), Inst,
DependingInstructions, Visited, PA);
break;
case ARCInstKind::ClaimRV:
case ARCInstKind::RetainRV:
case ARCInstKind::AutoreleaseRV:
// Don't move these; the RV optimization depends on the autoreleaseRV
// being tail called, and the retainRV being immediately after a call
// (which might still happen if we get lucky with codegen layout, but
// it's not worth taking the chance).
continue;
default:
llvm_unreachable("Invalid dependence flavor");
}
if (DependingInstructions.size() != 1)
continue;
if (*DependingInstructions.begin() != PN)
continue;
Changed = true;
++NumPartialNoops;
// Clone the call into each predecessor that has a non-null value.
CallInst *CInst = cast<CallInst>(Inst);
Type *ParamTy = CInst->getArgOperand(0)->getType();
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
if (IsNullOrUndef(Incoming))
continue;
Value *Op = PN->getIncomingValue(i);
Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
CallInst *Clone = cast<CallInst>(
CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
if (Op->getType() != ParamTy)
Op = new BitCastInst(Op, ParamTy, "", InsertPos);
Clone->setArgOperand(0, Op);
Clone->insertBefore(InsertPos);
LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
"And inserting clone at "
<< *InsertPos << "\n");
Worklist.push_back(std::make_pair(Clone, Incoming));
}
// Erase the original call.
LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
EraseInstruction(CInst);
} while (!Worklist.empty());
// Skip useless calls to the helper when the instruction isn't itself an
// ARC intrinsic.
switch (Class) {
default:
break;
case ARCInstKind::CallOrUser:
case ARCInstKind::User:
case ARCInstKind::None:
continue;
}
OptimizeIndividualCallImpl(F, BlockColors, Inst, Class);
}
}
void ObjCARCOpt::OptimizeIndividualCallImpl(
Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
Instruction *Inst, ARCInstKind Class) {
// Some of the ARC calls can be deleted if their arguments are global
// variables that are inert in ARC.
if (IsNoopOnGlobal(Class)) {
Value *Opnd = Inst->getOperand(0);
if (auto *GV = dyn_cast<GlobalVariable>(Opnd->stripPointerCasts()))
if (GV->hasAttribute("objc_arc_inert")) {
if (!Inst->getType()->isVoidTy())
Inst->replaceAllUsesWith(Opnd);
Inst->eraseFromParent();
return;
}
}
switch (Class) {
default:
break;
// Delete no-op casts. These function calls have special semantics, but
// the semantics are entirely implemented via lowering in the front-end,
// so by the time they reach the optimizer, they are just no-op calls
// which return their argument.
//
// There are gray areas here, as the ability to cast reference-counted
// pointers to raw void* and back allows code to break ARC assumptions,
// however these are currently considered to be unimportant.
case ARCInstKind::NoopCast:
Changed = true;
++NumNoops;
LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
EraseInstruction(Inst);
return;
// If the pointer-to-weak-pointer is null, it's undefined behavior.
case ARCInstKind::StoreWeak:
case ARCInstKind::LoadWeak:
case ARCInstKind::LoadWeakRetained:
case ARCInstKind::InitWeak:
case ARCInstKind::DestroyWeak: {
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(0))) {
Changed = true;
Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty), CI);
Value *NewValue = UndefValue::get(CI->getType());
LLVM_DEBUG(
dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
"\nOld = "
<< *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
return;
}
break;
}
case ARCInstKind::CopyWeak:
case ARCInstKind::MoveWeak: {
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(0)) ||
IsNullOrUndef(CI->getArgOperand(1))) {
Changed = true;
Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty), CI);
Value *NewValue = UndefValue::get(CI->getType());
LLVM_DEBUG(
dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
"\nOld = "
<< *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
return;
}
break;
}
case ARCInstKind::RetainRV:
if (OptimizeRetainRVCall(F, Inst))
return;
break;
case ARCInstKind::AutoreleaseRV:
OptimizeAutoreleaseRVCall(F, Inst, Class);
break;
}
// objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
if (IsAutorelease(Class) && Inst->use_empty()) {
CallInst *Call = cast<CallInst>(Inst);
const Value *Arg = Call->getArgOperand(0);
Arg = FindSingleUseIdentifiedObject(Arg);
if (Arg) {
Changed = true;
++NumAutoreleases;
// Create the declaration lazily.
LLVMContext &C = Inst->getContext();
Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
CallInst *NewCall =
CallInst::Create(Decl, Call->getArgOperand(0), "", Call);
NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
MDNode::get(C, None));
LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
"since x is otherwise unused.\nOld: "
<< *Call << "\nNew: " << *NewCall << "\n");
EraseInstruction(Call);
Inst = NewCall;
Class = ARCInstKind::Release;
}
}
// For functions which can never be passed stack arguments, add
// a tail keyword.
if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
Changed = true;
LLVM_DEBUG(
dbgs() << "Adding tail keyword to function since it can never be "
"passed stack args: "
<< *Inst << "\n");
cast<CallInst>(Inst)->setTailCall();
}
// Ensure that functions that can never have a "tail" keyword due to the
// semantics of ARC truly do not do so.
if (IsNeverTail(Class)) {
Changed = true;
LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
<< "\n");
cast<CallInst>(Inst)->setTailCall(false);
}
// Set nounwind as needed.
if (IsNoThrow(Class)) {
Changed = true;
LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
<< "\n");
cast<CallInst>(Inst)->setDoesNotThrow();
}
// Note: This catches instructions unrelated to ARC.
if (!IsNoopOnNull(Class)) {
UsedInThisFunction |= 1 << unsigned(Class);
return;
}
const Value *Arg = GetArgRCIdentityRoot(Inst);
// ARC calls with null are no-ops. Delete them.
if (IsNullOrUndef(Arg)) {
Changed = true;
++NumNoops;
LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
<< "\n");
EraseInstruction(Inst);
return;
}
// Keep track of which of retain, release, autorelease, and retain_block
// are actually present in this function.
UsedInThisFunction |= 1 << unsigned(Class);
// If Arg is a PHI, and one or more incoming values to the
// PHI are null, and the call is control-equivalent to the PHI, and there
// are no relevant side effects between the PHI and the call, and the call
// is not a release that doesn't have the clang.imprecise_release tag, the
// call could be pushed up to just those paths with non-null incoming
// values. For now, don't bother splitting critical edges for this.
if (Class == ARCInstKind::Release &&
!Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
return;
SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
Worklist.push_back(std::make_pair(Inst, Arg));
do {
std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
Inst = Pair.first;
Arg = Pair.second;
const PHINode *PN = dyn_cast<PHINode>(Arg);
if (!PN)
continue;
// Determine if the PHI has any null operands, or any incoming
// critical edges.
bool HasNull = false;
bool HasCriticalEdges = false;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
if (IsNullOrUndef(Incoming))
HasNull = true;
else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
1) {
HasCriticalEdges = true;
break;
}
}
// If we have null operands and no critical edges, optimize.
if (HasCriticalEdges)
continue;
if (!HasNull)
continue;
SmallPtrSet<Instruction *, 4> DependingInstructions;
SmallPtrSet<const BasicBlock *, 4> Visited;
// Check that there is nothing that cares about the reference
// count between the call and the phi.
switch (Class) {
case ARCInstKind::Retain:
case ARCInstKind::RetainBlock:
// These can always be moved up.
break;
case ARCInstKind::Release:
// These can't be moved across things that care about the retain
// count.
FindDependencies(NeedsPositiveRetainCount, Arg, Inst->getParent(), Inst,
DependingInstructions, Visited, PA);
break;
case ARCInstKind::Autorelease:
// These can't be moved across autorelease pool scope boundaries.
FindDependencies(AutoreleasePoolBoundary, Arg, Inst->getParent(), Inst,
DependingInstructions, Visited, PA);
break;
case ARCInstKind::ClaimRV:
case ARCInstKind::RetainRV:
case ARCInstKind::AutoreleaseRV:
// Don't move these; the RV optimization depends on the autoreleaseRV
// being tail called, and the retainRV being immediately after a call
// (which might still happen if we get lucky with codegen layout, but
// it's not worth taking the chance).
continue;
default:
llvm_unreachable("Invalid dependence flavor");
}
if (DependingInstructions.size() != 1)
continue;
if (*DependingInstructions.begin() != PN)
continue;
Changed = true;
++NumPartialNoops;
// Clone the call into each predecessor that has a non-null value.
CallInst *CInst = cast<CallInst>(Inst);
Type *ParamTy = CInst->getArgOperand(0)->getType();
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
if (IsNullOrUndef(Incoming))
continue;
Value *Op = PN->getIncomingValue(i);
Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
CallInst *Clone = cast<CallInst>(
CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
if (Op->getType() != ParamTy)
Op = new BitCastInst(Op, ParamTy, "", InsertPos);
Clone->setArgOperand(0, Op);
Clone->insertBefore(InsertPos);
LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
"And inserting clone at "
<< *InsertPos << "\n");
Worklist.push_back(std::make_pair(Clone, Incoming));
}
// Erase the original call.
LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
EraseInstruction(CInst);
} while (!Worklist.empty());
}
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,