Simplify some boolean conditional return statements in AArch64.

http://reviews.llvm.org/D9979

Patch by Richard Thomson (and some conflict resolution by me).

llvm-svn: 262266
commit b615cc9f15
parent 48494ce3b7
Author: Eric Christopher
Date: 2016-02-29 22:50:49 +00:00
7 changed files with 18 additions and 48 deletions
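The rewrite applied across all seven files is the same mechanical one: a conditional whose only job is to pick between the boolean literals true and false is replaced by returning the condition itself, negated when the branch returned false. A minimal before/after sketch of the two shapes, using hypothetical names rather than code from the patch:

    // Before: the branch exists only to select a boolean literal.
    bool isSmallOffsetBefore(long long Offset) {
      if (Offset < 256)
        return true;
      return false;
    }

    // After: return the condition directly.
    bool isSmallOffsetAfter(long long Offset) {
      return Offset < 256;
    }

    // When the then-branch returned false, the condition is negated instead.
    bool canUseShortcut(bool HasCalls, bool HasFP) {
      return !(HasCalls || HasFP);
    }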

@@ -207,9 +207,7 @@ bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
 }
 
 static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
-  if (isa<SelectInst>(Inst) && OpIdx == 0)
-    return false;
-  return true;
+  return !(isa<SelectInst>(Inst) && OpIdx == 0);
 }
 
 bool

@@ -946,10 +946,7 @@ bool AArch64FastISel::isValueAvailable(const Value *V) const {
     return true;
 
   const auto *I = cast<Instruction>(V);
-  if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
-    return true;
-
-  return false;
+  return FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB;
 }
 
 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {

@@ -130,9 +130,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
   // Note: currently hasFP() is always true for hasCalls(), but that's an
   // implementation detail of the current code, not a strict requirement,
   // so stay safe here and check both.
-  if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
-    return false;
-  return true;
+  return !(MFI->hasCalls() || hasFP(MF) || NumBytes > 128);
 }
 
 /// hasFP - Return true if the specified function should have a dedicated frame

@@ -328,9 +328,7 @@ static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
   // it hurts if the value is used at least twice, unless we are optimizing
   // for code size.
-  if (ForCodeSize || V.hasOneUse())
-    return true;
-  return false;
+  return ForCodeSize || V.hasOneUse();
 }
 
 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
@@ -797,10 +795,7 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
   if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
     return false;
 
-  if (isWorthFolding(N))
-    return true;
-
-  return false;
+  return isWorthFolding(N);
 }
 
 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,

@@ -2812,9 +2812,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
     return false;
 
   if (getTargetMachine().Options.GuaranteedTailCallOpt) {
-    if (IsTailCallConvention(CalleeCC) && CCMatch)
-      return true;
-    return false;
+    return IsTailCallConvention(CalleeCC) && CCMatch;
   }
 
   // Externally-defined functions with weak linkage should not be
@@ -6974,12 +6972,10 @@ bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
   const DataLayout &DL = I->getModule()->getDataLayout();
   EVT VT = getValueType(DL, User->getOperand(0)->getType());
 
-  if (isFMAFasterThanFMulAndFAdd(VT) &&
-      isOperationLegalOrCustom(ISD::FMA, VT) &&
-      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath))
-    return false;
-
-  return true;
+  return !(isFMAFasterThanFMulAndFAdd(VT) &&
+           isOperationLegalOrCustom(ISD::FMA, VT) &&
+           (Options.AllowFPOpFusion == FPOpFusion::Fast ||
+            Options.UnsafeFPMath));
 }
 
 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
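Note the shape chosen in the isProfitableToHoist hunk above: when the original returned false on a compound condition, the whole condition is wrapped in one negation rather than being distributed through De Morgan's laws. The two spellings are equivalent; the wrapped form keeps the original condition textually intact. An illustrative pair, with hypothetical names:

    // Same predicate either way; the patch style keeps the condition verbatim.
    bool notFusable(bool FastFMA, bool LegalFMA) {
      return !(FastFMA && LegalFMA);
    }
    bool notFusableDeMorgan(bool FastFMA, bool LegalFMA) {
      return !FastFMA || !LegalFMA;
    }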
@@ -7282,9 +7278,7 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
   // Same encoding for add/sub, just flip the sign.
   Immed = std::abs(Immed);
-  if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
-    return true;
-  return false;
+  return ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
 }
 
 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
@@ -7341,10 +7335,8 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
   }
 
   // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
-  if (!AM.Scale || AM.Scale == 1 ||
-      (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes))
-    return true;
-  return false;
+  return !AM.Scale || AM.Scale == 1 ||
+         (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
 }
 
 int AArch64TargetLowering::getScalingFactorCost(const DataLayout &DL,
@@ -9309,10 +9301,8 @@ bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
   }
   case ISD::Constant:
   case ISD::TargetConstant: {
-    if (std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
-        1LL << (width - 1))
-      return true;
-    return false;
+    return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
+           1LL << (width - 1);
   }
   }
 
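In the constant case above, 1LL << (width - 1) is the magnitude bound of a width-bit signed immediate: for width == 8 the predicate accepts constants c with |c| < 128, i.e. -127 through 127. A self-contained restatement of that check, with a hypothetical name:

    #include <cstdint>
    #include <cstdlib>

    // True when |C| < 2^(Width - 1), the same bound checked above
    // (e.g. Width == 8 accepts -127..127, but not -128).
    bool fitsSignedWidth(int64_t C, unsigned Width) {
      return std::abs(C) < (1LL << (Width - 1));
    }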
@@ -9922,10 +9912,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
 // return instructions to help enable tail call optimizations for this
 // instruction.
 bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
-  if (!CI->isTailCall())
-    return false;
-
-  return true;
+  return CI->isTailCall();
 }
 
 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,

@@ -285,10 +285,7 @@ static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr,
 
   // Do not mess with inline asm.
   const CallInst *CI = dyn_cast<const CallInst>(Instr);
-  if (CI && isa<const InlineAsm>(CI->getCalledValue()))
-    return false;
-
-  return true;
+  return !(CI && isa<const InlineAsm>(CI->getCalledValue()));
 }
 
 /// Check if the given Cst should be converted into

@@ -190,9 +190,7 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
     // If it's wrong, we'll materialize the constant and still get to the
     // object; it's just suboptimal. Negative offsets use the unscaled
     // load/store instructions, which have a 9-bit signed immediate.
-    if (MFI->getLocalFrameSize() < 256)
-      return false;
-    return true;
+    return MFI->getLocalFrameSize() >= 256;
   }
 
   return false;
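The last hunk shows one further variation: rather than wrapping the comparison as !(MFI->getLocalFrameSize() < 256), the negation is folded into the relational operator, giving >= 256. A trivial illustration with a hypothetical name:

    // !(X < 256) and X >= 256 are the same predicate; folding the negation
    // into the operator avoids the extra parentheses.
    bool needsMaterializedOffset(long long X) {
      return X >= 256;
    }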