
fix function names; NFC

llvm-svn: 281637
Sanjay Patel 2016-09-15 18:22:25 +00:00
parent f7704110de
commit ea119532b0


@@ -36,11 +36,11 @@ using namespace PatternMatch;

 STATISTIC(NumSel, "Number of select opts");

-static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
+static ConstantInt *extractElement(Constant *V, Constant *Idx) {
   return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
 }

-static bool HasAddOverflow(ConstantInt *Result,
+static bool hasAddOverflow(ConstantInt *Result,
                            ConstantInt *In1, ConstantInt *In2,
                            bool IsSigned) {
   if (!IsSigned)
@@ -53,28 +53,28 @@ static bool HasAddOverflow(ConstantInt *Result,

 /// Compute Result = In1+In2, returning true if the result overflowed for this
 /// type.
-static bool AddWithOverflow(Constant *&Result, Constant *In1,
+static bool addWithOverflow(Constant *&Result, Constant *In1,
                             Constant *In2, bool IsSigned = false) {
   Result = ConstantExpr::getAdd(In1, In2);

   if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
     for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
       Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
-      if (HasAddOverflow(ExtractElement(Result, Idx),
-                         ExtractElement(In1, Idx),
-                         ExtractElement(In2, Idx),
+      if (hasAddOverflow(extractElement(Result, Idx),
+                         extractElement(In1, Idx),
+                         extractElement(In2, Idx),
                          IsSigned))
         return true;
     }
     return false;
   }

-  return HasAddOverflow(cast<ConstantInt>(Result),
+  return hasAddOverflow(cast<ConstantInt>(Result),
                         cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                         IsSigned);
 }

-static bool HasSubOverflow(ConstantInt *Result,
+static bool hasSubOverflow(ConstantInt *Result,
                            ConstantInt *In1, ConstantInt *In2,
                            bool IsSigned) {
   if (!IsSigned)
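For reference, the scalar overflow test that hasAddOverflow performs can also be expressed directly with APInt's overflow-reporting arithmetic. The sketch below is illustration only, not code from this patch, and the helper name sketchAddOverflows is made up; subtraction works the same way with ssub_ov/usub_ov.

```cpp
// Minimal sketch: report whether In1 + In2 wraps, using APInt's
// overflow-aware adders rather than comparing against the computed sum.
#include "llvm/ADT/APInt.h"
using namespace llvm;

static bool sketchAddOverflows(const APInt &In1, const APInt &In2,
                               bool IsSigned) {
  bool Overflow = false;
  if (IsSigned)
    (void)In1.sadd_ov(In2, Overflow); // signed add, sets Overflow on wrap
  else
    (void)In1.uadd_ov(In2, Overflow); // unsigned add, sets Overflow on carry
  return Overflow;
}
```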
@@ -88,23 +88,23 @@ static bool HasSubOverflow(ConstantInt *Result,

 /// Compute Result = In1-In2, returning true if the result overflowed for this
 /// type.
-static bool SubWithOverflow(Constant *&Result, Constant *In1,
+static bool subWithOverflow(Constant *&Result, Constant *In1,
                             Constant *In2, bool IsSigned = false) {
   Result = ConstantExpr::getSub(In1, In2);

   if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
     for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
       Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
-      if (HasSubOverflow(ExtractElement(Result, Idx),
-                         ExtractElement(In1, Idx),
-                         ExtractElement(In2, Idx),
+      if (hasSubOverflow(extractElement(Result, Idx),
+                         extractElement(In1, Idx),
+                         extractElement(In2, Idx),
                          IsSigned))
         return true;
     }
     return false;
   }

-  return HasSubOverflow(cast<ConstantInt>(Result),
+  return hasSubOverflow(cast<ConstantInt>(Result),
                         cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                         IsSigned);
 }
@@ -175,7 +175,7 @@ static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
 /// Given a signed integer type and a set of known zero and one bits, compute
 /// the maximum and minimum values that could have the specified known zero and
 /// known one bits, returning them in Min/Max.
-static void ComputeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
+static void computeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                    const APInt &KnownOne,
                                                    APInt &Min, APInt &Max) {
   assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
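The signed min/max computation described above amounts to filling in the bits that are neither known zero nor known one: the minimum clears every unknown bit except an unknown sign bit, and the maximum sets every unknown bit except an unknown sign bit. A standalone sketch of that idea (hypothetical helper, not the patch's code):

```cpp
// Sketch: signed range of a value constrained by KnownZero/KnownOne.
#include "llvm/ADT/APInt.h"
using namespace llvm;

static void sketchSignedMinMax(const APInt &KnownZero, const APInt &KnownOne,
                               APInt &Min, APInt &Max) {
  APInt Unknown = ~(KnownZero | KnownOne); // bits we know nothing about
  Min = KnownOne;                          // unknown bits cleared
  Max = KnownOne | Unknown;                // unknown bits set
  if (Unknown.isNegative()) {              // the sign bit itself is unknown
    Min.setBit(Min.getBitWidth() - 1);     // most negative: sign bit on
    Max.clearBit(Max.getBitWidth() - 1);   // most positive: sign bit off
  }
}
```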
@@ -198,7 +198,7 @@ static void ComputeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
 /// Given an unsigned integer type and a set of known zero and one bits, compute
 /// the maximum and minimum values that could have the specified known zero and
 /// known one bits, returning them in Min/Max.
-static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
+static void computeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                      const APInt &KnownOne,
                                                      APInt &Min, APInt &Max) {
   assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
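The unsigned variant has no sign bit to special-case, so the range simply runs from "all unknown bits off" to "all unknown bits on"; a one-function sketch under the same assumptions as the previous snippet:

```cpp
// Sketch: unsigned range from known bits.
#include "llvm/ADT/APInt.h"
using namespace llvm;

static void sketchUnsignedMinMax(const APInt &KnownZero, const APInt &KnownOne,
                                 APInt &Min, APInt &Max) {
  Min = KnownOne;                           // every unknown bit = 0
  Max = KnownOne | ~(KnownZero | KnownOne); // every unknown bit = 1
}
```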
@@ -500,7 +500,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
 ///
 /// If we can't emit an optimized form for this expression, this returns null.
 ///
-static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
+static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                           const DataLayout &DL) {
   gep_type_iterator GTI = gep_type_begin(GEP);

@@ -932,7 +932,7 @@ Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
     // This transformation (ignoring the base and scales) is valid because we
     // know pointers can't overflow since the gep is inbounds. See if we can
     // output an optimized form.
-    Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this, DL);
+    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

     // If not, synthesize the offset the hard way.
     if (!Offset)
@@ -1290,7 +1290,7 @@ Instruction *InstCombiner::foldICmpCstShlConst(ICmpInst &I, Value *Op, Value *A,
 ///   if (sum+128 >u 255)
 /// Then replace it with llvm.sadd.with.overflow.i8.
 ///
-static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
+static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                           ConstantInt *CI2, ConstantInt *CI1,
                                           InstCombiner &IC) {
   // The transformation we're trying to do here is to transform this into an
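The "sum+128 >u 255" test in that comment is ordinary range arithmetic: the exact sum of two sign-extended i8 values lies in [-256, 254], and it fits back into i8 exactly when it lies in [-128, 127], i.e. when sum+128, viewed as unsigned, is at most 255. A self-contained check of that equivalence (plain C++ for illustration, not code from this file):

```cpp
#include <cassert>
#include <cstdint>

// Exhaustively confirm: "a + b overflows i8"  <=>  "(sum + 128) >u 255",
// where sum is the exact 32-bit sum of the sign-extended i8 operands.
int main() {
  for (int A = -128; A <= 127; ++A)
    for (int B = -128; B <= 127; ++B) {
      int32_t Sum = A + B;                          // exact wide sum
      bool Overflows = Sum < INT8_MIN || Sum > INT8_MAX;
      bool UnsignedGT = uint32_t(Sum + 128) > 255u; // the icmp ugt form
      assert(Overflows == UnsignedGT);
    }
  return 0;
}
```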
@@ -1399,7 +1399,7 @@ Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
     ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
     if (Pred == ICmpInst::ICMP_UGT &&
         match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
-      if (Instruction *Res = ProcessUGT_ADDCST_ADD(
+      if (Instruction *Res = processUGT_ADDCST_ADD(
               Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
         return Res;
   }
@@ -2180,7 +2180,7 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
     if (!HiOverflow) {
       // If this is not an exact divide, then many values in the range collapse
       // to the same result value.
-      HiOverflow = AddWithOverflow(HiBound, LoBound, RangeSize, false);
+      HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
     }
   } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
     if (*C == 0) { // (X / pos) op 0
@@ -2191,14 +2191,14 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
       LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
       HiOverflow = LoOverflow = ProdOV;
       if (!HiOverflow)
-        HiOverflow = AddWithOverflow(HiBound, Prod, RangeSize, true);
+        HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
     } else { // (X / pos) op neg
       // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
       HiBound = AddOne(Prod);
       LoOverflow = HiOverflow = ProdOV ? -1 : 0;
       if (!LoOverflow) {
         Constant *DivNeg = ConstantExpr::getNeg(RangeSize);
-        LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
+        LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
       }
     }
   } else if (C2->isNegative()) { // Divisor is < 0.
@@ -2217,12 +2217,12 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
       HiBound = AddOne(Prod);
       HiOverflow = LoOverflow = ProdOV ? -1 : 0;
       if (!LoOverflow)
-        LoOverflow = AddWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
+        LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
     } else { // (X / neg) op neg
       LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
       LoOverflow = HiOverflow = ProdOV;
       if (!HiOverflow)
-        HiOverflow = SubWithOverflow(HiBound, Prod, RangeSize, true);
+        HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
     }

     // Dividing by a negative swaps the condition. LT <-> GT
@@ -2858,7 +2858,7 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
 /// \param OtherVal The other argument of compare instruction.
 /// \returns Instruction which must replace the compare instruction, NULL if no
 /// replacement required.
-static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
+static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
                                          Value *OtherVal, InstCombiner &IC) {
   // Don't bother doing this transformation for pointers, don't do it for
   // vectors.
@@ -3082,8 +3082,8 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
 /// When performing a comparison against a constant, it is possible that not all
 /// the bits in the LHS are demanded. This helper method computes the mask that
 /// IS demanded.
-static APInt DemandedBitsLHSMask(ICmpInst &I,
-                                 unsigned BitWidth, bool isSignCheck) {
+static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth,
+                                    bool isSignCheck) {
   if (isSignCheck)
     return APInt::getSignBit(BitWidth);

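For a pure sign check, only the top bit of the left-hand side can affect the comparison, which is why the early return above hands back just the sign bit. A tiny illustration (hypothetical helper; getSignBit is the APInt spelling used at the time of this commit):

```cpp
// For i32, a sign test such as (icmp slt X, 0) only demands bit 31 of X.
#include "llvm/ADT/APInt.h"
using namespace llvm;

static APInt signTestDemandedMask(unsigned BitWidth) {
  return APInt::getSignBit(BitWidth); // e.g. 0x80000000 when BitWidth == 32
}
```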
@@ -3293,7 +3293,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
   APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);

   if (SimplifyDemandedBits(I.getOperandUse(0),
-                           DemandedBitsLHSMask(I, BitWidth, IsSignBit),
+                           getDemandedBitsLHSMask(I, BitWidth, IsSignBit),
                            Op0KnownZero, Op0KnownOne, 0))
     return &I;

@@ -3307,14 +3307,14 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
   APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
   APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
   if (I.isSigned()) {
-    ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
+    computeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
                                            Op0Max);
-    ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
+    computeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
                                            Op1Max);
   } else {
-    ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
+    computeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
                                              Op0Max);
-    ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
+    computeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
                                              Op1Max);
   }

@@ -4147,11 +4147,11 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {

     // (zext a) * (zext b) --> llvm.umul.with.overflow.
     if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
-      if (Instruction *R = ProcessUMulZExtIdiom(I, Op0, Op1, *this))
+      if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
         return R;
     }
     if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
-      if (Instruction *R = ProcessUMulZExtIdiom(I, Op1, Op0, *this))
+      if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
         return R;
     }
   }
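The (zext a) * (zext b) pattern that processUMulZExtIdiom matches is what a frontend typically emits for the C-level "multiply in a wider type, then check the high half" overflow idiom; per the diff's comment it is folded to llvm.umul.with.overflow. A source-level sketch of that shape (plain C++, illustration only):

```cpp
#include <cstdint>

// Widen both operands, multiply, and test the high half: the source-level
// shape that corresponds to (zext a) * (zext b) followed by a compare.
bool mul32Overflows(uint32_t A, uint32_t B, uint32_t &Product) {
  uint64_t Wide = uint64_t(A) * uint64_t(B); // (zext a) * (zext b)
  Product = uint32_t(Wide);                  // truncated low half
  return (Wide >> 32) != 0;                  // nonzero high half => overflow
}
```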