
Replace Count{Leading,Trailing}Zeros_{32,64} with count{Leading,Trailing}Zeros.

llvm-svn: 182680
Michael J. Spencer 2013-05-24 22:23:49 +00:00
parent f5ff6c7f54
commit c195b8a813
40 changed files with 104 additions and 104 deletions

View File

@ -1399,18 +1399,18 @@ public:
/// equivalent of the string given by \p str.
static unsigned getBitsNeeded(StringRef str, uint8_t radix);
/// \brief Count the number of zeros from the msb to the first one bit.
/// \brief The APInt version of the countLeadingZeros functions in
/// MathExtras.h.
///
/// This function is an APInt version of the countLeadingZeros_{32,64}
/// functions in MathExtras.h. It counts the number of zeros from the most
/// significant bit to the first one bit.
/// It counts the number of zeros from the most significant bit to the first
/// one bit.
///
/// \returns BitWidth if the value is zero, otherwise returns the number of
/// zeros from the most significant bit to the first one bit.
unsigned countLeadingZeros() const {
if (isSingleWord()) {
unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
return CountLeadingZeros_64(VAL) - unusedBits;
return llvm::countLeadingZeros(VAL) - unusedBits;
}
return countLeadingZerosSlowCase();
}
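For intuition, a minimal standalone sketch of the single-word path above; std::countl_zero from C++20 <bit> stands in for the new llvm::countLeadingZeros, and the 16-bit width and value are invented:

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  const unsigned BitWidth = 16;            // hypothetical APInt width
  const uint64_t VAL = 1;                  // its value, stored in one 64-bit word
  unsigned unusedBits = 64 - BitWidth;     // APINT_BITS_PER_WORD - BitWidth
  // Count over the whole word, then discard the bits the APInt does not use.
  unsigned lz = std::countl_zero(VAL) - unusedBits;
  assert(lz == 15);                        // 63 leading zeros in the word, minus 48 unused
  return 0;
}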

View File

@ -153,9 +153,9 @@ public:
for (unsigned i = 0; i < NumBitWords(size()); ++i)
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros((uint32_t)Bits[i]);
if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
llvm_unreachable("Unsupported!");
}
return -1;
@ -176,9 +176,9 @@ public:
if (Copy != 0) {
if (sizeof(BitWord) == 4)
return WordPos * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Copy);
return WordPos * BITWORD_SIZE + countTrailingZeros((uint32_t)Copy);
if (sizeof(BitWord) == 8)
return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy);
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
llvm_unreachable("Unsupported!");
}
@ -186,9 +186,9 @@ public:
for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i)
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros((uint32_t)Bits[i]);
if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
llvm_unreachable("Unsupported!");
}
return -1;
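The bit-vector headers in this commit all use the same word-scan pattern; here is a minimal standalone sketch (not the LLVM class), with std::countr_zero from C++20 <bit> standing in for countTrailingZeros:

#include <bit>
#include <cassert>
#include <cstdint>
#include <vector>

// Scan 64-bit words and use the trailing-zero count of the first non-zero word.
static int findFirstSet(const std::vector<uint64_t> &Words) {
  for (unsigned i = 0, e = Words.size(); i != e; ++i)
    if (Words[i] != 0)
      return i * 64 + std::countr_zero(Words[i]);
  return -1;
}

int main() {
  assert(findFirstSet({0, 0x10}) == 68);   // word 1, bit 4 -> 64 + 4
  return 0;
}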

View File

@ -216,9 +216,9 @@ public:
if (Bits == 0)
return -1;
if (NumBaseBits == 32)
return CountTrailingZeros_32(Bits);
return countTrailingZeros(Bits);
if (NumBaseBits == 64)
return CountTrailingZeros_64(Bits);
return countTrailingZeros(Bits);
llvm_unreachable("Unsupported!");
}
return getPointer()->find_first();
@ -234,9 +234,9 @@ public:
if (Bits == 0 || Prev + 1 >= getSmallSize())
return -1;
if (NumBaseBits == 32)
return CountTrailingZeros_32(Bits);
return countTrailingZeros(Bits);
if (NumBaseBits == 64)
return CountTrailingZeros_64(Bits);
return countTrailingZeros(Bits);
llvm_unreachable("Unsupported!");
}
return getPointer()->find_next(Prev);

View File

@ -137,9 +137,9 @@ public:
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
llvm_unreachable("Unsupported!");
}
llvm_unreachable("Illegal empty element");
@ -162,9 +162,9 @@ public:
if (Copy != 0) {
if (sizeof(BitWord) == 4)
return WordPos * BITWORD_SIZE + CountTrailingZeros_32(Copy);
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
if (sizeof(BitWord) == 8)
return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy);
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
llvm_unreachable("Unsupported!");
}
@ -172,9 +172,9 @@ public:
for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
llvm_unreachable("Unsupported!");
}
return -1;

View File

@ -517,7 +517,7 @@ inline unsigned CountTrailingZeros_64(uint64_t Value) {
/// zero bit (64 bit edition.)
/// Returns 64 if the word is all ones.
inline unsigned CountTrailingOnes_64(uint64_t Value) {
return CountTrailingZeros_64(~Value);
return countTrailingZeros(~Value);
}
/// CountPopulation_32 - this function counts the number of set bits in a value.
@ -550,26 +550,26 @@ inline unsigned CountPopulation_64(uint64_t Value) {
/// -1 if the value is zero. (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
return 31 - CountLeadingZeros_32(Value);
return 31 - countLeadingZeros(Value);
}
/// Log2_64 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
return 63 - CountLeadingZeros_64(Value);
return 63 - countLeadingZeros(Value);
}
/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
/// value, 32 if the value is zero. (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
return 32-CountLeadingZeros_32(Value-1);
return 32 - countLeadingZeros(Value - 1);
}
/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
/// value, 64 if the value is zero. (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
return 64-CountLeadingZeros_64(Value-1);
return 64 - countLeadingZeros(Value - 1);
}
/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
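As a quick check of the identities above (a standalone sketch; std::countl_zero from C++20 <bit> stands in for countLeadingZeros):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t V = 6;
  unsigned Floor = 31 - std::countl_zero(V);      // Log2_32(6) == 2
  unsigned Ceil  = 32 - std::countl_zero(V - 1);  // Log2_32_Ceil(6) == 3
  assert(Floor == 2 && Ceil == 3);
  return 0;
}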

View File

@ -290,7 +290,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
if (Align > 0)
KnownZero = APInt::getLowBitsSet(BitWidth,
CountTrailingZeros_32(Align));
countTrailingZeros(Align));
else
KnownZero.clearAllBits();
KnownOne.clearAllBits();
@ -321,7 +321,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
if (Align)
KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
return;
}
@ -613,7 +613,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
Align = TD->getABITypeAlignment(AI->getType()->getElementType());
if (Align > 0)
KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
break;
}
case Instruction::GetElementPtr: {
@ -633,8 +633,8 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
const StructLayout *SL = TD->getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
uint64_t Offset = SL->getElementOffset(Idx);
TrailZ = std::min(TrailZ,
CountTrailingZeros_64(Offset));
TrailZ = std::min<unsigned>(TrailZ,
countTrailingZeros(Offset));
} else {
// Handle array index arithmetic.
Type *IndexedTy = GTI.getIndexedType();
@ -644,7 +644,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
TrailZ = std::min(TrailZ,
unsigned(CountTrailingZeros_64(TypeSize) +
unsigned(countTrailingZeros(TypeSize) +
LocalKnownZero.countTrailingOnes()));
}
}
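The idea in these hunks is that a known alignment pins the low bits of an address to zero; a toy illustration with an invented alignment (std::countr_zero stands in for countTrailingZeros):

#include <bit>
#include <cassert>

int main() {
  unsigned Align = 8;                                   // pointer known to be 8-byte aligned
  unsigned KnownZeroLowBits = std::countr_zero(Align);  // the 3 low bits of the address are zero
  assert(KnownZeroLowBits == 3);
  return 0;
}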

View File

@ -91,7 +91,7 @@ struct DomainValue {
// First domain available.
unsigned getFirstDomain() const {
return CountTrailingZeros_32(AvailableDomains);
return countTrailingZeros(AvailableDomains);
}
DomainValue() : Refs(0) { clear(); }
@ -564,7 +564,7 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
// If the collapsed operands force a single domain, propagate the collapse.
if (isPowerOf2_32(available)) {
unsigned domain = CountTrailingZeros_32(available);
unsigned domain = countTrailingZeros(available);
TII->setExecutionDomain(mi, domain);
visitHardInstr(mi, domain);
return;

View File

@ -7509,9 +7509,9 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
// 0 and the bits being kept are 1. Use getSExtValue so that leading bits
// follow the sign bit for uniformity.
uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
unsigned NotMaskLZ = CountLeadingZeros_64(NotMask);
unsigned NotMaskLZ = countLeadingZeros(NotMask);
if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
unsigned NotMaskTZ = countTrailingZeros(NotMask);
if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
if (NotMaskLZ == 64) return Result; // All zero mask.
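A standalone sketch of the byte-alignment test above, with an invented mask that clears exactly one byte (std::countl_zero/std::countr_zero stand in for the LLVM helpers):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint64_t Mask = 0xFFFFFFFFFF00FFFFULL;       // hypothetical AND mask clearing byte 2
  uint64_t NotMask = ~Mask;                    // 0x0000000000FF0000
  unsigned LZ = std::countl_zero(NotMask);     // 40, a multiple of 8
  unsigned TZ = std::countr_zero(NotMask);     // 16, a multiple of 8
  assert((LZ & 7) == 0 && (TZ & 7) == 0);      // the cleared region is byte aligned
  assert(64 - LZ - TZ == 8);                   // and exactly one byte wide
  return 0;
}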

View File

@ -1816,7 +1816,7 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
Cmp = DAG.getSetCC(getCurDebugLoc(),
TLI.getSetCCResultType(*DAG.getContext(), VT),
ShiftOp,
DAG.getConstant(CountTrailingZeros_64(B.Mask), VT),
DAG.getConstant(countTrailingZeros(B.Mask), VT),
ISD::SETEQ);
} else if (PopCount == BB.Range) {
// There is only one zero bit in the range, test for it directly.

View File

@ -85,7 +85,7 @@ TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
Base < BaseE; Base += 32) {
unsigned Idx = Base;
for (unsigned Mask = *SubClass++; Mask; Mask >>= 1) {
unsigned Offset = CountTrailingZeros_32(Mask);
unsigned Offset = countTrailingZeros(Mask);
const TargetRegisterClass *SubRC = getRegClass(Idx + Offset);
if (SubRC->isAllocatable())
return SubRC;
@ -155,7 +155,7 @@ const TargetRegisterClass *firstCommonClass(const uint32_t *A,
const TargetRegisterInfo *TRI) {
for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
if (unsigned Common = *A++ & *B++)
return TRI->getRegClass(I + CountTrailingZeros_32(Common));
return TRI->getRegClass(I + countTrailingZeros(Common));
return 0;
}

View File

@ -24,7 +24,7 @@ ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) {
error_code ec;
std::size_t MaxAlignment =
1ULL << CountTrailingZeros_64(uintptr_t(Object->getBufferStart()));
1ULL << countTrailingZeros(uintptr_t(Object->getBufferStart()));
if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB)
#if !LLVM_IS_UNALIGNED_ACCESS_FAST

View File

@ -692,14 +692,14 @@ unsigned APInt::countLeadingZerosSlowCase() const {
unsigned i = getNumWords();
integerPart MSW = pVal[i-1] & MSWMask;
if (MSW)
return CountLeadingZeros_64(MSW) - (APINT_BITS_PER_WORD - BitsInMSW);
return llvm::countLeadingZeros(MSW) - (APINT_BITS_PER_WORD - BitsInMSW);
unsigned Count = BitsInMSW;
for (--i; i > 0u; --i) {
if (pVal[i-1] == 0)
Count += APINT_BITS_PER_WORD;
else {
Count += CountLeadingZeros_64(pVal[i-1]);
Count += llvm::countLeadingZeros(pVal[i-1]);
break;
}
}
@ -735,13 +735,13 @@ unsigned APInt::countLeadingOnes() const {
unsigned APInt::countTrailingZeros() const {
if (isSingleWord())
return std::min(unsigned(CountTrailingZeros_64(VAL)), BitWidth);
return std::min(unsigned(llvm::countTrailingZeros(VAL)), BitWidth);
unsigned Count = 0;
unsigned i = 0;
for (; i < getNumWords() && pVal[i] == 0; ++i)
Count += APINT_BITS_PER_WORD;
if (i < getNumWords())
Count += CountTrailingZeros_64(pVal[i]);
Count += llvm::countTrailingZeros(pVal[i]);
return std::min(Count, BitWidth);
}
@ -1512,7 +1512,7 @@ static void KnuthDiv(unsigned *u, unsigned *v, unsigned *q, unsigned* r,
// and v so that its high bits are shifted to the top of v's range without
// overflow. Note that this can require an extra word in u so that u must
// be of length m+n+1.
unsigned shift = CountLeadingZeros_32(v[n-1]);
unsigned shift = countLeadingZeros(v[n-1]);
unsigned v_carry = 0;
unsigned u_carry = 0;
if (shift) {

View File

@ -87,7 +87,7 @@ namespace {
// If the block size isn't a multiple of the known bits, assume the
// worst case padding.
if (Size & ((1u << Bits) - 1))
Bits = CountTrailingZeros_32(Size);
Bits = countTrailingZeros(Size);
return Bits;
}
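A toy run of the padding check above, with invented values (std::countr_zero stands in for countTrailingZeros):

#include <bit>
#include <cassert>

int main() {
  unsigned Bits = 3;                  // block start known to be 8-byte aligned
  unsigned Size = 12;                 // but the block is 12 bytes long
  if (Size & ((1u << Bits) - 1))
    Bits = std::countr_zero(Size);    // alignment after the block drops to 4 bytes
  assert(Bits == 2);
  return 0;
}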

View File

@ -2464,7 +2464,7 @@ static int32_t getLSBForBFI(SelectionDAG &DAG, DebugLoc DL, EVT VT,
// cases (e.g. bitfield to bitfield copy) may still need a real shift before
// the BFI.
uint64_t LSB = CountTrailingZeros_64(Mask);
uint64_t LSB = countTrailingZeros(Mask);
int64_t ShiftRightRequired = LSB;
if (MaskedVal.getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(MaskedVal.getOperand(1))) {

View File

@ -972,7 +972,7 @@ bool A64Imms::isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits) {
// Now we have to work out the amount of rotation needed. The first part of
// this calculation is actually independent of RepeatWidth, but the complex
// case will depend on it.
Rotation = CountTrailingZeros_64(Imm);
Rotation = countTrailingZeros(Imm);
if (Rotation == 0) {
// There were no leading zeros, which means it's either in place or there
// are 1s at each end (e.g. 0x8003 needs rotating).

View File

@ -1044,8 +1044,8 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
return;
} else if ((MCID.Opcode == ARM::BFC) || (MCID.Opcode == ARM::BFI)) {
uint32_t v = ~MI.getOperand(2).getImm();
int32_t lsb = CountTrailingZeros_32(v);
int32_t msb = (32 - CountLeadingZeros_32(v)) - 1;
int32_t lsb = countTrailingZeros(v);
int32_t msb = (32 - countLeadingZeros(v)) - 1;
// Instr{20-16} = msb, Instr{11-7} = lsb
Binary |= (msb & 0x1F) << 16;
Binary |= (lsb & 0x1F) << 7;

View File

@ -128,7 +128,7 @@ namespace {
// If the block size isn't a multiple of the known bits, assume the
// worst case padding.
if (Size & ((1u << Bits) - 1))
Bits = CountTrailingZeros_32(Size);
Bits = countTrailingZeros(Size);
return Bits;
}

View File

@ -364,7 +364,7 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
continue;
// Check if the AND mask is an immediate of the form: 000.....1111111100
unsigned TZ = CountTrailingZeros_32(And_imm);
unsigned TZ = countTrailingZeros(And_imm);
if (TZ != 1 && TZ != 2)
// Be conservative here. Shifter operands aren't always free. e.g. On
// Swift, left shifter operand of 1 / 2 for free but others are not.

View File

@ -8167,7 +8167,7 @@ static SDValue PerformMULCombine(SDNode *N,
return SDValue();
int64_t MulAmt = C->getSExtValue();
unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
ShiftAmt = ShiftAmt & (32 - 1);
SDValue V = N->getOperand(0);
@ -8388,7 +8388,7 @@ static SDValue PerformORCombine(SDNode *N,
return SDValue();
if (ARM::isBitFieldInvertedMask(Mask)) {
Val >>= CountTrailingZeros_32(~Mask);
Val >>= countTrailingZeros(~Mask);
Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
DAG.getConstant(Val, MVT::i32),
@ -8415,7 +8415,7 @@ static SDValue PerformORCombine(SDNode *N,
(Mask == 0xffff || Mask == 0xffff0000))
return SDValue();
// 2a
unsigned amt = CountTrailingZeros_32(Mask2);
unsigned amt = countTrailingZeros(Mask2);
Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
DAG.getConstant(amt, MVT::i32));
Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
@ -8431,7 +8431,7 @@ static SDValue PerformORCombine(SDNode *N,
(Mask2 == 0xffff || Mask2 == 0xffff0000))
return SDValue();
// 2b
unsigned lsb = CountTrailingZeros_32(Mask);
unsigned lsb = countTrailingZeros(Mask);
Res = DAG.getNode(ISD::SRL, DL, VT, N00,
DAG.getConstant(lsb, MVT::i32));
Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
@ -8449,7 +8449,7 @@ static SDValue PerformORCombine(SDNode *N,
// where lsb(mask) == #shamt and masked bits of B are known zero.
SDValue ShAmt = N00.getOperand(1);
unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
unsigned LSB = CountTrailingZeros_32(Mask);
unsigned LSB = countTrailingZeros(Mask);
if (ShAmtC != LSB)
return SDValue();
@ -8492,8 +8492,8 @@ static SDValue PerformBFICombine(SDNode *N,
if (!N11C)
return SDValue();
unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
unsigned LSB = CountTrailingZeros_32(~InvMask);
unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB;
unsigned LSB = countTrailingZeros(~InvMask);
unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
unsigned Mask = (1 << Width)-1;
unsigned Mask2 = N11C->getZExtValue();
if ((Mask & (~Mask2)) == 0)

View File

@ -90,7 +90,7 @@ class ARMAsmParser : public MCTargetAsmParser {
if (!inITBlock()) return;
// Move to the next instruction in the IT block, if there is one. If not,
// mark the block as done.
unsigned TZ = CountTrailingZeros_32(ITState.Mask);
unsigned TZ = countTrailingZeros(ITState.Mask);
if (++ITState.CurPosition == 5 - TZ)
ITState.CurPosition = ~0U; // Done with the IT block after this.
}
@ -7420,7 +7420,7 @@ processInstruction(MCInst &Inst,
MCOperand &MO = Inst.getOperand(1);
unsigned Mask = MO.getImm();
unsigned OrigMask = Mask;
unsigned TZ = CountTrailingZeros_32(Mask);
unsigned TZ = countTrailingZeros(Mask);
if ((Inst.getOperand(0).getImm() & 1) == 0) {
assert(Mask && TZ <= 3 && "illegal IT mask value!");
Mask ^= (0xE << TZ) & 0xF;

View File

@ -65,7 +65,7 @@ namespace {
void setITState(char Firstcond, char Mask) {
// (3 - the number of trailing zeros) is the number of then / else.
unsigned CondBit0 = Firstcond & 1;
unsigned NumTZ = CountTrailingZeros_32(Mask);
unsigned NumTZ = countTrailingZeros<uint8_t>(Mask);
unsigned char CCBits = static_cast<unsigned char>(Firstcond & 0xf);
assert(NumTZ <= 3 && "Invalid IT mask!");
// push condition codes onto the stack the correct order for the pops

View File

@ -660,8 +660,8 @@ void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
uint32_t v = ~MO.getImm();
int32_t lsb = CountTrailingZeros_32(v);
int32_t width = (32 - CountLeadingZeros_32 (v)) - lsb;
int32_t lsb = countTrailingZeros(v);
int32_t width = (32 - countLeadingZeros (v)) - lsb;
assert(MO.isImm() && "Not a valid bf_inv_mask_imm value!");
O << markup("<imm:") << '#' << lsb << markup(">")
<< ", "
@ -931,7 +931,7 @@ void ARMInstPrinter::printThumbITMask(const MCInst *MI, unsigned OpNum,
unsigned Mask = MI->getOperand(OpNum).getImm();
unsigned Firstcond = MI->getOperand(OpNum-1).getImm();
unsigned CondBit0 = Firstcond & 1;
unsigned NumTZ = CountTrailingZeros_32(Mask);
unsigned NumTZ = countTrailingZeros(Mask);
assert(NumTZ <= 3 && "Invalid IT mask!");
for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
bool T = ((Mask >> Pos) & 1) == CondBit0;

View File

@ -140,7 +140,7 @@ namespace ARM_AM {
if ((Imm & ~255U) == 0) return 0;
// Use CTZ to compute the rotate amount.
unsigned TZ = CountTrailingZeros_32(Imm);
unsigned TZ = countTrailingZeros(Imm);
// Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
// not 9.
@ -153,7 +153,7 @@ namespace ARM_AM {
// For values like 0xF000000F, we should ignore the low 6 bits, then
// retry the hunt.
if (Imm & 63U) {
unsigned TZ2 = CountTrailingZeros_32(Imm & ~63U);
unsigned TZ2 = countTrailingZeros(Imm & ~63U);
unsigned RotAmt2 = TZ2 & ~1;
if ((rotr32(Imm, RotAmt2) & ~255U) == 0)
return (32-RotAmt2)&31; // HW rotates right, not left.
@ -221,7 +221,7 @@ namespace ARM_AM {
if ((Imm & ~255U) == 0) return 0;
// Use CTZ to compute the shift amount.
return CountTrailingZeros_32(Imm);
return countTrailingZeros(Imm);
}
/// isThumbImmShiftedVal - Return true if the specified value can be obtained
@ -240,7 +240,7 @@ namespace ARM_AM {
if ((Imm & ~65535U) == 0) return 0;
// Use CTZ to compute the shift amount.
return CountTrailingZeros_32(Imm);
return countTrailingZeros(Imm);
}
/// isThumbImm16ShiftedVal - Return true if the specified value can be
@ -296,7 +296,7 @@ namespace ARM_AM {
/// encoding is possible.
/// See ARM Reference Manual A6.3.2.
static inline int getT2SOImmValRotateVal(unsigned V) {
unsigned RotAmt = CountLeadingZeros_32(V);
unsigned RotAmt = countLeadingZeros(V);
if (RotAmt >= 24)
return -1;
@ -328,7 +328,7 @@ namespace ARM_AM {
static inline unsigned getT2SOImmValRotate(unsigned V) {
if ((V & ~255U) == 0) return 0;
// Use CTZ to compute the rotate amount.
unsigned RotAmt = CountTrailingZeros_32(V);
unsigned RotAmt = countTrailingZeros(V);
return (32 - RotAmt) & 31;
}
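A minimal sketch of the CTZ-based rotate computation, using the 0x200 example from the comment above (std::countr_zero and std::rotr from C++20 <bit> stand in for the LLVM helpers):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t Imm = 0x200;                     // the comment's example value
  unsigned TZ = std::countr_zero(Imm);      // 9
  unsigned RotAmt = TZ & ~1u;               // rotate amount must be even -> 8
  assert(std::rotr(Imm, RotAmt) == 0x2);    // fits in 8 bits after rotating right by 8
  return 0;
}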

View File

@ -1359,8 +1359,8 @@ getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
// msb of the mask.
const MCOperand &MO = MI.getOperand(Op);
uint32_t v = ~MO.getImm();
uint32_t lsb = CountTrailingZeros_32(v);
uint32_t msb = (32 - CountLeadingZeros_32 (v)) - 1;
uint32_t lsb = countTrailingZeros(v);
uint32_t msb = (32 - countLeadingZeros (v)) - 1;
assert (v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!");
return lsb | (msb << 5);
}
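For intuition, a standalone sketch of the lsb/msb extraction with an invented bf_inv_mask_imm operand (std::countl_zero and std::countr_zero stand in for the LLVM helpers):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t Imm = 0xFFFF00FFu;                       // hypothetical operand: field occupies bits 8..15
  uint32_t v = ~Imm;                                // 0x0000FF00
  uint32_t lsb = std::countr_zero(v);               // 8
  uint32_t msb = (32 - std::countl_zero(v)) - 1;    // 15
  assert((lsb | (msb << 5)) == 0x1E8);              // packed as lsb | (msb << 5)
  return 0;
}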

View File

@ -285,7 +285,7 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
NumBytes = 0;
} else {
// FIXME: Move this to ARMAddressingModes.h?
unsigned RotAmt = CountLeadingZeros_32(ThisVal);
unsigned RotAmt = countLeadingZeros(ThisVal);
ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
NumBytes &= ~ThisVal;
assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
@ -302,7 +302,7 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
NumBytes = 0;
} else {
// FIXME: Move this to ARMAddressingModes.h?
unsigned RotAmt = CountLeadingZeros_32(ThisVal);
unsigned RotAmt = countLeadingZeros(ThisVal);
ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
NumBytes &= ~ThisVal;
assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
@ -484,7 +484,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
// Otherwise, extract 8 adjacent bits from the immediate into this
// t2ADDri/t2SUBri.
unsigned RotAmt = CountLeadingZeros_32(Offset);
unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);
// We will handle these bits from offset, clear them.

View File

@ -40,7 +40,7 @@ void MipsAnalyzeImmediate::GetInstSeqLsORi(uint64_t Imm, unsigned RemSize,
void MipsAnalyzeImmediate::GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize,
InstSeqLs &SeqLs) {
unsigned Shamt = CountTrailingZeros_64(Imm);
unsigned Shamt = countTrailingZeros(Imm);
GetInstSeqLs(Imm >> Shamt, RemSize - Shamt, SeqLs);
AddInstr(SeqLs, Inst(SLL, Shamt));
}

View File

@ -70,7 +70,7 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
return false;
Size = CountPopulation_64(I);
Pos = CountTrailingZeros_64(I);
Pos = countTrailingZeros(I);
return true;
}

View File

@ -247,7 +247,7 @@ public:
unsigned getCRBitMask() const {
assert(isCRBitMask() && "Invalid access!");
return 7 - CountTrailingZeros_32(Imm.Val);
return 7 - countTrailingZeros<uint64_t>(Imm.Val);
}
bool isToken() const { return Kind == Token; }

View File

@ -332,17 +332,17 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
bool PPCDAGToDAGISel::isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME) {
if (isShiftedMask_32(Val)) {
// look for the first non-zero bit
MB = CountLeadingZeros_32(Val);
MB = countLeadingZeros(Val);
// look for the first zero bit after the run of ones
ME = CountLeadingZeros_32((Val - 1) ^ Val);
ME = countLeadingZeros((Val - 1) ^ Val);
return true;
} else {
Val = ~Val; // invert mask
if (isShiftedMask_32(Val)) {
// effectively look for the first zero bit
ME = CountLeadingZeros_32(Val) - 1;
ME = countLeadingZeros(Val) - 1;
// effectively look for the first one bit after the run of zeros
MB = CountLeadingZeros_32((Val - 1) ^ Val) + 1;
MB = countLeadingZeros((Val - 1) ^ Val) + 1;
return true;
}
}
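A worked example of the run-of-ones decomposition above, with an invented mask (std::countl_zero stands in for countLeadingZeros; MB/ME follow PPC's MSB-relative bit numbering):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t Val = 0x00FF0000u;                        // a shifted run of eight ones
  unsigned MB = std::countl_zero(Val);               // 8: first one bit, counted from the MSB
  unsigned ME = std::countl_zero((Val - 1) ^ Val);   // 15: first zero bit after the run
  assert(MB == 8 && ME == 15);
  return 0;
}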
@ -912,7 +912,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// If it can't be represented as a 32 bit value.
if (!isInt<32>(Imm)) {
Shift = CountTrailingZeros_64(Imm);
Shift = countTrailingZeros<uint64_t>(Imm);
int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
// If the shifted value fits 32 bits.

View File

@ -893,8 +893,8 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Produce implicit-defs for free by using killed registers.
while (Kills && Defs) {
unsigned KReg = CountTrailingZeros_32(Kills);
unsigned DReg = CountTrailingZeros_32(Defs);
unsigned KReg = countTrailingZeros(Kills);
unsigned DReg = countTrailingZeros(Defs);
DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n");
std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
std::swap(RegMap[KReg], RegMap[DReg]);
@ -917,7 +917,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Manually kill the rest.
while (Kills) {
unsigned KReg = CountTrailingZeros_32(Kills);
unsigned KReg = countTrailingZeros(Kills);
DEBUG(dbgs() << "Killing %FP" << KReg << "\n");
freeStackSlotBefore(I, KReg);
Kills &= ~(1 << KReg);
@ -925,7 +925,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Load zeros for all the imp-defs.
while(Defs) {
unsigned DReg = CountTrailingZeros_32(Defs);
unsigned DReg = countTrailingZeros(Defs);
DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n");
BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
pushReg(DReg);
@ -1636,7 +1636,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
// Note: this might be a non-optimal pop sequence. We might be able to do
// better by trying to pop in stack order or something.
while (FPKills) {
unsigned FPReg = CountTrailingZeros_32(FPKills);
unsigned FPReg = countTrailingZeros(FPKills);
if (isLive(FPReg))
freeStackSlotAfter(InsertPt, FPReg);
FPKills &= ~(1U << FPReg);
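These loops all use the same visit-every-set-bit idiom: take the trailing-zero count as the next register, then clear that bit. A standalone sketch with an invented mask:

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Kills = 0b101010;                    // hypothetical mask of stack registers
  while (Kills) {
    unsigned Reg = std::countr_zero(Kills);     // lowest set bit = next register
    std::printf("processing FP%u\n", Reg);      // prints FP1, FP3, FP5
    Kills &= ~(1u << Reg);                      // clear it and continue
  }
  return 0;
}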

View File

@ -886,8 +886,8 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
return true;
unsigned ShiftAmt = Shift.getConstantOperandVal(1);
unsigned MaskLZ = CountLeadingZeros_64(Mask);
unsigned MaskTZ = CountTrailingZeros_64(Mask);
unsigned MaskLZ = countLeadingZeros(Mask);
unsigned MaskTZ = countTrailingZeros(Mask);
// The amount of shift we're trying to fit into the addressing mode is taken
// from the trailing zeros of the mask.

View File

@ -5426,7 +5426,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Special case for single non-zero, non-undef, element.
if (NumNonZero == 1) {
unsigned Idx = CountTrailingZeros_32(NonZeros);
unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
// If this is an insertion of an i64 value on x86-32, and if the top bits of
@ -5535,7 +5535,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
// Check if it's possible to issue this instead.
// shuffle (vload ptr)), undef, <1, 1, 1, 1>
unsigned Idx = CountTrailingZeros_32(NonZeros);
unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
if (Op.getNode()->isOnlyUserOf(Item.getNode()))
return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
@ -5570,7 +5570,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (EVTBits == 64) {
if (NumNonZero == 1) {
// One half is zero or undef.
unsigned Idx = CountTrailingZeros_32(NonZeros);
unsigned Idx = countTrailingZeros(NonZeros);
SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
Op.getOperand(Idx));
return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);

View File

@ -61,7 +61,7 @@ namespace {
if (!isMask_32(value)) {
return false;
}
int msksize = 32 - CountLeadingZeros_32(value);
int msksize = 32 - countLeadingZeros(value);
return (msksize >= 1 && msksize <= 8) ||
msksize == 16 || msksize == 24 || msksize == 32;
}
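A small worked example of the mask-size computation above, with an invented mask value (std::countl_zero stands in for countLeadingZeros):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t Value = 0x00FFFFFFu;                    // a contiguous mask of 24 low ones
  int msksize = 32 - std::countl_zero(Value);      // 24, one of the sizes accepted above
  assert(msksize == 24);
  return 0;
}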
@ -117,7 +117,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
if (immMskBitp(N)) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit
SDValue MskSize = getI32Imm(32 - CountLeadingZeros_32(Val));
SDValue MskSize = getI32Imm(32 - countLeadingZeros(Val));
return CurDAG->getMachineNode(XCore::MKMSK_rus, dl,
MVT::i32, MskSize);
}

View File

@ -84,7 +84,7 @@ def msksize_xform : SDNodeXForm<imm, [{
// Transformation function: get the size of a mask
assert(isMask_32(N->getZExtValue()));
// look for the first non-zero bit
return getI32Imm(32 - CountLeadingZeros_32(N->getZExtValue()));
return getI32Imm(32 - countLeadingZeros(N->getZExtValue()));
}]>;
def neg_xform : SDNodeXForm<imm, [{

View File

@ -1380,7 +1380,7 @@ static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
// into a byteswap. At least one of the two bytes would not be aligned with
// their ultimate destination.
if (!isPowerOf2_32(ByteMask)) return true;
unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
unsigned InputByteNo = countTrailingZeros(ByteMask);
// 2) The input and ultimate destinations must line up: if byte 3 of an i32
// is demanded, it needs to go into byte 0 of the result. This means that the

View File

@ -519,7 +519,7 @@ ModulePass *llvm::createAddressSanitizerModulePass(
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
size_t Res = CountTrailingZeros_32(TypeSize / 8);
size_t Res = countTrailingZeros(TypeSize / 8);
assert(Res < kNumberOfAccessSizes);
return Res;
}
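A toy run of the size-to-index mapping above (std::countr_zero stands in for countTrailingZeros; the access size is invented):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t TypeSizeInBits = 32;                          // a 4-byte access
  unsigned Idx = std::countr_zero(TypeSizeInBits / 8);   // 1,2,4,8,16-byte accesses map to 0..4
  assert(Idx == 2);
  return 0;
}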

View File

@ -579,7 +579,7 @@ int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
// Ignore all unusual sizes.
return -1;
}
size_t Idx = CountTrailingZeros_32(TypeSize / 8);
size_t Idx = countTrailingZeros(TypeSize / 8);
assert(Idx < kNumberOfAccessSizes);
return Idx;
}

View File

@ -3607,7 +3607,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
abs64(NewF.BaseOffset)) &&
(C->getValue()->getValue() +
NewF.BaseOffset).countTrailingZeros() >=
CountTrailingZeros_64(NewF.BaseOffset))
countTrailingZeros<uint64_t>(NewF.BaseOffset))
goto skip_formula;
// Ok, looks good.

View File

@ -63,7 +63,7 @@ void printProgramHeaders(
<< format(Fmt, (uint64_t)pi->p_vaddr)
<< "paddr "
<< format(Fmt, (uint64_t)pi->p_paddr)
<< format("align 2**%u\n", CountTrailingZeros_64(pi->p_align))
<< format("align 2**%u\n", countTrailingZeros<uint64_t>(pi->p_align))
<< " filesz "
<< format(Fmt, (uint64_t)pi->p_filesz)
<< "memsz "

View File

@ -487,7 +487,7 @@ void LTOModule::addDefinedSymbol(const GlobalValue *def, bool isFunction) {
// set alignment part log2() can have rounding errors
uint32_t align = def->getAlignment();
uint32_t attr = align ? CountTrailingZeros_32(def->getAlignment()) : 0;
uint32_t attr = align ? countTrailingZeros(def->getAlignment()) : 0;
// set permissions part
if (isFunction) {