
Silencing warnings from MSVC 2015 Update 2. All of these changes silence "C4334 '<<': result of 32-bit shift implicitly converted to 64 bits (was 64-bit shift intended?)". NFC.

llvm-svn: 264929
Author: Aaron Ballman
Date:   2016-03-30 21:30:00 +00:00
Commit: 32225c3c15 (parent 7d16d35792)

8 changed files with 17 additions and 17 deletions
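
For context, a minimal sketch of the pattern being fixed (illustrative only, not code from this patch): MSVC raises C4334 when a shift is evaluated in 32 bits and the result is then implicitly widened to 64 bits, because any bits shifted past bit 31 are already gone. Widening the left operand, e.g. with 1ULL, performs the shift in 64 bits and silences the warning, which is exactly the change repeated in the hunks below.

// Hypothetical standalone example (not from this commit); blockMask and
// Shift are illustrative names.
#include <cstdint>

uint64_t blockMask(unsigned Shift) {
  return 1 << Shift;    // C4334: shift done in 32 bits, then widened to 64
}

uint64_t blockMaskFixed(unsigned Shift) {
  return 1ULL << Shift; // shift performed in 64 bits; no warning
}

One AddressSanitizer hunk assigns to size_t and uses static_cast<size_t>(1) instead, which likewise makes the shift width match the destination type.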

@@ -502,7 +502,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
   ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
   ProgInfo.LDSBlocks =
-      alignTo(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+      alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;
   // Scratch is allocated in 256 dword blocks.
   unsigned ScratchAlignShift = 10;
@@ -511,7 +511,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
   // scratch memory used per thread.
   ProgInfo.ScratchBlocks =
       alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
-              1 << ScratchAlignShift) >>
+              1ULL << ScratchAlignShift) >>
           ScratchAlignShift;
   ProgInfo.ComputePGMRSrc1 =

@@ -1447,7 +1447,7 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
     // Add LHS high bit
     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
-    SDValue BIT = DAG.getConstant(1 << bitPos, DL, HalfVT);
+    SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

@@ -1124,7 +1124,7 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
     Growth = CPEEnd - NextBlockOffset;
     // Compute the padding that would go at the end of the CPE to align the next
     // block.
-    Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
+    Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
     // If the CPE is to be inserted before the instruction, that will raise
     // the offset of the instruction. Also account for unknown alignment padding

@@ -985,7 +985,7 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
     Growth = CPEEnd - NextBlockOffset;
     // Compute the padding that would go at the end of the CPE to align the next
     // block.
-    Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
+    Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
     // If the CPE is to be inserted before the instruction, that will raise
     // the offset of the instruction. Also account for unknown alignment padding

@@ -2653,7 +2653,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
   default: llvm_unreachable("Unreachable!");
   case X86::SHL16ri: {
     unsigned ShAmt = MI->getOperand(2).getImm();
-    MIB.addReg(0).addImm(1 << ShAmt)
+    MIB.addReg(0).addImm(1ULL << ShAmt)
        .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
     break;
   }
@@ -2768,7 +2768,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
     NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
         .addOperand(Dest)
-        .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
+        .addReg(0).addImm(1ULL << ShAmt).addOperand(Src).addImm(0).addReg(0);
     break;
   }
   case X86::SHL32ri: {
@@ -2788,7 +2788,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
     MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
         .addOperand(Dest)
-        .addReg(0).addImm(1 << ShAmt)
+        .addReg(0).addImm(1ULL << ShAmt)
         .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
         .addImm(0).addReg(0);
     if (ImplicitOp.getReg() != 0)
@@ -2806,7 +2806,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr;
     NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
         .addOperand(Dest)
-        .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
+        .addReg(0).addImm(1ULL << ShAmt).addOperand(Src).addImm(0).addReg(0);
     break;
   }
   case X86::INC64r:

@@ -591,7 +591,7 @@ bool DevirtModule::tryVirtualConstProp(
     Value *Addr = B.CreateConstGEP1_64(Call.VTable, OffsetByte);
     if (BitWidth == 1) {
       Value *Bits = B.CreateLoad(Addr);
-      Value *Bit = ConstantInt::get(Int8Ty, 1 << OffsetBit);
+      Value *Bit = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
       Value *BitsAndBit = B.CreateAnd(Bits, Bit);
       auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
       Call.replaceAndErase(IsBitSet);

@@ -1073,7 +1073,7 @@ Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                            Value *ShadowValue,
                                            uint32_t TypeSize) {
-  size_t Granularity = 1 << Mapping.Scale;
+  size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
   // Addr & (Granularity - 1)
   Value *LastAccessedByte =
       IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
@@ -1116,7 +1116,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
       IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
   Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
-  size_t Granularity = 1 << Mapping.Scale;
+  size_t Granularity = 1ULL << Mapping.Scale;
   TerminatorInst *CrashTerm = nullptr;
   if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
@@ -1608,7 +1608,7 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
           IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
       for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
            AccessSizeIndex++) {
-        const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex);
+        const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
         AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
             checkSanitizerInterfaceFunction(M.getOrInsertFunction(
                 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
@@ -2019,7 +2019,7 @@ void FunctionStackPoisoner::poisonStack() {
   // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms.
   size_t MinHeaderSize = ASan.LongSize / 2;
   ASanStackFrameLayout L;
-  ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
+  ComputeASanStackFrameLayout(SVD, 1ULL << Mapping.Scale, MinHeaderSize, &L);
   DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
   uint64_t LocalStackSize = L.FrameSize;
   bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&

@@ -341,7 +341,7 @@ void ARMAttributeParser::ABI_align_needed(AttrType Tag, const uint8_t *Data,
   if (Value < array_lengthof(Strings))
     Description = std::string(Strings[Value]);
   else if (Value <= 12)
-    Description = std::string("8-byte alignment, ") + utostr(1 << Value)
+    Description = std::string("8-byte alignment, ") + utostr(1ULL << Value)
                   + std::string("-byte extended alignment");
   else
     Description = "Invalid";
@@ -362,8 +362,8 @@ void ARMAttributeParser::ABI_align_preserved(AttrType Tag, const uint8_t *Data,
   if (Value < array_lengthof(Strings))
     Description = std::string(Strings[Value]);
   else if (Value <= 12)
-    Description = std::string("8-byte stack alignment, ") + utostr(1 << Value)
-                  + std::string("-byte data alignment");
+    Description = std::string("8-byte stack alignment, ") +
+                  utostr(1ULL << Value) + std::string("-byte data alignment");
   else
     Description = "Invalid";