Update to use new name alignTo().
llvm-svn: 257804
commit dca64dbccc
parent 8e0f92e5a1
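Note: RoundUpToAlignment() and alignTo() compute the same value; this commit only switches callers to the new name. For reference, a minimal standalone sketch of the helper's semantics, assuming the Support/MathExtras.h definition of the time (the optional Skew parameter, used by the frame-offset code below, rounds up to the next Align boundary offset by Skew):

#include <cassert>
#include <cstdint>

// Smallest integer >= Value that is congruent to Skew modulo Align.
// With the default Skew = 0 this is the usual round-up-to-multiple.
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

int main() {
  assert(alignTo(5, 8) == 8);     // round up to the next multiple of 8
  assert(alignTo(16, 8) == 16);   // already-aligned values are unchanged
  assert(alignTo(7, 8, 1) == 9);  // skewed: next value that is 1 (mod 8)
  // OffsetToAlignment(13, 8) == alignTo(13, 8) - 13 == 3 bytes of padding.
  return 0;
}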
@@ -244,7 +244,7 @@ public:

    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
    Bits[I / BITWORD_SIZE] |= PrefixMask;
-   I = RoundUpToAlignment(I, BITWORD_SIZE);
+   I = alignTo(I, BITWORD_SIZE);

    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
      Bits[I / BITWORD_SIZE] = ~0UL;
@@ -283,7 +283,7 @@ public:

    BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
    Bits[I / BITWORD_SIZE] &= ~PrefixMask;
-   I = RoundUpToAlignment(I, BITWORD_SIZE);
+   I = alignTo(I, BITWORD_SIZE);

    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
      Bits[I / BITWORD_SIZE] = 0UL;
@@ -281,7 +281,7 @@ public:
  /// be able to store all arguments and such that the alignment requirement
  /// of each of the arguments is satisfied.
  unsigned getAlignedCallFrameSize() const {
-   return RoundUpToAlignment(StackOffset, MaxStackArgAlign);
+   return alignTo(StackOffset, MaxStackArgAlign);
  }

  /// isAllocated - Return true if the specified register (or an alias) is
@@ -412,7 +412,7 @@ public:
  /// and alignment.
  unsigned AllocateStack(unsigned Size, unsigned Align) {
    assert(Align && ((Align - 1) & Align) == 0); // Align is power of 2.
-   StackOffset = RoundUpToAlignment(StackOffset, Align);
+   StackOffset = alignTo(StackOffset, Align);
    unsigned Result = StackOffset;
    StackOffset += Size;
    MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
@@ -156,7 +156,7 @@ public:
    {
      TargetAddress NextCodeAddr = ObjAllocs.RemoteCodeAddr;
      for (auto &Alloc : ObjAllocs.CodeAllocs) {
-       NextCodeAddr = RoundUpToAlignment(NextCodeAddr, Alloc.getAlign());
+       NextCodeAddr = alignTo(NextCodeAddr, Alloc.getAlign());
        Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextCodeAddr);
        DEBUG(dbgs() << "  code: "
                     << static_cast<void *>(Alloc.getLocalAddress())
@@ -168,8 +168,7 @@ public:
    {
      TargetAddress NextRODataAddr = ObjAllocs.RemoteRODataAddr;
      for (auto &Alloc : ObjAllocs.RODataAllocs) {
-       NextRODataAddr =
-           RoundUpToAlignment(NextRODataAddr, Alloc.getAlign());
+       NextRODataAddr = alignTo(NextRODataAddr, Alloc.getAlign());
        Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextRODataAddr);
        DEBUG(dbgs() << "  ro-data: "
                     << static_cast<void *>(Alloc.getLocalAddress())
@@ -182,8 +181,7 @@ public:
    {
      TargetAddress NextRWDataAddr = ObjAllocs.RemoteRWDataAddr;
      for (auto &Alloc : ObjAllocs.RWDataAllocs) {
-       NextRWDataAddr =
-           RoundUpToAlignment(NextRWDataAddr, Alloc.getAlign());
+       NextRWDataAddr = alignTo(NextRWDataAddr, Alloc.getAlign());
        Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextRWDataAddr);
        DEBUG(dbgs() << "  rw-data: "
                     << static_cast<void *>(Alloc.getLocalAddress())
@@ -284,7 +282,7 @@ public:

    char *getLocalAddress() const {
      uintptr_t LocalAddr = reinterpret_cast<uintptr_t>(Contents.get());
-     LocalAddr = RoundUpToAlignment(LocalAddr, Align);
+     LocalAddr = alignTo(LocalAddr, Align);
      return reinterpret_cast<char *>(LocalAddr);
    }

@@ -387,7 +387,7 @@ public:
  /// returns 12 or 16 for x86_fp80, depending on alignment.
  uint64_t getTypeAllocSize(Type *Ty) const {
    // Round up to the next alignment boundary.
-   return RoundUpToAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
+   return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
  }

  /// \brief Returns the offset in bits between successive objects of the
@@ -630,7 +630,7 @@ inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align,
 /// or equal to \p Value and is a multiple of \p Align. \p Align must be
 /// non-zero.
 inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
-  return RoundUpToAlignment(Value, Align) - Value;
+  return alignTo(Value, Align) - Value;
 }

 /// SignExtend32 - Sign extend B-bit number x to 32-bit int.
@@ -197,7 +197,7 @@ struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
      typename ExtractSecondType<MoreTys, size_t>::type... MoreCounts) {
    return additionalSizeToAllocImpl(
        (requiresRealignment()
-            ? llvm::RoundUpToAlignment(SizeSoFar, llvm::alignOf<NextTy>())
+            ? llvm::alignTo(SizeSoFar, llvm::alignOf<NextTy>())
             : SizeSoFar) +
            sizeof(NextTy) * Count1,
        MoreCounts...);
@@ -75,9 +75,9 @@ public:
  ///
  int alignSPAdjust(int SPAdj) const {
    if (SPAdj < 0) {
-     SPAdj = -RoundUpToAlignment(-SPAdj, StackAlignment);
+     SPAdj = -alignTo(-SPAdj, StackAlignment);
    } else {
-     SPAdj = RoundUpToAlignment(SPAdj, StackAlignment);
+     SPAdj = alignTo(SPAdj, StackAlignment);
    }
    return SPAdj;
  }
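A worked example for alignSPAdjust() above, assuming StackAlignment = 8: a negative adjustment SPAdj = -12 becomes -alignTo(12, 8) = -16, i.e. it is rounded away from zero, while SPAdj = 12 becomes alignTo(12, 8) = 16.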
@@ -376,7 +376,7 @@ STATISTIC(ObjectVisitorLoad,

 APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
   if (RoundToAlign && Align)
-    return APInt(IntTyBits, RoundUpToAlignment(Size.getZExtValue(), Align));
+    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align));
   return Size;
 }

@@ -53,7 +53,7 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
    Align = MinAlign;
  MF.getFrameInfo()->ensureMaxAlignment(Align);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
- Size = unsigned(RoundUpToAlignment(Size, MinAlign));
+ Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Align);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
@@ -512,7 +512,7 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
  MaxAlign = std::max(MaxAlign, Align);

  // Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);

  if (StackGrowsDown) {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
@@ -596,7 +596,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {

      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
-     Offset = RoundUpToAlignment(Offset, Align, Skew);
+     Offset = alignTo(Offset, Align, Skew);

      MFI->setObjectOffset(i, -Offset); // Set the computed offset
    }
@@ -605,7 +605,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
    for (int i = MaxCSFI; i >= MinCSFI ; --i) {
      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
-     Offset = RoundUpToAlignment(Offset, Align, Skew);
+     Offset = alignTo(Offset, Align, Skew);

      MFI->setObjectOffset(i, Offset);
      Offset += MFI->getObjectSize(i);
@@ -638,7 +638,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
    unsigned Align = MFI->getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
-   Offset = RoundUpToAlignment(Offset, Align, Skew);
+   Offset = alignTo(Offset, Align, Skew);

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

@@ -757,7 +757,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
-   Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
+   Offset = alignTo(Offset, StackAlign, Skew);
  }

  // Update frame info to pretend that this is part of the stack...
@@ -106,9 +106,7 @@ public:
    Type *ElTy = GV->getType()->getElementType();
    size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
    void *RawMemory = ::operator new(
-       RoundUpToAlignment(sizeof(GVMemoryBlock),
-                          TD.getPreferredAlignment(GV))
-       + GVSize);
+       alignTo(sizeof(GVMemoryBlock), TD.getPreferredAlignment(GV)) + GVSize);
    new(RawMemory) GVMemoryBlock(GV);
    return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
  }
@@ -52,7 +52,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
    // Add padding if necessary to align the data element properly.
    if ((StructSize & (TyAlign-1)) != 0) {
      IsPadded = true;
-     StructSize = RoundUpToAlignment(StructSize, TyAlign);
+     StructSize = alignTo(StructSize, TyAlign);
    }

    // Keep track of maximum alignment constraint.
@@ -69,7 +69,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
  // and all array elements would be aligned correctly.
  if ((StructSize & (StructAlignment-1)) != 0) {
    IsPadded = true;
-   StructSize = RoundUpToAlignment(StructSize, StructAlignment);
+   StructSize = alignTo(StructSize, StructAlignment);
  }
}

@@ -99,7 +99,7 @@ static void addByteCountSuffix(raw_ostream &OS, const Function *F,
    Ty = cast<PointerType>(Ty)->getElementType();
    // Size should be aligned to pointer size.
    unsigned PtrSize = DL.getPointerSize();
-   ArgWords += RoundUpToAlignment(DL.getTypeAllocSize(Ty), PtrSize);
+   ArgWords += alignTo(DL.getTypeAllocSize(Ty), PtrSize);
  }

  OS << '@' << ArgWords;
@@ -433,7 +433,7 @@ void *MDNode::operator new(size_t Size, unsigned NumOps) {
  size_t OpSize = NumOps * sizeof(MDOperand);
  // uint64_t is the most aligned type we need support (ensured by static_assert
  // above)
- OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+ OpSize = alignTo(OpSize, llvm::alignOf<uint64_t>());
  void *Ptr = reinterpret_cast<char *>(::operator new(OpSize + Size)) + OpSize;
  MDOperand *O = static_cast<MDOperand *>(Ptr);
  for (MDOperand *E = O - NumOps; O != E; --O)
@@ -444,7 +444,7 @@ void *MDNode::operator new(size_t Size, unsigned NumOps) {
void MDNode::operator delete(void *Mem) {
  MDNode *N = static_cast<MDNode *>(Mem);
  size_t OpSize = N->NumOperands * sizeof(MDOperand);
- OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+ OpSize = alignTo(OpSize, llvm::alignOf<uint64_t>());

  MDOperand *O = static_cast<MDOperand *>(Mem);
  for (MDOperand *E = O - N->NumOperands; O != E; --O)
@@ -404,7 +404,7 @@ static unsigned ComputeLinkerOptionsLoadCommandSize(
  unsigned Size = sizeof(MachO::linker_option_command);
  for (const std::string &Option : Options)
    Size += Option.size() + 1;
- return RoundUpToAlignment(Size, is64Bit ? 8 : 4);
+ return alignTo(Size, is64Bit ? 8 : 4);
}

void MachObjectWriter::writeLinkerOptionsLoadCommand(
@@ -606,7 +606,7 @@ void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm,
                                               const MCAsmLayout &Layout) {
  uint64_t StartAddress = 0;
  for (const MCSection *Sec : Layout.getSectionOrder()) {
-   StartAddress = RoundUpToAlignment(StartAddress, Sec->getAlignment());
+   StartAddress = alignTo(StartAddress, Sec->getAlignment());
    SectionAddress[Sec] = StartAddress;
    StartAddress += Layout.getSectionAddressSize(Sec);

@@ -736,7 +736,7 @@ void MachObjectWriter::writeObject(MCAssembler &Asm,

  // Add the loh load command size, if used.
  uint64_t LOHRawSize = Asm.getLOHContainer().getEmitSize(*this, Layout);
- uint64_t LOHSize = RoundUpToAlignment(LOHRawSize, is64Bit() ? 8 : 4);
+ uint64_t LOHSize = alignTo(LOHRawSize, is64Bit() ? 8 : 4);
  if (LOHSize) {
    ++NumLoadCommands;
    LoadCommandsSize += sizeof(MachO::linkedit_data_command);
@@ -924,7 +924,7 @@ void WinCOFFObjectWriter::writeObject(MCAssembler &Asm,

    if (IsPhysicalSection(Sec)) {
      // Align the section data to a four byte boundary.
-     offset = RoundUpToAlignment(offset, 4);
+     offset = alignTo(offset, 4);
      Sec->Header.PointerToRawData = offset;

      offset += Sec->Header.SizeOfRawData;
@@ -135,7 +135,7 @@ MemoryBuffer::getNewUninitMemBuffer(size_t Size, const Twine &BufferName) {
  SmallString<256> NameBuf;
  StringRef NameRef = BufferName.toStringRef(NameBuf);
  size_t AlignedStringLen =
-     RoundUpToAlignment(sizeof(MemoryBufferMem) + NameRef.size() + 1, 16);
+     alignTo(sizeof(MemoryBufferMem) + NameRef.size() + 1, 16);
  size_t RealLen = AlignedStringLen + Size + 1;
  char *Mem = static_cast<char*>(operator new(RealLen, std::nothrow));
  if (!Mem)
@@ -170,7 +170,7 @@ void AArch64FrameLowering::eliminateCallFramePseudoInstr(
    unsigned Align = getStackAlignment();

    int64_t Amount = I->getOperand(0).getImm();
-   Amount = RoundUpToAlignment(Amount, Align);
+   Amount = alignTo(Amount, Align);
    if (!IsDestroy)
      Amount = -Amount;

@@ -2545,7 +2545,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
-   StackArgSize = RoundUpToAlignment(StackArgSize, 16);
+   StackArgSize = alignTo(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
    // a multiple of 16.
@@ -2959,7 +2959,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,

    // Since callee will pop argument stack as a tail call, we must keep the
    // popped size 16-byte aligned.
-   NumBytes = RoundUpToAlignment(NumBytes, 16);
+   NumBytes = alignTo(NumBytes, 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
|
|||||||
Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
|
Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
|
||||||
InFlag = Chain.getValue(1);
|
InFlag = Chain.getValue(1);
|
||||||
|
|
||||||
uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt)
|
uint64_t CalleePopBytes =
|
||||||
? RoundUpToAlignment(NumBytes, 16)
|
DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
|
||||||
: 0;
|
|
||||||
|
|
||||||
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
|
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
|
||||||
DAG.getIntPtrConstant(CalleePopBytes, DL, true),
|
DAG.getIntPtrConstant(CalleePopBytes, DL, true),
|
||||||
|
@@ -976,7 +976,7 @@ static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint_64t to int when
-// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
+// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
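The static alignTo() above is the power-of-two special case of the same rounding, done with a mask so the computation stays in signed int: for PowOf2 = 8, (Num + 7) & ~7 rounds up, e.g. (13 + 7) & ~7 = 16, matching alignTo(13, 8) from MathExtras.h.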
@@ -327,7 +327,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {

  if (MFI->getShaderType() == ShaderType::COMPUTE) {
    OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
-   OutStreamer->EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
+   OutStreamer->EmitIntValue(alignTo(MFI->LDSSize, 4) >> 2, 4);
  }
}

@@ -503,7 +503,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,

  ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
  ProgInfo.LDSBlocks =
-     RoundUpToAlignment(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+     alignTo(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;

  // Scratch is allocated in 256 dword blocks.
  unsigned ScratchAlignShift = 10;
@@ -511,8 +511,9 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
  // is used by the entire wave. ProgInfo.ScratchSize is the amount of
  // scratch memory used per thread.
  ProgInfo.ScratchBlocks =
-     RoundUpToAlignment(ProgInfo.ScratchSize * STM.getWavefrontSize(),
-                        1 << ScratchAlignShift) >> ScratchAlignShift;
+     alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
+             1 << ScratchAlignShift) >>
+     ScratchAlignShift;

  ProgInfo.ComputePGMRSrc1 =
      S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
@@ -87,15 +87,15 @@ int AMDGPUFrameLowering::getFrameIndexReference(const MachineFunction &MF,
  int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;

  for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
-   OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(i));
+   OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(i));
    OffsetBytes += MFI->getObjectSize(i);
    // Each register holds 4 bytes, so we must always align the offset to at
    // least 4 bytes, so that 2 frame objects won't share the same register.
-   OffsetBytes = RoundUpToAlignment(OffsetBytes, 4);
+   OffsetBytes = alignTo(OffsetBytes, 4);
  }

  if (FI != -1)
-   OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(FI));
+   OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(FI));

  return OffsetBytes / (getStackWidth(MF) * 4);
}
@@ -142,8 +142,8 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
}

void CFStack::updateMaxStackSize() {
- unsigned CurrentStackSize = CurrentEntries +
-                             (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
+ unsigned CurrentStackSize =
+     CurrentEntries + (alignTo(CurrentSubEntries, 4) / 4);
  MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}

@@ -211,7 +211,7 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,

  // First consume all registers that would give an unaligned object. Whether
  // we go on stack or in regs, no-one will be using them in future.
- unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4;
+ unsigned RegAlign = alignTo(Align, 4) / 4;
  while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
    State.AllocateReg(RegList[RegIdx++]);

@@ -281,7 +281,7 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,

  unsigned RequiredExtraInstrs;
  if (ExtraRange)
-   RequiredExtraInstrs = RoundUpToAlignment(RangeAfterCopy, ExtraRange) / ExtraRange;
+   RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
@@ -436,10 +436,10 @@ void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB) const {
  // Get the number of bytes to allocate from the FrameInfo.
  unsigned FrameSize = MFI->getStackSize();
  // Round up the max call frame size to the max alignment on the stack.
- unsigned MaxCFA = RoundUpToAlignment(MFI->getMaxCallFrameSize(), MaxAlign);
+ unsigned MaxCFA = alignTo(MFI->getMaxCallFrameSize(), MaxAlign);
  MFI->setMaxCallFrameSize(MaxCFA);

- FrameSize = MaxCFA + RoundUpToAlignment(FrameSize, MaxAlign);
+ FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
  MFI->setStackSize(FrameSize);

  bool AlignStack = (MaxAlign > getStackAlignment());
@@ -1180,7 +1180,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      //
-     unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);
+     unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
@@ -122,7 +122,7 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
  // Conservatively assume all callee-saved registers will be saved.
  for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
    unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
-   Offset = RoundUpToAlignment(Offset + Size, Size);
+   Offset = alignTo(Offset + Size, Size);
  }

  unsigned MaxAlign = MFI->getMaxAlignment();
@@ -133,14 +133,14 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {

  // Iterate over other objects.
  for (unsigned I = 0, E = MFI->getObjectIndexEnd(); I != E; ++I)
-   Offset = RoundUpToAlignment(Offset + MFI->getObjectSize(I), MaxAlign);
+   Offset = alignTo(Offset + MFI->getObjectSize(I), MaxAlign);

  // Call frame.
  if (MFI->adjustsStack() && hasReservedCallFrame(MF))
-   Offset = RoundUpToAlignment(Offset + MFI->getMaxCallFrameSize(),
-                               std::max(MaxAlign, getStackAlignment()));
+   Offset = alignTo(Offset + MFI->getMaxCallFrameSize(),
+                    std::max(MaxAlign, getStackAlignment()));

- return RoundUpToAlignment(Offset, getStackAlignment());
+ return alignTo(Offset, getStackAlignment());
}

// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions
@@ -1873,10 +1873,10 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  auto &TD = DAG.getDataLayout();
  unsigned ArgSizeInBytes =
      TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
- SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
-                            DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
-                                                               ArgSlotSizeInBytes),
-                                            DL, VAList.getValueType()));
+ SDValue Tmp3 =
+     DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
+                 DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
+                                 DL, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
                       MachinePointerInfo(SV), false, false, 0);
@@ -2604,7 +2604,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
  // ByValChain is the output chain of the last Memcpy node created for copying
  // byval arguments to the stack.
  unsigned StackAlignment = TFL->getStackAlignment();
- NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
+ NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);

  if (!IsTailCall)
@@ -3787,8 +3787,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
  int VaArgOffset;

  if (ArgRegs.size() == Idx)
-   VaArgOffset =
-       RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
+   VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
  else {
    VaArgOffset =
        (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
@@ -3854,7 +3853,7 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
  }

  // Mark the registers allocated.
- Size = RoundUpToAlignment(Size, RegSizeInBytes);
+ Size = alignTo(Size, RegSizeInBytes);
  for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
       Size -= RegSizeInBytes, ++I, ++NumRegs)
    State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
@@ -146,7 +146,7 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF,
  // Finally, ensure that the size is sufficiently aligned for the
  // data on the stack.
  if (MFI->getMaxAlignment() > 0) {
-   NumBytes = RoundUpToAlignment(NumBytes, MFI->getMaxAlignment());
+   NumBytes = alignTo(NumBytes, MFI->getMaxAlignment());
  }

  // Update stack size with corrected value.
@@ -1131,7 +1131,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
- ArgsSize = RoundUpToAlignment(ArgsSize, 16);
+ ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
@@ -64,7 +64,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
    frameSize += 128;
    // Frames with calls must also reserve space for 6 outgoing arguments
    // whether they are used or not. LowerCall_64 takes care of that.
-   frameSize = RoundUpToAlignment(frameSize, 16);
+   frameSize = alignTo(frameSize, 16);
  } else {
    // Emit the correct save instruction based on the number of bytes in
    // the frame. Minimum stack frame size according to V8 ABI is:
@@ -77,7 +77,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {

    // Round up to next doubleword boundary -- a double-word boundary
    // is required by the ABI.
-   frameSize = RoundUpToAlignment(frameSize, 8);
+   frameSize = alignTo(frameSize, 8);
  }
  return frameSize;
}
@@ -528,7 +528,7 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
  MaxAlign = std::max(MaxAlign, Align);

  // Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
+ Offset = alignTo(Offset, Align, Skew);

  if (StackGrowsDown) {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
@@ -612,7 +612,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {

      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
-     Offset = RoundUpToAlignment(Offset, Align, Skew);
+     Offset = alignTo(Offset, Align, Skew);

      MFI->setObjectOffset(i, -Offset); // Set the computed offset
    }
@@ -621,7 +621,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
    for (int i = MaxCSFI; i >= MinCSFI ; --i) {
      unsigned Align = MFI->getObjectAlignment(i);
      // Adjust to alignment boundary
-     Offset = RoundUpToAlignment(Offset, Align, Skew);
+     Offset = alignTo(Offset, Align, Skew);

      MFI->setObjectOffset(i, Offset);
      Offset += MFI->getObjectSize(i);
@@ -654,7 +654,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
    unsigned Align = MFI->getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
-   Offset = RoundUpToAlignment(Offset, Align, Skew);
+   Offset = alignTo(Offset, Align, Skew);

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

@@ -773,7 +773,7 @@ void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
-   Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
+   Offset = alignTo(Offset, StackAlign, Skew);
  }

  // Update frame info to pretend that this is part of the stack...
@@ -1010,7 +1010,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,

  // Callee-saved registers are pushed on stack before the stack is realigned.
  if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
-   NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
+   NumBytes = alignTo(NumBytes, MaxAlign);

  // Get the offset of the stack slot for the EBP register, which is
  // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
@@ -1131,7 +1131,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
  // virtual memory manager are allocated in correct sequence.
  uint64_t AlignedNumBytes = NumBytes;
  if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
-   AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
+   AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
  if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);
@@ -1430,8 +1430,7 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
  // RBP is not included in the callee saved register block. After pushing RBP,
  // everything is 16 byte aligned. Everything we allocate before an outgoing
  // call must also be 16 byte aligned.
- unsigned FrameSizeMinusRBP =
-     RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
+ unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
  // Subtract out the size of the callee saved registers. This is how much stack
  // each funclet will allocate.
  return FrameSizeMinusRBP - CSSize;
@@ -1491,7 +1490,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
    // Callee-saved registers were pushed on stack before the stack was
    // realigned.
    if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
-     NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
+     NumBytes = alignTo(FrameSize, MaxAlign);

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
@@ -2480,7 +2479,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = getStackAlignment();
-   Amount = RoundUpToAlignment(Amount, StackAlign);
+   Amount = alignTo(Amount, StackAlign);

    MachineModuleInfo &MMI = MF.getMMI();
    const Function *Fn = MF.getFunction();
@@ -381,7 +381,7 @@ lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
                        false, false, 0);
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
- int32_t HighOffset = RoundUpToAlignment(Offset, 4);
+ int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
@@ -544,7 +544,7 @@ void LowerBitSets::buildBitSetsFromGlobalVariables(
    // Cap at 128 was found experimentally to have a good data/instruction
    // overhead tradeoff.
    if (Padding > 128)
-     Padding = RoundUpToAlignment(InitSize, 128) - InitSize;
+     Padding = alignTo(InitSize, 128) - InitSize;

    GlobalInits.push_back(
        ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
@@ -1142,7 +1142,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
        setOrigin(A, getCleanOrigin());
      }
    }
-   ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
+   ArgOffset += alignTo(Size, kShadowTLSAlignment);
  }
  assert(*ShadowPtr && "Could not find shadow for an argument");
  return *ShadowPtr;
@@ -2498,7 +2498,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
      (void)Store;
      assert(Size != 0 && Store != nullptr);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
-     ArgOffset += RoundUpToAlignment(Size, 8);
+     ArgOffset += alignTo(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");

@@ -2818,7 +2818,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
        Type *RealTy = A->getType()->getPointerElementType();
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
-       OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+       OverflowOffset += alignTo(ArgSize, 8);
        IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
                         ArgSize, kShadowTLSAlignment);
      } else {
@@ -2840,7 +2840,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
      case AK_Memory:
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
-       OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+       OverflowOffset += alignTo(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
@@ -2965,7 +2965,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
#endif
      Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
      VAArgOffset += ArgSize;
-     VAArgOffset = RoundUpToAlignment(VAArgOffset, 8);
+     VAArgOffset = alignTo(VAArgOffset, 8);
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }

@@ -3110,7 +3110,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
      case AK_Memory:
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
-       OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+       OverflowOffset += alignTo(ArgSize, 8);
        break;
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
@@ -534,7 +534,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
    // Add alignment.
    // NOTE: we ensure that BasePointer itself is aligned to >= Align.
    StaticOffset += Size;
-   StaticOffset = RoundUpToAlignment(StaticOffset, Align);
+   StaticOffset = alignTo(StaticOffset, Align);

    Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
                               ConstantInt::get(Int32Ty, -StaticOffset));
@@ -565,7 +565,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
    // Add alignment.
    // NOTE: we ensure that BasePointer itself is aligned to >= Align.
    StaticOffset += Size;
-   StaticOffset = RoundUpToAlignment(StaticOffset, Align);
+   StaticOffset = alignTo(StaticOffset, Align);

    Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
                               ConstantInt::get(Int32Ty, -StaticOffset));
@@ -582,7 +582,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
  // Re-align BasePointer so that our callees would see it aligned as
  // expected.
  // FIXME: no need to update BasePointer in leaf functions.
- StaticOffset = RoundUpToAlignment(StaticOffset, StackAlignment);
+ StaticOffset = alignTo(StaticOffset, StackAlignment);

  // Update shadow stack pointer in the function epilogue.
  IRB.SetInsertPoint(BasePointer->getNextNode());
@@ -253,8 +253,7 @@ bool SanitizerCoverageModule::runOnModule(Module &M) {
  if (Options.Use8bitCounters) {
    // Make sure the array is 16-aligned.
    static const int kCounterAlignment = 16;
-   Type *Int8ArrayNTy =
-       ArrayType::get(Int8Ty, RoundUpToAlignment(N, kCounterAlignment));
+   Type *Int8ArrayNTy = ArrayType::get(Int8Ty, alignTo(N, kCounterAlignment));
    RealEightBitCounterArray = new GlobalVariable(
        M, Int8ArrayNTy, false, GlobalValue::PrivateLinkage,
        Constant::getNullValue(Int8ArrayNTy), "__sancov_gen_cov_counter");
@@ -44,7 +44,7 @@ static size_t VarAndRedzoneSize(size_t Size, size_t Alignment) {
  else if (Size <= 512) Res = Size + 64;
  else if (Size <= 4096) Res = Size + 128;
  else Res = Size + 256;
- return RoundUpToAlignment(Res, Alignment);
+ return alignTo(Res, Alignment);
}

void
@@ -241,7 +241,7 @@ static void transferSegmentAndSections(
    // start address leave a sufficient gap to store the __DWARF
    // segment.
    uint64_t PrevEndAddress = EndAddress;
-   EndAddress = RoundUpToAlignment(EndAddress, 0x1000);
+   EndAddress = alignTo(EndAddress, 0x1000);
    if (GapForDwarf == UINT64_MAX && Segment.vmaddr > EndAddress &&
        Segment.vmaddr - EndAddress >= DwarfSegmentSize)
      GapForDwarf = EndAddress;
@@ -268,8 +268,8 @@ static void createDwarfSegment(uint64_t VMAddr, uint64_t FileOffset,
                               uint64_t FileSize, unsigned NumSections,
                               MCAsmLayout &Layout, MachObjectWriter &Writer) {
  Writer.writeSegmentLoadCommand("__DWARF", NumSections, VMAddr,
-                                RoundUpToAlignment(FileSize, 0x1000),
-                                FileOffset, FileSize, /* MaxProt */ 7,
+                                alignTo(FileSize, 0x1000), FileOffset,
+                                FileSize, /* MaxProt */ 7,
                                 /* InitProt =*/3);

  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
@@ -279,8 +279,8 @@ static void createDwarfSegment(uint64_t VMAddr, uint64_t FileOffset,

    unsigned Align = Sec->getAlignment();
    if (Align > 1) {
-     VMAddr = RoundUpToAlignment(VMAddr, Align);
-     FileOffset = RoundUpToAlignment(FileOffset, Align);
+     VMAddr = alignTo(VMAddr, Align);
+     FileOffset = alignTo(FileOffset, Align);
    }
    Writer.writeSection(Layout, *Sec, VMAddr, FileOffset, 0, 0, 0);

@@ -394,8 +394,7 @@ bool generateDsymCompanion(const DebugMap &DM, MCStreamer &MS,
      continue;

    if (uint64_t Size = Layout.getSectionFileSize(Sec)) {
-     DwarfSegmentSize =
-         RoundUpToAlignment(DwarfSegmentSize, Sec->getAlignment());
+     DwarfSegmentSize = alignTo(DwarfSegmentSize, Sec->getAlignment());
      DwarfSegmentSize += Size;
      ++NumDwarfSections;
    }
@@ -419,7 +418,7 @@ bool generateDsymCompanion(const DebugMap &DM, MCStreamer &MS,

  uint64_t SymtabStart = LoadCommandSize;
  SymtabStart += HeaderSize;
- SymtabStart = RoundUpToAlignment(SymtabStart, 0x1000);
+ SymtabStart = alignTo(SymtabStart, 0x1000);

  // We gathered all the information we need, start emitting the output file.
  Writer.writeHeader(MachO::MH_DSYM, NumLoadCommands, LoadCommandSize, false);
@@ -441,7 +440,7 @@ bool generateDsymCompanion(const DebugMap &DM, MCStreamer &MS,
                               NewStringsSize);
 
   uint64_t DwarfSegmentStart = StringStart + NewStringsSize;
-  DwarfSegmentStart = RoundUpToAlignment(DwarfSegmentStart, 0x1000);
+  DwarfSegmentStart = alignTo(DwarfSegmentStart, 0x1000);
 
   // Write the load commands for the segments and sections we 'import' from
   // the original binary.
@@ -460,7 +459,7 @@ bool generateDsymCompanion(const DebugMap &DM, MCStreamer &MS,
                              DwarfSegmentSize, GapForDwarf, EndAddress);
   }
 
-  uint64_t DwarfVMAddr = RoundUpToAlignment(EndAddress, 0x1000);
+  uint64_t DwarfVMAddr = alignTo(EndAddress, 0x1000);
   uint64_t DwarfVMMax = Is64Bit ? UINT64_MAX : UINT32_MAX;
   if (DwarfVMAddr + DwarfSegmentSize > DwarfVMMax ||
       DwarfVMAddr + DwarfSegmentSize < DwarfVMAddr /* Overflow */) {
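The second clause of this condition matters: after rounding EndAddress up, DwarfVMAddr + DwarfSegmentSize can wrap in unsigned arithmetic, and the classic a + b < a test catches exactly that. Isolated as a sketch:

    // Sketch: does [Addr, Addr + Size) fit below Max without wrapping?
    static bool fits(uint64_t Addr, uint64_t Size, uint64_t Max) {
      return Addr + Size <= Max && Addr + Size >= Addr; // 2nd test catches wrap
    }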
@@ -510,7 +509,7 @@ bool generateDsymCompanion(const DebugMap &DM, MCStreamer &MS,
       continue;
 
     uint64_t Pos = OutFile.tell();
-    Writer.WriteZeros(RoundUpToAlignment(Pos, Sec.getAlignment()) - Pos);
+    Writer.WriteZeros(alignTo(Pos, Sec.getAlignment()) - Pos);
     MCAsm.writeSectionData(&Sec, Layout);
   }
 
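The pad count written here, alignTo(Pos, Sec.getAlignment()) - Pos, is always in [0, Alignment). Worked example: at Pos = 0x1003 with a 16-byte section alignment, alignTo(0x1003, 16) == 0x1010, so 0x1010 - 0x1003 = 13 zero bytes are emitted before the section data.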
@@ -184,7 +184,7 @@ static std::error_code getObject(const T *&Obj, MemoryBufferRef M,
 }
 
 static uint64_t bytesToBlocks(uint64_t NumBytes, uint64_t BlockSize) {
-  return RoundUpToAlignment(NumBytes, BlockSize) / BlockSize;
+  return alignTo(NumBytes, BlockSize) / BlockSize;
 }
 
 static uint64_t blockToOffset(uint64_t BlockNumber, uint64_t BlockSize) {
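bytesToBlocks is ceiling division in disguise: for BlockSize > 0, alignTo(NumBytes, BlockSize) / BlockSize equals (NumBytes + BlockSize - 1) / BlockSize. Sample values, assuming 4 KiB blocks:

    bytesToBlocks(0, 4096);    // == 0
    bytesToBlocks(1, 4096);    // == 1
    bytesToBlocks(4096, 4096); // == 1
    bytesToBlocks(4097, 4096); // == 2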
@@ -982,7 +982,7 @@ void COFFDumper::printCodeViewSymbolSection(StringRef SectionName,
     // the next subsection.
     size_t SectionOffset = Data.data() - SectionContents.data();
     size_t NextOffset = SectionOffset + SubSectionSize;
-    NextOffset = RoundUpToAlignment(NextOffset, 4);
+    NextOffset = alignTo(NextOffset, 4);
     Data = SectionContents.drop_front(NextOffset);
 
     // Optionally print the subsection bytes in case our parsing gets confused
@@ -191,7 +191,7 @@ public:
   }
 
   uint8_t *allocateFromSlab(uintptr_t Size, unsigned Alignment, bool isCode) {
-    Size = RoundUpToAlignment(Size, Alignment);
+    Size = alignTo(Size, Alignment);
     if (CurrentSlabOffset + Size > SlabSize)
       report_fatal_error("Can't allocate enough memory. Tune --preallocate");
 
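allocateFromSlab is a bump allocator over one preallocated slab: the request is rounded up so later offsets stay aligned, the remaining space is checked, and the cursor advances. A condensed, self-contained sketch under those assumptions (field names hypothetical; the real manager also tracks code vs. data, per the isCode parameter):

    #include <cstdint>
    #include "llvm/Support/ErrorHandling.h"
    #include "llvm/Support/MathExtras.h"

    struct SlabAllocator {             // hypothetical stand-in for the manager
      uint8_t *SlabBase;               // assumed suitably aligned slab start
      uintptr_t SlabSize;              // total slab size
      uintptr_t CurrentSlabOffset = 0; // bump cursor

      uint8_t *allocate(uintptr_t Size, unsigned Alignment) {
        Size = llvm::alignTo(Size, Alignment); // keep the next offset aligned
        if (CurrentSlabOffset + Size > SlabSize)
          llvm::report_fatal_error("Can't allocate enough memory.");
        uint8_t *Ptr = SlabBase + CurrentSlabOffset;
        CurrentSlabOffset += Size;
        return Ptr;
      }
    };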
@@ -173,12 +173,12 @@ static bool layoutCOFF(COFFParser &CP) {
   // Assign each section data address consecutively.
   for (COFFYAML::Section &S : CP.Obj.Sections) {
     if (S.SectionData.binary_size() > 0) {
-      CurrentSectionDataOffset = RoundUpToAlignment(
-          CurrentSectionDataOffset, CP.isPE() ? CP.getFileAlignment() : 4);
+      CurrentSectionDataOffset = alignTo(CurrentSectionDataOffset,
+                                         CP.isPE() ? CP.getFileAlignment() : 4);
       S.Header.SizeOfRawData = S.SectionData.binary_size();
       if (CP.isPE())
         S.Header.SizeOfRawData =
-            RoundUpToAlignment(S.Header.SizeOfRawData, CP.getFileAlignment());
+            alignTo(S.Header.SizeOfRawData, CP.getFileAlignment());
       S.Header.PointerToRawData = CurrentSectionDataOffset;
       CurrentSectionDataOffset += S.Header.SizeOfRawData;
       if (!S.Relocations.empty()) {
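Note the PE/object split in this hunk: for a PE image both the raw-data offset and SizeOfRawData are rounded to the image's FileAlignment, while a plain COFF object only pads section data to 4 bytes. Illustrative arithmetic (values assumed, not from the commit): with FileAlignment == 0x200, a 10-byte section occupies alignTo(10, 0x200) == 0x200 bytes of file.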
@@ -292,10 +292,9 @@ static uint32_t initializeOptionalHeader(COFFParser &CP, uint16_t Magic, T Header)
   Header->FileAlignment = CP.Obj.OptionalHeader->Header.FileAlignment;
   uint32_t SizeOfCode = 0, SizeOfInitializedData = 0,
            SizeOfUninitializedData = 0;
-  uint32_t SizeOfHeaders = RoundUpToAlignment(
-      CP.SectionTableStart + CP.SectionTableSize, Header->FileAlignment);
-  uint32_t SizeOfImage =
-      RoundUpToAlignment(SizeOfHeaders, Header->SectionAlignment);
+  uint32_t SizeOfHeaders = alignTo(CP.SectionTableStart + CP.SectionTableSize,
+                                   Header->FileAlignment);
+  uint32_t SizeOfImage = alignTo(SizeOfHeaders, Header->SectionAlignment);
   uint32_t BaseOfData = 0;
   for (const COFFYAML::Section &S : CP.Obj.Sections) {
     if (S.Header.Characteristics & COFF::IMAGE_SCN_CNT_CODE)
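Illustrative arithmetic for the two computations above (offsets assumed, not from the commit): with the common PE defaults FileAlignment == 0x200 and SectionAlignment == 0x1000, a section table ending at file offset 0x1b8 gives SizeOfHeaders == alignTo(0x1b8, 0x200) == 0x200 and an initial SizeOfImage == alignTo(0x200, 0x1000) == 0x1000; the next hunk then grows SizeOfImage by alignTo(VirtualSize, SectionAlignment) for each mapped section.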
@@ -309,8 +308,7 @@ static uint32_t initializeOptionalHeader(COFFParser &CP, uint16_t Magic, T Header)
     else if (S.Name.equals(".data"))
       BaseOfData = S.Header.VirtualAddress; // RVA
     if (S.Header.VirtualAddress)
-      SizeOfImage +=
-          RoundUpToAlignment(S.Header.VirtualSize, Header->SectionAlignment);
+      SizeOfImage += alignTo(S.Header.VirtualSize, Header->SectionAlignment);
   }
   Header->SizeOfCode = SizeOfCode;
   Header->SizeOfInitializedData = SizeOfInitializedData;
@@ -38,7 +38,7 @@ class ContiguousBlobAccumulator {
     if (Align == 0)
       Align = 1;
     uint64_t CurrentOffset = InitialOffset + OS.tell();
-    uint64_t AlignedOffset = RoundUpToAlignment(CurrentOffset, Align);
+    uint64_t AlignedOffset = alignTo(CurrentOffset, Align);
     for (; CurrentOffset != AlignedOffset; ++CurrentOffset)
       OS.write('\0');
     return AlignedOffset; // == CurrentOffset;
@@ -179,15 +179,15 @@ TEST(MathExtras, NextPowerOf2) {
   EXPECT_EQ(256u, NextPowerOf2(128));
 }
 
-TEST(MathExtras, RoundUpToAlignment) {
-  EXPECT_EQ(8u, RoundUpToAlignment(5, 8));
-  EXPECT_EQ(24u, RoundUpToAlignment(17, 8));
-  EXPECT_EQ(0u, RoundUpToAlignment(~0LL, 8));
+TEST(MathExtras, alignTo) {
+  EXPECT_EQ(8u, alignTo(5, 8));
+  EXPECT_EQ(24u, alignTo(17, 8));
+  EXPECT_EQ(0u, alignTo(~0LL, 8));
 
-  EXPECT_EQ(7u, RoundUpToAlignment(5, 8, 7));
-  EXPECT_EQ(17u, RoundUpToAlignment(17, 8, 1));
-  EXPECT_EQ(3u, RoundUpToAlignment(~0LL, 8, 3));
-  EXPECT_EQ(552u, RoundUpToAlignment(321, 255, 42));
+  EXPECT_EQ(7u, alignTo(5, 8, 7));
+  EXPECT_EQ(17u, alignTo(17, 8, 1));
+  EXPECT_EQ(3u, alignTo(~0LL, 8, 3));
+  EXPECT_EQ(552u, alignTo(321, 255, 42));
 }
 
 template<typename T>
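Taken together, these expectations pin down the contract: alignTo(Value, Align) returns the smallest multiple of Align that is >= Value, computed in unsigned arithmetic (hence alignTo(~0LL, 8) wrapping to 0), and the three-argument form returns the smallest value >= Value that is congruent to Skew modulo Align. A reference sketch of just the arithmetic, consistent with the values above (the in-tree version lives in llvm/Support/MathExtras.h):

    // Reference arithmetic only; matches the test expectations above.
    uint64_t alignToSketch(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
      Skew %= Align;
      return (Value + Align - 1 - Skew) / Align * Align + Skew;
    }
    // alignToSketch(5, 8)     == 8     alignToSketch(5, 8, 7)      == 7
    // alignToSketch(17, 8)    == 24    alignToSketch(17, 8, 1)     == 17
    // alignToSketch(~0ULL, 8) == 0     alignToSketch(321, 255, 42) == 552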
@@ -118,8 +118,8 @@ TEST(TrailingObjects, TwoArg) {
   Class2 *C1 = Class2::create(4);
   Class2 *C2 = Class2::create(0, 4.2);
 
-  EXPECT_EQ(sizeof(Class2), llvm::RoundUpToAlignment(sizeof(bool) * 2,
-                                                     llvm::alignOf<double>()));
+  EXPECT_EQ(sizeof(Class2),
+            llvm::alignTo(sizeof(bool) * 2, llvm::alignOf<double>()));
   EXPECT_EQ(llvm::alignOf<Class2>(), llvm::alignOf<double>());
 
   EXPECT_EQ((Class2::additionalSizeToAlloc<double, short>(1, 0)),
@@ -162,8 +162,7 @@ class Class3 final : public TrailingObjects<Class3, double, short, bool> {
 TEST(TrailingObjects, ThreeArg) {
   EXPECT_EQ((Class3::additionalSizeToAlloc<double, short, bool>(1, 1, 3)),
             sizeof(double) + sizeof(short) + 3 * sizeof(bool));
-  EXPECT_EQ(sizeof(Class3),
-            llvm::RoundUpToAlignment(1, llvm::alignOf<double>()));
+  EXPECT_EQ(sizeof(Class3), llvm::alignTo(1, llvm::alignOf<double>()));
   std::unique_ptr<char[]> P(new char[1000]);
   Class3 *C = reinterpret_cast<Class3 *>(P.get());
   EXPECT_EQ(C->getTrailingObjects<double>(), reinterpret_cast<double *>(C + 1));
@@ -183,8 +182,8 @@ class Class4 final : public TrailingObjects<Class4, char, long> {
 
 TEST(TrailingObjects, Realignment) {
   EXPECT_EQ((Class4::additionalSizeToAlloc<char, long>(1, 1)),
-            llvm::RoundUpToAlignment(sizeof(long) + 1, llvm::alignOf<long>()));
-  EXPECT_EQ(sizeof(Class4), llvm::RoundUpToAlignment(1, llvm::alignOf<long>()));
+            llvm::alignTo(sizeof(long) + 1, llvm::alignOf<long>()));
+  EXPECT_EQ(sizeof(Class4), llvm::alignTo(1, llvm::alignOf<long>()));
   std::unique_ptr<char[]> P(new char[1000]);
   Class4 *C = reinterpret_cast<Class4 *>(P.get());
   EXPECT_EQ(C->getTrailingObjects<char>(), reinterpret_cast<char *>(C + 1));
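The Realignment expectation is worth unpacking: Class4 stores one char and then one long after the object, so on a typical LP64 target (where sizeof(long) == alignOf(long) == 8, an assumption of this example, not of the test) the char sits at offset 0 of the trailing storage, padding runs to offset 8, and the long occupies offsets 8 through 15, 16 bytes in total; that is, llvm::alignTo(sizeof(long) + 1, llvm::alignOf<long>()) == alignTo(9, 8) == 16.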