
[Allocator] Lift the slab size and size threshold into template parameters rather than runtime parameters.

There is only one user of these parameters, and for that user they are
compile-time constants. Making them compile-time template parameters also
better reflects their intended usage.

llvm-svn: 205143
Chandler Carruth 2014-03-30 12:07:07 +00:00
parent 412ffb7e6f
commit a46b65cb1f
4 changed files with 169 additions and 183 deletions
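
A minimal sketch of what the change means for callers (illustrative: the BumpPtrAllocator and BumpPtrAllocatorImpl names come from the diff below; everything else is a hypothetical usage example):

#include "llvm/Support/Allocator.h"

// Before this commit, the slab size and threshold were runtime constructor
// arguments:
//   llvm::BumpPtrAllocator Old(/*size=*/4096, /*threshold=*/4096);

// After it, they are compile-time template parameters; the plain
// BumpPtrAllocator typedef simply uses the 4096/4096 defaults.
llvm::BumpPtrAllocator Alloc;

// A non-default configuration is now a distinct type. The JIT memory manager
// in this commit does this with its DefaultSlabSize and DefaultSizeThreshold
// constants.
llvm::BumpPtrAllocatorImpl<16384, 4096> BigSlabAlloc;

void example() {
  // Allocation calls are unchanged: size and alignment remain runtime
  // arguments.
  void *P = Alloc.Allocate(/*Size=*/64, /*Alignment=*/8);
  (void)P;
}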


@@ -17,6 +17,7 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Memory.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -87,6 +88,27 @@ public:
void Deallocate(MemSlab *Slab) override;
};
/// \brief Non-templated base class for the \c BumpPtrAllocatorImpl template.
class BumpPtrAllocatorBase {
public:
void Deallocate(const void * /*Ptr*/) {}
void PrintStats() const;
/// \brief Returns the total physical memory allocated by this allocator.
size_t getTotalMemory() const;
protected:
/// \brief The slab that we are currently allocating into.
MemSlab *CurSlab;
/// \brief How many bytes we've allocated.
///
/// Used so that we can compute how much space was wasted.
size_t BytesAllocated;
BumpPtrAllocatorBase() : CurSlab(0), BytesAllocated(0) {}
};
/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
@@ -97,21 +119,85 @@ public:
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
class BumpPtrAllocator {
BumpPtrAllocator(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
void operator=(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl : public BumpPtrAllocatorBase {
BumpPtrAllocatorImpl(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
void operator=(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
public:
BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096);
BumpPtrAllocator(size_t size, size_t threshold, SlabAllocator &allocator);
~BumpPtrAllocator();
static_assert(SizeThreshold <= SlabSize,
"The SizeThreshold must be at most the SlabSize to ensure "
"that objects larger than a slab go into their own memory "
"allocation.");
BumpPtrAllocatorImpl()
: Allocator(DefaultSlabAllocator), NumSlabs(0) {}
BumpPtrAllocatorImpl(SlabAllocator &Allocator)
: Allocator(Allocator), NumSlabs(0) {}
~BumpPtrAllocatorImpl() { DeallocateSlabs(CurSlab); }
/// \brief Deallocate all but the current slab and reset the current pointer
/// to the beginning of it, freeing all memory allocated so far.
void Reset();
void Reset() {
if (!CurSlab)
return;
DeallocateSlabs(CurSlab->NextPtr);
CurSlab->NextPtr = 0;
CurPtr = (char *)(CurSlab + 1);
End = ((char *)CurSlab) + CurSlab->Size;
BytesAllocated = 0;
}
/// \brief Allocate space at the specified alignment.
void *Allocate(size_t Size, size_t Alignment);
void *Allocate(size_t Size, size_t Alignment) {
if (!CurSlab) // Start a new slab if we haven't allocated one already.
StartNewSlab();
// Keep track of how many bytes we've allocated.
BytesAllocated += Size;
// 0-byte alignment means 1-byte alignment.
if (Alignment == 0)
Alignment = 1;
// Allocate the aligned space, going forwards from CurPtr.
char *Ptr = alignPtr(CurPtr, Alignment);
// Check if we can hold it.
if (Ptr + Size <= End) {
CurPtr = Ptr + Size;
// Update the allocation point of this memory block in MemorySanitizer.
// Without this, MemorySanitizer messages for values originating from here
// will point to the allocation of the entire slab.
__msan_allocated_memory(Ptr, Size);
return Ptr;
}
// If Size is really big, allocate a separate slab for it.
size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
if (PaddedSize > SizeThreshold) {
++NumSlabs;
MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
// Put the new slab after the current slab, since we are not allocating
// into it.
NewSlab->NextPtr = CurSlab->NextPtr;
CurSlab->NextPtr = NewSlab;
Ptr = alignPtr((char *)(NewSlab + 1), Alignment);
assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
__msan_allocated_memory(Ptr, Size);
return Ptr;
}
// Otherwise, start a new slab and try again.
StartNewSlab();
Ptr = alignPtr(CurPtr, Alignment);
CurPtr = Ptr + Size;
assert(CurPtr <= End && "Unable to allocate memory!");
__msan_allocated_memory(Ptr, Size);
return Ptr;
}
/// \brief Allocate space for one object without constructing it.
template <typename T> T *Allocate() {
@@ -131,22 +217,9 @@ public:
return static_cast<T *>(Allocate(Num * EltSize, Alignment));
}
void Deallocate(const void * /*Ptr*/) {}
size_t GetNumSlabs() const { return NumSlabs; }
void PrintStats() const;
/// \brief Returns the total physical memory allocated by this allocator.
size_t getTotalMemory() const;
private:
/// \brief Allocate at least this many bytes of memory in a slab.
size_t SlabSize;
/// \brief Threshold above which allocations go into a dedicated slab.
size_t SizeThreshold;
/// \brief The default allocator used if one is not provided.
MallocSlabAllocator DefaultSlabAllocator;
@@ -156,9 +229,6 @@ private:
/// changed to use a custom allocator.
SlabAllocator &Allocator;
/// \brief The slab that we are currently allocating into.
MemSlab *CurSlab;
/// \brief The current pointer into the current slab.
///
/// This points to the next free byte in the slab.
@@ -167,11 +237,6 @@ private:
/// \brief The end of the current slab.
char *End;
/// \brief How many bytes we've allocated.
///
/// Used so that we can compute how much space was wasted.
size_t BytesAllocated;
/// \brief How many slabs we've allocated.
///
/// Used to scale the size of each slab and reduce the number of allocations
@@ -180,14 +245,48 @@ private:
/// \brief Allocate a new slab and move the bump pointers over into the new
/// slab, modifying CurPtr and End.
void StartNewSlab();
void StartNewSlab() {
++NumSlabs;
// Scale the actual allocated slab size based on the number of slabs
// allocated. Every 128 slabs allocated, we double the allocated size to
// reduce allocation frequency, but saturate at multiplying the slab size by
// 2^30.
// FIXME: Currently, this count includes special slabs for objects above the
// size threshold. That will be fixed in a subsequent commit to make the
// growth even more predictable.
size_t AllocatedSlabSize =
SlabSize * (1 << std::min<size_t>(30, NumSlabs / 128));
MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
NewSlab->NextPtr = CurSlab;
CurSlab = NewSlab;
CurPtr = (char *)(CurSlab + 1);
End = ((char *)CurSlab) + CurSlab->Size;
}
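// Illustrative schedule for the scaling above, assuming the default 4096-byte
// SlabSize: slabs 1-127 are 4096 bytes, slabs 128-255 are 8192 bytes, slabs
// 256-383 are 16384 bytes, and so on, with the multiplier capped at 1 << 30.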
/// \brief Deallocate all memory slabs after and including this one.
void DeallocateSlabs(MemSlab *Slab);
void DeallocateSlabs(MemSlab *Slab) {
while (Slab) {
MemSlab *NextSlab = Slab->NextPtr;
#ifndef NDEBUG
// Poison the memory so stale pointers crash sooner. Note we must
// preserve the Size and NextPtr fields at the beginning.
sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
#endif
Allocator.Deallocate(Slab);
Slab = NextSlab;
--NumSlabs;
}
}
template <typename T> friend class SpecificBumpPtrAllocator;
};
/// \brief The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
@@ -197,11 +296,8 @@ template <typename T> class SpecificBumpPtrAllocator {
BumpPtrAllocator Allocator;
public:
SpecificBumpPtrAllocator(size_t size = 4096, size_t threshold = 4096)
: Allocator(size, threshold) {}
SpecificBumpPtrAllocator(size_t size, size_t threshold,
SlabAllocator &allocator)
: Allocator(size, threshold, allocator) {}
SpecificBumpPtrAllocator() : Allocator() {}
SpecificBumpPtrAllocator(SlabAllocator &allocator) : Allocator(allocator) {}
~SpecificBumpPtrAllocator() { DestroyAll(); }
@@ -229,7 +325,10 @@ public:
} // end namespace llvm
inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
template <size_t SlabSize, size_t SizeThreshold>
void *
operator new(size_t Size,
llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &Allocator) {
struct S {
char c;
union {
@@ -239,10 +338,12 @@ inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
void *P;
} x;
};
return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
offsetof(S, x)));
return Allocator.Allocate(
Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}
inline void operator delete(void *, llvm::BumpPtrAllocator &) {}
template <size_t SlabSize, size_t SizeThreshold>
void operator delete(void *,
llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &) {}
#endif // LLVM_SUPPORT_ALLOCATOR_H
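
A brief usage sketch for the templated placement operator new declared above (Node and useBumpAlloc are hypothetical names; only the allocator API comes from the header):

#include "llvm/Support/Allocator.h"

struct Node { int Key; Node *Next; };

void useBumpAlloc() {
  llvm::BumpPtrAllocator A;
  // Storage comes from A; there is no matching scalar delete for this form.
  Node *N = new (A) Node();
  N->Key = 42;
  // Reset releases all memory A has handed out in one shot; note that it does
  // not run destructors for objects constructed in the allocator.
  A.Reset();
}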


@@ -314,8 +314,8 @@ namespace {
// confuse them with the blocks of memory described above.
std::vector<sys::MemoryBlock> CodeSlabs;
JITSlabAllocator BumpSlabAllocator;
BumpPtrAllocator StubAllocator;
BumpPtrAllocator DataAllocator;
BumpPtrAllocatorImpl<DefaultSlabSize, DefaultSizeThreshold> StubAllocator;
BumpPtrAllocatorImpl<DefaultSlabSize, DefaultSizeThreshold> DataAllocator;
// Circular list of free blocks.
FreeRangeHeader *FreeMemoryList;
@@ -590,8 +590,8 @@ DefaultJITMemoryManager::DefaultJITMemoryManager()
#endif
LastSlab(0, 0),
BumpSlabAllocator(*this),
StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {
StubAllocator(BumpSlabAllocator),
DataAllocator(BumpSlabAllocator) {
// Allocate space for code.
sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);


@@ -21,144 +21,6 @@
namespace llvm {
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
SlabAllocator &allocator)
: SlabSize(size), SizeThreshold(std::min(size, threshold)),
Allocator(allocator), CurSlab(0), BytesAllocated(0), NumSlabs(0) {}
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold)
: SlabSize(size), SizeThreshold(std::min(size, threshold)),
Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0),
NumSlabs(0) {}
BumpPtrAllocator::~BumpPtrAllocator() {
DeallocateSlabs(CurSlab);
}
/// StartNewSlab - Allocate a new slab and move the bump pointers over into
/// the new slab. Modifies CurPtr and End.
void BumpPtrAllocator::StartNewSlab() {
++NumSlabs;
// Scale the actual allocated slab size based on the number of slabs
// allocated. Every 128 slabs allocated, we double the allocated size to
// reduce allocation frequency, but saturate at multiplying the slab size by
// 2^30.
// FIXME: Currently, this count includes special slabs for objects above the
// size threshold. That will be fixed in a subsequent commit to make the
// growth even more predictable.
size_t AllocatedSlabSize =
SlabSize * (1 << std::min<size_t>(30, NumSlabs / 128));
MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
NewSlab->NextPtr = CurSlab;
CurSlab = NewSlab;
CurPtr = (char*)(CurSlab + 1);
End = ((char*)CurSlab) + CurSlab->Size;
}
/// DeallocateSlabs - Deallocate all memory slabs after and including this
/// one.
void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
while (Slab) {
MemSlab *NextSlab = Slab->NextPtr;
#ifndef NDEBUG
// Poison the memory so stale pointers crash sooner. Note we must
// preserve the Size and NextPtr fields at the beginning.
sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
#endif
Allocator.Deallocate(Slab);
Slab = NextSlab;
--NumSlabs;
}
}
/// Reset - Deallocate all but the current slab and reset the current pointer
/// to the beginning of it, freeing all memory allocated so far.
void BumpPtrAllocator::Reset() {
if (!CurSlab)
return;
DeallocateSlabs(CurSlab->NextPtr);
CurSlab->NextPtr = 0;
CurPtr = (char*)(CurSlab + 1);
End = ((char*)CurSlab) + CurSlab->Size;
BytesAllocated = 0;
}
/// Allocate - Allocate space at the specified alignment.
///
void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
if (!CurSlab) // Start a new slab if we haven't allocated one already.
StartNewSlab();
// Keep track of how many bytes we've allocated.
BytesAllocated += Size;
// 0-byte alignment means 1-byte alignment.
if (Alignment == 0) Alignment = 1;
// Allocate the aligned space, going forwards from CurPtr.
char *Ptr = alignPtr(CurPtr, Alignment);
// Check if we can hold it.
if (Ptr + Size <= End) {
CurPtr = Ptr + Size;
// Update the allocation point of this memory block in MemorySanitizer.
// Without this, MemorySanitizer messages for values originating from here
// will point to the allocation of the entire slab.
__msan_allocated_memory(Ptr, Size);
return Ptr;
}
// If Size is really big, allocate a separate slab for it.
size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
if (PaddedSize > SizeThreshold) {
++NumSlabs;
MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
// Put the new slab after the current slab, since we are not allocating
// into it.
NewSlab->NextPtr = CurSlab->NextPtr;
CurSlab->NextPtr = NewSlab;
Ptr = alignPtr((char*)(NewSlab + 1), Alignment);
assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
__msan_allocated_memory(Ptr, Size);
return Ptr;
}
// Otherwise, start a new slab and try again.
StartNewSlab();
Ptr = alignPtr(CurPtr, Alignment);
CurPtr = Ptr + Size;
assert(CurPtr <= End && "Unable to allocate memory!");
__msan_allocated_memory(Ptr, Size);
return Ptr;
}
size_t BumpPtrAllocator::getTotalMemory() const {
size_t TotalMemory = 0;
for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
TotalMemory += Slab->Size;
}
return TotalMemory;
}
void BumpPtrAllocator::PrintStats() const {
unsigned NumSlabs = 0;
size_t TotalMemory = 0;
for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
TotalMemory += Slab->Size;
++NumSlabs;
}
errs() << "\nNumber of memory regions: " << NumSlabs << '\n'
<< "Bytes used: " << BytesAllocated << '\n'
<< "Bytes allocated: " << TotalMemory << '\n'
<< "Bytes wasted: " << (TotalMemory - BytesAllocated)
<< " (includes alignment, etc)\n";
}
SlabAllocator::~SlabAllocator() { }
MallocSlabAllocator::~MallocSlabAllocator() { }
@@ -174,6 +36,29 @@ void MallocSlabAllocator::Deallocate(MemSlab *Slab) {
Allocator.Deallocate(Slab);
}
void BumpPtrAllocatorBase::PrintStats() const {
unsigned NumSlabs = 0;
size_t TotalMemory = 0;
for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
TotalMemory += Slab->Size;
++NumSlabs;
}
errs() << "\nNumber of memory regions: " << NumSlabs << '\n'
<< "Bytes used: " << BytesAllocated << '\n'
<< "Bytes allocated: " << TotalMemory << '\n'
<< "Bytes wasted: " << (TotalMemory - BytesAllocated)
<< " (includes alignment, etc)\n";
}
size_t BumpPtrAllocatorBase::getTotalMemory() const {
size_t TotalMemory = 0;
for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
TotalMemory += Slab->Size;
}
return TotalMemory;
}
void PrintRecyclerStats(size_t Size,
size_t Align,
size_t FreeListSize) {


@@ -141,7 +141,7 @@ public:
// will not.
TEST(AllocatorTest, TestBigAlignment) {
MockSlabAllocator SlabAlloc;
BumpPtrAllocator Alloc(4096, 4096, SlabAlloc);
BumpPtrAllocator Alloc(SlabAlloc);
uintptr_t Ptr = (uintptr_t)Alloc.Allocate(3000, 2048);
MemSlab *Slab = SlabAlloc.GetLastSlab();
EXPECT_LE(Ptr + 3000, ((uintptr_t)Slab) + Slab->Size);