Skip to content

Instantly share code, notes, and snippets.

@nilium
Created April 13, 2012 06:52
Show Gist options
  • Select an option

  • Save nilium/2374568 to your computer and use it in GitHub Desktop.

Select an option

Save nilium/2374568 to your computer and use it in GitHub Desktop.
Bizarro world memory pool stuff
/*
Copyright (c) 2012 Noel Cower
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "memory_pool.hh"
#include <logging/logging.hh>
#include <stdlib.h>
#include <unistd.h>
namespace snow {
// Memory-guard configuration: when enabled, each allocated block carries a
// trailing 32-bit guard word that check_block() verifies.
// NOTE(review): the guard compiles IN when NDEBUG is defined (release builds)
// and OUT otherwise — that looks inverted for a debugging aid; confirm intent.
#if !defined(USE_MEMORY_GUARD)
#if NDEBUG
#define USE_MEMORY_GUARD 1
#else
#define USE_MEMORY_GUARD 0
#endif
#endif
#if USE_MEMORY_GUARD
#define MEMORY_GUARD_SIZE (sizeof(uint32_t))
#else
#define MEMORY_GUARD_SIZE 0
#endif
// The minimum size of a memory pool. This number is kind of arbitrary, but
// by default the minimum is the size of four minimum-size blocks.
#define MIN_POOL_SIZE (MIN_BLOCK_SIZE * 4)
// Alignment, in bytes, of every block header inside the pool buffer.
#define BLOCK_ALIGNMENT (16)
// Macro for getting the actual size of a block given a requested size.
// NOTE(review): this adds a full BLOCK_ALIGNMENT instead of the conventional
// (BLOCK_ALIGNMENT - 1) before masking, so it may round up one alignment unit
// more than strictly necessary — wasteful but safe; confirm it is deliberate.
#define BLOCK_SIZE(SZ) ((size_t)(((SZ) + sizeof(block_head_t) + MEMORY_GUARD_SIZE + (BLOCK_ALIGNMENT)) & ~(BLOCK_ALIGNMENT - 1)))
// Minimum allocation size - defaults to larger of a pointer or size_t.
#define MIN_ALLOC_SIZE (sizeof(void *) >= sizeof(size_t) ? sizeof(void *) : sizeof(size_t))
// Size of a block for a minimum-size allocation.
#define MIN_BLOCK_SIZE BLOCK_SIZE(MIN_ALLOC_SIZE)
// Memory guard value written after each allocation's data area.
#define BLOCK_GUARD (0xD3ADBE3F)
// Bit flags returned by check_block() describing what is wrong with a block.
enum {
BLOCK_OK = 0,                 // no problems detected
BLOCK_NULL = 1 << 0,          // the block pointer itself is NULL
BLOCK_NEXT_NULL = 1 << 1,     // next pointer is NULL
BLOCK_NEXT_CHAIN = 1 << 2,    // next->prev does not point back at this block
BLOCK_PREV_NULL = 1 << 3,     // prev pointer is NULL
BLOCK_PREV_CHAIN = 1 << 4,    // prev->next does not point back at this block
BLOCK_GUARD_CORRUPT = 1 << 5  // trailing guard word was overwritten
};
size_t memory_pool_t::allocated() const {
size_t sz = 0;
block_map_t::const_iterator iter = m_blocks.begin();
block_map_t::const_iterator end = m_blocks.end();
for (; iter != end; ++iter)
sz += iter->second->size;
return sz;
}
// Free space: whatever portion of the pool is not claimed by live blocks.
size_t memory_pool_t::unallocated() const {
  const size_t used = allocated();
  return m_size - used;
}
void memory_pool_t::init_with_buffer(size_t size, char *buffer) throw(allocation_error_t) {
block_head_t *init_block = NULL;
if (buffer == NULL)
throw allocation_error_t("Failed to allocate the buffer for the memory pool");
m_size = size;
m_buffer = buffer;
init_block = (block_head_t *)(
((unsigned long)m_buffer + BLOCK_ALIGNMENT) & ~(BLOCK_ALIGNMENT - 1) );
init_block->size = size;
init_block->next = &m_head;
init_block->prev = &m_head;
m_head.size = 0;
m_head.id = -1; // not a real ID in this case
m_head.next = init_block;
m_head.prev = init_block;
m_head.lock = 0;
m_sequence = 1;
m_next_unused = init_block;
}
// Null constructor: an empty pool with no backing buffer. All members are
// zeroed so the destructor (and any accidental use) sees defined values —
// the original left m_buffer uninitialized, which made the destructor's
// free(m_buffer) undefined behavior.
memory_pool_t::memory_pool_t()
  : m_base_pool(*this), m_base_block(0), m_size(0), m_sequence(0),
    m_buffer(NULL), m_next_unused(NULL), m_blocks()
{
  // Make the dummy head self-linked so chain walks terminate immediately.
  m_head.size = 0;
  m_head.id = -1;
  m_head.lock = 0;
  m_head.next = &m_head;
  m_head.prev = &m_head;
}
// Sub-pool constructor: carves this pool's buffer out of a block allocated
// from (and locked in) the base pool. The lock is held for the sub-pool's
// lifetime so the backing block is never relocated.
// Throws allocation_error_t if the base pool cannot satisfy the allocation.
memory_pool_t::memory_pool_t(size_t size, memory_pool_t &pool) throw(allocation_error_t)
  : m_base_pool(pool), m_base_block(0), m_blocks()
{
  block_id_t block = m_base_pool.allocate(size);
  m_base_block = block;
  try {
    char *buffer = (char *)pool.lock_block(block);
    init_with_buffer(size, buffer);
  } catch (...) {
    // The destructor does not run when a constructor throws, so return the
    // base block here or it would leak inside the parent pool forever.
    m_base_pool.release(block);
    throw;
  }
}
// Root-pool constructor: allocates a zeroed buffer from the heap and builds
// the pool over it. Throws allocation_error_t if calloc fails.
memory_pool_t::memory_pool_t(size_t size) throw(allocation_error_t)
  : m_base_pool(*this), m_base_block(0), m_blocks()
{
  // Round the request up to the block alignment, then enforce the pool-wide
  // minimum size.
  size_t padded = (size + BLOCK_ALIGNMENT) & ~(BLOCK_ALIGNMENT - 1);
  if (padded < MIN_POOL_SIZE)
    padded = MIN_POOL_SIZE;
  // init_with_buffer throws if calloc returned NULL.
  char *backing = (char *)calloc(padded, 1);
  init_with_buffer(padded, backing);
}
// A root pool (its own base pool, no base block) owns its malloc'd buffer
// and frees it directly; a sub-pool instead unlocks and returns its backing
// block to the parent pool.
memory_pool_t::~memory_pool_t() {
  const bool is_root = (m_base_pool == *this);
  if (is_root && m_base_block == 0) {
    free(m_buffer);
  } else if (m_base_block != 0) {
    m_base_pool.unlock_block(m_base_block);
    m_base_pool.release(m_base_block);
  }
}
// Allocates a block big enough for `size` bytes and returns its ID.
// First-fit search over the circular block chain, starting at the most
// recently freed block. Throws allocation_error_t when no block fits.
block_id_t memory_pool_t::allocate(size_t size, block_tag_t tag) throw(pool_error_t) {
  // Round the request up to a full block (header + guard + alignment).
  size_t block_size = BLOCK_SIZE(size);
  if (block_size < MIN_BLOCK_SIZE)
    block_size = MIN_BLOCK_SIZE;
  if (block_size > m_size) {
    // Must throw a pool exception type: throwing a plain std::runtime_error
    // from a function declared throw(pool_error_t) calls std::unexpected().
    throw allocation_error_t("block size is too large for the pool");
  }
  block_head_t *block = m_next_unused;
  if (NULL == block)
    throw block_error_t("Pool is corrupt");
  else if (NULL == block->prev)
    throw block_error_t("Search block is corrupt");
  // Walk the whole circle once: stop when we are back at the block before
  // the starting point.
  block_head_t * const term = block->prev;
  for (; block != term; block = block->next) {
    // Skip blocks that are in use (id != 0) or too small.
    if (block->id || block->size < block_size)
      continue;
    block_id_t new_id = gen_id();
    // Claim (and possibly split) the free block. Any pool_error_t thrown by
    // gen_id or reserve_block_sized propagates as-is; the old catch-and-
    // rethrow wrapper added nothing.
    reserve_block_sized(&block, block_size, new_id, false);
    m_next_unused = block->next;
    block->tag = tag;
    m_blocks.insert(std::pair<block_id_t, block_head_t *>(new_id, block));
    log_note("[%p] Allocated block %d [%zu:%zu:%zu]",
             this, new_id, size, block_size, unallocated());
    return new_id;
  }
  log_error("Failed to allocate block for size %zu (%zu)", size, block_size);
  throw allocation_error_t("Failed to allocate block");
}
// Releases the block with the given ID: removes it from the ID map, then
// frees and coalesces it. Throws block_error_t if the ID is unknown or the
// block fails its integrity check.
void memory_pool_t::release(block_id_t id) throw(block_error_t) {
  block_head_t *block = get_block_and_remove(id);
  if (NULL == block)
    throw block_error_t("Block not found");
  // destroy_block propagates block_error_t on its own; the previous
  // catch { throw; } wrapper was a no-op.
  destroy_block(&block);
  log_note("[%p] Destroyed block %d", this, id);
}
// Frees every unlocked block carrying the given tag and returns how many
// were released. Locked blocks are skipped. Throws block_error_t if a block
// fails its integrity check mid-walk.
int memory_pool_t::release_tagged(block_tag_t tag) throw(block_error_t) {
  block_head_t * const head = &m_head;
  int count = 0;
  for (block_head_t *block = m_head.next; block != head; block = block->next) {
    if (block->id && !block->lock && block->tag == tag) {
      // Erase the ID from the block map BEFORE destroying the block;
      // the original skipped this, leaving a dangling pointer in m_blocks
      // for every block released through this path.
      m_blocks.erase(block->id);
      destroy_block(&block);
      ++count;
    }
  }
  return count;
}
// Generates the next unused block ID, scanning forward from m_sequence and
// wrapping around (skipping 0, which means "unused"). Throws
// allocation_error_t once every ID has been tried.
int memory_pool_t::gen_id() throw(allocation_error_t) {
  block_id_t const stop = m_sequence;
  block_map_t::iterator const not_found = m_blocks.end();
  // Track the candidate in a wider type: incrementing an int16_t past its
  // maximum relies on implementation-defined narrowing. Wrap explicitly to
  // the same two's-complement behavior instead.
  int32_t candidate = (int32_t)stop + 1;
  while (1) {
    if (candidate > INT16_MAX)
      candidate = INT16_MIN;
    if (candidate == 0)
      candidate = 1; // 0 is reserved for "unused"
    block_id_t proposed = (block_id_t)candidate;
    if (not_found == m_blocks.find(proposed)) {
      m_sequence = proposed;
      return proposed;
    }
    if (stop == proposed)
      throw allocation_error_t("There are no available block IDs");
    ++candidate;
  }
}
// Compaction stub. The header documents the intent (slide live blocks toward
// the start of the pool; move_block exists to support this), but it is not
// implemented yet — this always reports that zero blocks were moved.
int memory_pool_t::compact() {
// TODO: implement — currently a no-op.
return 0;
}
// Moves the contents of `origin` into the free block `target` (reserving and
// possibly splitting it), repoints the ID map, and destroys the origin.
// If at_end is true and target is split, the data lands at its tail.
// Throws block_error_t on null/corrupt blocks; moving a block onto itself is
// a silent no-op.
void memory_pool_t::move_block(block_head_t *origin, block_head_t *target, bool at_end) throw(block_error_t) {
  if (origin == NULL)
    throw block_error_t("Origin is null");
  else if (target == NULL)
    throw block_error_t("Target is null");
  else if (origin == target)
    return; // not really an error..
  if (check_block(origin)) {
    throw block_error_t("Cannot move block - origin block is corrupt");
  } else if (check_block(target)) {
    throw block_error_t("Cannot move block - target block is corrupt");
  }
  // Reserve the target under the origin's ID. Errors propagate as-is; the
  // old catch { throw; } wrapper added nothing.
  reserve_block_sized(&target, origin->size, origin->id, at_end);
  // Hold both blocks locked while copying so neither can be relocated.
  ++origin->lock;
  ++target->lock;
  // Copy only the data area (the header is freshly initialized on target).
  memcpy(target + 1, origin + 1, origin->size - sizeof(block_head_t));
  target->tag = origin->tag;
  // Point the ID map at the block's new location (insert if somehow absent).
  block_map_t::iterator iter = m_blocks.find(target->id);
  if (m_blocks.end() == iter)
    m_blocks.insert(block_map_t::value_type(target->id, target));
  else
    iter->second = target;
  --origin->lock;
  --target->lock;
  // Free the origin; it merges with neighboring free blocks.
  destroy_block(&origin);
}
// Releases a block back to the free space, merging it with an adjacent free
// predecessor and/or successor so free space stays coalesced. The (possibly
// relocated) merged block is written back through *block.
// Throws block_error_t if the pointers are null or the block fails its
// integrity check; a corrupt guard alone is tolerated with a warning.
// NOTE: this does not touch m_blocks — callers own the map entry.
void memory_pool_t::destroy_block(block_head_t **block) throw(block_error_t) {
if (NULL == block)
throw block_error_t("Block input is null");
else if (NULL == *block)
throw block_error_t("Block is null");
block_head_t *ruin = *block;
// if anything other than the block guard is corrupt, do not attempt to
// destroy the block
int err = check_block(ruin);
if (err & ~BLOCK_GUARD_CORRUPT)
throw block_error_t("Failed to destroy block - block is corrupt");
else if (err)
log_warning("Block guard is corrupt - destroying anyway");
// Mark free: id 0 means "unused" everywhere else in the pool.
ruin->id = 0;
// If the previous block is free, fold this block into it. The merged block
// then starts at the predecessor's address. (m_head has id -1, so the dummy
// head is never merged.)
if (0 == ruin->prev->id) {
ruin = ruin->prev;
ruin->size += ruin->next->size;
ruin->next = ruin->next->next;
ruin->next->prev = ruin;
}
// If the following block is also free, absorb it as well.
if (0 == ruin->next->id) {
ruin->size += ruin->next->size;
ruin->next = ruin->next->next;
ruin->next->prev = ruin;
}
// The freshly freed block becomes the next allocation search start.
m_next_unused = ruin;
if (check_block(ruin))
throw block_error_t("Block is corrupt");
*block = ruin;
}
// Claims a free block for `id`, splitting off the surplus into a new free
// block when the block is more than MIN_BLOCK_SIZE larger than needed.
// If at_end is true the reserved portion is placed at the block's tail.
// The reserved block is written back through *block_io.
// Throws block_error_t on null/in-use/too-small/corrupt blocks.
void memory_pool_t::reserve_block_sized(block_head_t **block_io, size_t block_size, block_id_t id, bool at_end) throw(block_error_t) {
  if (block_io == NULL) {
    throw block_error_t("Block pointer is null");
  } else if (*block_io == NULL) {
    throw block_error_t("Target block is null");
  }
  block_head_t *block = *block_io;
  if (block->id)
    throw block_error_t("Block is already in use");
  else if (block->size < block_size)
    throw block_error_t("Block is too small");
  else if (check_block(block))
    throw block_error_t("Block is corrupt");
  // If the block is large enough to split its end off into a new block, do so.
  if ((block->size - block_size) > MIN_BLOCK_SIZE) {
    size_t split_size;
    size_t origin_size;
    if (at_end) {
      // reserved portion at the tail; the remainder stays free in front
      split_size = block_size;
      origin_size = block->size - block_size;
    } else {
      // reserved portion in front; the remainder is split off behind it
      split_size = block->size - block_size;
      origin_size = block_size;
    }
    block_head_t *split = (block_head_t *)((char *)block + origin_size);
    split->size = split_size;
    block->size = origin_size;
    split->lock = 0;
    split->id = 0;
    split->tag = TAG_EMPTY;
    // Link the split block into the chain right after the original block.
    split->next = block->next;
    split->prev = block;
    split->next->prev = split;
    block->next = split;
    if (at_end)
      block = split;
  }
  block->id = id;
  block->tag = TAG_EMPTY;
  block->lock = 0;
#if USE_STATIC_RELEASE
  block->owner = this;
#endif
#if USE_MEMORY_GUARD
  // Write the guard word into the last four bytes of the block, which is
  // exactly where check_block() reads it back from. (The original wrote it
  // relative to an undeclared `req_size`, which did not even compile with
  // the guard enabled, and at the wrong offset besides.)
  ((uint32_t *)((char *)block + block->size))[-1] = BLOCK_GUARD;
#endif
  if (check_block(block)) {
    log_error("Block is corrupt (post-reserve)");
  }
  *block_io = block;
}
// Resolves a block ID to its header. Returns NULL for ID 0 (the "unused"
// marker) or when the ID is not in the map.
memory_pool_t::block_head_t *memory_pool_t::get_block(block_id_t id) {
  if (id == 0)
    return NULL;
  block_map_t::iterator found = m_blocks.find(id);
  return (found == m_blocks.end()) ? NULL : found->second;
}
// Resolves a block ID to its header and, when found, erases the map entry in
// the same pass. Returns NULL for ID 0 or an unknown ID.
memory_pool_t::block_head_t *memory_pool_t::get_block_and_remove(block_id_t id) {
  if (id == 0)
    return NULL;
  block_map_t::iterator found = m_blocks.find(id);
  if (found == m_blocks.end())
    return NULL;
  block_head_t *result = found->second;
  m_blocks.erase(found);
  return result;
}
// Returns a pointer to the data area of the block with the given ID — the
// bytes immediately following the block header. Throws block_error_t when
// the ID is unknown or the block fails its integrity check.
void *memory_pool_t::ptr_for_block(block_id_t id) throw(block_error_t) {
  block_head_t *header = get_block(id);
  if (NULL == header)
    throw block_error_t("Block not found");
  if (check_block(header))
    throw block_error_t("Block is corrupt");
  return (void *)(header + 1);
}
// Locks the block (locks nest; the count is in the header) and returns a
// pointer to its data area. A locked block will not be relocated. Throws
// block_error_t when the ID is unknown or the block is corrupt.
void *memory_pool_t::lock_block(block_id_t id) throw(block_error_t) {
  block_head_t *header = get_block(id);
  if (NULL == header)
    throw block_error_t("Block not found");
  if (check_block(header))
    throw block_error_t("Block is corrupt");
  header->lock += 1;
  return (void *)(header + 1);
}
// Decrements a block's lock count. Throws block_error_t for an unknown or
// corrupt block and locking_error_t when the block is not locked at all.
void memory_pool_t::unlock_block(block_id_t id) throw(pool_error_t) {
  block_head_t *header = get_block(id);
  if (NULL == header)
    throw block_error_t("Block not found");
  if (0 == header->lock)
    throw locking_error_t("Lock underflow");
  if (check_block(header))
    throw block_error_t("Block is corrupt");
  header->lock -= 1;
}
// Verifies a block header: next/prev links must be non-null and point back
// at this block, and (when guards are compiled in) the trailing guard word
// of an allocated block must be intact. Returns BLOCK_OK (0) or a bitwise OR
// of the BLOCK_* flags; each failure is also logged.
int memory_pool_t::check_block(block_head_t *block) const {
  if (block == NULL) {
    log_error("Block is null");
    return BLOCK_NULL;
  }
  int result = 0;
  if (NULL == block->next) {
    log_error("Block %d is corrupt - next block is null", block->id);
    result |= BLOCK_NEXT_NULL;
  } else if (block->next->prev != block) {
    log_error("Block %d is corrupt - chain is broken (this->next->prev [%p] != this [%p])", block->id, block->next->prev, block);
    result |= BLOCK_NEXT_CHAIN;
  }
  if (NULL == block->prev) {
    log_error("Block %d is corrupt - prev block is null", block->id);
    result |= BLOCK_PREV_NULL;
  } else if (block->prev->next != block) {
    log_error("Block %d is corrupt - chain is broken (this->prev->next [%p] != this [%p])", block->id, block->prev->next, block);
    result |= BLOCK_PREV_CHAIN;
  }
#if USE_MEMORY_GUARD
  // Only allocated blocks (id != 0) carry a guard word, stored in the last
  // four bytes of the block.
  if (block->id) {
    uint32_t guard = ((uint32_t *)((char *)(block) + block->size))[-1];
    if (BLOCK_GUARD != guard) {
      log_error("Block %d is corrupt - guard is corrupt (= %X)", block->id, guard);
      result |= BLOCK_GUARD_CORRUPT;
    }
  }
#endif
  return result;
}
// statics
#if USE_STATIC_RELEASE
// Static pointer-based release for allocator interfaces that only hand back
// a raw pointer: steps back from the data pointer to the block header and
// releases the block through its owning pool (the `owner` field only exists
// when USE_STATIC_RELEASE is enabled). Currently compiled out —
// USE_STATIC_RELEASE is defined to 0 in the header.
void memory_pool_t::release_ptr(void *buffer) throw(block_error_t) {
// not pretty, but a sufficient hack for when the allocator is a jerk
if (NULL == buffer)
throw block_error_t("Pointer is null");
block_head_t *block = ((block_head_t *)buffer) - 1;
block->owner->release(block->id);
}
#endif
// Recovers a block's ID from a data pointer previously returned by
// lock_block/ptr_for_block: the header sits immediately before the data
// area, so step back one header and read its ID. Throws block_error_t for a
// NULL pointer.
block_id_t memory_pool_t::id_for_ptr(void *buffer) throw(block_error_t) {
  if (NULL == buffer)
    throw block_error_t("Pointer is null");
  block_head_t *header = ((block_head_t *)buffer) - 1;
  return header->id;
}
// exceptions
// Defines the out-of-line constructor for each pool exception type; each one
// simply forwards the message string to its base class.
#define EXCEPTION_CTOR(KLASS, BASE) \
memory_pool_t::KLASS::KLASS(const std::string &what) throw() : BASE(what) {}
EXCEPTION_CTOR(pool_error_t, std::runtime_error);
EXCEPTION_CTOR(allocation_error_t, pool_error_t);
EXCEPTION_CTOR(block_error_t, pool_error_t);
EXCEPTION_CTOR(locking_error_t, pool_error_t);
}
/*
Copyright (c) 2012 Noel Cower
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef MEMORY_POOL_HH_1GV1RJSS
#define MEMORY_POOL_HH_1GV1RJSS
#include <stdint.h>
#include <stdlib.h>
#include <map>
#include <stdexcept>
#include <string>
#define USE_STATIC_RELEASE 0
namespace snow
{
#define TAG_EMPTY (0)
#define POOL_CACHE_SIZE (16)
typedef int16_t block_id_t;
typedef int16_t block_tag_t;
// Relocatable block memory pool. Allocations are identified by small integer
// block IDs rather than raw pointers; a block's address is only pinned while
// it is locked (lock_block/unlock_block), which is what makes compaction
// possible in principle. A pool is backed either by its own malloc'd buffer
// or by a block carved out of another memory_pool_t (a sub-pool).
class memory_pool_t
{
public:
// exceptions
#define DEF_POOL_ERROR(KLASS, BASE) \
struct KLASS : public BASE { \
explicit KLASS(const std::string &what) throw(); \
};
// ; not needed, but it helps with syntax highlighting issues
DEF_POOL_ERROR(pool_error_t, std::runtime_error);
DEF_POOL_ERROR(allocation_error_t, pool_error_t);
DEF_POOL_ERROR(block_error_t, pool_error_t);
DEF_POOL_ERROR(locking_error_t, pool_error_t);
#undef DEF_POOL_ERROR
#if USE_STATIC_RELEASE
// Releases a block based on its pointer.
static void release_ptr(void *buf) throw(block_error_t);
#endif
// Gets the ID for a block based on its pointer (the pointer must have come
// from lock_block/ptr_for_block). Throws block_error_t for a NULL pointer.
static block_id_t id_for_ptr(void *buf) throw(block_error_t);
// ctor / dtor
// Sub-pool: the buffer is a block allocated from (and locked in) base_pool.
explicit memory_pool_t(size_t size, memory_pool_t &base_pool) throw(allocation_error_t);
// Root pool: the buffer is calloc'd and owned by this pool.
explicit memory_pool_t(size_t size) throw(allocation_error_t);
~memory_pool_t();
// Allocates a block with the given size and tag. Tags are optional.
// Returns a block ID that can be used to lock the block.
block_id_t allocate(size_t size, block_tag_t tag = TAG_EMPTY) throw(pool_error_t);
// Releases the block with the given ID.
void release(block_id_t id) throw(block_error_t);
// Get the capacity of the memory pool. This isn't necessarily its actual
// size, but the amount of memory that it will allow you to allocate from.
inline size_t capacity() const { return m_size; }
// The amount of memory currently allocated in the pool.
size_t allocated() const;
// The amount of memory currently unallocated in the pool.
size_t unallocated() const;
// Gets a pointer to the block with the given ID. The pointer is only stable
// while the block is locked.
void *ptr_for_block(block_id_t id) throw(block_error_t);
// Locks the buffer - this means the buffer will not be relocated for the
// duration of the lock.
// Buffers can be locked multiple times.
// Returns a pointer to the locked buffer's data area; throws block_error_t
// if the ID is unknown or the block is corrupt.
void *lock_block(block_id_t id) throw(block_error_t);
// Unlocks the buffer.
void unlock_block(block_id_t id) throw(pool_error_t);
// Free all unlocked blocks with the given tag.
// Returns the number of blocks released. Locked blocks aren't released.
int release_tagged(block_tag_t tag) throw(block_error_t);
// Dumps the memory pool to the given stream.
//int dump_pool(ostream &stream) const;
// Rearranges blocks in the pool all toward the beginning of the pool,
// blocks are ordered by size. This is not in-place sorting and requires
// at least the same amount of memory as the pool's capacity is available.
// NOTE: currently an unimplemented stub that always returns 0.
int compact();
private:
// null constructor
memory_pool_t();
// Copying is disabled by declaring these private (pre-C++11 idiom); the
// copy ctor must still initialize the reference member, hence *this.
inline memory_pool_t(const memory_pool_t &other) : m_base_pool(*this) {}
inline memory_pool_t &operator = (const memory_pool_t &other) {
return *this;
}
// Identity comparison: two pools are "equal" only if they are the same
// object (address comparison).
bool operator == (const memory_pool_t &other) {
return &other == this;
}
protected:
// Builds the pool over `buffer`; throws allocation_error_t if it is NULL.
void init_with_buffer(size_t size, char *buffer) throw(allocation_error_t);
// Block header struct: lives in the pool buffer immediately before each
// block's data area. Blocks form a circular doubly-linked list anchored at
// m_head.
struct block_head_t {
#if USE_STATIC_RELEASE
memory_pool_t *owner;
#endif
// Block ID: 0 if unused, -1 for the head block, otherwise a regular block.
// Can overflow.
block_id_t id;
// tag to identify certain allocations by (i.e., for batch-freeing)
block_tag_t tag;
// NOTE: Can remove this and determine size of blocks based on the
// difference of the block addr the next block
uint32_t size; // size of the block, including header & guard
// size_t requested_size; // the size requested (not incl. header & guard)
block_head_t *prev;
block_head_t *next;
uint16_t lock; // nesting lock count; nonzero blocks must not be relocated
#if 0 && !NDEBUG
struct {
// source_file and function can only be string literal constants
const char *source_file;
const char *function;
int32_t line;
size_t requested_size;
} debug_info;
#endif
};
typedef std::map<block_id_t, block_head_t *> block_map_t;
// member variables
memory_pool_t &m_base_pool;     // parent pool (refers to *this for root pools)
block_id_t m_base_block;        // backing block ID in the parent (0 for root)
size_t m_size; // requested size of the pool (the usable size)
block_id_t m_sequence; // last ID in use
char *m_buffer; // the pool's buffer
// the next unused block (typically the last deallocated/merged buffer)
block_head_t *m_next_unused;
block_head_t m_head; // the head/dummy block header
// mutex_t m_lock;
// FIXME: this still uses the default allocator
block_map_t m_blocks; // map of all block IDs to blocks
// Generates a new block ID and returns it; throws allocation_error_t when
// every ID is taken.
int gen_id() throw(allocation_error_t);
// move_block basically does what it says - it moves the contents of one
// block to another. The origin block is destroyed.
// If at_end is true and the target block is split, the block will be moved
// to the end of the target block. Throws block_error_t on failure.
void move_block(block_head_t *origin, block_head_t *target, bool at_end) throw(block_error_t);
// Releases a block and merges it with other released blocks. Returns the
// block via the input, as the block's location may change if merged.
void destroy_block(block_head_t **block) throw(block_error_t);
// Reserves a free block. Splits it if it's larger than what's needed.
// If at_end is true and the block is split, the new block will be placed at
// the end of the block rather than the beginning.
void reserve_block_sized(block_head_t **block_io, size_t block_size, block_id_t id, bool at_end) throw(block_error_t);
// Gets the block with the given id. Returns NULL on failure.
block_head_t *get_block(block_id_t id);
// Gets the block with the given id and removes it from the block map.
// Returns NULL on failure.
block_head_t *get_block_and_remove(block_id_t id);
// Gets a block and updates the cache.
// block_head_t *get_block_cache(block_id_t id);
// Caches a block. Returns 0 on success.
// int cache_block(block_head_t *block);
// checks the block for validity
int check_block(block_head_t *block) const;
public:
// STL-style allocator backed by a memory_pool_t. Z is the minimum byte size
// of any allocation made through this allocator.
// NOTE(review): allocate() locks the block but deallocate() releases without
// unlocking — every allocation leaves one lock outstanding; confirm whether
// release-on-locked is intended here.
template <class T, size_t Z=sizeof(T)>
class allocator_t {
// assignment is forbidden: the pool reference cannot be reseated
allocator_t &operator = (const allocator_t &other) throw(pool_error_t) {
throw pool_error_t("Cannot assign pool allocator");
}
public:
memory_pool_t &pool; // the pool all allocations are drawn from
allocator_t(memory_pool_t &_pool) : pool(_pool) {}
allocator_t(const allocator_t &other) : pool(other.pool) {}
template <class U>
allocator_t(const allocator_t<U> &other) : pool(other.pool) {}
~allocator_t() {}
template <typename U>
struct rebind {
typedef allocator_t<U> other;
};
typedef T value_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T &reference;
typedef const T &const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
// Allocates and locks a pool block big enough for `count` elements.
// The hint is ignored.
pointer allocate(size_type count, const_pointer hint = 0) {
block_id_t id = pool.allocate(block_size(count));
pointer p = pointer(pool.lock_block(id));
return p;
}
// Releases the block backing `buf`; `size` is ignored.
void deallocate(pointer buf, size_type size) {
pool.release(id_for_ptr(buf));
}
// Upper bound on allocatable element count, from current free space.
size_type max_size() const {
size_t un=pool.unallocated();
size_type r = size_type(un / Z);
return r;
}
size_type block_size() const {
return Z;
}
// Byte size for `count` elements, never less than the Z minimum.
size_type block_size(size_t count) const {
const size_type abs_size = sizeof(value_type) * count;
return (Z < abs_size ? abs_size : Z);
};
void construct(pointer buf, const_reference ref) {
new(buf) value_type(ref);
}
void destroy(pointer buf) {
buf->~value_type();
}
pointer address(reference ref) { return &ref; }
const_pointer address(const_reference ref) const { return &ref; }
// Allocators compare equal iff they draw from the same pool object.
bool operator == (const memory_pool_t::allocator_t<T> &other) {
return pool == other.pool;
}
bool operator != (const memory_pool_t::allocator_t<T> &other) {
return pool != other.pool;
}
};
};
} /* snow */
#endif /* end of include guard: MEMORY_POOL_HH_1GV1RJSS */
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment