Created
November 3, 2014 21:01
-
-
Save awreece/ecc13f69198935d9e331 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* | |
* Copyright (c) 2014 by Alex Reece <[email protected]>. All rights reserved. | |
*/ | |
#include <assert.h> | |
#include <stdint.h> | |
#include <stdio.h> | |
#include <string.h> | |
#include <stdbool.h> | |
#include "debug.h" | |
#include "util.h" | |
#include "memlib.h" | |
#include "mm.h" | |
/*
 * Allocation state of a block.  We set these values to 0 and 1 because we
 * know that we only have 1 bit to express them in the mm_label_t below.
 */
typedef enum {
	M_ALLOCATABLE = 0,	/* block is free and may satisfy a request */
	M_INUSE = 1		/* block is allocated (or a sentinel label) */
} m_state_t;
/*
 * A boundary tag ("label") stored at both the start and the end of every
 * block.  The size and state are packed into a single pointer-sized word
 * (verified by the _Static_assert below); ml_size is the total block size
 * in bytes, boundary tags included.
 */
typedef struct mm_label {
#ifndef NDEBUG
	/*
	 * In debug mode, use the first 32 bits of a label as a magic canary
	 * value to allow us to validate the integrity of the heap.
	 */
	uint32_t ml_magic : 32;
	uint32_t ml_size : 31;
	m_state_t ml_allocatable : 1;
#else
	uint64_t ml_size : 63;
	m_state_t ml_allocatable : 1;
#endif
} mm_label_t;

/* Label canary: the bytes spell "Alex" in ASCII. */
#define ML_MAGIC 0x416c6578

_Static_assert(sizeof (mm_label_t) == sizeof (uintptr_t), "label size wrong");
/*
 * A heap block: one header label immediately followed by the user-visible
 * payload.  The matching footer label lives at the end of the block and is
 * located via mb_footer().
 */
typedef struct mm_block {
	mm_label_t mb_header;
	char mb_user_data[];	/* flexible array member: payload starts here */
} mm_block_t;

/* Each arena is a fixed number of whole pages. */
#define ARENA_SIZE (8*PAGE_SIZE)
/* Largest single block an arena can hold (arena minus its bookkeeping). */
#define MAX_ASIZE (ARENA_SIZE - (4 * sizeof (uintptr_t)))
/*
 * An arena is one contiguous mapped region.  The prologue and epilogue are
 * permanently-M_INUSE sentinel labels, so block walks and coalescing stop
 * at the arena edges instead of running off them.
 */
typedef struct mm_arena {
	struct mm_arena *ma_next;	/* singly linked list of all arenas */
	uintptr_t ma_magic;		/* MA_MAGIC canary (see ma_check) */
	mm_label_t ma_prologue;		/* M_INUSE sentinel before first block */
	union {
		char ma_padding[MAX_ASIZE];	/* forces sizeof == ARENA_SIZE */
		mm_block_t ma_block;		/* the arena's first block */
	};
	mm_label_t ma_epilogue;		/* M_INUSE sentinel after last block */
} mm_arena_t;
_Static_assert(sizeof (mm_arena_t) == ARENA_SIZE, "arena size wrong");
_Static_assert(ARENA_SIZE % PAGE_SIZE == 0,
    "arena size not aligned to page size");

/* Arena canary: the bytes spell "Rece" in ASCII. */
#define MA_MAGIC 0x52656365
/* Per-block bookkeeping cost: one header plus one footer label. */
#define OVERHEAD (2 * sizeof (mm_label_t))
/* Smallest legal block: the two tags plus one aligned payload unit. */
#define MIN_ASIZE (OVERHEAD + MIN_ALIGNMENT)
static inline void | |
ml_init(mm_label_t *label, size_t size, m_state_t allocatable) { | |
#ifndef NDEBUG | |
label->ml_magic = ML_MAGIC; | |
#endif | |
label->ml_size = size; | |
label->ml_allocatable = allocatable; | |
} | |
#ifndef NDEBUG | |
static inline bool | |
ml_check(mm_label_t *label) | |
{ | |
ASSERT3U(label->ml_magic, ==, ML_MAGIC); | |
ASSERT3U(label->ml_size, >, 0); | |
return (true); | |
} | |
#endif // NDEBUG | |
static inline mm_label_t * | |
mb_header(mm_block_t *block) | |
{ | |
return (&block->mb_header); | |
} | |
static inline mm_label_t * | |
mb_footer(mm_block_t *block) | |
{ | |
uintptr_t block_base = (uintptr_t) block; | |
uint64_t size = mb_header(block)->ml_size; | |
ASSERT(ml_check(mb_header(block))); | |
return ((mm_label_t *)(block_base + size - sizeof (mm_label_t))); | |
} | |
static inline void | |
mb_init(mm_block_t *block, size_t size, m_state_t allocatable) | |
{ | |
ASSERT3U(round_up(size, MIN_ALIGNMENT), ==, size); | |
ASSERT3U(size, >=, MIN_ASIZE); | |
ml_init(mb_header(block), size, allocatable); | |
ml_init(mb_footer(block), size, allocatable); | |
} | |
#ifndef NDEBUG
/*
 * Validate a whole block: both tags intact, and header/footer agreeing on
 * state and size.  Returns true so callers can write ASSERT(mb_check(b)).
 */
static inline bool
mb_check(mm_block_t *block)
{
	ASSERT(ml_check(mb_header(block)));
	ASSERT(ml_check(mb_footer(block)));
	ASSERT3S(mb_header(block)->ml_allocatable, ==,
	    mb_footer(block)->ml_allocatable);
	ASSERT3U(mb_header(block)->ml_size, ==, mb_footer(block)->ml_size);
	ASSERT3U(mb_header(block)->ml_size, >=, MIN_ASIZE);
	return (true);
}
#endif // NDEBUG
/*
 * Return the physically adjacent next block.  For the last block of an
 * arena this lands on the epilogue sentinel.
 */
static inline mm_block_t *
mb_phys_next(mm_block_t *block)
{
	ASSERT(mb_check(block));
	uintptr_t next_base = (uintptr_t) block + mb_header(block)->ml_size;
	return ((mm_block_t *) next_base);
}
/*
 * Return the physically adjacent previous block, found via that block's
 * footer, which sits immediately before this block's header.
 */
static inline mm_block_t *
mb_phys_prev(mm_block_t *block)
{
	ASSERT(mb_check(block));
	mm_label_t *prev_footer = ((mm_label_t *) block) - 1;
	ASSERT(ml_check(prev_footer));
	return ((mm_block_t *)((uintptr_t) block - prev_footer->ml_size));
}
static inline mm_block_t * | |
mb_from_user_ptr(void *user_ptr) | |
{ | |
uintptr_t ptr_base = (uintptr_t) user_ptr; | |
mm_block_t *ret; | |
ASSERT3U(user_ptr, !=, NULL); | |
ret = (mm_block_t *)(ptr_base - sizeof (mm_label_t)); | |
ASSERT(mb_check(ret)); | |
return (ret); | |
} | |
static inline mm_arena_t * | |
ma_alloc() | |
{ | |
mm_arena_t *arena = memlib_mmap(ARENA_SIZE); | |
size_t block_size = ARENA_SIZE - 4 * sizeof (mm_label_t); | |
ml_init(&arena->ma_prologue, sizeof (mm_label_t), M_INUSE); | |
mb_init(&arena->ma_block, block_size, M_ALLOCATABLE); | |
ml_init(&arena->ma_epilogue, sizeof (mm_label_t), M_INUSE); | |
arena->ma_next = NULL; | |
arena->ma_magic = MA_MAGIC; | |
return (arena); | |
} | |
#ifndef NDEBUG
/*
 * Validate an arena: its canary, its first block, and both sentinels.
 * (Blocks after the first are validated as find_fit_arena walks them.)
 * Returns true so callers can write ASSERT(ma_check(a)).
 */
static inline bool
ma_check(mm_arena_t *arena)
{
	ASSERT3U(arena->ma_magic, ==, MA_MAGIC);
	ASSERT(mb_check(&arena->ma_block));
	ASSERT(ml_check(&arena->ma_prologue));
	ASSERT(ml_check(&arena->ma_epilogue));
	return (true);
}
#endif
/* Pointer to the first arena. */ | |
mm_arena_t *mm_arena = NULL; | |
void | |
mm_init() | |
{ | |
mm_arena = NULL; | |
} | |
/*
 * First-fit search within one arena: walk blocks in address order and
 * return the first free block of at least asize bytes, or NULL.
 *
 * Termination is subtle: the loop stops when it reaches the epilogue,
 * whose label was initialized with size sizeof (mm_label_t) -- below
 * MIN_ASIZE -- so the loop condition fails there.
 */
static mm_block_t *
find_fit_arena(mm_arena_t *arena, size_t asize)
{
	mm_block_t *block;
	ASSERT3P(arena, !=, NULL);
	ASSERT(ma_check(arena));
	for (block = &arena->ma_block; mb_header(block)->ml_size >= MIN_ASIZE;
	    block = mb_phys_next(block)) {
		mm_label_t *header = mb_header(block);
		ASSERT(mb_check(block));
		if (header->ml_size >= asize &&
		    header->ml_allocatable == M_ALLOCATABLE) {
			return (block);
		}
	}
	return (NULL);
}
static mm_block_t * | |
find_fit(size_t asize) | |
{ | |
mm_arena_t *arena; | |
for (arena = mm_arena; arena != NULL; arena = arena->ma_next) { | |
mm_block_t *ret = find_fit_arena(arena, asize); | |
if (ret != NULL) { | |
return (ret); | |
} | |
} | |
return (NULL); | |
} | |
/*
 * Allocate asize bytes out of a free block.  If the remainder is big
 * enough to stand alone as a block, split it off; otherwise hand out the
 * whole block.  Returns the block, now marked M_INUSE.
 */
static mm_block_t *
place(mm_block_t *block, size_t asize)
{
	ASSERT(mb_check(block));
	ASSERT3U(mb_header(block)->ml_size, >=, asize);
	ASSERT3S(mb_header(block)->ml_allocatable, ==, M_ALLOCATABLE);

	size_t remainder = mb_header(block)->ml_size - asize;

	/*
	 * If we can carve off enough space for another block, do so.
	 * Shrinking block first is what makes mb_phys_next() land on the
	 * carved-off remainder.
	 */
	if (remainder >= MIN_ASIZE) {
		mb_init(block, asize, M_ALLOCATABLE);
		mb_init(mb_phys_next(block), remainder, M_ALLOCATABLE);
	}

	mb_header(block)->ml_allocatable = M_INUSE;
	mb_footer(block)->ml_allocatable = M_INUSE;
	ASSERT(mb_check(block));
	return (block);
}
static mm_block_t * | |
extend_heap() | |
{ | |
mm_arena_t *arena = ma_alloc(); | |
arena->ma_next = mm_arena; | |
mm_arena = arena; | |
ASSERT(ma_check(mm_arena)); | |
return (&arena->ma_block); | |
} | |
void * | |
mm_malloc(size_t size) | |
{ | |
mm_block_t *bp; | |
size_t asize; | |
if (size == 0) { | |
return (NULL); | |
} | |
asize = round_up(size + OVERHEAD, MIN_ALIGNMENT); | |
ASSERT3U(asize, >=, MIN_ASIZE); | |
ASSERT3U(asize, <=, MAX_ASIZE); | |
bp = find_fit(asize); | |
if (bp == NULL) { | |
bp = extend_heap(); | |
} | |
ASSERT3P(bp, !=, NULL); | |
place(bp, asize); | |
#ifndef NDEBUG | |
/* Mark this memory as allocated by scribbling an 'A' all over it. */ | |
memset(bp->mb_user_data, 'A', mb_header(bp)->ml_size - OVERHEAD); | |
ASSERT(mb_check(bp)); | |
ASSERT3S(mb_header(bp)->ml_allocatable, ==, M_INUSE); | |
#endif | |
return (bp->mb_user_data); | |
} | |
/*
 * Merge a newly freed block with any free physical neighbors.  Neighbor
 * state and sizes are captured before any memory is rewritten; the
 * prologue/epilogue sentinels are always M_INUSE, so a merge never crosses
 * an arena boundary.  Returns the (possibly relocated) merged block.
 */
static mm_block_t *
coalesce(mm_block_t *block)
{
	mm_block_t *prev_block = mb_phys_prev(block);
	mm_block_t *next_block = mb_phys_next(block);
	bool prev_alloc = mb_header(prev_block)->ml_allocatable == M_INUSE;
	bool next_alloc = mb_header(next_block)->ml_allocatable == M_INUSE;
	size_t block_size = mb_header(block)->ml_size;
	ASSERT(mb_check(block));
	if (prev_alloc && next_alloc) {
		/* Both neighbors in use: nothing to merge. */
		return (block);
	}
	if (!next_alloc) {
		block_size += mb_header(next_block)->ml_size;
	}
	if (!prev_alloc) {
		block_size += mb_header(prev_block)->ml_size;
		/* The merged block now begins at the previous block. */
		block = prev_block;
	}
#ifndef NDEBUG
	/* Mark this memory as free by scribbling an 'F' all over it. */
	/*
	 * Safe even though it clobbers the old tags: all neighbor state was
	 * read above, and mb_init() rewrites the merged block's tags next.
	 */
	memset(block, 'F', block_size);
#endif
	mb_init(block, block_size, M_ALLOCATABLE);
	return (block);
}
void | |
mm_free(void *ptr) | |
{ | |
mm_block_t *block; | |
if (ptr == NULL) { | |
return; | |
} | |
block = mb_from_user_ptr(ptr); | |
mb_header(block)->ml_allocatable = M_ALLOCATABLE; | |
mb_footer(block)->ml_allocatable = M_ALLOCATABLE; | |
block = coalesce(block); | |
ASSERT(mb_check(block)); | |
ASSERT3S(mb_header(block)->ml_allocatable, ==, M_ALLOCATABLE); | |
} | |
void * | |
mm_realloc(void *old_ptr, size_t size) | |
{ | |
void *ptr; | |
if (size == 0) { | |
mm_free(old_ptr); | |
return (NULL); | |
} | |
ptr = mm_malloc(size); | |
if (ptr != NULL && old_ptr != NULL) { | |
mm_label_t *old_header = mb_header(mb_from_user_ptr(old_ptr)); | |
ASSERT3P(mb_header(mb_from_user_ptr(old_ptr)), !=, NULL); | |
ASSERT3S(old_header->ml_allocatable, ==, M_INUSE); | |
memcpy(ptr, old_ptr, MIN(size, old_header->ml_size)); | |
mm_free(old_ptr); | |
} | |
return (ptr); | |
} | |
/*
 * Allocate a zeroed array of nmemb elements of the given size.  Returns
 * NULL if nmemb * size overflows or the underlying allocation fails.
 */
void *
mm_calloc(size_t nmemb, size_t size)
{
	size_t bytes;
	void *newptr;

	/* Guard the size multiplication against overflow (CERT MEM07-C). */
	if (size != 0 && nmemb > SIZE_MAX / size) {
		return (NULL);
	}
	bytes = nmemb * size;
	newptr = mm_malloc(bytes);
	/* mm_malloc() returns NULL for bytes == 0 or on failure. */
	if (newptr != NULL) {
		memset(newptr, 0, bytes);
	}
	return (newptr);
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment