Python memory management: PyObject_Malloc from CPython's Objects/obmalloc.c (the pymalloc small-object allocator), annotated.
/*
 * Python's memory is organized into several layers:
 *     Block
 *     Pool
 *     Arena
 * which together form the pymalloc memory pool.
 */
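/*
 * Annotation: in CPython builds of this era these layers have fixed
 * sizes -- a block is an 8-byte-aligned chunk of at most
 * SMALL_REQUEST_THRESHOLD (256) bytes, a pool is a 4 KB page carved
 * into blocks of a single size class, and an arena is a 256 KB slab
 * carved into pools. Requests above the threshold bypass pymalloc
 * entirely (see the redirect path at the bottom).
 */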
#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

    /*
     * This implicitly redirects malloc(0).
     */
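    /*
     * Annotation: nbytes is unsigned, so when nbytes == 0 the
     * subtraction below wraps around to a huge value, the threshold
     * test fails, and the request falls through to the redirect path,
     * where it is bumped up to 1 byte before calling malloc().
     */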
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
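        /*
         * Annotation: usedpools is an array of pointers laid out so
         * that each size class owns a pair of adjacent slots acting as
         * the prev/next fields of a dummy list header -- hence the
         * doubled index "size + size". An empty list's dummy header
         * points at itself, so "pool != pool->nextpool" below is an
         * O(1) test for whether a partially used pool of this size
         * class exists.
         */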
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
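            /*
             * Annotation: a pool's free blocks form a singly linked
             * list threaded through the blocks themselves -- the first
             * bytes of each free block hold a pointer to the next free
             * block, with NULL terminating the list. That is what the
             * "*(block **)bp" dereference below reads.
             */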
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it.
             */
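            /*
             * Annotation: blocks in a pool are carved out lazily.
             * nextoffset is the offset of the first never-used block;
             * maxnextoffset is the last offset at which a whole block
             * still fits. Memory beyond nextoffset is never touched
             * until it is actually handed out.
             */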
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (block*)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
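            /*
             * Annotation: a textbook circular doubly-linked-list
             * removal -- the pool's two neighbors are linked to each
             * other. The pool is re-linked into the used list by
             * PyObject_Free() once one of its blocks is released.
             */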
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available: use a free pool.
         */
        if (usable_arenas == NULL) {
            /* No arena has a free pool: allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena =
                usable_arenas->prevarena = NULL;
        }
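        /*
         * Annotation: new_arena() obtains a fresh 256 KB arena from
         * the system (plain malloc() in builds of this era) and
         * returns its arena_object, or NULL on failure -- in which
         * case the request is served by the system allocator below
         * rather than failing outright.
         */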
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list. However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated: remove. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                       usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0: it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                       ARENA_SIZE - POOL_SIZE);
            }
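            /*
             * Annotation: usable_arenas is kept sorted by nfreepools
             * in increasing order, so allocation always draws from the
             * most heavily used arena. That bias lets lightly used
             * arenas drain completely and be returned to the system by
             * PyObject_Free().
             */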
        init_pool:
            /* Frontlink to used pools. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
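            /*
             * Annotation: after this initialization bp is the first
             * block (at POOL_OVERHEAD), freeblock is the second, and
             * nextoffset points at the third -- only two blocks are
             * touched now; the rest are carved on demand as above.
             */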
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }

        /* Carve off a new pool. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
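        /*
         * Annotation: DUMMY_SIZE_IDX is deliberately an impossible
         * size class, so the "pool->szidx == size" test at init_pool
         * is guaranteed to fail for a freshly carved pool and the
         * header gets fully initialized.
         */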
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena: it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc)
     * allocator. We jump here on bigger requests, on error in the
     * code above (as a last chance to serve the request) or when
     * the max memory limit has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}
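
The size-class mapping at the top of the fast path is compact enough to be easy to miss. The standalone sketch below (not part of CPython; it simply assumes the usual constants ALIGNMENT = 8, i.e. ALIGNMENT_SHIFT = 3, and SMALL_REQUEST_THRESHOLD = 256) reproduces the computation for a few request sizes:

/* size_class_demo.c -- illustrative sketch only, not CPython source. */
#include <stdio.h>

#define ALIGNMENT_SHIFT 3
#define SMALL_REQUEST_THRESHOLD 256
/* Same formula pymalloc uses to turn a size-class index back into bytes. */
#define INDEX2SIZE(i) (((size_t)(i) + 1) << ALIGNMENT_SHIFT)

int main(void)
{
    size_t requests[] = {1, 5, 8, 9, 64, 100, 256, 257};
    size_t i;

    for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
        size_t nbytes = requests[i];
        /* Same test as PyObject_Malloc: 0 wraps around and is redirected. */
        if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
            unsigned size = (unsigned)((nbytes - 1) >> ALIGNMENT_SHIFT);
            printf("request %3lu -> size class %2u -> %3lu-byte block\n",
                   (unsigned long)nbytes, size,
                   (unsigned long)INDEX2SIZE(size));
        }
        else {
            printf("request %3lu -> redirected to malloc()\n",
                   (unsigned long)nbytes);
        }
    }
    return 0;
}

Compiled with any C compiler, this prints, for example, that a 100-byte request is served from the 104-byte size class, while a 257-byte request goes straight to malloc().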