Diffstat (limited to 'src/common/memarea.c')
-rw-r--r-- | src/common/memarea.c | 104 |
1 file changed, 37 insertions, 67 deletions
diff --git a/src/common/memarea.c b/src/common/memarea.c
index bcaea0949e..7d16b702e3 100644
--- a/src/common/memarea.c
+++ b/src/common/memarea.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Tor Project, Inc. */
+/* Copyright (c) 2008-2016, The Tor Project, Inc. */
 /* See LICENSE for licensing information */
 
 /** \file memarea.c
@@ -21,16 +21,19 @@
  * value. */
 #define MEMAREA_ALIGN SIZEOF_VOID_P
 
+/** A value which, when masked out of a pointer, produces a maximally aligned
+ * pointer. */
 #if MEMAREA_ALIGN == 4
-#define MEMAREA_ALIGN_MASK 3lu
+#define MEMAREA_ALIGN_MASK ((uintptr_t)3)
 #elif MEMAREA_ALIGN == 8
-#define MEMAREA_ALIGN_MASK 7lu
+#define MEMAREA_ALIGN_MASK ((uintptr_t)7)
 #else
 #error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
 #endif
 
 #if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
 #define USE_ALIGNED_ATTRIBUTE
+/** Name for the 'memory' member of a memory chunk. */
 #define U_MEM mem
 #else
 #define U_MEM u.mem
@@ -61,7 +64,7 @@
 #endif
 
 /** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
-static INLINE void *
+static inline void *
 realign_pointer(void *ptr)
 {
   uintptr_t x = (uintptr_t)ptr;
@@ -80,15 +83,16 @@ typedef struct memarea_chunk_t {
   struct memarea_chunk_t *next_chunk;
   size_t mem_size; /**< How much RAM is available in mem, total? */
   char *next_mem; /**< Next position in mem to allocate data at. If it's
-                   * greater than or equal to mem+mem_size, this chunk is
-                   * full. */
+                   * equal to mem+mem_size, this chunk is full. */
 #ifdef USE_ALIGNED_ATTRIBUTE
+  /** Actual content of the memory chunk. */
   char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
 #else
   union {
     char mem[1]; /**< Memory space in this chunk. */
     void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
-  } u;
+  } u; /**< Union used to enforce alignment when we don't have support for
+        * doing it right. */
 #endif
 } memarea_chunk_t;
 
@@ -105,56 +109,32 @@ struct memarea_t {
   memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
 };
 
-/** How many chunks will we put into the freelist before freeing them? */
-#define MAX_FREELIST_LEN 4
-/** The number of memarea chunks currently in our freelist. */
-static int freelist_len=0;
-/** A linked list of unused memory area chunks. Used to prevent us from
- * spinning in malloc/free loops. */
-static memarea_chunk_t *freelist = NULL;
-
 /** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
 static memarea_chunk_t *
-alloc_chunk(size_t sz, int freelist_ok)
+alloc_chunk(size_t sz)
 {
   tor_assert(sz < SIZE_T_CEILING);
-  if (freelist && freelist_ok) {
-    memarea_chunk_t *res = freelist;
-    freelist = res->next_chunk;
-    res->next_chunk = NULL;
-    --freelist_len;
-    CHECK_SENTINEL(res);
-    return res;
-  } else {
-    size_t chunk_size = freelist_ok ? CHUNK_SIZE : sz;
-    memarea_chunk_t *res;
-    chunk_size += SENTINEL_LEN;
-    res = tor_malloc(chunk_size);
-    res->next_chunk = NULL;
-    res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
-    res->next_mem = res->U_MEM;
-    tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
-               ((char*)res)+chunk_size);
-    tor_assert(realign_pointer(res->next_mem) == res->next_mem);
-    SET_SENTINEL(res);
-    return res;
-  }
+
+  size_t chunk_size = sz < CHUNK_SIZE ? CHUNK_SIZE : sz;
+  memarea_chunk_t *res;
+  chunk_size += SENTINEL_LEN;
+  res = tor_malloc(chunk_size);
+  res->next_chunk = NULL;
+  res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
+  res->next_mem = res->U_MEM;
+  tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
+             ((char*)res)+chunk_size);
+  tor_assert(realign_pointer(res->next_mem) == res->next_mem);
+  SET_SENTINEL(res);
+  return res;
 }
 
-/** Release <b>chunk</b> from a memarea, either by adding it to the freelist
- * or by freeing it if the freelist is already too big. */
+/** Release <b>chunk</b> from a memarea. */
 static void
-chunk_free_unchecked(memarea_chunk_t *chunk)
+memarea_chunk_free_unchecked(memarea_chunk_t *chunk)
 {
   CHECK_SENTINEL(chunk);
-  if (freelist_len < MAX_FREELIST_LEN) {
-    ++freelist_len;
-    chunk->next_chunk = freelist;
-    freelist = chunk;
-    chunk->next_mem = chunk->U_MEM;
-  } else {
-    tor_free(chunk);
-  }
+  tor_free(chunk);
 }
 
 /** Allocate and return new memarea. */
@@ -162,7 +142,7 @@ memarea_t *
 memarea_new(void)
 {
   memarea_t *head = tor_malloc(sizeof(memarea_t));
-  head->first = alloc_chunk(CHUNK_SIZE, 1);
+  head->first = alloc_chunk(CHUNK_SIZE);
   return head;
 }
@@ -174,7 +154,7 @@ memarea_drop_all(memarea_t *area)
 {
   memarea_chunk_t *chunk, *next;
   for (chunk = area->first; chunk; chunk = next) {
     next = chunk->next_chunk;
-    chunk_free_unchecked(chunk);
+    memarea_chunk_free_unchecked(chunk);
   }
   area->first = NULL; /*fail fast on */
   tor_free(area);
@@ -190,26 +170,13 @@ memarea_clear(memarea_t *area)
   if (area->first->next_chunk) {
     for (chunk = area->first->next_chunk; chunk; chunk = next) {
       next = chunk->next_chunk;
-      chunk_free_unchecked(chunk);
+      memarea_chunk_free_unchecked(chunk);
     }
     area->first->next_chunk = NULL;
   }
   area->first->next_mem = area->first->U_MEM;
 }
 
-/** Remove all unused memarea chunks from the internal freelist. */
-void
-memarea_clear_freelist(void)
-{
-  memarea_chunk_t *chunk, *next;
-  freelist_len = 0;
-  for (chunk = freelist; chunk; chunk = next) {
-    next = chunk->next_chunk;
-    tor_free(chunk);
-  }
-  freelist = NULL;
-}
-
 /** Return true iff <b>p</b> is in a range that has been returned by an
  * allocation from <b>area</b>. */
 int
@@ -237,16 +204,19 @@ memarea_alloc(memarea_t *area, size_t sz)
   tor_assert(sz < SIZE_T_CEILING);
   if (sz == 0)
     sz = 1;
-  if (chunk->next_mem+sz > chunk->U_MEM+chunk->mem_size) {
+  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
+  const size_t space_remaining =
+    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
+  if (sz > space_remaining) {
     if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
       /* This allocation is too big. Stick it in a special chunk, and put
        * that chunk second in the list. */
-      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
+      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE);
       new_chunk->next_chunk = chunk->next_chunk;
       chunk->next_chunk = new_chunk;
       chunk = new_chunk;
     } else {
-      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
+      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE);
      new_chunk->next_chunk = chunk;
       area->first = chunk = new_chunk;
     }
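
For context, memarea.c implements Tor's arena allocator: callers make many small allocations out of large malloc'd chunks and release them all at once. Below is a minimal usage sketch, not part of the patch; it assumes only the functions visible in the diff above (memarea_new(), memarea_alloc(), memarea_clear(), memarea_drop_all()), and the helper example_copy_tokens() and its parameters are invented for illustration.

#include <string.h>
#include "memarea.h"  /* memarea_t and the four functions used below */

/* Hypothetical caller: many small allocations sharing one lifetime. */
static void
example_copy_tokens(const char **tokens, int n_tokens)
{
  memarea_t *area = memarea_new();          /* one initial CHUNK_SIZE chunk */
  for (int i = 0; i < n_tokens; ++i) {
    size_t len = strlen(tokens[i]) + 1;
    char *copy = memarea_alloc(area, len);  /* bump-pointer allocation */
    memcpy(copy, tokens[i], len);
  }
  memarea_clear(area);     /* keep the first chunk, free the rest; reusable */
  memarea_drop_all(area);  /* free every chunk and the memarea_t itself */
}

With this patch, the lifecycle above maps onto plain tor_malloc()/tor_free() calls: alloc_chunk() no longer consults a process-global freelist, and memarea_chunk_free_unchecked() frees unconditionally, which simplifies the code and removes shared mutable state.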