-rw-r--r--  changes/memarea_overflow | 7
-rw-r--r--  src/common/memarea.c     | 8
2 files changed, 12 insertions, 3 deletions
diff --git a/changes/memarea_overflow b/changes/memarea_overflow
new file mode 100644
index 0000000000..8fdc38cc09
--- /dev/null
+++ b/changes/memarea_overflow
@@ -0,0 +1,7 @@
+  o Minor bugfixes (pointer arithmetic):
+    - Fix a bug in memarea_alloc() that could have resulted in remote heap
+      write access, if Tor had ever passed an unchecked size to
+      memarea_alloc(). Fortunately, all the sizes we pass to memarea_alloc()
+      are pre-checked to be less than 128 kilobytes. Fixes bug 19150; bugfix
+      on 0.2.1.1-alpha. Bug found by Guido Vranken.
+
diff --git a/src/common/memarea.c b/src/common/memarea.c
index 6841ba54e7..d6cad1140a 100644
--- a/src/common/memarea.c
+++ b/src/common/memarea.c
@@ -80,8 +80,7 @@ typedef struct memarea_chunk_t {
   struct memarea_chunk_t *next_chunk;
   size_t mem_size; /**< How much RAM is available in mem, total? */
   char *next_mem; /**< Next position in mem to allocate data at. If it's
-                   * greater than or equal to mem+mem_size, this chunk is
-                   * full. */
+                   * equal to mem+mem_size, this chunk is full. */
 #ifdef USE_ALIGNED_ATTRIBUTE
   char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
 #else
@@ -237,7 +236,10 @@ memarea_alloc(memarea_t *area, size_t sz)
   tor_assert(sz < SIZE_T_CEILING);
   if (sz == 0)
     sz = 1;
-  if (chunk->next_mem+sz > chunk->U_MEM+chunk->mem_size) {
+  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
+  const size_t space_remaining =
+    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
+  if (sz > space_remaining) {
     if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
       /* This allocation is too big. Stick it in a special chunk, and put
        * that chunk second in the list. */
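
For context, here is a minimal standalone sketch (not Tor code) of why the old bounds check was unsafe and how the new one avoids the problem. The struct and helper names below are simplified, hypothetical stand-ins for memarea_chunk_t and its fields; U_MEM in the diff appears to refer to the chunk's mem storage, which the sketch approximates with a plain array.

/* Minimal sketch, assuming a simplified stand-in for memarea_chunk_t.
 * Not the real Tor code; the names here are illustrative only. */
#include <stddef.h>

typedef struct chunk_t {
  size_t mem_size;   /* total bytes available in mem */
  char *next_mem;    /* next free byte; kept within [mem, mem + mem_size] */
  char mem[1];       /* storage; a flexible array member in the real struct */
} chunk_t;

/* Old-style check (mirrors the removed line): returns nonzero if the
 * allocation does not fit in this chunk.  Computing next_mem + sz first is
 * undefined pointer arithmetic when sz is large enough to point past the
 * object, and a wrapped result can compare as "in bounds", letting the
 * caller write past the chunk. */
int
needs_new_chunk_unsafe(const chunk_t *chunk, size_t sz)
{
  return chunk->next_mem + sz > chunk->mem + chunk->mem_size;
}

/* New-style check (mirrors the added lines): first compute the remaining
 * space as a size_t.  Both pointers lie inside the same chunk, so the
 * subtraction is well defined and cannot overflow; the comparison against
 * sz is then exact for any sz. */
int
needs_new_chunk_safe(const chunk_t *chunk, size_t sz)
{
  const size_t space_remaining =
    (size_t)((chunk->mem + chunk->mem_size) - chunk->next_mem);
  return sz > space_remaining;
}

The key design point is the order of operations: the patched code subtracts two pointers that are already known (and now asserted) to lie within the same chunk, producing a size_t that cannot wrap, and only then compares it against the caller-supplied sz. The old code added sz to next_mem first, so a sufficiently large sz could wrap the pointer and make the bounds check pass.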