Diffstat (limited to 'src/common/memarea.c')
-rw-r--r--  src/common/memarea.c  104
1 file changed, 99 insertions, 5 deletions
diff --git a/src/common/memarea.c b/src/common/memarea.c
index 61117288c3..4e2a5e5fc5 100644
--- a/src/common/memarea.c
+++ b/src/common/memarea.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Tor Project, Inc. */
+/* Copyright (c) 2008-2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/** \file memarea.c
@@ -7,11 +7,15 @@
*/
#include "orconfig.h"
+#include <stddef.h>
#include <stdlib.h>
#include "memarea.h"
#include "util.h"
#include "compat.h"
#include "torlog.h"
+#include "container.h"
+
+#ifndef DISABLE_MEMORY_SENTINELS
/** If true, we try to detect any attempts to write beyond the length of a
* memarea. */
@@ -83,8 +87,7 @@ typedef struct memarea_chunk_t {
struct memarea_chunk_t *next_chunk;
size_t mem_size; /**< How much RAM is available in mem, total? */
char *next_mem; /**< Next position in mem to allocate data at. If it's
- * greater than or equal to mem+mem_size, this chunk is
- * full. */
+ * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
/** Actual content of the memory chunk. */
char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
@@ -99,7 +102,7 @@ typedef struct memarea_chunk_t {
/** How many bytes are needed for overhead before we get to the memory part
* of a chunk? */
-#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, U_MEM)
+#define CHUNK_HEADER_SIZE offsetof(memarea_chunk_t, U_MEM)
/** What's the smallest that we'll allocate a chunk? */
#define CHUNK_SIZE 4096
@@ -205,7 +208,10 @@ memarea_alloc(memarea_t *area, size_t sz)
tor_assert(sz < SIZE_T_CEILING);
if (sz == 0)
sz = 1;
- if (chunk->next_mem+sz > chunk->U_MEM+chunk->mem_size) {
+ tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
+ const size_t space_remaining =
+ (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
+ if (sz > space_remaining) {
if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
/* This allocation is too big. Stick it in a special chunk, and put
* that chunk second in the list. */
@@ -302,3 +308,91 @@ memarea_assert_ok(memarea_t *area)
}
}
+#else
+
+struct memarea_t {
+ smartlist_t *pieces;
+};
+
+memarea_t *
+memarea_new(void)
+{
+ memarea_t *ma = tor_malloc_zero(sizeof(memarea_t));
+ ma->pieces = smartlist_new();
+ return ma;
+}
+void
+memarea_drop_all(memarea_t *area)
+{
+ memarea_clear(area);
+ smartlist_free(area->pieces);
+ tor_free(area);
+}
+void
+memarea_clear(memarea_t *area)
+{
+ SMARTLIST_FOREACH(area->pieces, void *, p, tor_free_(p));
+ smartlist_clear(area->pieces);
+}
+int
+memarea_owns_ptr(const memarea_t *area, const void *ptr)
+{
+ SMARTLIST_FOREACH(area->pieces, const void *, p, if (ptr == p) return 1;);
+ return 0;
+}
+
+void *
+memarea_alloc(memarea_t *area, size_t sz)
+{
+ void *result = tor_malloc(sz);
+ smartlist_add(area->pieces, result);
+ return result;
+}
+
+void *
+memarea_alloc_zero(memarea_t *area, size_t sz)
+{
+ void *result = tor_malloc_zero(sz);
+ smartlist_add(area->pieces, result);
+ return result;
+}
+void *
+memarea_memdup(memarea_t *area, const void *s, size_t n)
+{
+ void *r = memarea_alloc(area, n);
+ memcpy(r, s, n);
+ return r;
+}
+char *
+memarea_strdup(memarea_t *area, const char *s)
+{
+ size_t n = strlen(s);
+ char *r = memarea_alloc(area, n+1);
+ memcpy(r, s, n);
+ r[n] = 0;
+ return r;
+}
+char *
+memarea_strndup(memarea_t *area, const char *s, size_t n)
+{
+ size_t ln = strnlen(s, n);
+ char *r = memarea_alloc(area, ln+1);
+ memcpy(r, s, ln);
+ r[ln] = 0;
+ return r;
+}
+void
+memarea_get_stats(memarea_t *area,
+ size_t *allocated_out, size_t *used_out)
+{
+ (void)area;
+ *allocated_out = *used_out = 128;
+}
+void
+memarea_assert_ok(memarea_t *area)
+{
+ (void)area;
+}
+
+#endif
+
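
Below is a minimal, hypothetical usage sketch of the memarea API touched by this change. The helper demo_memarea_usage is not part of the commit; it is illustrative only, and assumes a caller built against the same headers this file includes (memarea.h, util.h, torlog.h). It works against either build of the allocator, since the smartlist-backed fallback added under DISABLE_MEMORY_SENTINELS mirrors the chunked allocator's interface (though its memarea_get_stats() reports only a fixed placeholder value).

#include "orconfig.h"
#include "memarea.h"
#include "util.h"
#include "torlog.h"

/* Hypothetical example: copy several strings into one region and
 * release them all with a single call, instead of tracking each
 * allocation individually. */
static void
demo_memarea_usage(const char **names, int n_names)
{
  memarea_t *area = memarea_new();
  char **copies = memarea_alloc(area, n_names * sizeof(char *));
  size_t allocated = 0, used = 0;
  int i;
  for (i = 0; i < n_names; ++i) {
    /* Each copy lives inside the area; there is no per-string free. */
    copies[i] = memarea_strdup(area, names[i]);
    tor_assert(memarea_owns_ptr(area, copies[i]));
  }
  memarea_get_stats(area, &allocated, &used);
  log_debug(LD_GENERAL, "memarea uses %lu of %lu bytes",
            (unsigned long)used, (unsigned long)allocated);
  /* One call frees every allocation made from the area. */
  memarea_drop_all(area);
}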