author    Nick Mathewson <nickm@torproject.org>    2016-03-21 10:20:16 -0400
committer Nick Mathewson <nickm@torproject.org>    2016-03-21 10:20:16 -0400
commit    cb3f9bc2d48e8c3f4847643c03e082d394d33168 (patch)
tree      738d21dd0265596baf2f8fd5482a5a9508212e15 /src
parent    a42938c07670162863decc952b4d73681d9302d6 (diff)
parent    72ebf4160412f64fb6ae0cd97dd89d01d89c075a (diff)
Merge branch 'bug18570_027'
Diffstat (limited to 'src')
 src/or/channel.c        |  38
 src/or/channeltls.c     |  11
 src/or/connection_or.c  |  35
 src/or/connection_or.h  |   1
 src/test/test_channel.c | 121
 5 files changed, 197 insertions(+), 9 deletions(-)
diff --git a/src/or/channel.c b/src/or/channel.c
index 851d8372f5..519d10641a 100644
--- a/src/or/channel.c
+++ b/src/or/channel.c
@@ -2656,6 +2656,11 @@ channel_process_cells(channel_t *chan)
/*
* Process cells until we're done or find one we have no current handler
* for.
+ *
+ * We must free the cells here after calling the handler, since custody
+ * of the buffer was given to the channel layer when they were queued;
+ * see comments on memory management in channel_queue_cell() and in
+ * channel_queue_var_cell() below.
*/
while (NULL != (q = TOR_SIMPLEQ_FIRST(&chan->incoming_queue))) {
tor_assert(q);
@@ -2673,6 +2678,7 @@ channel_process_cells(channel_t *chan)
q->u.fixed.cell, chan,
U64_PRINTF_ARG(chan->global_identifier));
chan->cell_handler(chan, q->u.fixed.cell);
+ tor_free(q->u.fixed.cell);
tor_free(q);
} else if (q->type == CELL_QUEUE_VAR &&
chan->var_cell_handler) {
@@ -2685,6 +2691,7 @@ channel_process_cells(channel_t *chan)
q->u.var.var_cell, chan,
U64_PRINTF_ARG(chan->global_identifier));
chan->var_cell_handler(chan, q->u.var.var_cell);
+ tor_free(q->u.var.var_cell);
tor_free(q);
} else {
/* Can't handle this one */
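
The comment added at the top of channel_process_cells() states the rule these hunks implement: once a cell has been queued, the channel layer owns the copy, so the drain loop frees it (together with the queue entry) only after the handler returns, and the handler itself must not free it. For illustration only, a minimal self-contained sketch of that pattern, with hypothetical toy_* names standing in for cell_queue_entry_t and friends:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for cell_t and cell_queue_entry_t. */
typedef struct toy_cell { unsigned char command; } toy_cell_t;

typedef struct toy_entry {
  toy_cell_t *cell;
  struct toy_entry *next;
} toy_entry_t;

/* The handler borrows the cell for the duration of the call only. */
static void
toy_handler(toy_cell_t *cell)
{
  printf("handled command %u\n", (unsigned)cell->command);
}

/* Drain the queue: run the handler, then free the cell copy and the entry. */
static void
toy_process_cells(toy_entry_t **head)
{
  toy_entry_t *q;
  while ((q = *head) != NULL) {
    *head = q->next;
    toy_handler(q->cell);
    free(q->cell);   /* the queue owns the copy, so it is freed here... */
    free(q);         /* ...together with the queue entry itself */
  }
}

int
main(void)
{
  toy_entry_t *e = calloc(1, sizeof(*e));
  e->cell = calloc(1, sizeof(toy_cell_t));
  e->cell->command = 3;
  toy_process_cells(&e);
  return 0;
}

The matching change on the test side (chan_test_cell_handler() below no longer calls tor_free()) exists precisely because the freeing responsibility moved here.
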
@@ -2705,6 +2712,7 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
{
int need_to_queue = 0;
cell_queue_entry_t *q;
+ cell_t *cell_copy = NULL;
tor_assert(chan);
tor_assert(cell);
@@ -2732,8 +2740,19 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
U64_PRINTF_ARG(chan->global_identifier));
chan->cell_handler(chan, cell);
} else {
- /* Otherwise queue it and then process the queue if possible. */
- q = cell_queue_entry_new_fixed(cell);
+ /*
+ * Otherwise queue it and then process the queue if possible.
+ *
+ * We queue a copy, not the original pointer - it might have been on the
+ * stack in connection_or_process_cells_from_inbuf() (or another caller
+ * if we ever have a subclass other than channel_tls_t), or be freed
+ * there after we return. This is the uncommon case; the non-copying
+ * fast path occurs in the if (!need_to_queue) case above when the
+ * upper layer has installed cell handlers.
+ */
+ cell_copy = tor_malloc_zero(sizeof(cell_t));
+ memcpy(cell_copy, cell, sizeof(cell_t));
+ q = cell_queue_entry_new_fixed(cell_copy);
log_debug(LD_CHANNEL,
"Queueing incoming cell_t %p for channel %p "
"(global ID " U64_FORMAT ")",
@@ -2759,6 +2778,7 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
{
int need_to_queue = 0;
cell_queue_entry_t *q;
+ var_cell_t *cell_copy = NULL;
tor_assert(chan);
tor_assert(var_cell);
@@ -2787,8 +2807,18 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
U64_PRINTF_ARG(chan->global_identifier));
chan->var_cell_handler(chan, var_cell);
} else {
- /* Otherwise queue it and then process the queue if possible. */
- q = cell_queue_entry_new_var(var_cell);
+ /*
+ * Otherwise queue it and then process the queue if possible.
+ *
+ * We queue a copy, not the original pointer - it might have been on the
+ * stack in connection_or_process_cells_from_inbuf() (or another caller
+ * if we ever have a subclass other than channel_tls_t), or be freed
+ * there after we return. This is the uncommon case; the non-copying
+ * fast path occurs in the if (!need_to_queue) case above when the
+ * upper layer has installed cell handlers.
+ */
+ cell_copy = var_cell_copy(var_cell);
+ q = cell_queue_entry_new_var(cell_copy);
log_debug(LD_CHANNEL,
"Queueing incoming var_cell_t %p for channel %p "
"(global ID " U64_FORMAT ")",
diff --git a/src/or/channeltls.c b/src/or/channeltls.c
index 30165bfcf2..c65af5d040 100644
--- a/src/or/channeltls.c
+++ b/src/or/channeltls.c
@@ -1011,6 +1011,11 @@ channel_tls_time_process_cell(cell_t *cell, channel_tls_t *chan, int *time,
* for cell types specific to the handshake for this transport protocol and
* handles them, and queues all other cells to the channel_t layer, which
* eventually will hand them off to command.c.
+ *
+ * The channel layer itself decides whether the cell should be queued or
+ * can be handed off immediately to the upper-layer code. It is responsible
+ * for copying in the case that it queues; we merely pass through the
+ * pointers we get from connection_or_process_cells_from_inbuf().
*/
void
@@ -1108,6 +1113,12 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
* related and live below the channel_t layer, so no variable-length
* cells ever get delivered in the current implementation, but I've left
* the mechanism in place for future use.
+ *
+ * If we were handing them off to the upper layer, the channel_t queueing
+ * code would be responsible for memory management, and we'd just be passing
+ * pointers through from connection_or_process_cells_from_inbuf(). That
+ * caller always frees them after this function returns, so this function
+ * should never free var_cell.
*/
void
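
The two comments added here pin down the ownership convention between this file and connection_or.c: the caller that pulled a variable-length cell off the inbuf keeps ownership across the call, and channel_tls_handle_var_cell() only borrows the pointer and must never free it. A hypothetical sketch of that contract (toy_* names, not the real functions):

#include <stdint.h>
#include <stdlib.h>

typedef struct toy_var_cell {
  uint8_t command;
  uint16_t payload_len;
  uint8_t payload[];            /* trailing payload, as in var_cell_t */
} toy_var_cell_t;

/* Borrows the cell: may inspect it or queue a *copy*, must never free it. */
static void
toy_handle_var_cell(const toy_var_cell_t *var_cell)
{
  (void)var_cell;
}

int
main(void)
{
  /* The caller allocates... */
  toy_var_cell_t *vc = calloc(1, sizeof(*vc) + 4);
  vc->command = 7;
  vc->payload_len = 4;

  toy_handle_var_cell(vc);      /* ...the handler only borrows... */
  free(vc);                     /* ...and the caller always frees afterwards. */
  return 0;
}
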
diff --git a/src/or/connection_or.c b/src/or/connection_or.c
index f39d5b2255..ea49bdba77 100644
--- a/src/or/connection_or.c
+++ b/src/or/connection_or.c
@@ -488,6 +488,28 @@ var_cell_new(uint16_t payload_len)
return cell;
}
+/**
+ * Copy a var_cell_t
+ */
+
+var_cell_t *
+var_cell_copy(const var_cell_t *src)
+{
+ var_cell_t *copy = NULL;
+ size_t size = 0;
+
+ if (src != NULL) {
+ size = STRUCT_OFFSET(var_cell_t, payload) + src->payload_len;
+ copy = tor_malloc_zero(size);
+ copy->payload_len = src->payload_len;
+ copy->command = src->command;
+ copy->circ_id = src->circ_id;
+ memcpy(copy->payload, src->payload, copy->payload_len);
+ }
+
+ return copy;
+}
+
/** Release all space held by <b>cell</b>. */
void
var_cell_free(var_cell_t *cell)
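
The sizing in the new var_cell_copy() is the detail worth noting: because the payload is a trailing array, the copy has to be allocated at the offset of the payload member plus payload_len bytes, not sizeof(var_cell_t). A standalone sketch of the same idea, using plain offsetof() and hypothetical toy_* names in place of tor's STRUCT_OFFSET and var_cell_t:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct toy_var_cell {
  uint8_t command;
  uint16_t payload_len;
  uint8_t payload[];            /* trailing, variable-length payload */
} toy_var_cell_t;

static toy_var_cell_t *
toy_var_cell_copy(const toy_var_cell_t *src)
{
  toy_var_cell_t *copy = NULL;
  if (src) {
    /* Size the copy by header offset + payload length, not sizeof(). */
    size_t size = offsetof(toy_var_cell_t, payload) + src->payload_len;
    copy = calloc(1, size);
    copy->command = src->command;
    copy->payload_len = src->payload_len;
    memcpy(copy->payload, src->payload, src->payload_len);
  }
  return copy;
}

int
main(void)
{
  toy_var_cell_t *orig = calloc(1, offsetof(toy_var_cell_t, payload) + 3);
  orig->command = 130;
  orig->payload_len = 3;
  memcpy(orig->payload, "abc", 3);

  toy_var_cell_t *dup = toy_var_cell_copy(orig);
  assert(dup != orig);
  assert(memcmp(dup->payload, "abc", 3) == 0);

  free(orig);
  free(dup);                    /* each side frees its own buffer */
  return 0;
}
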
@@ -2026,6 +2048,19 @@ connection_or_process_cells_from_inbuf(or_connection_t *conn)
{
var_cell_t *var_cell;
+ /*
+ * Note on memory management for incoming cells: below the channel layer,
+ * we shouldn't need to consider its internal queueing/copying logic. It
+ * is safe to pass cells to it on the stack or on the heap, but in the
+ * latter case we must be sure we free them later.
+ *
+ * The incoming cell queue code in channel.c will (in the common case)
+ * decide it can pass them to the upper layer immediately, in which case
+ * those functions may run directly on the cell pointers we pass here, or
+ * it may decide to queue them, in which case it will allocate its own
+ * buffer and copy the cell.
+ */
+
while (1) {
log_debug(LD_OR,
TOR_SOCKET_T_FORMAT": starting, inbuf_datalen %d "
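
In practice the note above means the read loop has two cases: fixed-length cells can be unpacked into a stack buffer and handed off with no cleanup, while variable-length cells come back heap-allocated and must be freed by this caller once the handler returns, since the channel layer copies anything it decides to queue. A hypothetical sketch of that loop shape (toy_* names, no real buffer parsing):

#include <stdlib.h>
#include <string.h>

typedef struct toy_cell { unsigned char body[512]; } toy_cell_t;

typedef struct toy_var_cell {
  unsigned short payload_len;
  unsigned char payload[];
} toy_var_cell_t;

/* Stand-ins for the channel_tls_handle_*() entry points: they may use the
 * pointers during the call, but never keep them and never free them. */
static void toy_handle_cell(const toy_cell_t *cell) { (void)cell; }
static void toy_handle_var_cell(const toy_var_cell_t *vc) { (void)vc; }

/* Pretend parser: returns one heap-allocated var cell, then NULL. */
static toy_var_cell_t *
toy_fetch_var_cell(int *remaining)
{
  if (*remaining <= 0)
    return NULL;
  *remaining -= 1;
  return calloc(1, sizeof(toy_var_cell_t) + 8);
}

int
main(void)
{
  int pending_var_cells = 1;
  toy_var_cell_t *vc;

  while ((vc = toy_fetch_var_cell(&pending_var_cells)) != NULL) {
    toy_handle_var_cell(vc);
    free(vc);                   /* heap cell: this caller frees it */
  }

  toy_cell_t cell;              /* fixed cell: stack storage, nothing to free */
  memset(&cell, 0, sizeof(cell));
  toy_handle_cell(&cell);
  return 0;
}
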
diff --git a/src/or/connection_or.h b/src/or/connection_or.h
index a4b10debb4..53e1f96879 100644
--- a/src/or/connection_or.h
+++ b/src/or/connection_or.h
@@ -97,6 +97,7 @@ void cell_pack(packed_cell_t *dest, const cell_t *src, int wide_circ_ids);
int var_cell_pack_header(const var_cell_t *cell, char *hdr_out,
int wide_circ_ids);
var_cell_t *var_cell_new(uint16_t payload_len);
+var_cell_t *var_cell_copy(const var_cell_t *src);
void var_cell_free(var_cell_t *cell);
/** DOCDOC */
diff --git a/src/test/test_channel.c b/src/test/test_channel.c
index 84eddea5b4..846e419fea 100644
--- a/src/test/test_channel.c
+++ b/src/test/test_channel.c
@@ -25,7 +25,9 @@ extern uint64_t estimated_total_queue_size;
static int test_chan_accept_cells = 0;
static int test_chan_fixed_cells_recved = 0;
+static cell_t * test_chan_last_seen_fixed_cell_ptr = NULL;
static int test_chan_var_cells_recved = 0;
+static var_cell_t * test_chan_last_seen_var_cell_ptr = NULL;
static int test_cells_written = 0;
static int test_destroy_not_pending_calls = 0;
static int test_doesnt_want_writes_count = 0;
@@ -70,6 +72,7 @@ static void test_channel_flushmux(void *arg);
static void test_channel_incoming(void *arg);
static void test_channel_lifecycle(void *arg);
static void test_channel_multi(void *arg);
+static void test_channel_queue_incoming(void *arg);
static void test_channel_queue_size(void *arg);
static void test_channel_write(void *arg);
@@ -179,7 +182,7 @@ chan_test_cell_handler(channel_t *ch,
tt_assert(ch);
tt_assert(cell);
- tor_free(cell);
+ test_chan_last_seen_fixed_cell_ptr = cell;
++test_chan_fixed_cells_recved;
done:
@@ -214,7 +217,7 @@ chan_test_var_cell_handler(channel_t *ch,
tt_assert(ch);
tt_assert(var_cell);
- tor_free(var_cell);
+ test_chan_last_seen_var_cell_ptr = var_cell;
++test_chan_var_cells_recved;
done:
@@ -608,7 +611,7 @@ test_channel_dumpstats(void *arg)
make_fake_cell(cell);
old_count = test_chan_fixed_cells_recved;
channel_queue_cell(ch, cell);
- cell = NULL;
+ tor_free(cell);
tt_int_op(test_chan_fixed_cells_recved, ==, old_count + 1);
tt_assert(ch->n_bytes_recved > 0);
tt_assert(ch->n_cells_recved > 0);
@@ -819,7 +822,7 @@ test_channel_incoming(void *arg)
make_fake_cell(cell);
old_count = test_chan_fixed_cells_recved;
channel_queue_cell(ch, cell);
- cell = NULL;
+ tor_free(cell);
tt_int_op(test_chan_fixed_cells_recved, ==, old_count + 1);
/* Receive a variable-size cell */
@@ -827,7 +830,7 @@ test_channel_incoming(void *arg)
make_fake_var_cell(var_cell);
old_count = test_chan_var_cells_recved;
channel_queue_var_cell(ch, var_cell);
- var_cell = NULL;
+ tor_free(var_cell);
tt_int_op(test_chan_var_cells_recved, ==, old_count + 1);
/* Close it */
@@ -1423,6 +1426,113 @@ test_channel_queue_impossible(void *arg)
}
static void
+test_channel_queue_incoming(void *arg)
+{
+ channel_t *ch = NULL;
+ cell_t *cell = NULL;
+ var_cell_t *var_cell = NULL;
+ int old_fixed_count, old_var_count;
+
+ (void)arg;
+
+ /* Mock these for duration of the test */
+ MOCK(scheduler_channel_doesnt_want_writes,
+ scheduler_channel_doesnt_want_writes_mock);
+ MOCK(scheduler_release_channel,
+ scheduler_release_channel_mock);
+
+ /* Accept cells to lower layer */
+ test_chan_accept_cells = 1;
+ /* Use default overhead factor */
+ test_overhead_estimate = 1.0f;
+
+ ch = new_fake_channel();
+ tt_assert(ch);
+ /* Start it off in OPENING */
+ ch->state = CHANNEL_STATE_OPENING;
+ /* We'll need a cmux */
+ ch->cmux = circuitmux_alloc();
+
+ /* Test cell handler getters */
+ tt_ptr_op(channel_get_cell_handler(ch), ==, NULL);
+ tt_ptr_op(channel_get_var_cell_handler(ch), ==, NULL);
+
+ /* Try to register it */
+ channel_register(ch);
+ tt_assert(ch->registered);
+
+ /* Open it */
+ channel_change_state(ch, CHANNEL_STATE_OPEN);
+ tt_int_op(ch->state, ==, CHANNEL_STATE_OPEN);
+
+ /* Assert that the incoming queue is empty */
+ tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
+
+ /* Queue an incoming fixed-length cell */
+ cell = tor_malloc_zero(sizeof(cell_t));
+ make_fake_cell(cell);
+ channel_queue_cell(ch, cell);
+
+ /* Assert that the incoming queue has one entry */
+ tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), ==, 1);
+
+ /* Queue an incoming var cell */
+ var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
+ make_fake_var_cell(var_cell);
+ channel_queue_var_cell(ch, var_cell);
+
+ /* Assert that the incoming queue has two entries */
+ tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), ==, 2);
+
+ /*
+ * Install cell handlers; this will drain the queue, so save the old
+ * cell counters first
+ */
+ old_fixed_count = test_chan_fixed_cells_recved;
+ old_var_count = test_chan_var_cells_recved;
+ channel_set_cell_handlers(ch,
+ chan_test_cell_handler,
+ chan_test_var_cell_handler);
+ tt_ptr_op(channel_get_cell_handler(ch), ==, chan_test_cell_handler);
+ tt_ptr_op(channel_get_var_cell_handler(ch), ==, chan_test_var_cell_handler);
+
+ /* Assert cells were received */
+ tt_int_op(test_chan_fixed_cells_recved, ==, old_fixed_count + 1);
+ tt_int_op(test_chan_var_cells_recved, ==, old_var_count + 1);
+
+ /*
+ * Assert that the pointers are different from the cells we allocated;
+ * when queueing cells with no incoming cell handlers installed, the
+ * channel layer should copy them to a new buffer, and free them after
+ * delivery. These pointers will have already been freed by the time
+ * we get here, so don't dereference them.
+ */
+ tt_ptr_op(test_chan_last_seen_fixed_cell_ptr, !=, cell);
+ tt_ptr_op(test_chan_last_seen_var_cell_ptr, !=, var_cell);
+
+ /* Assert queue is now empty */
+ tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
+
+ /* Close it; this contains an assertion that the incoming queue is empty */
+ channel_mark_for_close(ch);
+ tt_int_op(ch->state, ==, CHANNEL_STATE_CLOSING);
+ chan_test_finish_close(ch);
+ tt_int_op(ch->state, ==, CHANNEL_STATE_CLOSED);
+ channel_run_cleanup();
+ ch = NULL;
+
+ done:
+ free_fake_channel(ch);
+ tor_free(cell);
+ tor_free(var_cell);
+
+ UNMOCK(scheduler_channel_doesnt_want_writes);
+ UNMOCK(scheduler_release_channel);
+
+ return;
+}
+
+static void
test_channel_queue_size(void *arg)
{
channel_t *ch = NULL;
@@ -1666,6 +1776,7 @@ struct testcase_t channel_tests[] = {
{ "lifecycle_2", test_channel_lifecycle_2, TT_FORK, NULL, NULL },
{ "multi", test_channel_multi, TT_FORK, NULL, NULL },
{ "queue_impossible", test_channel_queue_impossible, TT_FORK, NULL, NULL },
+ { "queue_incoming", test_channel_queue_incoming, TT_FORK, NULL, NULL },
{ "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
{ "write", test_channel_write, TT_FORK, NULL, NULL },
END_OF_TESTCASES
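
One detail of the new queue_incoming test is worth calling out: the mock handlers record the last pointer they were handed, so after the handlers are installed the test can assert that what was delivered is a copy made by the channel layer, not the cell it originally queued (and that the already-freed copy is never dereferenced). A stripped-down sketch of that assertion pattern outside the tinytest harness, with hypothetical toy_* names and plain assert():

#include <assert.h>
#include <stdlib.h>
#include <string.h>

typedef struct toy_cell { unsigned char payload[16]; } toy_cell_t;

/* The mock handler just remembers the pointer it was handed. */
static toy_cell_t *last_seen = NULL;

static void
toy_mock_handler(toy_cell_t *cell)
{
  last_seen = cell;
}

/* Minimal stand-in for the channel layer's copy-then-deliver behaviour. */
static void
toy_queue_then_deliver(toy_cell_t *cell, void (*handler)(toy_cell_t *))
{
  toy_cell_t *copy = malloc(sizeof(*copy));
  memcpy(copy, cell, sizeof(*copy));
  handler(copy);
  free(copy);      /* freed after delivery, as channel_process_cells() does */
}

int
main(void)
{
  toy_cell_t *queued = calloc(1, sizeof(*queued));

  toy_queue_then_deliver(queued, toy_mock_handler);

  /* The handler saw a copy, never the cell that was queued; the copy has
   * already been freed, so it must not be dereferenced here. */
  assert(last_seen != queued);
  free(queued);
  return 0;
}
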