path: root/src/or/scheduler_kist.c
author    Matt Traudt <sirmatt@ksu.edu>    2017-12-11 09:03:16 -0500
committer Matt Traudt <sirmatt@ksu.edu>    2017-12-11 09:43:08 -0500
commit    273325e216ff10b2ce938b243122d08f075f7881 (patch)
tree      346473aeaebb2db57bb98e13ab6cba3ca9008325 /src/or/scheduler_kist.c
parent    5e7fdb8b3f397c8f9b1cecacf07a6bacf0d47e2d (diff)
Add all the missed scheduler_state assignments
Diffstat (limited to 'src/or/scheduler_kist.c')
-rw-r--r--  src/or/scheduler_kist.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
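
Every hunk in this change swaps a direct write to chan->scheduler_state for a call to the scheduler_set_channel_state() helper, so that all scheduler state transitions go through a single code path. As a rough sketch only (not Tor's actual implementation; the helper name get_sched_state_string() and the exact log format are illustrative assumptions), such a setter might look like this:

/* Illustrative sketch; the real scheduler_set_channel_state() lives in
 * src/or/scheduler.c. Assumes Tor's usual headers (or.h, channel.h,
 * scheduler.h, torlog.h) are already included. */
static const char *
get_sched_state_string(int state)
{
  switch (state) {
    case SCHED_CHAN_IDLE:              return "IDLE";
    case SCHED_CHAN_WAITING_FOR_CELLS: return "WAITING_FOR_CELLS";
    case SCHED_CHAN_WAITING_TO_WRITE:  return "WAITING_TO_WRITE";
    case SCHED_CHAN_PENDING:           return "PENDING";
    default:                           return "(unknown)";
  }
}

void
scheduler_set_channel_state(channel_t *chan, int new_state)
{
  /* Centralizing the assignment means every transition can be logged in
   * one place; callers no longer touch chan->scheduler_state directly. */
  log_debug(LD_SCHED, "chan=%" PRIu64 " scheduler state %s -> %s",
            chan->global_identifier,
            get_sched_state_string(chan->scheduler_state),
            get_sched_state_string(new_state));
  chan->scheduler_state = new_state;
}

With a helper along these lines, a call such as scheduler_set_channel_state(chan, SCHED_CHAN_PENDING) both updates the field and records the transition in the debug log, which is what each hunk below switches to.
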
diff --git a/src/or/scheduler_kist.c b/src/or/scheduler_kist.c
index e02926e478..6a5b8d4f41 100644
--- a/src/or/scheduler_kist.c
+++ b/src/or/scheduler_kist.c
@@ -611,7 +611,7 @@ kist_scheduler_run(void)
if (!CHANNEL_IS_OPEN(chan)) {
/* Channel isn't open so we put it back in IDLE mode. It is either
* renegotiating its TLS session or about to be released. */
- chan->scheduler_state = SCHED_CHAN_IDLE;
+ scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
continue;
}
/* flush_result has the # cells flushed */
@@ -632,7 +632,7 @@ kist_scheduler_run(void)
"stop scheduling it this round.",
channel_state_to_string(chan->state),
chan->scheduler_state);
- chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
continue;
}
}
@@ -659,14 +659,14 @@ kist_scheduler_run(void)
* SCHED_CHAN_WAITING_FOR_CELLS to SCHED_CHAN_IDLE and seeing if Tor
* starts having serious throughput issues. Best done in shadow/chutney.
*/
- chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
log_debug(LD_SCHED, "chan=%" PRIu64 " now waiting_for_cells",
chan->global_identifier);
} else if (!channel_more_to_flush(chan)) {
/* Case 2: no more cells to send, but still open for writes */
- chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
log_debug(LD_SCHED, "chan=%" PRIu64 " now waiting_for_cells",
chan->global_identifier);
} else if (!socket_can_write(&socket_table, chan)) {
@@ -680,7 +680,7 @@ kist_scheduler_run(void)
* after the scheduling loop is over. They can hopefully be taken care of
* in the next scheduling round.
*/
- chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
if (!to_readd) {
to_readd = smartlist_new();
}
@@ -691,7 +691,7 @@ kist_scheduler_run(void)
/* Case 4: cells to send, and still open for writes */
- chan->scheduler_state = SCHED_CHAN_PENDING;
+ scheduler_set_channel_state(chan, SCHED_CHAN_PENDING);
smartlist_pqueue_add(cp, scheduler_compare_channels,
offsetof(channel_t, sched_heap_idx), chan);
}
@@ -711,7 +711,7 @@ kist_scheduler_run(void)
/* Re-add any channels we need to */
if (to_readd) {
SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
- readd_chan->scheduler_state = SCHED_CHAN_PENDING;
+ scheduler_set_channel_state(readd_chan, SCHED_CHAN_PENDING);
if (!smartlist_contains(cp, readd_chan)) {
smartlist_pqueue_add(cp, scheduler_compare_channels,
offsetof(channel_t, sched_heap_idx), readd_chan);