summaryrefslogtreecommitdiff
path: root/src/common
diff options
context:
space:
mode:
authorNick Mathewson <nickm@torproject.org>2018-04-13 10:47:24 -0400
committerNick Mathewson <nickm@torproject.org>2018-04-13 10:47:24 -0400
commitb152d62cee7480ee7b9b68dd9b619db65b6cd112 (patch)
treeff89d131f60906f9d1b8e06af77883b3d1155769 /src/common
parentad57b1279ab68e204243ddf3cf841fd85606c331 (diff)
parent787bafc0f916c143ac244a217accf755817512df (diff)
downloadtor-b152d62cee7480ee7b9b68dd9b619db65b6cd112.tar.gz
tor-b152d62cee7480ee7b9b68dd9b619db65b6cd112.zip
Merge branch 'token_bucket_refactor_squashed'
Diffstat (limited to 'src/common')
-rw-r--r--src/common/compat_time.c13
-rw-r--r--src/common/compat_time.h1
-rw-r--r--src/common/include.am2
-rw-r--r--src/common/token_bucket.c199
-rw-r--r--src/common/token_bucket.h75
5 files changed, 290 insertions, 0 deletions
diff --git a/src/common/compat_time.c b/src/common/compat_time.c
index 183a60a480..b940447b67 100644
--- a/src/common/compat_time.c
+++ b/src/common/compat_time.c
@@ -830,11 +830,24 @@ monotime_coarse_stamp_units_to_approx_msec(uint64_t units)
return (abstime_diff * mach_time_info.numer) /
(mach_time_info.denom * ONE_MILLION);
}
+/** Convert an approximate number of milliseconds into coarse-timestamp
+ * units. Inverse of monotime_coarse_stamp_units_to_approx_msec() above.
+ * Mach variant: scale msec up to mach absolute-time units, then apply the
+ * same right-shift that produces coarse stamps. */
+uint64_t
+monotime_msec_to_approx_coarse_stamp_units(uint64_t msec)
+{
+  uint64_t abstime_val =
+    (((uint64_t)msec) * ONE_MILLION * mach_time_info.denom) /
+    mach_time_info.numer;
+  return abstime_val >> monotime_shift;
+}
#else
uint64_t
monotime_coarse_stamp_units_to_approx_msec(uint64_t units)
{
return (units * 1000) / STAMP_TICKS_PER_SECOND;
}
+/** Convert an approximate number of milliseconds into coarse-timestamp
+ * units. Inverse of monotime_coarse_stamp_units_to_approx_msec() above. */
+uint64_t
+monotime_msec_to_approx_coarse_stamp_units(uint64_t msec)
+{
+  return (msec * STAMP_TICKS_PER_SECOND) / 1000;
+}
#endif
diff --git a/src/common/compat_time.h b/src/common/compat_time.h
index 6ddd11883d..75b57f6f24 100644
--- a/src/common/compat_time.h
+++ b/src/common/compat_time.h
@@ -150,6 +150,7 @@ uint32_t monotime_coarse_to_stamp(const monotime_coarse_t *t);
* into an approximate number of milliseconds.
*/
uint64_t monotime_coarse_stamp_units_to_approx_msec(uint64_t units);
+uint64_t monotime_msec_to_approx_coarse_stamp_units(uint64_t msec);
uint32_t monotime_coarse_get_stamp(void);
#if defined(MONOTIME_COARSE_TYPE_IS_DIFFERENT)
diff --git a/src/common/include.am b/src/common/include.am
index 73c51ff0b2..87ab9d79e9 100644
--- a/src/common/include.am
+++ b/src/common/include.am
@@ -97,6 +97,7 @@ LIBOR_A_SRC = \
src/common/util_process.c \
src/common/sandbox.c \
src/common/storagedir.c \
+ src/common/token_bucket.c \
src/common/workqueue.c \
$(libor_extra_source) \
$(threads_impl_source) \
@@ -184,6 +185,7 @@ COMMONHEADERS = \
src/common/storagedir.h \
src/common/testsupport.h \
src/common/timers.h \
+ src/common/token_bucket.h \
src/common/torint.h \
src/common/torlog.h \
src/common/tortls.h \
diff --git a/src/common/token_bucket.c b/src/common/token_bucket.c
new file mode 100644
index 0000000000..6af2982147
--- /dev/null
+++ b/src/common/token_bucket.c
@@ -0,0 +1,199 @@
+/* Copyright (c) 2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file token_bucket.c
+ * \brief Functions to use and manipulate token buckets, used for
+ * rate-limiting on connections and globally.
+ *
+ * Tor uses these token buckets to keep track of bandwidth usage, and
+ * sometimes other things too.
+ *
+ * The time units we use internally are based on "timestamp" units -- see
+ * monotime_coarse_to_stamp() for a rationale.
+ *
+ * Token buckets may become negative.
+ **/
+
+#define TOKEN_BUCKET_PRIVATE
+
+#include "token_bucket.h"
+#include "util_bug.h"
+
+/** Convert a rate in bytes per second to a rate in bytes per step */
+static uint32_t
+rate_per_sec_to_rate_per_step(uint32_t rate)
+{
+  /*
+    The precise calculation we'd want to do is
+
+    (rate / 1000) * to_approximate_msec(TICKS_PER_STEP). But to minimize
+    rounding error, we do it this way instead, and divide last.
+
+    Widen "rate" to 64 bits before multiplying so rate*TICKS_PER_STEP
+    cannot wrap around in 32-bit arithmetic, and divide by 1000 *before*
+    narrowing the result: a cast binds tighter than '/', so casting first
+    would truncate the 64-bit intermediate and misbehave for high rates.
+  */
+  return (uint32_t)
+    (monotime_coarse_stamp_units_to_approx_msec(
+                             ((uint64_t)rate) * TICKS_PER_STEP) / 1000);
+}
+
+/**
+ * Initialize a token bucket in *<b>bucket</b>, set up to allow <b>rate</b>
+ * bytes per second, with a maximum burst of <b>burst</b> bytes. The bucket
+ * is created such that <b>now_ts</b> is the current timestamp. The bucket
+ * starts out full.
+ */
+void
+token_bucket_init(token_bucket_t *bucket,
+                  uint32_t rate,
+                  uint32_t burst,
+                  uint32_t now_ts)
+{
+  memset(bucket, 0, sizeof(token_bucket_t));
+  /* Order matters: adjust() sets rate/burst, then reset() fills both
+   * buckets to "burst" and stamps the refill time. */
+  token_bucket_adjust(bucket, rate, burst);
+  token_bucket_reset(bucket, now_ts);
+}
+
+/**
+ * Change the configured rate (in bytes per second) and burst (in bytes)
+ * for the token bucket in *<b>bucket</b>.
+ */
+void
+token_bucket_adjust(token_bucket_t *bucket,
+                    uint32_t rate,
+                    uint32_t burst)
+{
+  tor_assert_nonfatal(rate > 0);
+  tor_assert_nonfatal(burst > 0);
+  /* Clamp so "burst" fits in the signed 32-bit bucket fields. */
+  if (burst > TOKEN_BUCKET_MAX_BURST)
+    burst = TOKEN_BUCKET_MAX_BURST;
+
+  bucket->rate = rate_per_sec_to_rate_per_step(rate);
+  bucket->burst = burst;
+  /* If the new burst is smaller, don't leave more tokens than it allows. */
+  bucket->read_bucket = MIN(bucket->read_bucket, (int32_t)burst);
+  bucket->write_bucket = MIN(bucket->write_bucket, (int32_t)burst);
+}
+
+/**
+ * Reset <b>bucket</b> to be full, as of timestamp <b>now_ts</b>.
+ */
+void
+token_bucket_reset(token_bucket_t *bucket,
+                   uint32_t now_ts)
+{
+  /* Fill both buckets and restart refill bookkeeping from now_ts. */
+  bucket->read_bucket = bucket->burst;
+  bucket->write_bucket = bucket->burst;
+  bucket->last_refilled_at_ts = now_ts;
+}
+
+/* Helper: add rate*elapsed_steps tokens to *bucketptr, clamping the result
+ * at "burst". Returns true iff the bucket was empty (<= 0) and became
+ * positive. See token_bucket_refill. */
+static int
+refill_single_bucket(int32_t *bucketptr,
+                     const uint32_t rate,
+                     const int32_t burst,
+                     const uint32_t elapsed_steps)
+{
+  const int was_empty = (*bucketptr <= 0);
+  /* The casts here prevent an underflow.
+   *
+   * Note that even if the bucket value is negative, subtracting it from
+   * "burst" will still produce a correct result. If this result is
+   * ridiculously high, then the "elapsed_steps > gap / rate" check below
+   * should catch it. */
+  const size_t gap = ((size_t)burst) - ((size_t)*bucketptr);
+
+  if (elapsed_steps > gap / rate) {
+    /* Refilling by rate*elapsed_steps would overshoot: saturate at burst. */
+    *bucketptr = burst;
+  } else {
+    *bucketptr += rate * elapsed_steps;
+  }
+
+  return was_empty && *bucketptr > 0;
+}
+
+/**
+ * Refill <b>bucket</b> as appropriate, given that the current timestamp
+ * is <b>now_ts</b>.
+ *
+ * Return a bitmask containing TB_READ iff read bucket was empty and became
+ * nonempty, and TB_WRITE iff the write bucket was empty and became nonempty.
+ */
+int
+token_bucket_refill(token_bucket_t *bucket,
+                    uint32_t now_ts)
+{
+  /* Unsigned subtraction stays well-defined even if the 32-bit stamp
+   * counter has wrapped since the last refill. */
+  const uint32_t elapsed_ticks = (now_ts - bucket->last_refilled_at_ts);
+  if (elapsed_ticks > UINT32_MAX-(300*1000)) {
+    /* Either about 48 days have passed since the last refill, or the
+     * monotonic clock has somehow moved backwards. (We're looking at you,
+     * Windows.). We accept up to a 5 minute jump backwards as
+     * "unremarkable".
+     */
+    return 0;
+  }
+  const uint32_t elapsed_steps = elapsed_ticks / TICKS_PER_STEP;
+
+  if (!elapsed_steps) {
+    /* Note that if less than one whole step elapsed, we don't advance the
+     * time in last_refilled_at_ts. That's intentional: we want to make sure
+     * that we add some bytes to it eventually. */
+    return 0;
+  }
+
+  int flags = 0;
+  if (refill_single_bucket(&bucket->read_bucket,
+                           bucket->rate, bucket->burst, elapsed_steps))
+    flags |= TB_READ;
+  if (refill_single_bucket(&bucket->write_bucket,
+                           bucket->rate, bucket->burst, elapsed_steps))
+    flags |= TB_WRITE;
+
+  bucket->last_refilled_at_ts = now_ts;
+  return flags;
+}
+
+/* Helper: subtract <b>n</b> tokens from *bucketptr; the bucket is allowed
+ * to go negative. Returns true iff the bucket held tokens (> 0) before and
+ * no longer does afterwards. A negative n is a caller bug and is ignored. */
+static int
+decrement_single_bucket(int32_t *bucketptr,
+                        ssize_t n)
+{
+  if (BUG(n < 0))
+    return 0;
+  const int becomes_empty = *bucketptr > 0 && n >= *bucketptr;
+  *bucketptr -= n;
+  return becomes_empty;
+}
+
+/**
+ * Decrement the read token bucket in <b>bucket</b> by <b>n</b> bytes.
+ *
+ * The bucket may become negative (see the file comment above).
+ *
+ * Return true if the bucket was nonempty and became empty; return false
+ * otherwise.
+ */
+int
+token_bucket_dec_read(token_bucket_t *bucket,
+                      ssize_t n)
+{
+  return decrement_single_bucket(&bucket->read_bucket, n);
+}
+
+/**
+ * Decrement the write token bucket in <b>bucket</b> by <b>n</b> bytes.
+ *
+ * The bucket may become negative (see the file comment above).
+ *
+ * Return true if the bucket was nonempty and became empty; return false
+ * otherwise.
+ */
+int
+token_bucket_dec_write(token_bucket_t *bucket,
+                       ssize_t n)
+{
+  return decrement_single_bucket(&bucket->write_bucket, n);
+}
+
+/**
+ * As token_bucket_dec_read and token_bucket_dec_write, in a single operation.
+ */
+void
+token_bucket_dec(token_bucket_t *bucket,
+                 ssize_t n_read, ssize_t n_written)
+{
+  token_bucket_dec_read(bucket, n_read);
+  /* Bug fix: n_written must be charged to the *write* bucket. Previously
+   * this called token_bucket_dec_read() twice, so writes were never
+   * accounted for and reads were double-charged. */
+  token_bucket_dec_write(bucket, n_written);
+}
+
diff --git a/src/common/token_bucket.h b/src/common/token_bucket.h
new file mode 100644
index 0000000000..2d1ccd5cf3
--- /dev/null
+++ b/src/common/token_bucket.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file token_bucket.h
+ * \brief Headers for token_bucket.c
+ **/
+
+#ifndef TOR_TOKEN_BUCKET_H
+#define TOR_TOKEN_BUCKET_H
+
+#include "torint.h"
+
+typedef struct token_bucket_t {
+  uint32_t rate;        /**< Refill amount, in bytes per "step". */
+  int32_t burst;        /**< Maximum bucket value, in bytes. */
+  int32_t read_bucket;  /**< Current read tokens; may go negative. */
+  int32_t write_bucket; /**< Current write tokens; may go negative. */
+  uint32_t last_refilled_at_ts; /**< Coarse timestamp of the last refill. */
+} token_bucket_t;
+
+#define TOKEN_BUCKET_MAX_BURST INT32_MAX
+
+void token_bucket_init(token_bucket_t *bucket,
+ uint32_t rate,
+ uint32_t burst,
+ uint32_t now_ts);
+
+void token_bucket_adjust(token_bucket_t *bucket,
+ uint32_t rate, uint32_t burst);
+
+void token_bucket_reset(token_bucket_t *bucket,
+ uint32_t now_ts);
+
+#define TB_READ 1
+#define TB_WRITE 2
+
+int token_bucket_refill(token_bucket_t *bucket,
+ uint32_t now_ts);
+
+int token_bucket_dec_read(token_bucket_t *bucket,
+ ssize_t n);
+int token_bucket_dec_write(token_bucket_t *bucket,
+ ssize_t n);
+
+void token_bucket_dec(token_bucket_t *bucket,
+ ssize_t n_read, ssize_t n_written);
+
+/** Return the number of bytes available for reading (0 if the read
+ * bucket has gone negative). */
+static inline size_t token_bucket_get_read(const token_bucket_t *bucket);
+static inline size_t
+token_bucket_get_read(const token_bucket_t *bucket)
+{
+  const ssize_t b = bucket->read_bucket;
+  return b >= 0 ? b : 0;
+}
+
+/** Return the number of bytes available for writing (0 if the write
+ * bucket has gone negative). */
+static inline size_t token_bucket_get_write(const token_bucket_t *bucket);
+static inline size_t
+token_bucket_get_write(const token_bucket_t *bucket)
+{
+  const ssize_t b = bucket->write_bucket;
+  return b >= 0 ? b : 0;
+}
+
+#ifdef TOKEN_BUCKET_PRIVATE
+
+/* To avoid making the rates too small, we consider units of "steps",
+ * where a "step" is defined as this many timestamp ticks. Keep this
+ * a power of two if you can. */
+#define TICKS_PER_STEP 16
+
+#endif
+
+#endif /* TOR_TOKEN_BUCKET_H */
+