Diffstat (limited to 'src/feature/dircommon')
-rw-r--r--  src/feature/dircommon/consdiff.c            1414
-rw-r--r--  src/feature/dircommon/consdiff.h              99
-rw-r--r--  src/feature/dircommon/dir_connection_st.h     67
-rw-r--r--  src/feature/dircommon/directory.c            651
-rw-r--r--  src/feature/dircommon/directory.h            129
-rw-r--r--  src/feature/dircommon/fp_pair.c              315
-rw-r--r--  src/feature/dircommon/fp_pair.h               56
-rw-r--r--  src/feature/dircommon/vote_timing_st.h        24
-rw-r--r--  src/feature/dircommon/voting_schedule.c      194
-rw-r--r--  src/feature/dircommon/voting_schedule.h       65
10 files changed, 3014 insertions, 0 deletions
diff --git a/src/feature/dircommon/consdiff.c b/src/feature/dircommon/consdiff.c
new file mode 100644
index 0000000000..d0f7594ce3
--- /dev/null
+++ b/src/feature/dircommon/consdiff.c
@@ -0,0 +1,1414 @@
+/* Copyright (c) 2014, Daniel Martí
+ * Copyright (c) 2014-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file consdiff.c
+ * \brief Consensus diff implementation, including both the generation and the
+ * application of diffs in a minimal ed format.
+ *
+ * The consensus diff application is done in consdiff_apply_diff, which relies
+ * on apply_ed_diff for the main ed diff part and on some digest helper
+ * functions to check the digest hashes found in the consensus diff header.
+ *
+ * The consensus diff generation is more complex. consdiff_gen_diff generates
+ * it, relying on gen_ed_diff to generate the ed diff and some digest helper
+ * functions to generate the digest hashes.
+ *
+ * gen_ed_diff is the tricky bit. In its simplest form, it will take quadratic
+ * time and linear space to generate an ed diff given two smartlists. As shown
+ * in its comment section, calling calc_changes on the two consensuses in
+ * their entirety will calculate what is to be added and what is to be deleted
+ * in the diff.
+ * Its comment section briefly explains how it works.
+ *
+ * In our case specific to consensuses, we take advantage of the fact that
+ * consensuses list routers sorted by their identities. We use that
+ * information to avoid running calc_changes on the whole smartlists.
+ * gen_ed_diff will navigate through the two consensuses identity by identity
+ * and will send small pairs of slices to calc_changes, keeping the running
+ * time near-linear. This is explained in more detail in the gen_ed_diff
+ * comments.
+ *
+ * The allocation strategy tries to save time and memory by avoiding needless
+ * copies. Instead of actually splitting the inputs into separate strings, we
+ * allocate cdline_t objects, each of which represents a line in the original
+ * object or in the output. We use memarea_t allocators to manage the
+ * temporary memory we use when generating or applying diffs.
+ **/
+
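+/* An illustrative example of the diff format handled here (the digest
+ * values in the "hash" line are placeholders, not real hashes):
+ *
+ *   network-status-diff-version 1
+ *   hash <base16 SHA3-256 of base, as signed> <base16 SHA3-256 of target>
+ *   173,$d
+ *   31c
+ *   <replacement for old line 31>
+ *   .
+ *   20a
+ *   <new line to insert after old line 20>
+ *   .
+ *
+ * The first two lines form the consensus diff header; the rest is an ed
+ * script whose commands are sorted by decreasing line number, with each
+ * block of inserted lines terminated by a lone ".".
+ */
+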
+#define CONSDIFF_PRIVATE
+
+#include "core/or/or.h"
+#include "feature/dircommon/consdiff.h"
+#include "lib/memarea/memarea.h"
+#include "feature/dirparse/ns_parse.h"
+
+static const char* ns_diff_version = "network-status-diff-version 1";
+static const char* hash_token = "hash";
+
+static char *consensus_join_lines(const smartlist_t *inp);
+
+/** Return true iff a and b have the same contents. */
+STATIC int
+lines_eq(const cdline_t *a, const cdline_t *b)
+{
+ return a->len == b->len && fast_memeq(a->s, b->s, a->len);
+}
+
+/** Return true iff a has the same contents as the nul-terminated string b. */
+STATIC int
+line_str_eq(const cdline_t *a, const char *b)
+{
+ const size_t len = strlen(b);
+ tor_assert(len <= UINT32_MAX);
+ cdline_t bline = { b, (uint32_t)len };
+ return lines_eq(a, &bline);
+}
+
+/** Return true iff a begins with the same contents as the nul-terminated
+ * string b. */
+static int
+line_starts_with_str(const cdline_t *a, const char *b)
+{
+ const size_t len = strlen(b);
+ tor_assert(len <= UINT32_MAX);
+ return a->len >= len && fast_memeq(a->s, b, len);
+}
+
+/** Return a new cdline_t holding as its contents the nul-terminated
+ * string s. Use the provided memory area for storage. */
+static cdline_t *
+cdline_linecpy(memarea_t *area, const char *s)
+{
+ size_t len = strlen(s);
+ const char *ss = memarea_memdup(area, s, len);
+ cdline_t *line = memarea_alloc(area, sizeof(cdline_t));
+ line->s = ss;
+ line->len = (uint32_t)len;
+ return line;
+}
+
+/** Add a cdline_t to <b>lst</b> holding as its contents the nul-terminated
+ * string s. Use the provided memory area for storage. */
+STATIC void
+smartlist_add_linecpy(smartlist_t *lst, memarea_t *area, const char *s)
+{
+ smartlist_add(lst, cdline_linecpy(area, s));
+}
+
+/** Compute the digest of <b>cons</b>, and store the result in
+ * <b>digest_out</b>. Return 0 on success, -1 on failure. */
+/* This is a separate, mockable function so that we can override it when
+ * fuzzing. */
+MOCK_IMPL(STATIC int,
+consensus_compute_digest,(const char *cons,
+ consensus_digest_t *digest_out))
+{
+ int r = crypto_digest256((char*)digest_out->sha3_256,
+ cons, strlen(cons), DIGEST_SHA3_256);
+ return r;
+}
+
+/** Compute the digest-as-signed of <b>cons</b>, and store the result in
+ * <b>digest_out</b>. Return 0 on success, -1 on failure. */
+/* This is a separate, mockable function so that we can override it when
+ * fuzzing. */
+MOCK_IMPL(STATIC int,
+consensus_compute_digest_as_signed,(const char *cons,
+ consensus_digest_t *digest_out))
+{
+ return router_get_networkstatus_v3_sha3_as_signed(digest_out->sha3_256,
+ cons);
+}
+
+/** Return true iff <b>d1</b> and <b>d2</b> contain the same digest */
+/* This is a separate, mockable function so that we can override it when
+ * fuzzing. */
+MOCK_IMPL(STATIC int,
+consensus_digest_eq,(const uint8_t *d1,
+ const uint8_t *d2))
+{
+ return fast_memeq(d1, d2, DIGEST256_LEN);
+}
+
+/** Create (allocate) a new slice from a smartlist. Assumes that the start
+ * and the end indexes are within the bounds of the initial smartlist. The end
+ * element is not part of the resulting slice. If end is -1, the slice
+ * extends to the end of the smartlist.
+ */
+STATIC smartlist_slice_t *
+smartlist_slice(const smartlist_t *list, int start, int end)
+{
+ int list_len = smartlist_len(list);
+ tor_assert(start >= 0);
+ tor_assert(start <= list_len);
+ if (end == -1) {
+ end = list_len;
+ }
+ tor_assert(start <= end);
+
+ smartlist_slice_t *slice = tor_malloc(sizeof(smartlist_slice_t));
+ slice->list = list;
+ slice->offset = start;
+ slice->len = end - start;
+ return slice;
+}
+
+/** Helper: Compute the longest common subsequence lengths for the two slices.
+ * Used as part of the diff generation to find the column at which to split
+ * slice2 while still having the optimal solution.
+ * If direction is -1, the navigation is reversed. Otherwise it must be 1.
+ * The length of the resulting integer array is that of the second slice plus
+ * one.
+ */
+STATIC int *
+lcs_lengths(const smartlist_slice_t *slice1, const smartlist_slice_t *slice2,
+ int direction)
+{
+ size_t a_size = sizeof(int) * (slice2->len+1);
+
+ /* Resulting lcs lengths. */
+ int *result = tor_malloc_zero(a_size);
+ /* Copy of the lcs lengths from the last iteration. */
+ int *prev = tor_malloc(a_size);
+
+ tor_assert(direction == 1 || direction == -1);
+
+ int si = slice1->offset;
+ if (direction == -1) {
+ si += (slice1->len-1);
+ }
+
+ for (int i = 0; i < slice1->len; ++i, si+=direction) {
+
+ const cdline_t *line1 = smartlist_get(slice1->list, si);
+ /* Store the last results. */
+ memcpy(prev, result, a_size);
+
+ int sj = slice2->offset;
+ if (direction == -1) {
+ sj += (slice2->len-1);
+ }
+
+ for (int j = 0; j < slice2->len; ++j, sj+=direction) {
+
+ const cdline_t *line2 = smartlist_get(slice2->list, sj);
+ if (lines_eq(line1, line2)) {
+ /* If the lines are equal, the lcs is one line longer. */
+ result[j + 1] = prev[j] + 1;
+ } else {
+ /* If not, see what lcs parent path is longer. */
+ result[j + 1] = MAX(result[j], prev[j + 1]);
+ }
+ }
+ }
+ tor_free(prev);
+ return result;
+}
+
+/** Helper: Trim any lines that are equal at the start or at the end of
+ * both slices.
+ */
+STATIC void
+trim_slices(smartlist_slice_t *slice1, smartlist_slice_t *slice2)
+{
+ while (slice1->len>0 && slice2->len>0) {
+ const cdline_t *line1 = smartlist_get(slice1->list, slice1->offset);
+ const cdline_t *line2 = smartlist_get(slice2->list, slice2->offset);
+ if (!lines_eq(line1, line2)) {
+ break;
+ }
+ slice1->offset++; slice1->len--;
+ slice2->offset++; slice2->len--;
+ }
+
+ int i1 = (slice1->offset+slice1->len)-1;
+ int i2 = (slice2->offset+slice2->len)-1;
+
+ while (slice1->len>0 && slice2->len>0) {
+ const cdline_t *line1 = smartlist_get(slice1->list, i1);
+ const cdline_t *line2 = smartlist_get(slice2->list, i2);
+ if (!lines_eq(line1, line2)) {
+ break;
+ }
+ i1--;
+ slice1->len--;
+ i2--;
+ slice2->len--;
+ }
+}
+
+/** Like smartlist_string_pos, but uses a cdline_t, and is restricted to the
+ * bounds of the slice.
+ */
+STATIC int
+smartlist_slice_string_pos(const smartlist_slice_t *slice,
+ const cdline_t *string)
+{
+ int end = slice->offset + slice->len;
+ for (int i = slice->offset; i < end; ++i) {
+ const cdline_t *el = smartlist_get(slice->list, i);
+ if (lines_eq(el, string)) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/** Helper: Set all the appropriate changed booleans to true. The first slice
+ * must be of length 0 or 1. All the lines of slice1 and slice2 which are not
+ * present in the other slice will be set to changed in their bool array.
+ * The two changed bool arrays are passed in the same order as the slices.
+ */
+STATIC void
+set_changed(bitarray_t *changed1, bitarray_t *changed2,
+ const smartlist_slice_t *slice1, const smartlist_slice_t *slice2)
+{
+ int toskip = -1;
+ tor_assert(slice1->len == 0 || slice1->len == 1);
+
+ if (slice1->len == 1) {
+ const cdline_t *line_common = smartlist_get(slice1->list, slice1->offset);
+ toskip = smartlist_slice_string_pos(slice2, line_common);
+ if (toskip == -1) {
+ bitarray_set(changed1, slice1->offset);
+ }
+ }
+ int end = slice2->offset + slice2->len;
+ for (int i = slice2->offset; i < end; ++i) {
+ if (i != toskip) {
+ bitarray_set(changed2, i);
+ }
+ }
+}
+
+/*
+ * Helper: Given that slice1 has been split by half into top and bot, we want
+ * to fetch the column at which to split slice2 so that we are still on track
+ * to the optimal diff solution, i.e. the shortest one. We use lcs_lengths
+ * since the shortest diff is just another way to say the longest common
+ * subsequence.
+ */
+static int
+optimal_column_to_split(const smartlist_slice_t *top,
+ const smartlist_slice_t *bot,
+ const smartlist_slice_t *slice2)
+{
+ int *lens_top = lcs_lengths(top, slice2, 1);
+ int *lens_bot = lcs_lengths(bot, slice2, -1);
+ int column=0, max_sum=-1;
+
+ for (int i = 0; i < slice2->len+1; ++i) {
+ int sum = lens_top[i] + lens_bot[slice2->len-i];
+ if (sum > max_sum) {
+ column = i;
+ max_sum = sum;
+ }
+ }
+ tor_free(lens_top);
+ tor_free(lens_bot);
+
+ return column;
+}
+
+/**
+ * Helper: Figure out what elements are new or gone on the second smartlist
+ * relative to the first smartlist, and store the booleans in the bitarrays.
+ * True on the first bitarray means the element is gone, true on the second
+ * bitarray means it's new.
+ *
+ * In its base case, either of the smartlists is of length <= 1 and we can
+ * quickly see what elements are new or are gone. In the other case, we will
+ * split one smartlist by half and we'll use optimal_column_to_split to find
+ * the optimal column at which to split the second smartlist so that we are
+ * finding the smallest diff possible.
+ */
+STATIC void
+calc_changes(smartlist_slice_t *slice1,
+ smartlist_slice_t *slice2,
+ bitarray_t *changed1, bitarray_t *changed2)
+{
+ trim_slices(slice1, slice2);
+
+ if (slice1->len <= 1) {
+ set_changed(changed1, changed2, slice1, slice2);
+
+ } else if (slice2->len <= 1) {
+ set_changed(changed2, changed1, slice2, slice1);
+
+ /* Keep on splitting the slices in two. */
+ } else {
+ smartlist_slice_t *top, *bot, *left, *right;
+
+ /* Split the first slice in half. */
+ int mid = slice1->len/2;
+ top = smartlist_slice(slice1->list, slice1->offset, slice1->offset+mid);
+ bot = smartlist_slice(slice1->list, slice1->offset+mid,
+ slice1->offset+slice1->len);
+
+ /* Split the second slice by the optimal column. */
+ int mid2 = optimal_column_to_split(top, bot, slice2);
+ left = smartlist_slice(slice2->list, slice2->offset, slice2->offset+mid2);
+ right = smartlist_slice(slice2->list, slice2->offset+mid2,
+ slice2->offset+slice2->len);
+
+ calc_changes(top, left, changed1, changed2);
+ calc_changes(bot, right, changed1, changed2);
+ tor_free(top);
+ tor_free(bot);
+ tor_free(left);
+ tor_free(right);
+ }
+}
+
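+/* Illustrative example for calc_changes(): with slice1 = { "A", "B", "C" }
+ * and slice2 = { "A", "C" } (one line per string), trim_slices() strips the
+ * common "A" prefix and "C" suffix, leaving slice1 = { "B" } and an empty
+ * slice2. set_changed() then marks only "B" in changed1 and nothing in
+ * changed2, so the resulting diff deletes that single line and adds
+ * nothing. */
+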
+/* This table is from crypto.c. The SP and PAD defines are different. */
+#define NOT_VALID_BASE64 255
+#define X NOT_VALID_BASE64
+#define SP NOT_VALID_BASE64
+#define PAD NOT_VALID_BASE64
+static const uint8_t base64_compare_table[256] = {
+ X, X, X, X, X, X, X, X, X, SP, SP, SP, X, SP, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ SP, X, X, X, X, X, X, X, X, X, X, 62, X, X, X, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, X, X, X, PAD, X, X,
+ X, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, X, X, X, X, X,
+ X, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,
+};
+
+/** Helper: Get the identity hash from a router line, assuming that the line
+ * at least appears to be a router line and thus starts with "r ".
+ *
+ * If an identity hash is found, store it (without decoding it) in
+ * <b>hash_out</b>, and return 0. On failure, return -1.
+ */
+STATIC int
+get_id_hash(const cdline_t *line, cdline_t *hash_out)
+{
+ if (line->len < 2)
+ return -1;
+
+ /* Skip the router name. */
+ const char *hash = memchr(line->s + 2, ' ', line->len - 2);
+ if (!hash) {
+ return -1;
+ }
+
+ hash++;
+ const char *hash_end = hash;
+ /* Stop when the first non-base64 character is found. Use unsigned chars to
+ * avoid negative indexes causing crashes.
+ */
+ while (base64_compare_table[*((unsigned char*)hash_end)]
+ != NOT_VALID_BASE64 &&
+ hash_end < line->s + line->len) {
+ hash_end++;
+ }
+
+ /* Empty hash. */
+ if (hash_end == hash) {
+ return -1;
+ }
+
+ hash_out->s = hash;
+ /* Always true because lines are limited to this length */
+ tor_assert(hash_end >= hash);
+ tor_assert((size_t)(hash_end - hash) <= UINT32_MAX);
+ hash_out->len = (uint32_t)(hash_end - hash);
+
+ return 0;
+}
+
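+/* Illustrative example (hypothetical line): for an input of the form
+ *   "r ExampleOR AAAAAAAAAAAAAAAAAAAAAAAAAAA <remaining r-line fields>"
+ * get_id_hash() skips "r " and the nickname, points hash_out at the run of
+ * base64 characters that follows (here the string of 'A's), and stops at
+ * the first non-base64 byte (the space after the identity). The hash is
+ * neither decoded nor copied. */
+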
+/** Helper: Check that a line is a valid router entry. We must at least be
+ * able to fetch a proper identity hash from it for it to be valid.
+ */
+STATIC int
+is_valid_router_entry(const cdline_t *line)
+{
+ if (line->len < 2 || fast_memneq(line->s, "r ", 2))
+ return 0;
+ cdline_t tmp;
+ return (get_id_hash(line, &tmp) == 0);
+}
+
+/** Helper: Find the next router line starting at the current position.
+ * Assumes that cur is lower than the length of the smartlist, i.e. it is a
+ * line within the bounds of the consensus. The only exception is when we
+ * don't want to skip the first line, in which case cur will be -1.
+ */
+STATIC int
+next_router(const smartlist_t *cons, int cur)
+{
+ int len = smartlist_len(cons);
+ tor_assert(cur >= -1 && cur < len);
+
+ if (++cur >= len) {
+ return len;
+ }
+
+ const cdline_t *line = smartlist_get(cons, cur);
+ while (!is_valid_router_entry(line)) {
+ if (++cur >= len) {
+ return len;
+ }
+ line = smartlist_get(cons, cur);
+ }
+ return cur;
+}
+
+/** Helper: compare two base64-encoded identity hashes, which may be of
+ * different lengths. Comparison ends when the first non-base64 char is found.
+ */
+STATIC int
+base64cmp(const cdline_t *hash1, const cdline_t *hash2)
+{
+ /* NULL is always lower, useful for last_hash which starts at NULL. */
+ if (!hash1->s && !hash2->s) {
+ return 0;
+ }
+ if (!hash1->s) {
+ return -1;
+ }
+ if (!hash2->s) {
+ return 1;
+ }
+
+ /* Don't index with a char; char may be signed. */
+ const unsigned char *a = (unsigned char*)hash1->s;
+ const unsigned char *b = (unsigned char*)hash2->s;
+ const unsigned char *a_end = a + hash1->len;
+ const unsigned char *b_end = b + hash2->len;
+ while (1) {
+ uint8_t av = base64_compare_table[*a];
+ uint8_t bv = base64_compare_table[*b];
+ if (av == NOT_VALID_BASE64) {
+ if (bv == NOT_VALID_BASE64) {
+ /* Both ended with exactly the same characters. */
+ return 0;
+ } else {
+ /* hash2 goes on longer than hash1 and thus hash1 is lower. */
+ return -1;
+ }
+ } else if (bv == NOT_VALID_BASE64) {
+ /* hash1 goes on longer than hash2 and thus hash1 is greater. */
+ return 1;
+ } else if (av < bv) {
+ /* The first difference shows that hash1 is lower. */
+ return -1;
+ } else if (av > bv) {
+ /* The first difference shows that hash1 is greater. */
+ return 1;
+ } else {
+ a++;
+ b++;
+ if (a == a_end) {
+ if (b == b_end) {
+ return 0;
+ } else {
+ return -1;
+ }
+ } else if (b == b_end) {
+ return 1;
+ }
+ }
+ }
+}
+
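+/* Illustrative examples: base64cmp() orders by base64 value rather than by
+ * ASCII value, so "A0" sorts after "Az" ('0' decodes to 52 while 'z'
+ * decodes to 51). The comparison also stops at the first non-base64 byte,
+ * so two hashes that differ only after a space or an '=' compare equal. */
+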
+/** Structure used to remember the previous and current identity hash of
+ * the "r " lines in a consensus, to enforce well-ordering. */
+typedef struct router_id_iterator_t {
+ cdline_t last_hash;
+ cdline_t hash;
+} router_id_iterator_t;
+
+/**
+ * Initializer for a router_id_iterator_t.
+ */
+#define ROUTER_ID_ITERATOR_INIT { { NULL, 0 }, { NULL, 0 } }
+
+/** Given an index *<b>idxp</b> into the consensus at <b>cons</b>, advance
+ * the index to the next router line ("r ...") in the consensus, or to
+ * an index one after the end of the list if there is no such line.
+ *
+ * Use <b>iter</b> to record the hash of the found router line, if any,
+ * and to enforce ordering on the hashes. If the hashes are mis-ordered,
+ * return -1. Else, return 0.
+ **/
+static int
+find_next_router_line(const smartlist_t *cons,
+ const char *consname,
+ int *idxp,
+ router_id_iterator_t *iter)
+{
+ *idxp = next_router(cons, *idxp);
+ if (*idxp < smartlist_len(cons)) {
+ memcpy(&iter->last_hash, &iter->hash, sizeof(cdline_t));
+ if (get_id_hash(smartlist_get(cons, *idxp), &iter->hash) < 0 ||
+ base64cmp(&iter->hash, &iter->last_hash) <= 0) {
+ log_warn(LD_CONSDIFF, "Refusing to generate consensus diff because "
+ "the %s consensus doesn't have its router entries sorted "
+ "properly.", consname);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/** Line-prefix indicating the beginning of the signatures section that we
+ * intend to delete. */
+#define START_OF_SIGNATURES_SECTION "directory-signature "
+
+/** Pre-process a consensus in <b>cons</b> (represented as a list of cdline_t)
+ * to remove the signatures from it. If the footer is removed, return a
+ * cdline_t containing a delete command to delete the footer, allocated in
+ * <b>area</b>. If no footer is removed, return NULL.
+ *
+ * We remove the signatures here because they are not themselves signed, and
+ * as such there might be different encodings for them.
+ */
+static cdline_t *
+preprocess_consensus(memarea_t *area,
+ smartlist_t *cons)
+{
+ int idx;
+ int dirsig_idx = -1;
+ for (idx = 0; idx < smartlist_len(cons); ++idx) {
+ cdline_t *line = smartlist_get(cons, idx);
+ if (line_starts_with_str(line, START_OF_SIGNATURES_SECTION)) {
+ dirsig_idx = idx;
+ break;
+ }
+ }
+ if (dirsig_idx >= 0) {
+ char buf[64];
+ while (smartlist_len(cons) > dirsig_idx)
+ smartlist_del(cons, dirsig_idx);
+ tor_snprintf(buf, sizeof(buf), "%d,$d", dirsig_idx+1);
+ return cdline_linecpy(area, buf);
+ } else {
+ return NULL;
+ }
+}
+
+/** Generate an ed diff as a smartlist from two consensuses, also given as
+ * smartlists. Will return NULL if the diff could not be generated, which can
+ * happen if any lines the script had to add matched "." or if the routers
+ * were not properly ordered.
+ *
+ * All cdline_t objects in the resulting object are either references to lines
+ * in one of the inputs, or are newly allocated lines in the provided memarea.
+ *
+ * This implementation is consensus-specific. To generate an ed diff for any
+ * given input in quadratic time, you can replace all the code until the
+ * navigation in reverse order with the following:
+ *
+ * int len1 = smartlist_len(cons1);
+ * int len2 = smartlist_len(cons2);
+ * bitarray_t *changed1 = bitarray_init_zero(len1);
+ * bitarray_t *changed2 = bitarray_init_zero(len2);
+ * cons1_sl = smartlist_slice(cons1, 0, -1);
+ * cons2_sl = smartlist_slice(cons2, 0, -1);
+ * calc_changes(cons1_sl, cons2_sl, changed1, changed2);
+ */
+STATIC smartlist_t *
+gen_ed_diff(const smartlist_t *cons1_orig, const smartlist_t *cons2,
+ memarea_t *area)
+{
+ smartlist_t *cons1 = smartlist_new();
+ smartlist_add_all(cons1, cons1_orig);
+ cdline_t *remove_trailer = preprocess_consensus(area, cons1);
+
+ int len1 = smartlist_len(cons1);
+ int len2 = smartlist_len(cons2);
+ smartlist_t *result = smartlist_new();
+
+ if (remove_trailer) {
+ /* There's a delete-the-trailer line at the end, so add it here. */
+ smartlist_add(result, remove_trailer);
+ }
+
+ /* Initialize the changed bitarrays to zero, so that calc_changes only needs
+ * to set the ones that matter and leave the rest untouched.
+ */
+ bitarray_t *changed1 = bitarray_init_zero(len1);
+ bitarray_t *changed2 = bitarray_init_zero(len2);
+ int i1=-1, i2=-1;
+ int start1=0, start2=0;
+
+ /* To check that hashes are ordered properly */
+ router_id_iterator_t iter1 = ROUTER_ID_ITERATOR_INIT;
+ router_id_iterator_t iter2 = ROUTER_ID_ITERATOR_INIT;
+
+ /* i1 and i2 are initialized at the first line of each consensus. They never
+ * reach past len1 and len2 respectively, since next_router doesn't let that
+ * happen. i1 and i2 are advanced by at least one line at each iteration as
+ * long as they have not yet reached len1 and len2, so the loop is
+ * guaranteed to end, and each pair of (i1,i2) will be inspected at most
+ * once.
+ */
+ while (i1 < len1 || i2 < len2) {
+
+ /* Advance each of the two navigation positions by one router entry if not
+ * yet at the end.
+ */
+ if (i1 < len1) {
+ if (find_next_router_line(cons1, "base", &i1, &iter1) < 0) {
+ goto error_cleanup;
+ }
+ }
+
+ if (i2 < len2) {
+ if (find_next_router_line(cons2, "target", &i2, &iter2) < 0) {
+ goto error_cleanup;
+ }
+ }
+
+ /* If we have reached the end of both consensuses, there is no need to
+ * compare hashes anymore, since this is the last iteration.
+ */
+ if (i1 < len1 || i2 < len2) {
+
+ /* Keep on advancing the lower (by identity hash sorting) position until
+ * we have two matching positions. The only other possible outcome is
+ * that a lower position reaches the end of the consensus before it can
+ * reach a hash that is no longer the lower one. Since there will always
+ * be a lower hash for as long as the loop runs, one of the two indexes
+ * will always be incremented, thus assuring that the loop must end
+ * after a finite number of iterations. If that cannot be because said
+ * consensus has already reached the end, both are extended to their
+ * respecting ends since we are done.
+ */
+ int cmp = base64cmp(&iter1.hash, &iter2.hash);
+ while (cmp != 0) {
+ if (i1 < len1 && cmp < 0) {
+ if (find_next_router_line(cons1, "base", &i1, &iter1) < 0) {
+ goto error_cleanup;
+ }
+ if (i1 == len1) {
+ /* We finished the first consensus, so grab all the remaining
+ * lines of the second consensus and finish up.
+ */
+ i2 = len2;
+ break;
+ }
+ } else if (i2 < len2 && cmp > 0) {
+ if (find_next_router_line(cons2, "target", &i2, &iter2) < 0) {
+ goto error_cleanup;
+ }
+ if (i2 == len2) {
+ /* We finished the second consensus, so grab all the remaining
+ * lines of the first consensus and finish up.
+ */
+ i1 = len1;
+ break;
+ }
+ } else {
+ i1 = len1;
+ i2 = len2;
+ break;
+ }
+ cmp = base64cmp(&iter1.hash, &iter2.hash);
+ }
+ }
+
+ /* Make slices out of these chunks (up to the common router entry) and
+ * calculate the changes for them.
+ * Error if any of the two slices are longer than 10K lines. That should
+ * never happen with any pair of real consensuses. Feeding more than 10K
+ * lines to calc_changes would be very slow anyway.
+ */
+#define MAX_LINE_COUNT (10000)
+ if (i1-start1 > MAX_LINE_COUNT || i2-start2 > MAX_LINE_COUNT) {
+ log_warn(LD_CONSDIFF, "Refusing to generate consensus diff because "
+ "we found too few common router ids.");
+ goto error_cleanup;
+ }
+
+ smartlist_slice_t *cons1_sl = smartlist_slice(cons1, start1, i1);
+ smartlist_slice_t *cons2_sl = smartlist_slice(cons2, start2, i2);
+ calc_changes(cons1_sl, cons2_sl, changed1, changed2);
+ tor_free(cons1_sl);
+ tor_free(cons2_sl);
+ start1 = i1, start2 = i2;
+ }
+
+ /* Navigate the changes in reverse order and generate one ed command for
+ * each chunk of changes.
+ */
+ i1=len1-1, i2=len2-1;
+ char buf[128];
+ while (i1 >= 0 || i2 >= 0) {
+
+ int start1x, start2x, end1, end2, added, deleted;
+
+    /* We are at a point where no changed bools are true, so just keep
+     * going. */
+ if (!(i1 >= 0 && bitarray_is_set(changed1, i1)) &&
+ !(i2 >= 0 && bitarray_is_set(changed2, i2))) {
+ if (i1 >= 0) {
+ i1--;
+ }
+ if (i2 >= 0) {
+ i2--;
+ }
+ continue;
+ }
+
+ end1 = i1, end2 = i2;
+
+ /* Grab all contiguous changed lines */
+ while (i1 >= 0 && bitarray_is_set(changed1, i1)) {
+ i1--;
+ }
+ while (i2 >= 0 && bitarray_is_set(changed2, i2)) {
+ i2--;
+ }
+
+ start1x = i1+1, start2x = i2+1;
+ added = end2-i2, deleted = end1-i1;
+
+ if (added == 0) {
+ if (deleted == 1) {
+ tor_snprintf(buf, sizeof(buf), "%id", start1x+1);
+ smartlist_add_linecpy(result, area, buf);
+ } else {
+ tor_snprintf(buf, sizeof(buf), "%i,%id", start1x+1, start1x+deleted);
+ smartlist_add_linecpy(result, area, buf);
+ }
+ } else {
+ int i;
+ if (deleted == 0) {
+ tor_snprintf(buf, sizeof(buf), "%ia", start1x);
+ smartlist_add_linecpy(result, area, buf);
+ } else if (deleted == 1) {
+ tor_snprintf(buf, sizeof(buf), "%ic", start1x+1);
+ smartlist_add_linecpy(result, area, buf);
+ } else {
+ tor_snprintf(buf, sizeof(buf), "%i,%ic", start1x+1, start1x+deleted);
+ smartlist_add_linecpy(result, area, buf);
+ }
+
+ for (i = start2x; i <= end2; ++i) {
+ cdline_t *line = smartlist_get(cons2, i);
+ if (line_str_eq(line, ".")) {
+ log_warn(LD_CONSDIFF, "Cannot generate consensus diff because "
+ "one of the lines to be added is \".\".");
+ goto error_cleanup;
+ }
+ smartlist_add(result, line);
+ }
+ smartlist_add_linecpy(result, area, ".");
+ }
+ }
+
+ smartlist_free(cons1);
+ bitarray_free(changed1);
+ bitarray_free(changed2);
+
+ return result;
+
+ error_cleanup:
+
+ smartlist_free(cons1);
+ bitarray_free(changed1);
+ bitarray_free(changed2);
+
+ smartlist_free(result);
+
+ return NULL;
+}
+
+/* Helper: Read a base-10 number between 0 and INT32_MAX from <b>s</b> and
+ * store it in <b>num_out</b>. Advance <b>s</b> to the character immediately
+ * after the number. Return 0 on success, -1 on failure. */
+static int
+get_linenum(const char **s, int *num_out)
+{
+ int ok;
+ char *next;
+ if (!TOR_ISDIGIT(**s)) {
+ return -1;
+ }
+ *num_out = (int) tor_parse_long(*s, 10, 0, INT32_MAX, &ok, &next);
+ if (ok && next) {
+ *s = next;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+/** Apply the ed diff, starting at <b>diff_starting_line</b>, to the consensus
+ * and return a new consensus, also as a line-based smartlist. Will return
+ * NULL if the ed diff is not properly formatted.
+ *
+ * All cdline_t objects in the resulting object are references to lines
+ * in one of the inputs; nothing is copied.
+ */
+STATIC smartlist_t *
+apply_ed_diff(const smartlist_t *cons1, const smartlist_t *diff,
+ int diff_starting_line)
+{
+ int diff_len = smartlist_len(diff);
+ int j = smartlist_len(cons1);
+ smartlist_t *cons2 = smartlist_new();
+
+ for (int i=diff_starting_line; i<diff_len; ++i) {
+ const cdline_t *diff_cdline = smartlist_get(diff, i);
+ char diff_line[128];
+
+ if (diff_cdline->len > sizeof(diff_line) - 1) {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "an ed command was far too long");
+ goto error_cleanup;
+ }
+ /* Copy the line to make it nul-terminated. */
+ memcpy(diff_line, diff_cdline->s, diff_cdline->len);
+ diff_line[diff_cdline->len] = 0;
+ const char *ptr = diff_line;
+ int start = 0, end = 0;
+ int had_range = 0;
+ int end_was_eof = 0;
+ if (get_linenum(&ptr, &start) < 0) {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "an ed command was missing a line number.");
+ goto error_cleanup;
+ }
+ if (*ptr == ',') {
+ /* Two-item range */
+ had_range = 1;
+ ++ptr;
+ if (*ptr == '$') {
+ end_was_eof = 1;
+ end = smartlist_len(cons1);
+ ++ptr;
+ } else if (get_linenum(&ptr, &end) < 0) {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "an ed command was missing a range end line number.");
+ goto error_cleanup;
+ }
+ /* Incoherent range. */
+ if (end <= start) {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "an invalid range was found in an ed command.");
+ goto error_cleanup;
+ }
+ } else {
+ /* We'll take <n1> as <n1>,<n1> for simplicity. */
+ end = start;
+ }
+
+ if (end > j) {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "its commands are not properly sorted in reverse order.");
+ goto error_cleanup;
+ }
+
+ if (*ptr == '\0') {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "a line with no ed command was found");
+ goto error_cleanup;
+ }
+
+ if (*(ptr+1) != '\0') {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "an ed command longer than one char was found.");
+ goto error_cleanup;
+ }
+
+ char action = *ptr;
+
+ switch (action) {
+ case 'a':
+ case 'c':
+ case 'd':
+ break;
+ default:
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "an unrecognised ed command was found.");
+ goto error_cleanup;
+ }
+
+ /** $ is not allowed with non-d actions. */
+ if (end_was_eof && action != 'd') {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "it wanted to use $ with a command other than delete");
+ goto error_cleanup;
+ }
+
+ /* 'a' commands are not allowed to have ranges. */
+ if (had_range && action == 'a') {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "it wanted to add lines after a range.");
+ goto error_cleanup;
+ }
+
+ /* Add unchanged lines. */
+ for (; j && j > end; --j) {
+ cdline_t *cons_line = smartlist_get(cons1, j-1);
+ smartlist_add(cons2, cons_line);
+ }
+
+ /* Ignore removed lines. */
+ if (action == 'c' || action == 'd') {
+ while (--j >= start) {
+ /* Skip line */
+ }
+ }
+
+ /* Add new lines in reverse order, since it will all be reversed at the
+ * end.
+ */
+ if (action == 'a' || action == 'c') {
+ int added_end = i;
+
+ i++; /* Skip the line with the range and command. */
+ while (i < diff_len) {
+ if (line_str_eq(smartlist_get(diff, i), ".")) {
+ break;
+ }
+ if (++i == diff_len) {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "it has lines to be inserted that don't end with a \".\".");
+ goto error_cleanup;
+ }
+ }
+
+ int added_i = i-1;
+
+ /* It would make no sense to add zero new lines. */
+ if (added_i == added_end) {
+ log_warn(LD_CONSDIFF, "Could not apply consensus diff because "
+ "it has an ed command that tries to insert zero lines.");
+ goto error_cleanup;
+ }
+
+ while (added_i > added_end) {
+ cdline_t *added_line = smartlist_get(diff, added_i--);
+ smartlist_add(cons2, added_line);
+ }
+ }
+ }
+
+ /* Add remaining unchanged lines. */
+ for (; j > 0; --j) {
+ cdline_t *cons_line = smartlist_get(cons1, j-1);
+ smartlist_add(cons2, cons_line);
+ }
+
+ /* Reverse the whole thing since we did it from the end. */
+ smartlist_reverse(cons2);
+ return cons2;
+
+ error_cleanup:
+
+ smartlist_free(cons2);
+
+ return NULL;
+}
+
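+/* Illustrative example: applying the ed script { "3d", "1a", "X", "." }
+ * (one diff line per string) to the consensus lines { "A", "B", "C" }
+ * yields { "A", "X", "B" }: line 3 is deleted, and "X" is inserted after
+ * line 1. Note that the commands appear in decreasing line-number order,
+ * as apply_ed_diff() requires. */
+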
+/** Generate a consensus diff as a smartlist from two given consensuses, also
+ * as smartlists. Will return NULL if the consensus diff could not be
+ * generated. Neither of the two consensuses is modified in any way, so it's
+ * up to the caller to free their resources.
+ */
+smartlist_t *
+consdiff_gen_diff(const smartlist_t *cons1,
+ const smartlist_t *cons2,
+ const consensus_digest_t *digests1,
+ const consensus_digest_t *digests2,
+ memarea_t *area)
+{
+ smartlist_t *ed_diff = gen_ed_diff(cons1, cons2, area);
+ /* ed diff could not be generated - reason already logged by gen_ed_diff. */
+ if (!ed_diff) {
+ goto error_cleanup;
+ }
+
+ /* See that the script actually produces what we want. */
+ smartlist_t *ed_cons2 = apply_ed_diff(cons1, ed_diff, 0);
+ if (!ed_cons2) {
+ /* LCOV_EXCL_START -- impossible if diff generation is correct */
+ log_warn(LD_BUG|LD_CONSDIFF, "Refusing to generate consensus diff because "
+ "the generated ed diff could not be tested to successfully generate "
+ "the target consensus.");
+ goto error_cleanup;
+ /* LCOV_EXCL_STOP */
+ }
+
+ int cons2_eq = 1;
+ if (smartlist_len(cons2) == smartlist_len(ed_cons2)) {
+ SMARTLIST_FOREACH_BEGIN(cons2, const cdline_t *, line1) {
+ const cdline_t *line2 = smartlist_get(ed_cons2, line1_sl_idx);
+ if (! lines_eq(line1, line2) ) {
+ cons2_eq = 0;
+ break;
+ }
+ } SMARTLIST_FOREACH_END(line1);
+ } else {
+ cons2_eq = 0;
+ }
+ smartlist_free(ed_cons2);
+ if (!cons2_eq) {
+ /* LCOV_EXCL_START -- impossible if diff generation is correct. */
+ log_warn(LD_BUG|LD_CONSDIFF, "Refusing to generate consensus diff because "
+ "the generated ed diff did not generate the target consensus "
+ "successfully when tested.");
+ goto error_cleanup;
+ /* LCOV_EXCL_STOP */
+ }
+
+ char cons1_hash_hex[HEX_DIGEST256_LEN+1];
+ char cons2_hash_hex[HEX_DIGEST256_LEN+1];
+ base16_encode(cons1_hash_hex, HEX_DIGEST256_LEN+1,
+ (const char*)digests1->sha3_256, DIGEST256_LEN);
+ base16_encode(cons2_hash_hex, HEX_DIGEST256_LEN+1,
+ (const char*)digests2->sha3_256, DIGEST256_LEN);
+
+ /* Create the resulting consensus diff. */
+ char buf[160];
+ smartlist_t *result = smartlist_new();
+ tor_snprintf(buf, sizeof(buf), "%s", ns_diff_version);
+ smartlist_add_linecpy(result, area, buf);
+ tor_snprintf(buf, sizeof(buf), "%s %s %s", hash_token,
+ cons1_hash_hex, cons2_hash_hex);
+ smartlist_add_linecpy(result, area, buf);
+ smartlist_add_all(result, ed_diff);
+ smartlist_free(ed_diff);
+ return result;
+
+ error_cleanup:
+
+ if (ed_diff) {
+ /* LCOV_EXCL_START -- ed_diff is NULL except in unreachable cases above */
+ smartlist_free(ed_diff);
+ /* LCOV_EXCL_STOP */
+ }
+
+ return NULL;
+}
+
+/** Fetch the digests of the base and target consensuses from the consensus
+ * diff header, decoding them from the base16 form found in the diff itself.
+ * digest1_out and digest2_out must be of length DIGEST256_LEN or larger if
+ * not NULL.
+ */
+int
+consdiff_get_digests(const smartlist_t *diff,
+ char *digest1_out,
+ char *digest2_out)
+{
+ smartlist_t *hash_words = NULL;
+ const cdline_t *format;
+ char cons1_hash[DIGEST256_LEN], cons2_hash[DIGEST256_LEN];
+ char *cons1_hash_hex, *cons2_hash_hex;
+ if (smartlist_len(diff) < 2) {
+ log_info(LD_CONSDIFF, "The provided consensus diff is too short.");
+ goto error_cleanup;
+ }
+
+ /* Check that it's the format and version we know. */
+ format = smartlist_get(diff, 0);
+ if (!line_str_eq(format, ns_diff_version)) {
+ log_warn(LD_CONSDIFF, "The provided consensus diff format is not known.");
+ goto error_cleanup;
+ }
+
+ /* Grab the base16 digests. */
+ hash_words = smartlist_new();
+ {
+ const cdline_t *line2 = smartlist_get(diff, 1);
+ char *h = tor_memdup_nulterm(line2->s, line2->len);
+ smartlist_split_string(hash_words, h, " ", 0, 0);
+ tor_free(h);
+ }
+
+ /* There have to be three words, the first of which must be hash_token. */
+ if (smartlist_len(hash_words) != 3 ||
+ strcmp(smartlist_get(hash_words, 0), hash_token)) {
+ log_info(LD_CONSDIFF, "The provided consensus diff does not include "
+ "the necessary digests.");
+ goto error_cleanup;
+ }
+
+ /* Expected hashes as found in the consensus diff header. They must be of
+ * length HEX_DIGEST256_LEN, normally 64 hexadecimal characters.
+ * If any of the decodings fail, error to make sure that the hashes are
+ * proper base16-encoded digests.
+ */
+ cons1_hash_hex = smartlist_get(hash_words, 1);
+ cons2_hash_hex = smartlist_get(hash_words, 2);
+ if (strlen(cons1_hash_hex) != HEX_DIGEST256_LEN ||
+ strlen(cons2_hash_hex) != HEX_DIGEST256_LEN) {
+ log_info(LD_CONSDIFF, "The provided consensus diff includes "
+ "base16-encoded digests of incorrect size.");
+ goto error_cleanup;
+ }
+
+ if (base16_decode(cons1_hash, DIGEST256_LEN,
+ cons1_hash_hex, HEX_DIGEST256_LEN) != DIGEST256_LEN ||
+ base16_decode(cons2_hash, DIGEST256_LEN,
+ cons2_hash_hex, HEX_DIGEST256_LEN) != DIGEST256_LEN) {
+ log_info(LD_CONSDIFF, "The provided consensus diff includes "
+ "malformed digests.");
+ goto error_cleanup;
+ }
+
+ if (digest1_out) {
+ memcpy(digest1_out, cons1_hash, DIGEST256_LEN);
+ }
+ if (digest2_out) {
+ memcpy(digest2_out, cons2_hash, DIGEST256_LEN);
+ }
+
+ SMARTLIST_FOREACH(hash_words, char *, cp, tor_free(cp));
+ smartlist_free(hash_words);
+ return 0;
+
+ error_cleanup:
+
+ if (hash_words) {
+ SMARTLIST_FOREACH(hash_words, char *, cp, tor_free(cp));
+ smartlist_free(hash_words);
+ }
+ return 1;
+}
+
+/** Apply the consensus diff to the given consensus and return the resulting
+ * consensus as a newly allocated string. Will return NULL if the diff
+ * could not be applied. Neither the consensus nor the diff is modified in
+ * any way, so it's up to the caller to free their resources.
+ */
+char *
+consdiff_apply_diff(const smartlist_t *cons1,
+ const smartlist_t *diff,
+ const consensus_digest_t *digests1)
+{
+ smartlist_t *cons2 = NULL;
+ char *cons2_str = NULL;
+ char e_cons1_hash[DIGEST256_LEN];
+ char e_cons2_hash[DIGEST256_LEN];
+
+ if (consdiff_get_digests(diff, e_cons1_hash, e_cons2_hash) != 0) {
+ goto error_cleanup;
+ }
+
+ /* See that the consensus that was given to us matches its hash. */
+ if (!consensus_digest_eq(digests1->sha3_256,
+ (const uint8_t*)e_cons1_hash)) {
+ char hex_digest1[HEX_DIGEST256_LEN+1];
+ char e_hex_digest1[HEX_DIGEST256_LEN+1];
+ log_warn(LD_CONSDIFF, "Refusing to apply consensus diff because "
+ "the base consensus doesn't match the digest as found in "
+ "the consensus diff header.");
+ base16_encode(hex_digest1, HEX_DIGEST256_LEN+1,
+ (const char *)digests1->sha3_256, DIGEST256_LEN);
+ base16_encode(e_hex_digest1, HEX_DIGEST256_LEN+1,
+ e_cons1_hash, DIGEST256_LEN);
+ log_warn(LD_CONSDIFF, "Expected: %s; found: %s",
+ hex_digest1, e_hex_digest1);
+ goto error_cleanup;
+ }
+
+ /* Grab the ed diff and calculate the resulting consensus. */
+ /* Skip the first two lines. */
+ cons2 = apply_ed_diff(cons1, diff, 2);
+
+ /* ed diff could not be applied - reason already logged by apply_ed_diff. */
+ if (!cons2) {
+ goto error_cleanup;
+ }
+
+ cons2_str = consensus_join_lines(cons2);
+
+ consensus_digest_t cons2_digests;
+ if (consensus_compute_digest(cons2_str, &cons2_digests) < 0) {
+ /* LCOV_EXCL_START -- digest can't fail */
+ log_warn(LD_CONSDIFF, "Could not compute digests of the consensus "
+ "resulting from applying a consensus diff.");
+ goto error_cleanup;
+ /* LCOV_EXCL_STOP */
+ }
+
+ /* See that the resulting consensus matches its hash. */
+ if (!consensus_digest_eq(cons2_digests.sha3_256,
+ (const uint8_t*)e_cons2_hash)) {
+ log_warn(LD_CONSDIFF, "Refusing to apply consensus diff because "
+ "the resulting consensus doesn't match the digest as found in "
+ "the consensus diff header.");
+ char hex_digest2[HEX_DIGEST256_LEN+1];
+ char e_hex_digest2[HEX_DIGEST256_LEN+1];
+ base16_encode(hex_digest2, HEX_DIGEST256_LEN+1,
+ (const char *)cons2_digests.sha3_256, DIGEST256_LEN);
+ base16_encode(e_hex_digest2, HEX_DIGEST256_LEN+1,
+ e_cons2_hash, DIGEST256_LEN);
+ log_warn(LD_CONSDIFF, "Expected: %s; found: %s",
+ hex_digest2, e_hex_digest2);
+ goto error_cleanup;
+ }
+
+ goto done;
+
+ error_cleanup:
+ tor_free(cons2_str); /* Sets it to NULL */
+
+ done:
+ if (cons2) {
+ smartlist_free(cons2);
+ }
+
+ return cons2_str;
+}
+
+/** Any consensus line longer than this means that the input is invalid. */
+#define CONSENSUS_LINE_MAX_LEN (1<<20)
+
+/**
+ * Helper: For every NL-terminated line in <b>s</b>, add a cdline referring to
+ * that line (without trailing newline) to <b>out</b>. Return -1 if there are
+ * any non-NL terminated lines; 0 otherwise.
+ *
+ * Unlike tor_split_lines, this function avoids ambiguity on its
+ * handling of a final line that isn't NL-terminated.
+ *
+ * All cdline_t objects are allocated in the provided memarea. Strings
+ * are not copied: if <b>s</b> changes or becomes invalid, then all
+ * generated cdlines will become invalid.
+ */
+STATIC int
+consensus_split_lines(smartlist_t *out, const char *s, memarea_t *area)
+{
+ const char *end_of_str = s + strlen(s);
+ tor_assert(*end_of_str == '\0');
+
+ while (*s) {
+ const char *eol = memchr(s, '\n', end_of_str - s);
+ if (!eol) {
+ /* File doesn't end with newline. */
+ return -1;
+ }
+ if (eol - s > CONSENSUS_LINE_MAX_LEN) {
+ /* Line is far too long. */
+ return -1;
+ }
+ cdline_t *line = memarea_alloc(area, sizeof(cdline_t));
+ line->s = s;
+ line->len = (uint32_t)(eol - s);
+ smartlist_add(out, line);
+ s = eol+1;
+ }
+ return 0;
+}
+
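+/* Illustrative examples: given "ab\ncd\n", consensus_split_lines() adds two
+ * cdlines, "ab" and "cd" (without their newlines), and returns 0. Given
+ * "ab\ncd", which lacks a final newline, it returns -1 and the callers
+ * treat the input as invalid. */
+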
+/** Given a list of cdline_t, return a newly allocated string containing
+ * all of the lines, terminated with NL, concatenated.
+ *
+ * Unlike smartlist_join_strings(), avoids lossy operations on empty
+ * lists. */
+static char *
+consensus_join_lines(const smartlist_t *inp)
+{
+ size_t n = 0;
+ SMARTLIST_FOREACH(inp, const cdline_t *, cdline, n += cdline->len + 1);
+ n += 1;
+ char *result = tor_malloc(n);
+ char *out = result;
+ SMARTLIST_FOREACH_BEGIN(inp, const cdline_t *, cdline) {
+ memcpy(out, cdline->s, cdline->len);
+ out += cdline->len;
+ *out++ = '\n';
+ } SMARTLIST_FOREACH_END(cdline);
+ *out++ = '\0';
+ tor_assert(out == result+n);
+ return result;
+}
+
+/** Given two consensus documents, try to compute a diff between them. On
+ * success, return a newly allocated string containing that diff. On failure,
+ * return NULL. */
+char *
+consensus_diff_generate(const char *cons1,
+ const char *cons2)
+{
+ consensus_digest_t d1, d2;
+ smartlist_t *lines1 = NULL, *lines2 = NULL, *result_lines = NULL;
+ int r1, r2;
+ char *result = NULL;
+
+ r1 = consensus_compute_digest_as_signed(cons1, &d1);
+ r2 = consensus_compute_digest(cons2, &d2);
+ if (BUG(r1 < 0 || r2 < 0))
+ return NULL; // LCOV_EXCL_LINE
+
+ memarea_t *area = memarea_new();
+ lines1 = smartlist_new();
+ lines2 = smartlist_new();
+ if (consensus_split_lines(lines1, cons1, area) < 0)
+ goto done;
+ if (consensus_split_lines(lines2, cons2, area) < 0)
+ goto done;
+
+ result_lines = consdiff_gen_diff(lines1, lines2, &d1, &d2, area);
+
+ done:
+ if (result_lines) {
+ result = consensus_join_lines(result_lines);
+ smartlist_free(result_lines);
+ }
+
+ memarea_drop_all(area);
+ smartlist_free(lines1);
+ smartlist_free(lines2);
+
+ return result;
+}
+
+/** Given a consensus document and a diff, try to apply the diff to the
+ * consensus. On success return a newly allocated string containing the new
+ * consensus. On failure, return NULL. */
+char *
+consensus_diff_apply(const char *consensus,
+ const char *diff)
+{
+ consensus_digest_t d1;
+ smartlist_t *lines1 = NULL, *lines2 = NULL;
+ int r1;
+ char *result = NULL;
+ memarea_t *area = memarea_new();
+
+ r1 = consensus_compute_digest_as_signed(consensus, &d1);
+ if (BUG(r1 < 0))
+ goto done;
+
+ lines1 = smartlist_new();
+ lines2 = smartlist_new();
+ if (consensus_split_lines(lines1, consensus, area) < 0)
+ goto done;
+ if (consensus_split_lines(lines2, diff, area) < 0)
+ goto done;
+
+ result = consdiff_apply_diff(lines1, lines2, &d1);
+
+ done:
+ smartlist_free(lines1);
+ smartlist_free(lines2);
+ memarea_drop_all(area);
+
+ return result;
+}
+
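+/* Example use of the two public entry points above (an illustrative sketch;
+ * "base" and "target" stand for two NUL-terminated, NL-terminated consensus
+ * documents):
+ *
+ *   char *diff = consensus_diff_generate(base, target);
+ *   if (diff) {
+ *     char *rebuilt = consensus_diff_apply(base, diff);
+ *     // On success, "rebuilt" holds the reconstructed target consensus;
+ *     // on failure it is NULL.
+ *     tor_free(rebuilt);
+ *     tor_free(diff);
+ *   }
+ */
+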
+/** Return true iff, based on its header, <b>document</b> is likely
+ * to be a consensus diff. */
+int
+looks_like_a_consensus_diff(const char *document, size_t len)
+{
+ return (len >= strlen(ns_diff_version) &&
+ fast_memeq(document, ns_diff_version, strlen(ns_diff_version)));
+}
diff --git a/src/feature/dircommon/consdiff.h b/src/feature/dircommon/consdiff.h
new file mode 100644
index 0000000000..98217e6d46
--- /dev/null
+++ b/src/feature/dircommon/consdiff.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2014, Daniel Martí
+ * Copyright (c) 2014-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_CONSDIFF_H
+#define TOR_CONSDIFF_H
+
+#include "core/or/or.h"
+
+char *consensus_diff_generate(const char *cons1,
+ const char *cons2);
+char *consensus_diff_apply(const char *consensus,
+ const char *diff);
+
+int looks_like_a_consensus_diff(const char *document, size_t len);
+
+#ifdef CONSDIFF_PRIVATE
+#include "lib/container/bitarray.h"
+
+struct memarea_t;
+
+/** Line type used for constructing consensus diffs. Each of these lines
+ * refers to a chunk of memory allocated elsewhere, and is not necessarily
+ * NUL-terminated: this helps us avoid copies and save memory. */
+typedef struct cdline_t {
+ const char *s;
+ uint32_t len;
+} cdline_t;
+
+typedef struct consensus_digest_t {
+ uint8_t sha3_256[DIGEST256_LEN];
+} consensus_digest_t;
+
+STATIC smartlist_t *consdiff_gen_diff(const smartlist_t *cons1,
+ const smartlist_t *cons2,
+ const consensus_digest_t *digests1,
+ const consensus_digest_t *digests2,
+ struct memarea_t *area);
+STATIC char *consdiff_apply_diff(const smartlist_t *cons1,
+ const smartlist_t *diff,
+ const consensus_digest_t *digests1);
+STATIC int consdiff_get_digests(const smartlist_t *diff,
+ char *digest1_out,
+ char *digest2_out);
+
+/** Data structure to define a slice of a smartlist. */
+typedef struct smartlist_slice_t {
+ /**
+ * Smartlist that this slice is made from.
+ * References the whole original smartlist that the slice was made out of.
+ * */
+ const smartlist_t *list;
+ /** Starting position of the slice in the smartlist. */
+ int offset;
+ /** Length of the slice, i.e. the number of elements it holds. */
+ int len;
+} smartlist_slice_t;
+STATIC smartlist_t *gen_ed_diff(const smartlist_t *cons1,
+ const smartlist_t *cons2,
+ struct memarea_t *area);
+STATIC smartlist_t *apply_ed_diff(const smartlist_t *cons1,
+ const smartlist_t *diff,
+ int start_line);
+STATIC void calc_changes(smartlist_slice_t *slice1, smartlist_slice_t *slice2,
+ bitarray_t *changed1, bitarray_t *changed2);
+STATIC smartlist_slice_t *smartlist_slice(const smartlist_t *list,
+ int start, int end);
+STATIC int next_router(const smartlist_t *cons, int cur);
+STATIC int *lcs_lengths(const smartlist_slice_t *slice1,
+ const smartlist_slice_t *slice2,
+ int direction);
+STATIC void trim_slices(smartlist_slice_t *slice1, smartlist_slice_t *slice2);
+STATIC int base64cmp(const cdline_t *hash1, const cdline_t *hash2);
+STATIC int get_id_hash(const cdline_t *line, cdline_t *hash_out);
+STATIC int is_valid_router_entry(const cdline_t *line);
+STATIC int smartlist_slice_string_pos(const smartlist_slice_t *slice,
+ const cdline_t *string);
+STATIC void set_changed(bitarray_t *changed1, bitarray_t *changed2,
+ const smartlist_slice_t *slice1,
+ const smartlist_slice_t *slice2);
+STATIC int consensus_split_lines(smartlist_t *out, const char *s,
+ struct memarea_t *area);
+STATIC void smartlist_add_linecpy(smartlist_t *lst, struct memarea_t *area,
+ const char *s);
+STATIC int lines_eq(const cdline_t *a, const cdline_t *b);
+STATIC int line_str_eq(const cdline_t *a, const char *b);
+
+MOCK_DECL(STATIC int,
+ consensus_compute_digest,(const char *cons,
+ consensus_digest_t *digest_out));
+MOCK_DECL(STATIC int,
+ consensus_compute_digest_as_signed,(const char *cons,
+ consensus_digest_t *digest_out));
+MOCK_DECL(STATIC int,
+ consensus_digest_eq,(const uint8_t *d1,
+ const uint8_t *d2));
+#endif /* defined(CONSDIFF_PRIVATE) */
+
+#endif /* !defined(TOR_CONSDIFF_H) */
diff --git a/src/feature/dircommon/dir_connection_st.h b/src/feature/dircommon/dir_connection_st.h
new file mode 100644
index 0000000000..8c59cc7a46
--- /dev/null
+++ b/src/feature/dircommon/dir_connection_st.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef DIR_CONNECTION_ST_H
+#define DIR_CONNECTION_ST_H
+
+#include "core/or/connection_st.h"
+
+struct tor_compress_state_t;
+
+/** Subtype of connection_t for a "directory connection" -- that is, an HTTP
+ * connection to retrieve or serve directory material. */
+struct dir_connection_t {
+ connection_t base_;
+
+ /** Which 'resource' did we ask the directory for? This is typically the part
+ * of the URL string that defines, relative to the directory conn purpose,
+ * what thing we want. For example, in router descriptor downloads by
+ * descriptor digest, it contains "d/", then one or more +-separated
+ * fingerprints.
+ **/
+ char *requested_resource;
+ unsigned int dirconn_direct:1; /**< Is this dirconn direct, or via Tor? */
+
+ /** If we're fetching descriptors, what router purpose shall we assign
+ * to them? */
+ uint8_t router_purpose;
+
+ /** List of spooled_resource_t for objects that we're spooling. We use
+ * it from back to front. */
+ smartlist_t *spool;
+ /** The compression object doing on-the-fly compression for spooled data. */
+ struct tor_compress_state_t *compress_state;
+
+ /** What rendezvous service are we querying for? */
+ rend_data_t *rend_data;
+
+ /* Hidden service connection identifier for dir connections: Used by HS
+ client-side code to fetch HS descriptors, and by the service-side code to
+ upload descriptors. */
+ struct hs_ident_dir_conn_t *hs_ident;
+
+ /** If this is a one-hop connection, tracks the state of the directory guard
+ * for this connection (if any). */
+ struct circuit_guard_state_t *guard_state;
+
+ char identity_digest[DIGEST_LEN]; /**< Hash of the public RSA key for
+ * the directory server's signing key. */
+
+ /** Unique ID for directory requests; this used to be in connection_t, but
+ * that's going away and being used on channels instead. The dirserver still
+ * needs this for the incoming side, so it's moved here. */
+ uint64_t dirreq_id;
+
+#ifdef MEASUREMENTS_21206
+ /** Number of RELAY_DATA cells received. */
+ uint32_t data_cells_received;
+
+ /** Number of RELAY_DATA cells sent. */
+ uint32_t data_cells_sent;
+#endif /* defined(MEASUREMENTS_21206) */
+};
+
+#endif
diff --git a/src/feature/dircommon/directory.c b/src/feature/dircommon/directory.c
new file mode 100644
index 0000000000..9e6f72e9ac
--- /dev/null
+++ b/src/feature/dircommon/directory.c
@@ -0,0 +1,651 @@
+/* Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "core/or/or.h"
+
+#include "app/config/config.h"
+#include "core/mainloop/connection.h"
+#include "feature/dircache/dircache.h"
+#include "feature/dircache/dirserv.h"
+#include "feature/dirclient/dirclient.h"
+#include "feature/dircommon/directory.h"
+#include "feature/dircommon/fp_pair.h"
+#include "feature/stats/geoip_stats.h"
+#include "lib/compress/compress.h"
+
+#include "feature/dircommon/dir_connection_st.h"
+#include "feature/nodelist/routerinfo_st.h"
+
+/**
+ * \file directory.c
+ * \brief Code to send and fetch information from directory authorities and
+ * caches via HTTP.
+ *
+ * Directory caches and authorities use dirserv.c to generate the results of a
+ * query and stream them to the connection; clients use routerparse.c to parse
+ * them.
+ *
+ * Every directory request has a dir_connection_t on the client side and on
+ * the server side. In most cases, the dir_connection_t object is a linked
+ * connection, tunneled through an edge_connection_t so that it can be a
+ * stream on the Tor network. The only non-tunneled connections are those
+ * that are used to upload material (descriptors and votes) to authorities.
+ * Among tunneled connections, some use one-hop circuits, and others use
+ * multi-hop circuits for anonymity.
+ *
+ * Directory requests are launched by calling
+ * directory_initiate_request(). This
+ * launch the connection, will construct an HTTP request with
+ * directory_send_command(), send the and wait for a response. The client
+ * later handles the response with connection_dir_client_reached_eof(),
+ * which passes the information received to another part of Tor.
+ *
+ * On the server side, requests are read in directory_handle_command(),
+ * which dispatches first on the request type (GET or POST), and then on
+ * the URL requested. GET requests are processed with a table-based
+ * dispatcher in url_table[]. The process of handling larger GET requests
+ * is complicated because we need to avoid allocating a copy of all the
+ * data to be sent to the client in one huge buffer. Instead, we spool the
+ * data into the buffer using logic in connection_dirserv_flushed_some() in
+ * dirserv.c. (TODO: If we extended buf.c to have a zero-copy
+ * reference-based buffer type, we could remove most of that code, at the
+ * cost of a bit more reference counting.)
+ **/
+
+/* In-points to directory.c:
+ *
+ * - directory_post_to_dirservers(), called from
+ * router_upload_dir_desc_to_dirservers() in router.c
+ * upload_service_descriptor() in rendservice.c
+ * - directory_get_from_dirserver(), called from
+ * rend_client_refetch_renddesc() in rendclient.c
+ * run_scheduled_events() in main.c
+ * do_hup() in main.c
+ * - connection_dir_process_inbuf(), called from
+ * connection_process_inbuf() in connection.c
+ * - connection_dir_finished_flushing(), called from
+ * connection_finished_flushing() in connection.c
+ * - connection_dir_finished_connecting(), called from
+ * connection_finished_connecting() in connection.c
+ */
+
+/** Convert a connection_t* to a dir_connection_t*; assert if the cast is
+ * invalid. */
+dir_connection_t *
+TO_DIR_CONN(connection_t *c)
+{
+ tor_assert(c->magic == DIR_CONNECTION_MAGIC);
+ return DOWNCAST(dir_connection_t, c);
+}
+
+/** Return false if the directory purpose <b>dir_purpose</b>
+ * does not require an anonymous (three-hop) connection.
+ *
+ * Return true 1) by default, 2) if all directory actions have
+ * specifically been configured to be over an anonymous connection,
+ * or 3) if the router is a bridge */
+int
+purpose_needs_anonymity(uint8_t dir_purpose, uint8_t router_purpose,
+ const char *resource)
+{
+ if (get_options()->AllDirActionsPrivate)
+ return 1;
+
+ if (router_purpose == ROUTER_PURPOSE_BRIDGE) {
+ if (dir_purpose == DIR_PURPOSE_FETCH_SERVERDESC
+ && resource && !strcmp(resource, "authority.z")) {
+ /* We are asking a bridge for its own descriptor. That doesn't need
+ anonymity. */
+ return 0;
+ }
+ /* Assume all other bridge stuff needs anonymity. */
+ return 1; /* if no circuits yet, this might break bootstrapping, but it's
+ * needed to be safe. */
+ }
+
+ switch (dir_purpose)
+ {
+ case DIR_PURPOSE_UPLOAD_DIR:
+ case DIR_PURPOSE_UPLOAD_VOTE:
+ case DIR_PURPOSE_UPLOAD_SIGNATURES:
+ case DIR_PURPOSE_FETCH_STATUS_VOTE:
+ case DIR_PURPOSE_FETCH_DETACHED_SIGNATURES:
+ case DIR_PURPOSE_FETCH_CONSENSUS:
+ case DIR_PURPOSE_FETCH_CERTIFICATE:
+ case DIR_PURPOSE_FETCH_SERVERDESC:
+ case DIR_PURPOSE_FETCH_EXTRAINFO:
+ case DIR_PURPOSE_FETCH_MICRODESC:
+ return 0;
+ case DIR_PURPOSE_HAS_FETCHED_HSDESC:
+ case DIR_PURPOSE_HAS_FETCHED_RENDDESC_V2:
+ case DIR_PURPOSE_UPLOAD_RENDDESC_V2:
+ case DIR_PURPOSE_FETCH_RENDDESC_V2:
+ case DIR_PURPOSE_FETCH_HSDESC:
+ case DIR_PURPOSE_UPLOAD_HSDESC:
+ return 1;
+ case DIR_PURPOSE_SERVER:
+ default:
+ log_warn(LD_BUG, "Called with dir_purpose=%d, router_purpose=%d",
+ dir_purpose, router_purpose);
+ tor_assert_nonfatal_unreached();
+ return 1; /* Assume it needs anonymity; better safe than sorry. */
+ }
+}
+
+/** Return a newly allocated string describing <b>auth</b>. Only describes
+ * authority features. */
+char *
+authdir_type_to_string(dirinfo_type_t auth)
+{
+ char *result;
+ smartlist_t *lst = smartlist_new();
+ if (auth & V3_DIRINFO)
+ smartlist_add(lst, (void*)"V3");
+ if (auth & BRIDGE_DIRINFO)
+ smartlist_add(lst, (void*)"Bridge");
+ if (smartlist_len(lst)) {
+ result = smartlist_join_strings(lst, ", ", 0, NULL);
+ } else {
+ result = tor_strdup("[Not an authority]");
+ }
+ smartlist_free(lst);
+ return result;
+}
+
+/** Return true iff anything we say on <b>conn</b> is being encrypted before
+ * we send it to the client/server. */
+int
+connection_dir_is_encrypted(const dir_connection_t *conn)
+{
+ /* Right now it's sufficient to see if conn is or has been linked, since
+ * the only thing it could be linked to is an edge connection on a
+ * circuit, and the only way it could have been unlinked is at the edge
+ * connection getting closed.
+ */
+ return TO_CONN(conn)->linked;
+}
+
+/** Parse an HTTP request line at the start of a headers string. On failure,
+ * return -1. On success, set *<b>command_out</b> to a copy of the HTTP
+ * command ("get", "post", etc), set *<b>url_out</b> to a copy of the URL, and
+ * return 0. */
+int
+parse_http_command(const char *headers, char **command_out, char **url_out)
+{
+ const char *command, *end_of_command;
+ char *s, *start, *tmp;
+
+ s = (char *)eat_whitespace_no_nl(headers);
+ if (!*s) return -1;
+ command = s;
+ s = (char *)find_whitespace(s); /* get past GET/POST */
+ if (!*s) return -1;
+ end_of_command = s;
+ s = (char *)eat_whitespace_no_nl(s);
+ if (!*s) return -1;
+ start = s; /* this is the URL, assuming it's valid */
+ s = (char *)find_whitespace(start);
+ if (!*s) return -1;
+
+ /* tolerate the http[s] proxy style of putting the hostname in the url */
+ if (s-start >= 4 && !strcmpstart(start,"http")) {
+ tmp = start + 4;
+ if (*tmp == 's')
+ tmp++;
+ if (s-tmp >= 3 && !strcmpstart(tmp,"://")) {
+ tmp = strchr(tmp+3, '/');
+ if (tmp && tmp < s) {
+ log_debug(LD_DIR,"Skipping over 'http[s]://hostname/' string");
+ start = tmp;
+ }
+ }
+ }
+
+ /* Check that the request line is well formed: the next sequence
+ * should be HTTP/1.X followed by \r\n. (We only accept HTTP/1.x.) */
+ {
+ unsigned minor_ver;
+ char ch;
+ char *e = (char *)eat_whitespace_no_nl(s);
+ if (2 != tor_sscanf(e, "HTTP/1.%u%c", &minor_ver, &ch)) {
+ return -1;
+ }
+ if (ch != '\r')
+ return -1;
+ }
+
+ *url_out = tor_memdup_nulterm(start, s-start);
+ *command_out = tor_memdup_nulterm(command, end_of_command - command);
+ return 0;
+}
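
An illustrative call (editor's sketch, not part of the diff) showing the two output strings the parser produces for a simple request line; the literal request below is made up:

    char *command = NULL, *url = NULL;
    const char *headers =
      "GET /tor/status-vote/current/consensus HTTP/1.0\r\n\r\n";
    if (parse_http_command(headers, &command, &url) == 0) {
      /* command is "GET"; url is "/tor/status-vote/current/consensus". */
      tor_free(command);
      tor_free(url);
    }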
+
+/** Return a copy of the first HTTP header in <b>headers</b> whose key is
+ * <b>which</b>. The key should be given with a terminating colon and space;
+ * this function copies everything after, up to but not including the
+ * following \\r\\n. */
+char *
+http_get_header(const char *headers, const char *which)
+{
+ const char *cp = headers;
+ while (cp) {
+ if (!strcasecmpstart(cp, which)) {
+ char *eos;
+ cp += strlen(which);
+ if ((eos = strchr(cp,'\r')))
+ return tor_strndup(cp, eos-cp);
+ else
+ return tor_strdup(cp);
+ }
+ cp = strchr(cp, '\n');
+ if (cp)
+ ++cp;
+ }
+ return NULL;
+}
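
A small sketch (editor's illustration, not in the diff) of pulling one header value out of a response; note that the key includes the trailing colon and space, as the comment above requires:

    const char *headers =
      "HTTP/1.0 200 OK\r\nContent-Encoding: deflate\r\n\r\n";
    char *enc = http_get_header(headers, "Content-Encoding: ");
    /* enc is a newly allocated copy of "deflate"; free it when done. */
    tor_free(enc);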
+
+/** Parse an HTTP response string <b>headers</b> of the form
+ * \verbatim
+ * "HTTP/1.\%d \%d\%s\r\n...".
+ * \endverbatim
+ *
+ * If it's well-formed, assign the status code to *<b>code</b> and
+ * return 0. Otherwise, return -1.
+ *
+ * On success: If <b>date</b> is provided, set *date to the Date
+ * header in the http headers, or 0 if no such header is found. If
+ * <b>compression</b> is provided, set *<b>compression</b> to the
+ * compression method given in the Content-Encoding header, or 0 if no
+ * such header is found, or -1 if the value of the header is not
+ * recognized. If <b>reason</b> is provided, strdup the reason string
+ * into it.
+ */
+int
+parse_http_response(const char *headers, int *code, time_t *date,
+ compress_method_t *compression, char **reason)
+{
+ unsigned n1, n2;
+ char datestr[RFC1123_TIME_LEN+1];
+ smartlist_t *parsed_headers;
+ tor_assert(headers);
+ tor_assert(code);
+
+ while (TOR_ISSPACE(*headers)) headers++; /* tolerate leading whitespace */
+
+ if (tor_sscanf(headers, "HTTP/1.%u %u", &n1, &n2) < 2 ||
+ (n1 != 0 && n1 != 1) ||
+ (n2 < 100 || n2 >= 600)) {
+ log_warn(LD_HTTP,"Failed to parse header %s",escaped(headers));
+ return -1;
+ }
+ *code = n2;
+
+ parsed_headers = smartlist_new();
+ smartlist_split_string(parsed_headers, headers, "\n",
+ SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, -1);
+ if (reason) {
+ smartlist_t *status_line_elements = smartlist_new();
+ tor_assert(smartlist_len(parsed_headers));
+ smartlist_split_string(status_line_elements,
+ smartlist_get(parsed_headers, 0),
+ " ", SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 3);
+ tor_assert(smartlist_len(status_line_elements) <= 3);
+ if (smartlist_len(status_line_elements) == 3) {
+ *reason = smartlist_get(status_line_elements, 2);
+ smartlist_set(status_line_elements, 2, NULL); /* Prevent free */
+ }
+ SMARTLIST_FOREACH(status_line_elements, char *, cp, tor_free(cp));
+ smartlist_free(status_line_elements);
+ }
+ if (date) {
+ *date = 0;
+ SMARTLIST_FOREACH(parsed_headers, const char *, s,
+ if (!strcmpstart(s, "Date: ")) {
+ strlcpy(datestr, s+6, sizeof(datestr));
+ /* This will do nothing on failure, so we don't need to check
+ the result. We shouldn't warn, since there are many other valid
+ date formats besides the one we use. */
+ parse_rfc1123_time(datestr, date);
+ break;
+ });
+ }
+ if (compression) {
+ const char *enc = NULL;
+ SMARTLIST_FOREACH(parsed_headers, const char *, s,
+ if (!strcmpstart(s, "Content-Encoding: ")) {
+ enc = s+18; break;
+ });
+
+ if (enc == NULL)
+ *compression = NO_METHOD;
+ else {
+ *compression = compression_method_get_by_name(enc);
+
+ if (*compression == UNKNOWN_METHOD)
+ log_info(LD_HTTP, "Unrecognized content encoding: %s. Trying to deal.",
+ escaped(enc));
+ }
+ }
+ SMARTLIST_FOREACH(parsed_headers, char *, s, tor_free(s));
+ smartlist_free(parsed_headers);
+
+ return 0;
+}
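
For illustration only (editor's sketch, not part of this diff), here is how a caller might decode a response header block with the function above; the literal header string is made up:

    int code = 0;
    time_t date = 0;
    compress_method_t method = NO_METHOD;
    char *reason = NULL;
    const char *headers =
      "HTTP/1.0 404 Not found\r\n"
      "Date: Fri, 01 Mar 2019 12:00:00 GMT\r\n\r\n";
    if (parse_http_response(headers, &code, &date, &method, &reason) == 0) {
      /* code == 404, reason holds the status string, date holds the parsed
       * Date: header, and method is NO_METHOD since no Content-Encoding
       * header was sent. */
      tor_free(reason);
    }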
+
+/** If any arriving directory object is larger than 10MB, assume we're
+ * being DoS'd. (As of 0.1.2.x, raw directories are about 1MB, and we never
+ * ask for more than 96 router descriptors at a time.)
+ */
+#define MAX_DIRECTORY_OBJECT_SIZE (10*(1<<20))
+
+#define MAX_VOTE_DL_SIZE (MAX_DIRECTORY_OBJECT_SIZE * 5)
+
+/** Read handler for directory connections. (That's connections <em>to</em>
+ * directory servers and connections <em>at</em> directory servers.)
+ */
+int
+connection_dir_process_inbuf(dir_connection_t *conn)
+{
+ size_t max_size;
+ tor_assert(conn);
+ tor_assert(conn->base_.type == CONN_TYPE_DIR);
+
+ /* Directory clients write, then read data until they receive EOF;
+ * directory servers read data until they get an HTTP command, then
+ * write their response (when it's finished flushing, they mark for
+ * close).
+ */
+
+ /* If we're on the dirserver side, look for a command. */
+ if (conn->base_.state == DIR_CONN_STATE_SERVER_COMMAND_WAIT) {
+ if (directory_handle_command(conn) < 0) {
+ connection_mark_for_close(TO_CONN(conn));
+ return -1;
+ }
+ return 0;
+ }
+
+ max_size =
+ (TO_CONN(conn)->purpose == DIR_PURPOSE_FETCH_STATUS_VOTE) ?
+ MAX_VOTE_DL_SIZE : MAX_DIRECTORY_OBJECT_SIZE;
+
+ if (connection_get_inbuf_len(TO_CONN(conn)) > max_size) {
+ log_warn(LD_HTTP,
+ "Too much data received from directory connection (%s): "
+ "denial of service attempt, or you need to upgrade?",
+ conn->base_.address);
+ connection_mark_for_close(TO_CONN(conn));
+ return -1;
+ }
+
+ if (!conn->base_.inbuf_reached_eof)
+ log_debug(LD_HTTP,"Got data, not eof. Leaving on inbuf.");
+ return 0;
+}
+
+/** Called when we're about to finally unlink and free a directory connection:
+ * perform necessary accounting and cleanup */
+void
+connection_dir_about_to_close(dir_connection_t *dir_conn)
+{
+ connection_t *conn = TO_CONN(dir_conn);
+
+ if (conn->state < DIR_CONN_STATE_CLIENT_FINISHED) {
+ /* It's a directory connection and connecting or fetching
+ * failed: forget about this router, and maybe try again. */
+ connection_dir_client_request_failed(dir_conn);
+ }
+
+ connection_dir_client_refetch_hsdesc_if_needed(dir_conn);
+}
+
+/** Write handler for directory connections; called when all data has
+ * been flushed. Close the connection or wait for a response as
+ * appropriate.
+ */
+int
+connection_dir_finished_flushing(dir_connection_t *conn)
+{
+ tor_assert(conn);
+ tor_assert(conn->base_.type == CONN_TYPE_DIR);
+
+ if (conn->base_.marked_for_close)
+ return 0;
+
+ /* Note that we have finished writing the directory response. For direct
+ * connections this means we're done; for tunneled connections it's only
+ * an intermediate step. */
+ if (conn->dirreq_id)
+ geoip_change_dirreq_state(conn->dirreq_id, DIRREQ_TUNNELED,
+ DIRREQ_FLUSHING_DIR_CONN_FINISHED);
+ else
+ geoip_change_dirreq_state(TO_CONN(conn)->global_identifier,
+ DIRREQ_DIRECT,
+ DIRREQ_FLUSHING_DIR_CONN_FINISHED);
+ switch (conn->base_.state) {
+ case DIR_CONN_STATE_CONNECTING:
+ case DIR_CONN_STATE_CLIENT_SENDING:
+ log_debug(LD_DIR,"client finished sending command.");
+ conn->base_.state = DIR_CONN_STATE_CLIENT_READING;
+ return 0;
+ case DIR_CONN_STATE_SERVER_WRITING:
+ if (conn->spool) {
+ log_warn(LD_BUG, "Emptied a dirserv buffer, but it's still spooling!");
+ connection_mark_for_close(TO_CONN(conn));
+ } else {
+ log_debug(LD_DIRSERV, "Finished writing server response. Closing.");
+ connection_mark_for_close(TO_CONN(conn));
+ }
+ return 0;
+ default:
+ log_warn(LD_BUG,"called in unexpected state %d.",
+ conn->base_.state);
+ tor_fragile_assert();
+ return -1;
+ }
+ return 0;
+}
+
+/** Connected handler for directory connections: begin sending data to the
+ * server, and return 0.
+ * Only used when connections don't immediately connect. */
+int
+connection_dir_finished_connecting(dir_connection_t *conn)
+{
+ tor_assert(conn);
+ tor_assert(conn->base_.type == CONN_TYPE_DIR);
+ tor_assert(conn->base_.state == DIR_CONN_STATE_CONNECTING);
+
+ log_debug(LD_HTTP,"Dir connection to router %s:%u established.",
+ conn->base_.address,conn->base_.port);
+
+ /* start flushing conn */
+ conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
+ return 0;
+}
+
+/** Helper. Compare two fp_pair_t objects, and return negative, 0, or
+ * positive as appropriate. */
+static int
+compare_pairs_(const void **a, const void **b)
+{
+ const fp_pair_t *fp1 = *a, *fp2 = *b;
+ int r;
+ if ((r = fast_memcmp(fp1->first, fp2->first, DIGEST_LEN)))
+ return r;
+ else
+ return fast_memcmp(fp1->second, fp2->second, DIGEST_LEN);
+}
+
+/** Divide a string <b>res</b> of the form FP1-FP2+FP3-FP4...[.z], where each
+ * FP is a hex-encoded fingerprint, into a sequence of distinct sorted
+ * fp_pair_t. Skip malformed pairs. On success, return 0 and add those
+ * fp_pair_t into <b>pairs_out</b>. On failure, return -1. */
+int
+dir_split_resource_into_fingerprint_pairs(const char *res,
+ smartlist_t *pairs_out)
+{
+ smartlist_t *pairs_tmp = smartlist_new();
+ smartlist_t *pairs_result = smartlist_new();
+
+ smartlist_split_string(pairs_tmp, res, "+", 0, 0);
+ if (smartlist_len(pairs_tmp)) {
+ char *last = smartlist_get(pairs_tmp,smartlist_len(pairs_tmp)-1);
+ size_t last_len = strlen(last);
+ if (last_len > 2 && !strcmp(last+last_len-2, ".z")) {
+ last[last_len-2] = '\0';
+ }
+ }
+ SMARTLIST_FOREACH_BEGIN(pairs_tmp, char *, cp) {
+ if (strlen(cp) != HEX_DIGEST_LEN*2+1) {
+ log_info(LD_DIR,
+ "Skipping digest pair %s with non-standard length.", escaped(cp));
+ } else if (cp[HEX_DIGEST_LEN] != '-') {
+ log_info(LD_DIR,
+ "Skipping digest pair %s with missing dash.", escaped(cp));
+ } else {
+ fp_pair_t pair;
+ if (base16_decode(pair.first, DIGEST_LEN,
+ cp, HEX_DIGEST_LEN) != DIGEST_LEN ||
+ base16_decode(pair.second,DIGEST_LEN,
+ cp+HEX_DIGEST_LEN+1, HEX_DIGEST_LEN) != DIGEST_LEN) {
+ log_info(LD_DIR, "Skipping non-decodable digest pair %s", escaped(cp));
+ } else {
+ smartlist_add(pairs_result, tor_memdup(&pair, sizeof(pair)));
+ }
+ }
+ tor_free(cp);
+ } SMARTLIST_FOREACH_END(cp);
+ smartlist_free(pairs_tmp);
+
+ /* Uniq-and-sort */
+ smartlist_sort(pairs_result, compare_pairs_);
+ smartlist_uniq(pairs_result, compare_pairs_, tor_free_);
+
+ smartlist_add_all(pairs_out, pairs_result);
+ smartlist_free(pairs_result);
+ return 0;
+}
+
+/** Given a directory <b>resource</b> request, containing zero
+ * or more strings separated by plus signs, followed optionally by ".z", store
+ * the strings, in order, into <b>fp_out</b>. If <b>compressed_out</b> is
+ * non-NULL, set it to 1 if the resource ends in ".z", else set it to 0.
+ *
+ * If (flags & DSR_HEX), then delete all elements that aren't hex digests, and
+ * decode the rest. If (flags & DSR_BASE64), then use "-" rather than "+" as
+ * a separator, delete all the elements that aren't base64-encoded digests,
+ * and decode the rest. If (flags & DSR_DIGEST256), these digests should be
+ * 256 bits long; else they should be 160.
+ *
+ * If (flags & DSR_SORT_UNIQ), then sort the list and remove all duplicates.
+ */
+int
+dir_split_resource_into_fingerprints(const char *resource,
+ smartlist_t *fp_out, int *compressed_out,
+ int flags)
+{
+ const int decode_hex = flags & DSR_HEX;
+ const int decode_base64 = flags & DSR_BASE64;
+ const int digests_are_256 = flags & DSR_DIGEST256;
+ const int sort_uniq = flags & DSR_SORT_UNIQ;
+
+ const int digest_len = digests_are_256 ? DIGEST256_LEN : DIGEST_LEN;
+ const int hex_digest_len = digests_are_256 ?
+ HEX_DIGEST256_LEN : HEX_DIGEST_LEN;
+ const int base64_digest_len = digests_are_256 ?
+ BASE64_DIGEST256_LEN : BASE64_DIGEST_LEN;
+ smartlist_t *fp_tmp = smartlist_new();
+
+ tor_assert(!(decode_hex && decode_base64));
+ tor_assert(fp_out);
+
+ smartlist_split_string(fp_tmp, resource, decode_base64?"-":"+", 0, 0);
+ if (compressed_out)
+ *compressed_out = 0;
+ if (smartlist_len(fp_tmp)) {
+ char *last = smartlist_get(fp_tmp,smartlist_len(fp_tmp)-1);
+ size_t last_len = strlen(last);
+ if (last_len > 2 && !strcmp(last+last_len-2, ".z")) {
+ last[last_len-2] = '\0';
+ if (compressed_out)
+ *compressed_out = 1;
+ }
+ }
+ if (decode_hex || decode_base64) {
+ const size_t encoded_len = decode_hex ? hex_digest_len : base64_digest_len;
+ int i;
+ char *cp, *d = NULL;
+ for (i = 0; i < smartlist_len(fp_tmp); ++i) {
+ cp = smartlist_get(fp_tmp, i);
+ if (strlen(cp) != encoded_len) {
+ log_info(LD_DIR,
+ "Skipping digest %s with non-standard length.", escaped(cp));
+ smartlist_del_keeporder(fp_tmp, i--);
+ goto again;
+ }
+ d = tor_malloc_zero(digest_len);
+ if (decode_hex ?
+ (base16_decode(d, digest_len, cp, hex_digest_len) != digest_len) :
+ (base64_decode(d, digest_len, cp, base64_digest_len)
+ != digest_len)) {
+ log_info(LD_DIR, "Skipping non-decodable digest %s", escaped(cp));
+ smartlist_del_keeporder(fp_tmp, i--);
+ goto again;
+ }
+ smartlist_set(fp_tmp, i, d);
+ d = NULL;
+ again:
+ tor_free(cp);
+ tor_free(d);
+ }
+ }
+ if (sort_uniq) {
+ if (decode_hex || decode_base64) {
+ if (digests_are_256) {
+ smartlist_sort_digests256(fp_tmp);
+ smartlist_uniq_digests256(fp_tmp);
+ } else {
+ smartlist_sort_digests(fp_tmp);
+ smartlist_uniq_digests(fp_tmp);
+ }
+ } else {
+ smartlist_sort_strings(fp_tmp);
+ smartlist_uniq_strings(fp_tmp);
+ }
+ }
+ smartlist_add_all(fp_out, fp_tmp);
+ smartlist_free(fp_tmp);
+ return 0;
+}
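
A hedged example of splitting a hex resource string (editor's sketch; the fingerprints below are arbitrary 40-character hex strings, not real relays):

    smartlist_t *digests = smartlist_new();
    int compressed = 0;
    const char *resource =
      "0123456789ABCDEF0123456789ABCDEF01234567+"
      "89ABCDEF0123456789ABCDEF0123456789ABCDEF.z";
    dir_split_resource_into_fingerprints(resource, digests, &compressed,
                                         DSR_HEX | DSR_SORT_UNIQ);
    /* digests now holds two 20-byte binary digests, and compressed == 1
     * because of the trailing ".z". */
    SMARTLIST_FOREACH(digests, char *, d, tor_free(d));
    smartlist_free(digests);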
+
+/** As dir_split_resource_into_fingerprints, but instead fills
+ * <b>spool_out</b> with a list of spooled_resource_t objects for the resource
+ * identified through <b>source</b>. */
+int
+dir_split_resource_into_spoolable(const char *resource,
+ dir_spool_source_t source,
+ smartlist_t *spool_out,
+ int *compressed_out,
+ int flags)
+{
+ smartlist_t *fingerprints = smartlist_new();
+
+ tor_assert(flags & (DSR_HEX|DSR_BASE64));
+ const size_t digest_len =
+ (flags & DSR_DIGEST256) ? DIGEST256_LEN : DIGEST_LEN;
+
+ int r = dir_split_resource_into_fingerprints(resource, fingerprints,
+ compressed_out, flags);
+ /* This is not a very efficient implementation XXXX */
+ SMARTLIST_FOREACH_BEGIN(fingerprints, uint8_t *, digest) {
+ spooled_resource_t *spooled =
+ spooled_resource_new(source, digest, digest_len);
+ if (spooled)
+ smartlist_add(spool_out, spooled);
+ tor_free(digest);
+ } SMARTLIST_FOREACH_END(digest);
+
+ smartlist_free(fingerprints);
+ return r;
+}
diff --git a/src/feature/dircommon/directory.h b/src/feature/dircommon/directory.h
new file mode 100644
index 0000000000..ba3f8c1b0e
--- /dev/null
+++ b/src/feature/dircommon/directory.h
@@ -0,0 +1,129 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file directory.h
+ * \brief Header file for directory.c.
+ **/
+
+#ifndef TOR_DIRECTORY_H
+#define TOR_DIRECTORY_H
+
+dir_connection_t *TO_DIR_CONN(connection_t *c);
+
+#define DIR_CONN_STATE_MIN_ 1
+/** State for connection to directory server: waiting for connect(). */
+#define DIR_CONN_STATE_CONNECTING 1
+/** State for connection to directory server: sending HTTP request. */
+#define DIR_CONN_STATE_CLIENT_SENDING 2
+/** State for connection to directory server: reading HTTP response. */
+#define DIR_CONN_STATE_CLIENT_READING 3
+/** State for connection to directory server: happy and finished. */
+#define DIR_CONN_STATE_CLIENT_FINISHED 4
+/** State for connection at directory server: waiting for HTTP request. */
+#define DIR_CONN_STATE_SERVER_COMMAND_WAIT 5
+/** State for connection at directory server: sending HTTP response. */
+#define DIR_CONN_STATE_SERVER_WRITING 6
+#define DIR_CONN_STATE_MAX_ 6
+
+#define DIR_PURPOSE_MIN_ 4
+/** A connection to a directory server: set after a v2 rendezvous
+ * descriptor is downloaded. */
+#define DIR_PURPOSE_HAS_FETCHED_RENDDESC_V2 4
+/** A connection to a directory server: download one or more server
+ * descriptors. */
+#define DIR_PURPOSE_FETCH_SERVERDESC 6
+/** A connection to a directory server: download one or more extra-info
+ * documents. */
+#define DIR_PURPOSE_FETCH_EXTRAINFO 7
+/** A connection to a directory server: upload a server descriptor. */
+#define DIR_PURPOSE_UPLOAD_DIR 8
+/** A connection to a directory server: upload a v3 networkstatus vote. */
+#define DIR_PURPOSE_UPLOAD_VOTE 10
+/** A connection to a directory server: upload a v3 consensus signature */
+#define DIR_PURPOSE_UPLOAD_SIGNATURES 11
+/** A connection to a directory server: download one or more v3 networkstatus
+ * votes. */
+#define DIR_PURPOSE_FETCH_STATUS_VOTE 12
+/** A connection to a directory server: download a v3 detached signatures
+ * object for a consensus. */
+#define DIR_PURPOSE_FETCH_DETACHED_SIGNATURES 13
+/** A connection to a directory server: download a v3 networkstatus
+ * consensus. */
+#define DIR_PURPOSE_FETCH_CONSENSUS 14
+/** A connection to a directory server: download one or more directory
+ * authority certificates. */
+#define DIR_PURPOSE_FETCH_CERTIFICATE 15
+
+/** Purpose for connection at a directory server. */
+#define DIR_PURPOSE_SERVER 16
+/** A connection to a hidden service directory server: upload a v2 rendezvous
+ * descriptor. */
+#define DIR_PURPOSE_UPLOAD_RENDDESC_V2 17
+/** A connection to a hidden service directory server: download a v2 rendezvous
+ * descriptor. */
+#define DIR_PURPOSE_FETCH_RENDDESC_V2 18
+/** A connection to a directory server: download a microdescriptor. */
+#define DIR_PURPOSE_FETCH_MICRODESC 19
+/** A connection to a hidden service directory: upload a v3 descriptor. */
+#define DIR_PURPOSE_UPLOAD_HSDESC 20
+/** A connection to a hidden service directory: fetch a v3 descriptor. */
+#define DIR_PURPOSE_FETCH_HSDESC 21
+/** A connection to a directory server: set after a hidden service descriptor
+ * is downloaded. */
+#define DIR_PURPOSE_HAS_FETCHED_HSDESC 22
+#define DIR_PURPOSE_MAX_ 22
+
+/** True iff <b>p</b> is a purpose corresponding to uploading
+ * data to a directory server. */
+#define DIR_PURPOSE_IS_UPLOAD(p) \
+ ((p)==DIR_PURPOSE_UPLOAD_DIR || \
+ (p)==DIR_PURPOSE_UPLOAD_VOTE || \
+ (p)==DIR_PURPOSE_UPLOAD_SIGNATURES || \
+ (p)==DIR_PURPOSE_UPLOAD_RENDDESC_V2 || \
+ (p)==DIR_PURPOSE_UPLOAD_HSDESC)
+
+enum compress_method_t;
+int parse_http_response(const char *headers, int *code, time_t *date,
+ enum compress_method_t *compression, char **response);
+int parse_http_command(const char *headers,
+ char **command_out, char **url_out);
+char *http_get_header(const char *headers, const char *which);
+
+int connection_dir_is_encrypted(const dir_connection_t *conn);
+int connection_dir_reached_eof(dir_connection_t *conn);
+int connection_dir_process_inbuf(dir_connection_t *conn);
+int connection_dir_finished_flushing(dir_connection_t *conn);
+int connection_dir_finished_connecting(dir_connection_t *conn);
+void connection_dir_about_to_close(dir_connection_t *dir_conn);
+
+#define DSR_HEX (1<<0)
+#define DSR_BASE64 (1<<1)
+#define DSR_DIGEST256 (1<<2)
+#define DSR_SORT_UNIQ (1<<3)
+int dir_split_resource_into_fingerprints(const char *resource,
+ smartlist_t *fp_out, int *compressed_out,
+ int flags);
+enum dir_spool_source_t;
+int dir_split_resource_into_spoolable(const char *resource,
+ enum dir_spool_source_t source,
+ smartlist_t *spool_out,
+ int *compressed_out,
+ int flags);
+int dir_split_resource_into_fingerprint_pairs(const char *res,
+ smartlist_t *pairs_out);
+char *directory_dump_request_log(void);
+void note_request(const char *key, size_t bytes);
+
+int purpose_needs_anonymity(uint8_t dir_purpose, uint8_t router_purpose,
+ const char *resource);
+
+char *authdir_type_to_string(dirinfo_type_t auth);
+
+#define X_ADDRESS_HEADER "X-Your-Address-Is: "
+#define X_OR_DIFF_FROM_CONSENSUS_HEADER "X-Or-Diff-From-Consensus: "
+
+#endif /* !defined(TOR_DIRECTORY_H) */
diff --git a/src/feature/dircommon/fp_pair.c b/src/feature/dircommon/fp_pair.c
new file mode 100644
index 0000000000..284600df77
--- /dev/null
+++ b/src/feature/dircommon/fp_pair.c
@@ -0,0 +1,315 @@
+/* Copyright (c) 2013-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file fp_pair.c
+ *
+ * \brief Manages data structures for associating pairs of fingerprints. Used
+ * to handle combinations of identity/signing-key fingerprints for
+ * authorities.
+ *
+ * This is a nice, simple, compact data structure module that handles a map
+ * from (signing key fingerprint, identity key fingerprint) to void *. The
+ * fingerprints here are SHA1 digests of RSA keys.
+ *
+ * This structure is used in directory.c and in routerlist.c for handling
+ * authority certificates, since we never want more than a single
+ * certificate for any (ID key, signing key) pair.
+ **/
+
+#include "core/or/or.h"
+#include "feature/dircommon/fp_pair.h"
+
+/* Define fp_pair_map_t structures */
+
+struct fp_pair_map_entry_s {
+ HT_ENTRY(fp_pair_map_entry_s) node;
+ void *val;
+ fp_pair_t key;
+};
+
+struct fp_pair_map_s {
+ HT_HEAD(fp_pair_map_impl, fp_pair_map_entry_s) head;
+};
+
+/*
+ * Hash function and equality checker for fp_pair_map_t
+ */
+
+/** Compare fp_pair_map_entry_t objects by key value. */
+static inline int
+fp_pair_map_entries_eq(const fp_pair_map_entry_t *a,
+ const fp_pair_map_entry_t *b)
+{
+ return tor_memeq(&(a->key), &(b->key), sizeof(fp_pair_t));
+}
+
+/** Return a hash value for an fp_pair_map_entry_t. */
+static inline unsigned int
+fp_pair_map_entry_hash(const fp_pair_map_entry_t *a)
+{
+ tor_assert(sizeof(a->key) == DIGEST_LEN*2);
+ return (unsigned) siphash24g(&a->key, DIGEST_LEN*2);
+}
+
+/*
+ * Hash table functions for fp_pair_map_t
+ */
+
+HT_PROTOTYPE(fp_pair_map_impl, fp_pair_map_entry_s, node,
+ fp_pair_map_entry_hash, fp_pair_map_entries_eq)
+HT_GENERATE2(fp_pair_map_impl, fp_pair_map_entry_s, node,
+ fp_pair_map_entry_hash, fp_pair_map_entries_eq,
+ 0.6, tor_reallocarray_, tor_free_)
+
+/** Constructor to create a new empty map from fp_pair_t to void *
+ */
+
+fp_pair_map_t *
+fp_pair_map_new(void)
+{
+ fp_pair_map_t *result;
+
+ result = tor_malloc(sizeof(fp_pair_map_t));
+ HT_INIT(fp_pair_map_impl, &result->head);
+ return result;
+}
+
+/** Set the current value for key to val; returns the previous
+ * value for key if one was set, or NULL if one was not.
+ */
+
+void *
+fp_pair_map_set(fp_pair_map_t *map, const fp_pair_t *key, void *val)
+{
+ fp_pair_map_entry_t *resolve;
+ fp_pair_map_entry_t search;
+ void *oldval;
+
+ tor_assert(map);
+ tor_assert(key);
+ tor_assert(val);
+
+ memcpy(&(search.key), key, sizeof(*key));
+ resolve = HT_FIND(fp_pair_map_impl, &(map->head), &search);
+ if (resolve) {
+ oldval = resolve->val;
+ resolve->val = val;
+ } else {
+ resolve = tor_malloc_zero(sizeof(fp_pair_map_entry_t));
+ memcpy(&(resolve->key), key, sizeof(*key));
+ resolve->val = val;
+ HT_INSERT(fp_pair_map_impl, &(map->head), resolve);
+ oldval = NULL;
+ }
+
+ return oldval;
+}
+
+/** Set the current value for the key (first, second) to val; returns
+ * the previous value for key if one was set, or NULL if one was not.
+ */
+
+void *
+fp_pair_map_set_by_digests(fp_pair_map_t *map,
+ const char *first, const char *second,
+ void *val)
+{
+ fp_pair_t k;
+
+ tor_assert(first);
+ tor_assert(second);
+
+ memcpy(k.first, first, DIGEST_LEN);
+ memcpy(k.second, second, DIGEST_LEN);
+
+ return fp_pair_map_set(map, &k, val);
+}
+
+/** Return the current value associated with key, or NULL if no value is set.
+ */
+
+void *
+fp_pair_map_get(const fp_pair_map_t *map, const fp_pair_t *key)
+{
+ fp_pair_map_entry_t *resolve;
+ fp_pair_map_entry_t search;
+ void *val = NULL;
+
+ tor_assert(map);
+ tor_assert(key);
+
+ memcpy(&(search.key), key, sizeof(*key));
+ resolve = HT_FIND(fp_pair_map_impl, &(map->head), &search);
+ if (resolve) val = resolve->val;
+
+ return val;
+}
+
+/** Return the current value associated with the key (first, second), or
+ * NULL if no value is set.
+ */
+
+void *
+fp_pair_map_get_by_digests(const fp_pair_map_t *map,
+ const char *first, const char *second)
+{
+ fp_pair_t k;
+
+ tor_assert(first);
+ tor_assert(second);
+
+ memcpy(k.first, first, DIGEST_LEN);
+ memcpy(k.second, second, DIGEST_LEN);
+
+ return fp_pair_map_get(map, &k);
+}
+
+/** Remove the value currently associated with key from the map.
+ * Return the value if one was set, or NULL if there was no entry for
+ * key. The caller must free any storage associated with the
+ * returned value.
+ */
+
+void *
+fp_pair_map_remove(fp_pair_map_t *map, const fp_pair_t *key)
+{
+ fp_pair_map_entry_t *resolve;
+ fp_pair_map_entry_t search;
+ void *val = NULL;
+
+ tor_assert(map);
+ tor_assert(key);
+
+ memcpy(&(search.key), key, sizeof(*key));
+ resolve = HT_REMOVE(fp_pair_map_impl, &(map->head), &search);
+ if (resolve) {
+ val = resolve->val;
+ tor_free(resolve);
+ }
+
+ return val;
+}
+
+/** Remove all entries from map, and deallocate storage for those entries.
+ * If free_val is provided, it is invoked on every value in map.
+ */
+
+void
+fp_pair_map_free_(fp_pair_map_t *map, void (*free_val)(void*))
+{
+ fp_pair_map_entry_t **ent, **next, *this;
+
+ if (map) {
+ for (ent = HT_START(fp_pair_map_impl, &(map->head));
+ ent != NULL; ent = next) {
+ this = *ent;
+ next = HT_NEXT_RMV(fp_pair_map_impl, &(map->head), ent);
+ if (free_val) free_val(this->val);
+ tor_free(this);
+ }
+ tor_assert(HT_EMPTY(&(map->head)));
+ HT_CLEAR(fp_pair_map_impl, &(map->head));
+ tor_free(map);
+ }
+}
+
+/** Return true iff map has no entries.
+ */
+
+int
+fp_pair_map_isempty(const fp_pair_map_t *map)
+{
+ tor_assert(map);
+
+ return HT_EMPTY(&(map->head));
+}
+
+/** Return the number of items in map.
+ */
+
+int
+fp_pair_map_size(const fp_pair_map_t *map)
+{
+ tor_assert(map);
+
+ return HT_SIZE(&(map->head));
+}
+
+/** Return an iterator pointing to the start of map.
+ */
+
+fp_pair_map_iter_t *
+fp_pair_map_iter_init(fp_pair_map_t *map)
+{
+ tor_assert(map);
+
+ return HT_START(fp_pair_map_impl, &(map->head));
+}
+
+/** Advance iter a single step to the next entry of map, and return
+ * the new iterator position (NULL once past the last entry).
+ */
+
+fp_pair_map_iter_t *
+fp_pair_map_iter_next(fp_pair_map_t *map, fp_pair_map_iter_t *iter)
+{
+ tor_assert(map);
+ tor_assert(iter);
+
+ return HT_NEXT(fp_pair_map_impl, &(map->head), iter);
+}
+
+/** Advance iter a single step to the next entry of map, removing the current
+ * entry, and return the new iterator position.
+ */
+
+fp_pair_map_iter_t *
+fp_pair_map_iter_next_rmv(fp_pair_map_t *map, fp_pair_map_iter_t *iter)
+{
+ fp_pair_map_entry_t *rmv;
+
+ tor_assert(map);
+ tor_assert(iter);
+ tor_assert(*iter);
+
+ rmv = *iter;
+ iter = HT_NEXT_RMV(fp_pair_map_impl, &(map->head), iter);
+ tor_free(rmv);
+
+ return iter;
+}
+
+/** Set *key_out and *val_out to the current entry pointed to by iter.
+ */
+
+void
+fp_pair_map_iter_get(fp_pair_map_iter_t *iter,
+ fp_pair_t *key_out, void **val_out)
+{
+ tor_assert(iter);
+ tor_assert(*iter);
+
+ if (key_out) memcpy(key_out, &((*iter)->key), sizeof(fp_pair_t));
+ if (val_out) *val_out = (*iter)->val;
+}
+
+/** Return true iff iter has advanced past the last entry of its map.
+ */
+
+int
+fp_pair_map_iter_done(fp_pair_map_iter_t *iter)
+{
+ return (iter == NULL);
+}
+
+/** Assert if anything has gone wrong with the internal
+ * representation of map.
+ */
+
+void
+fp_pair_map_assert_ok(const fp_pair_map_t *map)
+{
+ tor_assert(!fp_pair_map_impl_HT_REP_IS_BAD_(&(map->head)));
+}
+
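
As a hedged illustration of the map API defined in this file (editor's sketch, not part of the diff), a caller might store one value under a pair of digests and then walk the map with the iterator functions; the digest buffers and dummy value below are placeholders:

    fp_pair_map_t *map = fp_pair_map_new();
    fp_pair_map_iter_t *iter;
    fp_pair_t key;
    void *val;
    char id_digest[DIGEST_LEN] = {0};  /* placeholder identity digest */
    char sk_digest[DIGEST_LEN] = {0};  /* placeholder signing-key digest */
    int dummy_value = 1;               /* placeholder mapped value */

    fp_pair_map_set_by_digests(map, id_digest, sk_digest, &dummy_value);

    for (iter = fp_pair_map_iter_init(map);
         !fp_pair_map_iter_done(iter);
         iter = fp_pair_map_iter_next(map, iter)) {
      fp_pair_map_iter_get(iter, &key, &val);
      /* ... use key.first, key.second, and val here ... */
    }
    fp_pair_map_free(map, NULL);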
diff --git a/src/feature/dircommon/fp_pair.h b/src/feature/dircommon/fp_pair.h
new file mode 100644
index 0000000000..5041583e88
--- /dev/null
+++ b/src/feature/dircommon/fp_pair.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2013-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file fp_pair.h
+ * \brief Header file for fp_pair.c.
+ **/
+
+#ifndef _TOR_FP_PAIR_H
+#define _TOR_FP_PAIR_H
+
+/** A pair of digests created by dir_split_resource_into_fingerprint_pairs() */
+typedef struct {
+ char first[DIGEST_LEN];
+ char second[DIGEST_LEN];
+} fp_pair_t;
+
+/*
+ * Declare fp_pair_map_t functions and structs
+ */
+
+typedef struct fp_pair_map_entry_s fp_pair_map_entry_t;
+typedef struct fp_pair_map_s fp_pair_map_t;
+typedef fp_pair_map_entry_t *fp_pair_map_iter_t;
+
+fp_pair_map_t * fp_pair_map_new(void);
+void * fp_pair_map_set(fp_pair_map_t *map, const fp_pair_t *key, void *val);
+void * fp_pair_map_set_by_digests(fp_pair_map_t *map,
+ const char *first, const char *second,
+ void *val);
+void * fp_pair_map_get(const fp_pair_map_t *map, const fp_pair_t *key);
+void * fp_pair_map_get_by_digests(const fp_pair_map_t *map,
+ const char *first, const char *second);
+void * fp_pair_map_remove(fp_pair_map_t *map, const fp_pair_t *key);
+void fp_pair_map_free_(fp_pair_map_t *map, void (*free_val)(void*));
+#define fp_pair_map_free(map, free_val) do { \
+ fp_pair_map_free_((map), (free_val)); \
+ (map) = NULL; \
+ } while (0)
+
+int fp_pair_map_isempty(const fp_pair_map_t *map);
+int fp_pair_map_size(const fp_pair_map_t *map);
+fp_pair_map_iter_t * fp_pair_map_iter_init(fp_pair_map_t *map);
+fp_pair_map_iter_t * fp_pair_map_iter_next(fp_pair_map_t *map,
+ fp_pair_map_iter_t *iter);
+fp_pair_map_iter_t * fp_pair_map_iter_next_rmv(fp_pair_map_t *map,
+ fp_pair_map_iter_t *iter);
+void fp_pair_map_iter_get(fp_pair_map_iter_t *iter,
+ fp_pair_t *key_out, void **val_out);
+int fp_pair_map_iter_done(fp_pair_map_iter_t *iter);
+void fp_pair_map_assert_ok(const fp_pair_map_t *map);
+
+#undef DECLARE_MAP_FNS
+
+#endif /* !defined(_TOR_FP_PAIR_H) */
+
diff --git a/src/feature/dircommon/vote_timing_st.h b/src/feature/dircommon/vote_timing_st.h
new file mode 100644
index 0000000000..47b90ab009
--- /dev/null
+++ b/src/feature/dircommon/vote_timing_st.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef VOTE_TIMING_ST_H
+#define VOTE_TIMING_ST_H
+
+/** Describes the schedule by which votes should be generated. */
+struct vote_timing_t {
+ /** Length in seconds between one consensus becoming valid and the next
+ * becoming valid. */
+ int vote_interval;
+ /** For how many intervals is a consensus valid? */
+ int n_intervals_valid;
+ /** Time in seconds allowed to propagate votes */
+ int vote_delay;
+ /** Time in seconds allowed to propagate signatures */
+ int dist_delay;
+};
+
+#endif /* !defined(VOTE_TIMING_ST_H) */
+
diff --git a/src/feature/dircommon/voting_schedule.c b/src/feature/dircommon/voting_schedule.c
new file mode 100644
index 0000000000..0a7476eda7
--- /dev/null
+++ b/src/feature/dircommon/voting_schedule.c
@@ -0,0 +1,194 @@
+/* Copyright (c) 2018-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file voting_schedule.c
+ * \brief Functions from the directory authority subsystem that relate
+ * specifically to voting but are used by many parts of Tor. The full
+ * feature is built as part of the dirauth module.
+ **/
+
+#define VOTING_SCHEDULE_PRIVATE
+#include "feature/dircommon/voting_schedule.h"
+
+#include "core/or/or.h"
+#include "app/config/config.h"
+#include "feature/nodelist/networkstatus.h"
+
+#include "feature/nodelist/networkstatus_st.h"
+
+/* =====
+ * Vote scheduling
+ * ===== */
+
+/** Return the start of the next interval of size <b>interval</b> (in
+ * seconds) after <b>now</b>, plus <b>offset</b>. Midnight always
+ * starts a fresh interval, and if the last interval of a day would be
+ * truncated to less than half its size, it is rolled into the
+ * previous interval. */
+time_t
+voting_schedule_get_start_of_next_interval(time_t now, int interval,
+ int offset)
+{
+ struct tm tm;
+ time_t midnight_today=0;
+ time_t midnight_tomorrow;
+ time_t next;
+
+ tor_gmtime_r(&now, &tm);
+ tm.tm_hour = 0;
+ tm.tm_min = 0;
+ tm.tm_sec = 0;
+
+ if (tor_timegm(&tm, &midnight_today) < 0) {
+ // LCOV_EXCL_START
+ log_warn(LD_BUG, "Ran into an invalid time when trying to find midnight.");
+ // LCOV_EXCL_STOP
+ }
+ midnight_tomorrow = midnight_today + (24*60*60);
+
+ next = midnight_today + ((now-midnight_today)/interval + 1)*interval;
+
+ /* Intervals never cross midnight. */
+ if (next > midnight_tomorrow)
+ next = midnight_tomorrow;
+
+ /* If the interval would only last half as long as it's supposed to, then
+ * skip over to the next day. */
+ if (next + interval/2 > midnight_tomorrow)
+ next = midnight_tomorrow;
+
+ next += offset;
+ if (next - interval > now)
+ next -= interval;
+
+ return next;
+}
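
A worked example of the alignment rule above (editor's note, values illustrative): with a 3600-second interval and a 300-second offset, a "now" of 10:20:00 UTC falls in the 10:00-11:00 interval, so the next interval starts at 11:00:00 and the returned time is 11:05:00.

    time_t now = approx_time();
    /* One-hour voting intervals, offset five minutes past the hour. */
    time_t next = voting_schedule_get_start_of_next_interval(now, 3600, 300);
    (void) next;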
+
+/* Populate and return a new voting_schedule_t that can be used to schedule
+ * voting. The object is allocated on the heap and it's the responsibility of
+ * the caller to free it. Can't fail. */
+static voting_schedule_t *
+get_voting_schedule(const or_options_t *options, time_t now, int severity)
+{
+ int interval, vote_delay, dist_delay;
+ time_t start;
+ time_t end;
+ networkstatus_t *consensus;
+ voting_schedule_t *new_voting_schedule;
+
+ new_voting_schedule = tor_malloc_zero(sizeof(voting_schedule_t));
+
+ consensus = networkstatus_get_live_consensus(now);
+
+ if (consensus) {
+ interval = (int)( consensus->fresh_until - consensus->valid_after );
+ vote_delay = consensus->vote_seconds;
+ dist_delay = consensus->dist_seconds;
+
+ /* Note down the consensus valid after, so that we detect outdated voting
+ * schedules in case of skewed clocks etc. */
+ new_voting_schedule->live_consensus_valid_after = consensus->valid_after;
+ } else {
+ interval = options->TestingV3AuthInitialVotingInterval;
+ vote_delay = options->TestingV3AuthInitialVoteDelay;
+ dist_delay = options->TestingV3AuthInitialDistDelay;
+ }
+
+ tor_assert(interval > 0);
+
+ if (vote_delay + dist_delay > interval/2)
+ vote_delay = dist_delay = interval / 4;
+
+ start = new_voting_schedule->interval_starts =
+ voting_schedule_get_start_of_next_interval(now,interval,
+ options->TestingV3AuthVotingStartOffset);
+ end = voting_schedule_get_start_of_next_interval(start+1, interval,
+ options->TestingV3AuthVotingStartOffset);
+
+ tor_assert(end > start);
+
+ new_voting_schedule->fetch_missing_signatures = start - (dist_delay/2);
+ new_voting_schedule->voting_ends = start - dist_delay;
+ new_voting_schedule->fetch_missing_votes =
+ start - dist_delay - (vote_delay/2);
+ new_voting_schedule->voting_starts = start - dist_delay - vote_delay;
+
+ {
+ char tbuf[ISO_TIME_LEN+1];
+ format_iso_time(tbuf, new_voting_schedule->interval_starts);
+ tor_log(severity, LD_DIR,"Choosing expected valid-after time as %s: "
+ "consensus_set=%d, interval=%d",
+ tbuf, consensus?1:0, interval);
+ }
+
+ return new_voting_schedule;
+}
+
+#define voting_schedule_free(s) \
+ FREE_AND_NULL(voting_schedule_t, voting_schedule_free_, (s))
+
+/** Frees a voting_schedule_t. This should be used instead of the generic
+ * tor_free. */
+static void
+voting_schedule_free_(voting_schedule_t *voting_schedule_to_free)
+{
+ if (!voting_schedule_to_free)
+ return;
+ tor_free(voting_schedule_to_free);
+}
+
+voting_schedule_t voting_schedule;
+
+/* Return the next voting valid-after time, computed from the current
+ * approximate time (approx_time()). */
+time_t
+voting_schedule_get_next_valid_after_time(void)
+{
+ time_t now = approx_time();
+ bool need_to_recalculate_voting_schedule = false;
+
+ /* This is a safeguard to make sure that the voting schedule
+ * static object is at least initialized. Using this function with a zeroed
+ * voting schedule can lead to bugs. */
+ if (tor_mem_is_zero((const char *) &voting_schedule,
+ sizeof(voting_schedule))) {
+ need_to_recalculate_voting_schedule = true;
+ goto done; /* no need for next check if we have to recalculate anyway */
+ }
+
+ /* Also make sure we are not using an outdated voting schedule. If we have a
+ * newer consensus, make sure we recalculate the voting schedule. */
+ const networkstatus_t *ns = networkstatus_get_live_consensus(now);
+ if (ns && ns->valid_after != voting_schedule.live_consensus_valid_after) {
+ log_info(LD_DIR, "Voting schedule is outdated: recalculating (%d/%d)",
+ (int) ns->valid_after,
+ (int) voting_schedule.live_consensus_valid_after);
+ need_to_recalculate_voting_schedule = true;
+ }
+
+ done:
+ if (need_to_recalculate_voting_schedule) {
+ voting_schedule_recalculate_timing(get_options(), approx_time());
+ voting_schedule.created_on_demand = 1;
+ }
+
+ return voting_schedule.interval_starts;
+}
+
+/** Set voting_schedule to hold the timing for the next vote we should be
+ * casting. Every kind of Tor instance does this, because the HS subsystem
+ * needs the timing as well in order to function properly. */
+void
+voting_schedule_recalculate_timing(const or_options_t *options, time_t now)
+{
+ voting_schedule_t *new_voting_schedule;
+
+ /* get the new voting schedule */
+ new_voting_schedule = get_voting_schedule(options, now, LOG_INFO);
+ tor_assert(new_voting_schedule);
+
+ /* Fill in the global static struct now */
+ memcpy(&voting_schedule, new_voting_schedule, sizeof(voting_schedule));
+ voting_schedule_free(new_voting_schedule);
+}
+
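
As a final hedged sketch (editor's illustration, not from this diff), a caller that only needs the next expected valid-after time can rely on the on-demand recalculation above:

    /* Returns the interval start, recomputing the shared schedule first if
     * it is uninitialized or based on an older consensus. */
    time_t next_valid_after = voting_schedule_get_next_valid_after_time();
    (void) next_valid_after;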
diff --git a/src/feature/dircommon/voting_schedule.h b/src/feature/dircommon/voting_schedule.h
new file mode 100644
index 0000000000..bafd81184e
--- /dev/null
+++ b/src/feature/dircommon/voting_schedule.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2018-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file voting_schedule.h
+ * \brief Header file for voting_schedule.c.
+ **/
+
+#ifndef TOR_VOTING_SCHEDULE_H
+#define TOR_VOTING_SCHEDULE_H
+
+#include "core/or/or.h"
+
+/** Scheduling information for a voting interval. */
+typedef struct {
+ /** When do we generate and distribute our vote for this interval? */
+ time_t voting_starts;
+ /** When do we send an HTTP request for any votes that haven't
+ * been posted yet? */
+ time_t fetch_missing_votes;
+ /** When do we give up on getting more votes and generate a consensus? */
+ time_t voting_ends;
+ /** When do we send an HTTP request for any signatures we're expecting to
+ * see on the consensus? */
+ time_t fetch_missing_signatures;
+ /** When do we publish the consensus? */
+ time_t interval_starts;
+
+ /* True iff we have generated and distributed our vote. */
+ int have_voted;
+ /* True iff we've requested missing votes. */
+ int have_fetched_missing_votes;
+ /* True iff we have built a consensus and sent the signatures around. */
+ int have_built_consensus;
+ /* True iff we've fetched missing signatures. */
+ int have_fetched_missing_signatures;
+ /* True iff we have published our consensus. */
+ int have_published_consensus;
+
+ /* True iff this voting schedule was created on demand, that is, neither
+ * through the normal vote operation of a dirauth nor when a consensus is
+ * set. This only applies to a directory authority that needs to
+ * recalculate the voting timings for its first vote, even though this
+ * object was initialized prior to voting. */
+ int created_on_demand;
+
+ /** The valid-after time of the last live consensus that filled this voting
+ * schedule. It's used to detect outdated voting schedules. */
+ time_t live_consensus_valid_after;
+} voting_schedule_t;
+
+/* Public API. */
+
+extern voting_schedule_t voting_schedule;
+
+void voting_schedule_recalculate_timing(const or_options_t *options,
+ time_t now);
+
+time_t voting_schedule_get_start_of_next_interval(time_t now,
+ int interval,
+ int offset);
+time_t voting_schedule_get_next_valid_after_time(void);
+
+#endif /* !defined(TOR_VOTING_SCHEDULE_H) */
+