author | Nick Mathewson <nickm@torproject.org> | 2018-07-05 16:31:38 -0400
---|---|---
committer | Nick Mathewson <nickm@torproject.org> | 2018-07-05 17:15:50 -0400
commit | 63b4ea22af8e8314dd718f02046de5f4b91edf9d | (patch)
tree | af52b6fba37f22c86447fd5267dd5eb557807c8b | /src/feature/dircommon
parent | ce84200542f48a92e8b56a8d032401ecd153e90c | (diff)
download | tor-63b4ea22af8e8314dd718f02046de5f4b91edf9d.tar.gz | tor-63b4ea22af8e8314dd718f02046de5f4b91edf9d.zip
Move literally everything out of src/or
This commit won't build yet -- it just puts everything in a slightly
more logical place.
The reasoning here is that "src/core" will hold the stuff that every (or
nearly every) tor instance will need in order to do onion routing.
Other features (including some necessary ones) will live in
"src/feature". The "src/app" directory will hold the stuff needed
to have Tor be an application you can actually run.
This commit DOES NOT refactor the former contents of src/or into a
logical set of acyclic libraries, or change any code at all. That
will have to come in the future.
We will continue to move things around and split them in the future,
but I hope this lays a reasonable groundwork for doing so.
Diffstat (limited to 'src/feature/dircommon')
-rw-r--r-- | src/feature/dircommon/consdiff.c | 1414
-rw-r--r-- | src/feature/dircommon/consdiff.h | 99
-rw-r--r-- | src/feature/dircommon/dir_connection_st.h | 67
-rw-r--r-- | src/feature/dircommon/fp_pair.c | 315
-rw-r--r-- | src/feature/dircommon/fp_pair.h | 56
-rw-r--r-- | src/feature/dircommon/vote_timing_st.h | 24
-rw-r--r-- | src/feature/dircommon/voting_schedule.c | 194
-rw-r--r-- | src/feature/dircommon/voting_schedule.h | 65
8 files changed, 2234 insertions, 0 deletions
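
Most of the new code lands in consdiff.c, which implements consensus diffs in a minimal ed format behind two public entry points, consensus_diff_generate() and consensus_diff_apply() (each returns a newly allocated string, or NULL on failure). The following is a rough, hypothetical usage sketch: the caller name and control flow are illustrative assumptions, not code from this commit.

```c
/* Hypothetical usage sketch (not part of this commit): exercise the two
 * public consensus-diff entry points added in consdiff.c.  The caller
 * name and error handling are illustrative only. */
#include "or/or.h"
#include "or/consdiff.h"

static int
demo_consensus_diff(const char *old_cons, const char *new_cons)
{
  /* Build a diff in the "network-status-diff-version 1" format; this
   * returns NULL if either input is malformed or its router entries are
   * not sorted by identity. */
  char *diff = consensus_diff_generate(old_cons, new_cons);
  if (!diff)
    return -1;

  /* Applying the diff to the old consensus must reproduce the new one;
   * consensus_diff_apply() checks both SHA3-256 digests named in the
   * diff header before returning the rebuilt document. */
  char *rebuilt = consensus_diff_apply(old_cons, diff);
  int ok = (rebuilt != NULL) && !strcmp(rebuilt, new_cons);

  tor_free(diff);
  tor_free(rebuilt);
  return ok ? 0 : -1;
}
```

The round-trip check above mirrors what consdiff_gen_diff() already does internally: it re-applies the generated ed diff and refuses to emit a diff that does not reproduce the target consensus.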
diff --git a/src/feature/dircommon/consdiff.c b/src/feature/dircommon/consdiff.c new file mode 100644 index 0000000000..88edda716e --- /dev/null +++ b/src/feature/dircommon/consdiff.c @@ -0,0 +1,1414 @@ +/* Copyright (c) 2014, Daniel Martà + * Copyright (c) 2014-2018, The Tor Project, Inc. */ +/* See LICENSE for licensing information */ + +/** + * \file consdiff.c + * \brief Consensus diff implementation, including both the generation and the + * application of diffs in a minimal ed format. + * + * The consensus diff application is done in consdiff_apply_diff, which relies + * on apply_ed_diff for the main ed diff part and on some digest helper + * functions to check the digest hashes found in the consensus diff header. + * + * The consensus diff generation is more complex. consdiff_gen_diff generates + * it, relying on gen_ed_diff to generate the ed diff and some digest helper + * functions to generate the digest hashes. + * + * gen_ed_diff is the tricky bit. In it simplest form, it will take quadratic + * time and linear space to generate an ed diff given two smartlists. As shown + * in its comment section, calling calc_changes on the entire two consensuses + * will calculate what is to be added and what is to be deleted in the diff. + * Its comment section briefly explains how it works. + * + * In our case specific to consensuses, we take advantage of the fact that + * consensuses list routers sorted by their identities. We use that + * information to avoid running calc_changes on the whole smartlists. + * gen_ed_diff will navigate through the two consensuses identity by identity + * and will send small couples of slices to calc_changes, keeping the running + * time near-linear. This is explained in more detail in the gen_ed_diff + * comments. + * + * The allocation strategy tries to save time and memory by avoiding needless + * copies. Instead of actually splitting the inputs into separate strings, we + * allocate cdline_t objects, each of which represents a line in the original + * object or in the output. We use memarea_t allocators to manage the + * temporary memory we use when generating or applying diffs. + **/ + +#define CONSDIFF_PRIVATE + +#include "or/or.h" +#include "or/consdiff.h" +#include "lib/memarea/memarea.h" +#include "or/routerparse.h" + +static const char* ns_diff_version = "network-status-diff-version 1"; +static const char* hash_token = "hash"; + +static char *consensus_join_lines(const smartlist_t *inp); + +/** Return true iff a and b have the same contents. */ +STATIC int +lines_eq(const cdline_t *a, const cdline_t *b) +{ + return a->len == b->len && fast_memeq(a->s, b->s, a->len); +} + +/** Return true iff a has the same contents as the nul-terminated string b. */ +STATIC int +line_str_eq(const cdline_t *a, const char *b) +{ + const size_t len = strlen(b); + tor_assert(len <= UINT32_MAX); + cdline_t bline = { b, (uint32_t)len }; + return lines_eq(a, &bline); +} + +/** Return true iff a begins with the same contents as the nul-terminated + * string b. */ +static int +line_starts_with_str(const cdline_t *a, const char *b) +{ + const size_t len = strlen(b); + tor_assert(len <= UINT32_MAX); + return a->len >= len && fast_memeq(a->s, b, len); +} + +/** Return a new cdline_t holding as its contents the nul-terminated + * string s. Use the provided memory area for storage. 
*/ +static cdline_t * +cdline_linecpy(memarea_t *area, const char *s) +{ + size_t len = strlen(s); + const char *ss = memarea_memdup(area, s, len); + cdline_t *line = memarea_alloc(area, sizeof(cdline_t)); + line->s = ss; + line->len = (uint32_t)len; + return line; +} + +/** Add a cdline_t to <b>lst</b> holding as its contents the nul-terminated + * string s. Use the provided memory area for storage. */ +STATIC void +smartlist_add_linecpy(smartlist_t *lst, memarea_t *area, const char *s) +{ + smartlist_add(lst, cdline_linecpy(area, s)); +} + +/** Compute the digest of <b>cons</b>, and store the result in + * <b>digest_out</b>. Return 0 on success, -1 on failure. */ +/* This is a separate, mockable function so that we can override it when + * fuzzing. */ +MOCK_IMPL(STATIC int, +consensus_compute_digest,(const char *cons, + consensus_digest_t *digest_out)) +{ + int r = crypto_digest256((char*)digest_out->sha3_256, + cons, strlen(cons), DIGEST_SHA3_256); + return r; +} + +/** Compute the digest-as-signed of <b>cons</b>, and store the result in + * <b>digest_out</b>. Return 0 on success, -1 on failure. */ +/* This is a separate, mockable function so that we can override it when + * fuzzing. */ +MOCK_IMPL(STATIC int, +consensus_compute_digest_as_signed,(const char *cons, + consensus_digest_t *digest_out)) +{ + return router_get_networkstatus_v3_sha3_as_signed(digest_out->sha3_256, + cons); +} + +/** Return true iff <b>d1</b> and <b>d2</b> contain the same digest */ +/* This is a separate, mockable function so that we can override it when + * fuzzing. */ +MOCK_IMPL(STATIC int, +consensus_digest_eq,(const uint8_t *d1, + const uint8_t *d2)) +{ + return fast_memeq(d1, d2, DIGEST256_LEN); +} + +/** Create (allocate) a new slice from a smartlist. Assumes that the start + * and the end indexes are within the bounds of the initial smartlist. The end + * element is not part of the resulting slice. If end is -1, the slice is to + * reach the end of the smartlist. + */ +STATIC smartlist_slice_t * +smartlist_slice(const smartlist_t *list, int start, int end) +{ + int list_len = smartlist_len(list); + tor_assert(start >= 0); + tor_assert(start <= list_len); + if (end == -1) { + end = list_len; + } + tor_assert(start <= end); + + smartlist_slice_t *slice = tor_malloc(sizeof(smartlist_slice_t)); + slice->list = list; + slice->offset = start; + slice->len = end - start; + return slice; +} + +/** Helper: Compute the longest common subsequence lengths for the two slices. + * Used as part of the diff generation to find the column at which to split + * slice2 while still having the optimal solution. + * If direction is -1, the navigation is reversed. Otherwise it must be 1. + * The length of the resulting integer array is that of the second slice plus + * one. + */ +STATIC int * +lcs_lengths(const smartlist_slice_t *slice1, const smartlist_slice_t *slice2, + int direction) +{ + size_t a_size = sizeof(int) * (slice2->len+1); + + /* Resulting lcs lengths. */ + int *result = tor_malloc_zero(a_size); + /* Copy of the lcs lengths from the last iteration. */ + int *prev = tor_malloc(a_size); + + tor_assert(direction == 1 || direction == -1); + + int si = slice1->offset; + if (direction == -1) { + si += (slice1->len-1); + } + + for (int i = 0; i < slice1->len; ++i, si+=direction) { + + const cdline_t *line1 = smartlist_get(slice1->list, si); + /* Store the last results. 
*/ + memcpy(prev, result, a_size); + + int sj = slice2->offset; + if (direction == -1) { + sj += (slice2->len-1); + } + + for (int j = 0; j < slice2->len; ++j, sj+=direction) { + + const cdline_t *line2 = smartlist_get(slice2->list, sj); + if (lines_eq(line1, line2)) { + /* If the lines are equal, the lcs is one line longer. */ + result[j + 1] = prev[j] + 1; + } else { + /* If not, see what lcs parent path is longer. */ + result[j + 1] = MAX(result[j], prev[j + 1]); + } + } + } + tor_free(prev); + return result; +} + +/** Helper: Trim any number of lines that are equally at the start or the end + * of both slices. + */ +STATIC void +trim_slices(smartlist_slice_t *slice1, smartlist_slice_t *slice2) +{ + while (slice1->len>0 && slice2->len>0) { + const cdline_t *line1 = smartlist_get(slice1->list, slice1->offset); + const cdline_t *line2 = smartlist_get(slice2->list, slice2->offset); + if (!lines_eq(line1, line2)) { + break; + } + slice1->offset++; slice1->len--; + slice2->offset++; slice2->len--; + } + + int i1 = (slice1->offset+slice1->len)-1; + int i2 = (slice2->offset+slice2->len)-1; + + while (slice1->len>0 && slice2->len>0) { + const cdline_t *line1 = smartlist_get(slice1->list, i1); + const cdline_t *line2 = smartlist_get(slice2->list, i2); + if (!lines_eq(line1, line2)) { + break; + } + i1--; + slice1->len--; + i2--; + slice2->len--; + } +} + +/** Like smartlist_string_pos, but uses a cdline_t, and is restricted to the + * bounds of the slice. + */ +STATIC int +smartlist_slice_string_pos(const smartlist_slice_t *slice, + const cdline_t *string) +{ + int end = slice->offset + slice->len; + for (int i = slice->offset; i < end; ++i) { + const cdline_t *el = smartlist_get(slice->list, i); + if (lines_eq(el, string)) { + return i; + } + } + return -1; +} + +/** Helper: Set all the appropriate changed booleans to true. The first slice + * must be of length 0 or 1. All the lines of slice1 and slice2 which are not + * present in the other slice will be set to changed in their bool array. + * The two changed bool arrays are passed in the same order as the slices. + */ +STATIC void +set_changed(bitarray_t *changed1, bitarray_t *changed2, + const smartlist_slice_t *slice1, const smartlist_slice_t *slice2) +{ + int toskip = -1; + tor_assert(slice1->len == 0 || slice1->len == 1); + + if (slice1->len == 1) { + const cdline_t *line_common = smartlist_get(slice1->list, slice1->offset); + toskip = smartlist_slice_string_pos(slice2, line_common); + if (toskip == -1) { + bitarray_set(changed1, slice1->offset); + } + } + int end = slice2->offset + slice2->len; + for (int i = slice2->offset; i < end; ++i) { + if (i != toskip) { + bitarray_set(changed2, i); + } + } +} + +/* + * Helper: Given that slice1 has been split by half into top and bot, we want + * to fetch the column at which to split slice2 so that we are still on track + * to the optimal diff solution, i.e. the shortest one. We use lcs_lengths + * since the shortest diff is just another way to say the longest common + * subsequence. 
+ */ +static int +optimal_column_to_split(const smartlist_slice_t *top, + const smartlist_slice_t *bot, + const smartlist_slice_t *slice2) +{ + int *lens_top = lcs_lengths(top, slice2, 1); + int *lens_bot = lcs_lengths(bot, slice2, -1); + int column=0, max_sum=-1; + + for (int i = 0; i < slice2->len+1; ++i) { + int sum = lens_top[i] + lens_bot[slice2->len-i]; + if (sum > max_sum) { + column = i; + max_sum = sum; + } + } + tor_free(lens_top); + tor_free(lens_bot); + + return column; +} + +/** + * Helper: Figure out what elements are new or gone on the second smartlist + * relative to the first smartlist, and store the booleans in the bitarrays. + * True on the first bitarray means the element is gone, true on the second + * bitarray means it's new. + * + * In its base case, either of the smartlists is of length <= 1 and we can + * quickly see what elements are new or are gone. In the other case, we will + * split one smartlist by half and we'll use optimal_column_to_split to find + * the optimal column at which to split the second smartlist so that we are + * finding the smallest diff possible. + */ +STATIC void +calc_changes(smartlist_slice_t *slice1, + smartlist_slice_t *slice2, + bitarray_t *changed1, bitarray_t *changed2) +{ + trim_slices(slice1, slice2); + + if (slice1->len <= 1) { + set_changed(changed1, changed2, slice1, slice2); + + } else if (slice2->len <= 1) { + set_changed(changed2, changed1, slice2, slice1); + + /* Keep on splitting the slices in two. */ + } else { + smartlist_slice_t *top, *bot, *left, *right; + + /* Split the first slice in half. */ + int mid = slice1->len/2; + top = smartlist_slice(slice1->list, slice1->offset, slice1->offset+mid); + bot = smartlist_slice(slice1->list, slice1->offset+mid, + slice1->offset+slice1->len); + + /* Split the second slice by the optimal column. */ + int mid2 = optimal_column_to_split(top, bot, slice2); + left = smartlist_slice(slice2->list, slice2->offset, slice2->offset+mid2); + right = smartlist_slice(slice2->list, slice2->offset+mid2, + slice2->offset+slice2->len); + + calc_changes(top, left, changed1, changed2); + calc_changes(bot, right, changed1, changed2); + tor_free(top); + tor_free(bot); + tor_free(left); + tor_free(right); + } +} + +/* This table is from crypto.c. The SP and PAD defines are different. */ +#define NOT_VALID_BASE64 255 +#define X NOT_VALID_BASE64 +#define SP NOT_VALID_BASE64 +#define PAD NOT_VALID_BASE64 +static const uint8_t base64_compare_table[256] = { + X, X, X, X, X, X, X, X, X, SP, SP, SP, X, SP, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + SP, X, X, X, X, X, X, X, X, X, X, 62, X, X, X, 63, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, X, X, X, PAD, X, X, + X, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, X, X, X, X, X, + X, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, + X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, +}; + +/** Helper: Get the identity hash from a router line, assuming that the line + * at least appears to be a router line and thus starts with "r ". 
+ * + * If an identity hash is found, store it (without decoding it) in + * <b>hash_out</b>, and return 0. On failure, return -1. + */ +STATIC int +get_id_hash(const cdline_t *line, cdline_t *hash_out) +{ + if (line->len < 2) + return -1; + + /* Skip the router name. */ + const char *hash = memchr(line->s + 2, ' ', line->len - 2); + if (!hash) { + return -1; + } + + hash++; + const char *hash_end = hash; + /* Stop when the first non-base64 character is found. Use unsigned chars to + * avoid negative indexes causing crashes. + */ + while (base64_compare_table[*((unsigned char*)hash_end)] + != NOT_VALID_BASE64 && + hash_end < line->s + line->len) { + hash_end++; + } + + /* Empty hash. */ + if (hash_end == hash) { + return -1; + } + + hash_out->s = hash; + /* Always true because lines are limited to this length */ + tor_assert(hash_end >= hash); + tor_assert((size_t)(hash_end - hash) <= UINT32_MAX); + hash_out->len = (uint32_t)(hash_end - hash); + + return 0; +} + +/** Helper: Check that a line is a valid router entry. We must at least be + * able to fetch a proper identity hash from it for it to be valid. + */ +STATIC int +is_valid_router_entry(const cdline_t *line) +{ + if (line->len < 2 || fast_memneq(line->s, "r ", 2)) + return 0; + cdline_t tmp; + return (get_id_hash(line, &tmp) == 0); +} + +/** Helper: Find the next router line starting at the current position. + * Assumes that cur is lower than the length of the smartlist, i.e. it is a + * line within the bounds of the consensus. The only exception is when we + * don't want to skip the first line, in which case cur will be -1. + */ +STATIC int +next_router(const smartlist_t *cons, int cur) +{ + int len = smartlist_len(cons); + tor_assert(cur >= -1 && cur < len); + + if (++cur >= len) { + return len; + } + + const cdline_t *line = smartlist_get(cons, cur); + while (!is_valid_router_entry(line)) { + if (++cur >= len) { + return len; + } + line = smartlist_get(cons, cur); + } + return cur; +} + +/** Helper: compare two base64-encoded identity hashes, which may be of + * different lengths. Comparison ends when the first non-base64 char is found. + */ +STATIC int +base64cmp(const cdline_t *hash1, const cdline_t *hash2) +{ + /* NULL is always lower, useful for last_hash which starts at NULL. */ + if (!hash1->s && !hash2->s) { + return 0; + } + if (!hash1->s) { + return -1; + } + if (!hash2->s) { + return 1; + } + + /* Don't index with a char; char may be signed. */ + const unsigned char *a = (unsigned char*)hash1->s; + const unsigned char *b = (unsigned char*)hash2->s; + const unsigned char *a_end = a + hash1->len; + const unsigned char *b_end = b + hash2->len; + while (1) { + uint8_t av = base64_compare_table[*a]; + uint8_t bv = base64_compare_table[*b]; + if (av == NOT_VALID_BASE64) { + if (bv == NOT_VALID_BASE64) { + /* Both ended with exactly the same characters. */ + return 0; + } else { + /* hash2 goes on longer than hash1 and thus hash1 is lower. */ + return -1; + } + } else if (bv == NOT_VALID_BASE64) { + /* hash1 goes on longer than hash2 and thus hash1 is greater. */ + return 1; + } else if (av < bv) { + /* The first difference shows that hash1 is lower. */ + return -1; + } else if (av > bv) { + /* The first difference shows that hash1 is greater. 
*/ + return 1; + } else { + a++; + b++; + if (a == a_end) { + if (b == b_end) { + return 0; + } else { + return -1; + } + } else if (b == b_end) { + return 1; + } + } + } +} + +/** Structure used to remember the previous and current identity hash of + * the "r " lines in a consensus, to enforce well-ordering. */ +typedef struct router_id_iterator_t { + cdline_t last_hash; + cdline_t hash; +} router_id_iterator_t; + +/** + * Initializer for a router_id_iterator_t. + */ +#define ROUTER_ID_ITERATOR_INIT { { NULL, 0 }, { NULL, 0 } } + +/** Given an index *<b>idxp</b> into the consensus at <b>cons</b>, advance + * the index to the next router line ("r ...") in the consensus, or to + * an index one after the end of the list if there is no such line. + * + * Use <b>iter</b> to record the hash of the found router line, if any, + * and to enforce ordering on the hashes. If the hashes are mis-ordered, + * return -1. Else, return 0. + **/ +static int +find_next_router_line(const smartlist_t *cons, + const char *consname, + int *idxp, + router_id_iterator_t *iter) +{ + *idxp = next_router(cons, *idxp); + if (*idxp < smartlist_len(cons)) { + memcpy(&iter->last_hash, &iter->hash, sizeof(cdline_t)); + if (get_id_hash(smartlist_get(cons, *idxp), &iter->hash) < 0 || + base64cmp(&iter->hash, &iter->last_hash) <= 0) { + log_warn(LD_CONSDIFF, "Refusing to generate consensus diff because " + "the %s consensus doesn't have its router entries sorted " + "properly.", consname); + return -1; + } + } + return 0; +} + +/** Line-prefix indicating the beginning of the signatures section that we + * intend to delete. */ +#define START_OF_SIGNATURES_SECTION "directory-signature " + +/** Pre-process a consensus in <b>cons</b> (represented as a list of cdline_t) + * to remove the signatures from it. If the footer is removed, return a + * cdline_t containing a delete command to delete the footer, allocated in + * <b>area</>. If no footer is removed, return NULL. + * + * We remove the signatures here because they are not themselves signed, and + * as such there might be different encodings for them. + */ +static cdline_t * +preprocess_consensus(memarea_t *area, + smartlist_t *cons) +{ + int idx; + int dirsig_idx = -1; + for (idx = 0; idx < smartlist_len(cons); ++idx) { + cdline_t *line = smartlist_get(cons, idx); + if (line_starts_with_str(line, START_OF_SIGNATURES_SECTION)) { + dirsig_idx = idx; + break; + } + } + if (dirsig_idx >= 0) { + char buf[64]; + while (smartlist_len(cons) > dirsig_idx) + smartlist_del(cons, dirsig_idx); + tor_snprintf(buf, sizeof(buf), "%d,$d", dirsig_idx+1); + return cdline_linecpy(area, buf); + } else { + return NULL; + } +} + +/** Generate an ed diff as a smartlist from two consensuses, also given as + * smartlists. Will return NULL if the diff could not be generated, which can + * happen if any lines the script had to add matched "." or if the routers + * were not properly ordered. + * + * All cdline_t objects in the resulting object are either references to lines + * in one of the inputs, or are newly allocated lines in the provided memarea. + * + * This implementation is consensus-specific. 
To generate an ed diff for any + * given input in quadratic time, you can replace all the code until the + * navigation in reverse order with the following: + * + * int len1 = smartlist_len(cons1); + * int len2 = smartlist_len(cons2); + * bitarray_t *changed1 = bitarray_init_zero(len1); + * bitarray_t *changed2 = bitarray_init_zero(len2); + * cons1_sl = smartlist_slice(cons1, 0, -1); + * cons2_sl = smartlist_slice(cons2, 0, -1); + * calc_changes(cons1_sl, cons2_sl, changed1, changed2); + */ +STATIC smartlist_t * +gen_ed_diff(const smartlist_t *cons1_orig, const smartlist_t *cons2, + memarea_t *area) +{ + smartlist_t *cons1 = smartlist_new(); + smartlist_add_all(cons1, cons1_orig); + cdline_t *remove_trailer = preprocess_consensus(area, cons1); + + int len1 = smartlist_len(cons1); + int len2 = smartlist_len(cons2); + smartlist_t *result = smartlist_new(); + + if (remove_trailer) { + /* There's a delete-the-trailer line at the end, so add it here. */ + smartlist_add(result, remove_trailer); + } + + /* Initialize the changed bitarrays to zero, so that calc_changes only needs + * to set the ones that matter and leave the rest untouched. + */ + bitarray_t *changed1 = bitarray_init_zero(len1); + bitarray_t *changed2 = bitarray_init_zero(len2); + int i1=-1, i2=-1; + int start1=0, start2=0; + + /* To check that hashes are ordered properly */ + router_id_iterator_t iter1 = ROUTER_ID_ITERATOR_INIT; + router_id_iterator_t iter2 = ROUTER_ID_ITERATOR_INIT; + + /* i1 and i2 are initialized at the first line of each consensus. They never + * reach past len1 and len2 respectively, since next_router doesn't let that + * happen. i1 and i2 are advanced by at least one line at each iteration as + * long as they have not yet reached len1 and len2, so the loop is + * guaranteed to end, and each pair of (i1,i2) will be inspected at most + * once. + */ + while (i1 < len1 || i2 < len2) { + + /* Advance each of the two navigation positions by one router entry if not + * yet at the end. + */ + if (i1 < len1) { + if (find_next_router_line(cons1, "base", &i1, &iter1) < 0) { + goto error_cleanup; + } + } + + if (i2 < len2) { + if (find_next_router_line(cons2, "target", &i2, &iter2) < 0) { + goto error_cleanup; + } + } + + /* If we have reached the end of both consensuses, there is no need to + * compare hashes anymore, since this is the last iteration. + */ + if (i1 < len1 || i2 < len2) { + + /* Keep on advancing the lower (by identity hash sorting) position until + * we have two matching positions. The only other possible outcome is + * that a lower position reaches the end of the consensus before it can + * reach a hash that is no longer the lower one. Since there will always + * be a lower hash for as long as the loop runs, one of the two indexes + * will always be incremented, thus assuring that the loop must end + * after a finite number of iterations. If that cannot be because said + * consensus has already reached the end, both are extended to their + * respecting ends since we are done. + */ + int cmp = base64cmp(&iter1.hash, &iter2.hash); + while (cmp != 0) { + if (i1 < len1 && cmp < 0) { + if (find_next_router_line(cons1, "base", &i1, &iter1) < 0) { + goto error_cleanup; + } + if (i1 == len1) { + /* We finished the first consensus, so grab all the remaining + * lines of the second consensus and finish up. 
+ */ + i2 = len2; + break; + } + } else if (i2 < len2 && cmp > 0) { + if (find_next_router_line(cons2, "target", &i2, &iter2) < 0) { + goto error_cleanup; + } + if (i2 == len2) { + /* We finished the second consensus, so grab all the remaining + * lines of the first consensus and finish up. + */ + i1 = len1; + break; + } + } else { + i1 = len1; + i2 = len2; + break; + } + cmp = base64cmp(&iter1.hash, &iter2.hash); + } + } + + /* Make slices out of these chunks (up to the common router entry) and + * calculate the changes for them. + * Error if any of the two slices are longer than 10K lines. That should + * never happen with any pair of real consensuses. Feeding more than 10K + * lines to calc_changes would be very slow anyway. + */ +#define MAX_LINE_COUNT (10000) + if (i1-start1 > MAX_LINE_COUNT || i2-start2 > MAX_LINE_COUNT) { + log_warn(LD_CONSDIFF, "Refusing to generate consensus diff because " + "we found too few common router ids."); + goto error_cleanup; + } + + smartlist_slice_t *cons1_sl = smartlist_slice(cons1, start1, i1); + smartlist_slice_t *cons2_sl = smartlist_slice(cons2, start2, i2); + calc_changes(cons1_sl, cons2_sl, changed1, changed2); + tor_free(cons1_sl); + tor_free(cons2_sl); + start1 = i1, start2 = i2; + } + + /* Navigate the changes in reverse order and generate one ed command for + * each chunk of changes. + */ + i1=len1-1, i2=len2-1; + char buf[128]; + while (i1 >= 0 || i2 >= 0) { + + int start1x, start2x, end1, end2, added, deleted; + + /* We are at a point were no changed bools are true, so just keep going. */ + if (!(i1 >= 0 && bitarray_is_set(changed1, i1)) && + !(i2 >= 0 && bitarray_is_set(changed2, i2))) { + if (i1 >= 0) { + i1--; + } + if (i2 >= 0) { + i2--; + } + continue; + } + + end1 = i1, end2 = i2; + + /* Grab all contiguous changed lines */ + while (i1 >= 0 && bitarray_is_set(changed1, i1)) { + i1--; + } + while (i2 >= 0 && bitarray_is_set(changed2, i2)) { + i2--; + } + + start1x = i1+1, start2x = i2+1; + added = end2-i2, deleted = end1-i1; + + if (added == 0) { + if (deleted == 1) { + tor_snprintf(buf, sizeof(buf), "%id", start1x+1); + smartlist_add_linecpy(result, area, buf); + } else { + tor_snprintf(buf, sizeof(buf), "%i,%id", start1x+1, start1x+deleted); + smartlist_add_linecpy(result, area, buf); + } + } else { + int i; + if (deleted == 0) { + tor_snprintf(buf, sizeof(buf), "%ia", start1x); + smartlist_add_linecpy(result, area, buf); + } else if (deleted == 1) { + tor_snprintf(buf, sizeof(buf), "%ic", start1x+1); + smartlist_add_linecpy(result, area, buf); + } else { + tor_snprintf(buf, sizeof(buf), "%i,%ic", start1x+1, start1x+deleted); + smartlist_add_linecpy(result, area, buf); + } + + for (i = start2x; i <= end2; ++i) { + cdline_t *line = smartlist_get(cons2, i); + if (line_str_eq(line, ".")) { + log_warn(LD_CONSDIFF, "Cannot generate consensus diff because " + "one of the lines to be added is \".\"."); + goto error_cleanup; + } + smartlist_add(result, line); + } + smartlist_add_linecpy(result, area, "."); + } + } + + smartlist_free(cons1); + bitarray_free(changed1); + bitarray_free(changed2); + + return result; + + error_cleanup: + + smartlist_free(cons1); + bitarray_free(changed1); + bitarray_free(changed2); + + smartlist_free(result); + + return NULL; +} + +/* Helper: Read a base-10 number between 0 and INT32_MAX from <b>s</b> and + * store it in <b>num_out</b>. Advance <b>s</b> to the characer immediately + * after the number. Return 0 on success, -1 on failure. 
*/ +static int +get_linenum(const char **s, int *num_out) +{ + int ok; + char *next; + if (!TOR_ISDIGIT(**s)) { + return -1; + } + *num_out = (int) tor_parse_long(*s, 10, 0, INT32_MAX, &ok, &next); + if (ok && next) { + *s = next; + return 0; + } else { + return -1; + } +} + +/** Apply the ed diff, starting at <b>diff_starting_line</b>, to the consensus + * and return a new consensus, also as a line-based smartlist. Will return + * NULL if the ed diff is not properly formatted. + * + * All cdline_t objects in the resulting object are references to lines + * in one of the inputs; nothing is copied. + */ +STATIC smartlist_t * +apply_ed_diff(const smartlist_t *cons1, const smartlist_t *diff, + int diff_starting_line) +{ + int diff_len = smartlist_len(diff); + int j = smartlist_len(cons1); + smartlist_t *cons2 = smartlist_new(); + + for (int i=diff_starting_line; i<diff_len; ++i) { + const cdline_t *diff_cdline = smartlist_get(diff, i); + char diff_line[128]; + + if (diff_cdline->len > sizeof(diff_line) - 1) { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "an ed command was far too long"); + goto error_cleanup; + } + /* Copy the line to make it nul-terminated. */ + memcpy(diff_line, diff_cdline->s, diff_cdline->len); + diff_line[diff_cdline->len] = 0; + const char *ptr = diff_line; + int start = 0, end = 0; + int had_range = 0; + int end_was_eof = 0; + if (get_linenum(&ptr, &start) < 0) { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "an ed command was missing a line number."); + goto error_cleanup; + } + if (*ptr == ',') { + /* Two-item range */ + had_range = 1; + ++ptr; + if (*ptr == '$') { + end_was_eof = 1; + end = smartlist_len(cons1); + ++ptr; + } else if (get_linenum(&ptr, &end) < 0) { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "an ed command was missing a range end line number."); + goto error_cleanup; + } + /* Incoherent range. */ + if (end <= start) { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "an invalid range was found in an ed command."); + goto error_cleanup; + } + } else { + /* We'll take <n1> as <n1>,<n1> for simplicity. */ + end = start; + } + + if (end > j) { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "its commands are not properly sorted in reverse order."); + goto error_cleanup; + } + + if (*ptr == '\0') { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "a line with no ed command was found"); + goto error_cleanup; + } + + if (*(ptr+1) != '\0') { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "an ed command longer than one char was found."); + goto error_cleanup; + } + + char action = *ptr; + + switch (action) { + case 'a': + case 'c': + case 'd': + break; + default: + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "an unrecognised ed command was found."); + goto error_cleanup; + } + + /** $ is not allowed with non-d actions. */ + if (end_was_eof && action != 'd') { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "it wanted to use $ with a command other than delete"); + goto error_cleanup; + } + + /* 'a' commands are not allowed to have ranges. */ + if (had_range && action == 'a') { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "it wanted to add lines after a range."); + goto error_cleanup; + } + + /* Add unchanged lines. 
*/ + for (; j && j > end; --j) { + cdline_t *cons_line = smartlist_get(cons1, j-1); + smartlist_add(cons2, cons_line); + } + + /* Ignore removed lines. */ + if (action == 'c' || action == 'd') { + while (--j >= start) { + /* Skip line */ + } + } + + /* Add new lines in reverse order, since it will all be reversed at the + * end. + */ + if (action == 'a' || action == 'c') { + int added_end = i; + + i++; /* Skip the line with the range and command. */ + while (i < diff_len) { + if (line_str_eq(smartlist_get(diff, i), ".")) { + break; + } + if (++i == diff_len) { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "it has lines to be inserted that don't end with a \".\"."); + goto error_cleanup; + } + } + + int added_i = i-1; + + /* It would make no sense to add zero new lines. */ + if (added_i == added_end) { + log_warn(LD_CONSDIFF, "Could not apply consensus diff because " + "it has an ed command that tries to insert zero lines."); + goto error_cleanup; + } + + while (added_i > added_end) { + cdline_t *added_line = smartlist_get(diff, added_i--); + smartlist_add(cons2, added_line); + } + } + } + + /* Add remaining unchanged lines. */ + for (; j > 0; --j) { + cdline_t *cons_line = smartlist_get(cons1, j-1); + smartlist_add(cons2, cons_line); + } + + /* Reverse the whole thing since we did it from the end. */ + smartlist_reverse(cons2); + return cons2; + + error_cleanup: + + smartlist_free(cons2); + + return NULL; +} + +/** Generate a consensus diff as a smartlist from two given consensuses, also + * as smartlists. Will return NULL if the consensus diff could not be + * generated. Neither of the two consensuses are modified in any way, so it's + * up to the caller to free their resources. + */ +smartlist_t * +consdiff_gen_diff(const smartlist_t *cons1, + const smartlist_t *cons2, + const consensus_digest_t *digests1, + const consensus_digest_t *digests2, + memarea_t *area) +{ + smartlist_t *ed_diff = gen_ed_diff(cons1, cons2, area); + /* ed diff could not be generated - reason already logged by gen_ed_diff. */ + if (!ed_diff) { + goto error_cleanup; + } + + /* See that the script actually produces what we want. */ + smartlist_t *ed_cons2 = apply_ed_diff(cons1, ed_diff, 0); + if (!ed_cons2) { + /* LCOV_EXCL_START -- impossible if diff generation is correct */ + log_warn(LD_BUG|LD_CONSDIFF, "Refusing to generate consensus diff because " + "the generated ed diff could not be tested to successfully generate " + "the target consensus."); + goto error_cleanup; + /* LCOV_EXCL_STOP */ + } + + int cons2_eq = 1; + if (smartlist_len(cons2) == smartlist_len(ed_cons2)) { + SMARTLIST_FOREACH_BEGIN(cons2, const cdline_t *, line1) { + const cdline_t *line2 = smartlist_get(ed_cons2, line1_sl_idx); + if (! lines_eq(line1, line2) ) { + cons2_eq = 0; + break; + } + } SMARTLIST_FOREACH_END(line1); + } else { + cons2_eq = 0; + } + smartlist_free(ed_cons2); + if (!cons2_eq) { + /* LCOV_EXCL_START -- impossible if diff generation is correct. */ + log_warn(LD_BUG|LD_CONSDIFF, "Refusing to generate consensus diff because " + "the generated ed diff did not generate the target consensus " + "successfully when tested."); + goto error_cleanup; + /* LCOV_EXCL_STOP */ + } + + char cons1_hash_hex[HEX_DIGEST256_LEN+1]; + char cons2_hash_hex[HEX_DIGEST256_LEN+1]; + base16_encode(cons1_hash_hex, HEX_DIGEST256_LEN+1, + (const char*)digests1->sha3_256, DIGEST256_LEN); + base16_encode(cons2_hash_hex, HEX_DIGEST256_LEN+1, + (const char*)digests2->sha3_256, DIGEST256_LEN); + + /* Create the resulting consensus diff. 
*/ + char buf[160]; + smartlist_t *result = smartlist_new(); + tor_snprintf(buf, sizeof(buf), "%s", ns_diff_version); + smartlist_add_linecpy(result, area, buf); + tor_snprintf(buf, sizeof(buf), "%s %s %s", hash_token, + cons1_hash_hex, cons2_hash_hex); + smartlist_add_linecpy(result, area, buf); + smartlist_add_all(result, ed_diff); + smartlist_free(ed_diff); + return result; + + error_cleanup: + + if (ed_diff) { + /* LCOV_EXCL_START -- ed_diff is NULL except in unreachable cases above */ + smartlist_free(ed_diff); + /* LCOV_EXCL_STOP */ + } + + return NULL; +} + +/** Fetch the digest of the base consensus in the consensus diff, encoded in + * base16 as found in the diff itself. digest1_out and digest2_out must be of + * length DIGEST256_LEN or larger if not NULL. + */ +int +consdiff_get_digests(const smartlist_t *diff, + char *digest1_out, + char *digest2_out) +{ + smartlist_t *hash_words = NULL; + const cdline_t *format; + char cons1_hash[DIGEST256_LEN], cons2_hash[DIGEST256_LEN]; + char *cons1_hash_hex, *cons2_hash_hex; + if (smartlist_len(diff) < 2) { + log_info(LD_CONSDIFF, "The provided consensus diff is too short."); + goto error_cleanup; + } + + /* Check that it's the format and version we know. */ + format = smartlist_get(diff, 0); + if (!line_str_eq(format, ns_diff_version)) { + log_warn(LD_CONSDIFF, "The provided consensus diff format is not known."); + goto error_cleanup; + } + + /* Grab the base16 digests. */ + hash_words = smartlist_new(); + { + const cdline_t *line2 = smartlist_get(diff, 1); + char *h = tor_memdup_nulterm(line2->s, line2->len); + smartlist_split_string(hash_words, h, " ", 0, 0); + tor_free(h); + } + + /* There have to be three words, the first of which must be hash_token. */ + if (smartlist_len(hash_words) != 3 || + strcmp(smartlist_get(hash_words, 0), hash_token)) { + log_info(LD_CONSDIFF, "The provided consensus diff does not include " + "the necessary digests."); + goto error_cleanup; + } + + /* Expected hashes as found in the consensus diff header. They must be of + * length HEX_DIGEST256_LEN, normally 64 hexadecimal characters. + * If any of the decodings fail, error to make sure that the hashes are + * proper base16-encoded digests. + */ + cons1_hash_hex = smartlist_get(hash_words, 1); + cons2_hash_hex = smartlist_get(hash_words, 2); + if (strlen(cons1_hash_hex) != HEX_DIGEST256_LEN || + strlen(cons2_hash_hex) != HEX_DIGEST256_LEN) { + log_info(LD_CONSDIFF, "The provided consensus diff includes " + "base16-encoded digests of incorrect size."); + goto error_cleanup; + } + + if (base16_decode(cons1_hash, DIGEST256_LEN, + cons1_hash_hex, HEX_DIGEST256_LEN) != DIGEST256_LEN || + base16_decode(cons2_hash, DIGEST256_LEN, + cons2_hash_hex, HEX_DIGEST256_LEN) != DIGEST256_LEN) { + log_info(LD_CONSDIFF, "The provided consensus diff includes " + "malformed digests."); + goto error_cleanup; + } + + if (digest1_out) { + memcpy(digest1_out, cons1_hash, DIGEST256_LEN); + } + if (digest2_out) { + memcpy(digest2_out, cons2_hash, DIGEST256_LEN); + } + + SMARTLIST_FOREACH(hash_words, char *, cp, tor_free(cp)); + smartlist_free(hash_words); + return 0; + + error_cleanup: + + if (hash_words) { + SMARTLIST_FOREACH(hash_words, char *, cp, tor_free(cp)); + smartlist_free(hash_words); + } + return 1; +} + +/** Apply the consensus diff to the given consensus and return a new + * consensus, also as a line-based smartlist. Will return NULL if the diff + * could not be applied. 
Neither the consensus nor the diff are modified in + * any way, so it's up to the caller to free their resources. + */ +char * +consdiff_apply_diff(const smartlist_t *cons1, + const smartlist_t *diff, + const consensus_digest_t *digests1) +{ + smartlist_t *cons2 = NULL; + char *cons2_str = NULL; + char e_cons1_hash[DIGEST256_LEN]; + char e_cons2_hash[DIGEST256_LEN]; + + if (consdiff_get_digests(diff, e_cons1_hash, e_cons2_hash) != 0) { + goto error_cleanup; + } + + /* See that the consensus that was given to us matches its hash. */ + if (!consensus_digest_eq(digests1->sha3_256, + (const uint8_t*)e_cons1_hash)) { + char hex_digest1[HEX_DIGEST256_LEN+1]; + char e_hex_digest1[HEX_DIGEST256_LEN+1]; + log_warn(LD_CONSDIFF, "Refusing to apply consensus diff because " + "the base consensus doesn't match the digest as found in " + "the consensus diff header."); + base16_encode(hex_digest1, HEX_DIGEST256_LEN+1, + (const char *)digests1->sha3_256, DIGEST256_LEN); + base16_encode(e_hex_digest1, HEX_DIGEST256_LEN+1, + e_cons1_hash, DIGEST256_LEN); + log_warn(LD_CONSDIFF, "Expected: %s; found: %s", + hex_digest1, e_hex_digest1); + goto error_cleanup; + } + + /* Grab the ed diff and calculate the resulting consensus. */ + /* Skip the first two lines. */ + cons2 = apply_ed_diff(cons1, diff, 2); + + /* ed diff could not be applied - reason already logged by apply_ed_diff. */ + if (!cons2) { + goto error_cleanup; + } + + cons2_str = consensus_join_lines(cons2); + + consensus_digest_t cons2_digests; + if (consensus_compute_digest(cons2_str, &cons2_digests) < 0) { + /* LCOV_EXCL_START -- digest can't fail */ + log_warn(LD_CONSDIFF, "Could not compute digests of the consensus " + "resulting from applying a consensus diff."); + goto error_cleanup; + /* LCOV_EXCL_STOP */ + } + + /* See that the resulting consensus matches its hash. */ + if (!consensus_digest_eq(cons2_digests.sha3_256, + (const uint8_t*)e_cons2_hash)) { + log_warn(LD_CONSDIFF, "Refusing to apply consensus diff because " + "the resulting consensus doesn't match the digest as found in " + "the consensus diff header."); + char hex_digest2[HEX_DIGEST256_LEN+1]; + char e_hex_digest2[HEX_DIGEST256_LEN+1]; + base16_encode(hex_digest2, HEX_DIGEST256_LEN+1, + (const char *)cons2_digests.sha3_256, DIGEST256_LEN); + base16_encode(e_hex_digest2, HEX_DIGEST256_LEN+1, + e_cons2_hash, DIGEST256_LEN); + log_warn(LD_CONSDIFF, "Expected: %s; found: %s", + hex_digest2, e_hex_digest2); + goto error_cleanup; + } + + goto done; + + error_cleanup: + tor_free(cons2_str); /* Sets it to NULL */ + + done: + if (cons2) { + smartlist_free(cons2); + } + + return cons2_str; +} + +/** Any consensus line longer than this means that the input is invalid. */ +#define CONSENSUS_LINE_MAX_LEN (1<<20) + +/** + * Helper: For every NL-terminated line in <b>s</b>, add a cdline referring to + * that line (without trailing newline) to <b>out</b>. Return -1 if there are + * any non-NL terminated lines; 0 otherwise. + * + * Unlike tor_split_lines, this function avoids ambiguity on its + * handling of a final line that isn't NL-terminated. + * + * All cdline_t objects are allocated in the provided memarea. Strings + * are not copied: if <b>s</b> changes or becomes invalid, then all + * generated cdlines will become invalid. 
+ */ +STATIC int +consensus_split_lines(smartlist_t *out, const char *s, memarea_t *area) +{ + const char *end_of_str = s + strlen(s); + tor_assert(*end_of_str == '\0'); + + while (*s) { + const char *eol = memchr(s, '\n', end_of_str - s); + if (!eol) { + /* File doesn't end with newline. */ + return -1; + } + if (eol - s > CONSENSUS_LINE_MAX_LEN) { + /* Line is far too long. */ + return -1; + } + cdline_t *line = memarea_alloc(area, sizeof(cdline_t)); + line->s = s; + line->len = (uint32_t)(eol - s); + smartlist_add(out, line); + s = eol+1; + } + return 0; +} + +/** Given a list of cdline_t, return a newly allocated string containing + * all of the lines, terminated with NL, concatenated. + * + * Unlike smartlist_join_strings(), avoids lossy operations on empty + * lists. */ +static char * +consensus_join_lines(const smartlist_t *inp) +{ + size_t n = 0; + SMARTLIST_FOREACH(inp, const cdline_t *, cdline, n += cdline->len + 1); + n += 1; + char *result = tor_malloc(n); + char *out = result; + SMARTLIST_FOREACH_BEGIN(inp, const cdline_t *, cdline) { + memcpy(out, cdline->s, cdline->len); + out += cdline->len; + *out++ = '\n'; + } SMARTLIST_FOREACH_END(cdline); + *out++ = '\0'; + tor_assert(out == result+n); + return result; +} + +/** Given two consensus documents, try to compute a diff between them. On + * success, retun a newly allocated string containing that diff. On failure, + * return NULL. */ +char * +consensus_diff_generate(const char *cons1, + const char *cons2) +{ + consensus_digest_t d1, d2; + smartlist_t *lines1 = NULL, *lines2 = NULL, *result_lines = NULL; + int r1, r2; + char *result = NULL; + + r1 = consensus_compute_digest_as_signed(cons1, &d1); + r2 = consensus_compute_digest(cons2, &d2); + if (BUG(r1 < 0 || r2 < 0)) + return NULL; // LCOV_EXCL_LINE + + memarea_t *area = memarea_new(); + lines1 = smartlist_new(); + lines2 = smartlist_new(); + if (consensus_split_lines(lines1, cons1, area) < 0) + goto done; + if (consensus_split_lines(lines2, cons2, area) < 0) + goto done; + + result_lines = consdiff_gen_diff(lines1, lines2, &d1, &d2, area); + + done: + if (result_lines) { + result = consensus_join_lines(result_lines); + smartlist_free(result_lines); + } + + memarea_drop_all(area); + smartlist_free(lines1); + smartlist_free(lines2); + + return result; +} + +/** Given a consensus document and a diff, try to apply the diff to the + * consensus. On success return a newly allocated string containing the new + * consensus. On failure, return NULL. */ +char * +consensus_diff_apply(const char *consensus, + const char *diff) +{ + consensus_digest_t d1; + smartlist_t *lines1 = NULL, *lines2 = NULL; + int r1; + char *result = NULL; + memarea_t *area = memarea_new(); + + r1 = consensus_compute_digest_as_signed(consensus, &d1); + if (BUG(r1 < 0)) + return NULL; // LCOV_EXCL_LINE + + lines1 = smartlist_new(); + lines2 = smartlist_new(); + if (consensus_split_lines(lines1, consensus, area) < 0) + goto done; + if (consensus_split_lines(lines2, diff, area) < 0) + goto done; + + result = consdiff_apply_diff(lines1, lines2, &d1); + + done: + smartlist_free(lines1); + smartlist_free(lines2); + memarea_drop_all(area); + + return result; +} + +/** Return true iff, based on its header, <b>document</b> is likely + * to be a consensus diff. 
*/ +int +looks_like_a_consensus_diff(const char *document, size_t len) +{ + return (len >= strlen(ns_diff_version) && + fast_memeq(document, ns_diff_version, strlen(ns_diff_version))); +} diff --git a/src/feature/dircommon/consdiff.h b/src/feature/dircommon/consdiff.h new file mode 100644 index 0000000000..1cae59a1a5 --- /dev/null +++ b/src/feature/dircommon/consdiff.h @@ -0,0 +1,99 @@ +/* Copyright (c) 2014, Daniel Martà + * Copyright (c) 2014-2018, The Tor Project, Inc. */ +/* See LICENSE for licensing information */ + +#ifndef TOR_CONSDIFF_H +#define TOR_CONSDIFF_H + +#include "or/or.h" + +char *consensus_diff_generate(const char *cons1, + const char *cons2); +char *consensus_diff_apply(const char *consensus, + const char *diff); + +int looks_like_a_consensus_diff(const char *document, size_t len); + +#ifdef CONSDIFF_PRIVATE +#include "lib/container/bitarray.h" + +struct memarea_t; + +/** Line type used for constructing consensus diffs. Each of these lines + * refers to a chunk of memory allocated elsewhere, and is not necessarily + * NUL-terminated: this helps us avoid copies and save memory. */ +typedef struct cdline_t { + const char *s; + uint32_t len; +} cdline_t; + +typedef struct consensus_digest_t { + uint8_t sha3_256[DIGEST256_LEN]; +} consensus_digest_t; + +STATIC smartlist_t *consdiff_gen_diff(const smartlist_t *cons1, + const smartlist_t *cons2, + const consensus_digest_t *digests1, + const consensus_digest_t *digests2, + struct memarea_t *area); +STATIC char *consdiff_apply_diff(const smartlist_t *cons1, + const smartlist_t *diff, + const consensus_digest_t *digests1); +STATIC int consdiff_get_digests(const smartlist_t *diff, + char *digest1_out, + char *digest2_out); + +/** Data structure to define a slice of a smarltist. */ +typedef struct smartlist_slice_t { + /** + * Smartlist that this slice is made from. + * References the whole original smartlist that the slice was made out of. + * */ + const smartlist_t *list; + /** Starting position of the slice in the smartlist. */ + int offset; + /** Length of the slice, i.e. the number of elements it holds. 
*/ + int len; +} smartlist_slice_t; +STATIC smartlist_t *gen_ed_diff(const smartlist_t *cons1, + const smartlist_t *cons2, + struct memarea_t *area); +STATIC smartlist_t *apply_ed_diff(const smartlist_t *cons1, + const smartlist_t *diff, + int start_line); +STATIC void calc_changes(smartlist_slice_t *slice1, smartlist_slice_t *slice2, + bitarray_t *changed1, bitarray_t *changed2); +STATIC smartlist_slice_t *smartlist_slice(const smartlist_t *list, + int start, int end); +STATIC int next_router(const smartlist_t *cons, int cur); +STATIC int *lcs_lengths(const smartlist_slice_t *slice1, + const smartlist_slice_t *slice2, + int direction); +STATIC void trim_slices(smartlist_slice_t *slice1, smartlist_slice_t *slice2); +STATIC int base64cmp(const cdline_t *hash1, const cdline_t *hash2); +STATIC int get_id_hash(const cdline_t *line, cdline_t *hash_out); +STATIC int is_valid_router_entry(const cdline_t *line); +STATIC int smartlist_slice_string_pos(const smartlist_slice_t *slice, + const cdline_t *string); +STATIC void set_changed(bitarray_t *changed1, bitarray_t *changed2, + const smartlist_slice_t *slice1, + const smartlist_slice_t *slice2); +STATIC int consensus_split_lines(smartlist_t *out, const char *s, + struct memarea_t *area); +STATIC void smartlist_add_linecpy(smartlist_t *lst, struct memarea_t *area, + const char *s); +STATIC int lines_eq(const cdline_t *a, const cdline_t *b); +STATIC int line_str_eq(const cdline_t *a, const char *b); + +MOCK_DECL(STATIC int, + consensus_compute_digest,(const char *cons, + consensus_digest_t *digest_out)); +MOCK_DECL(STATIC int, + consensus_compute_digest_as_signed,(const char *cons, + consensus_digest_t *digest_out)); +MOCK_DECL(STATIC int, + consensus_digest_eq,(const uint8_t *d1, + const uint8_t *d2)); +#endif /* defined(CONSDIFF_PRIVATE) */ + +#endif /* !defined(TOR_CONSDIFF_H) */ diff --git a/src/feature/dircommon/dir_connection_st.h b/src/feature/dircommon/dir_connection_st.h new file mode 100644 index 0000000000..1282f82d64 --- /dev/null +++ b/src/feature/dircommon/dir_connection_st.h @@ -0,0 +1,67 @@ +/* Copyright (c) 2001 Matej Pfajfar. + * Copyright (c) 2001-2004, Roger Dingledine. + * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. + * Copyright (c) 2007-2018, The Tor Project, Inc. */ +/* See LICENSE for licensing information */ + +#ifndef DIR_CONNECTION_ST_H +#define DIR_CONNECTION_ST_H + +#include "or/connection_st.h" + +struct tor_compress_state_t; + +/** Subtype of connection_t for an "directory connection" -- that is, an HTTP + * connection to retrieve or serve directory material. */ +struct dir_connection_t { + connection_t base_; + + /** Which 'resource' did we ask the directory for? This is typically the part + * of the URL string that defines, relative to the directory conn purpose, + * what thing we want. For example, in router descriptor downloads by + * descriptor digest, it contains "d/", then one or more +-separated + * fingerprints. + **/ + char *requested_resource; + unsigned int dirconn_direct:1; /**< Is this dirconn direct, or via Tor? */ + + /** If we're fetching descriptors, what router purpose shall we assign + * to them? */ + uint8_t router_purpose; + + /** List of spooled_resource_t for objects that we're spooling. We use + * it from back to front. */ + smartlist_t *spool; + /** The compression object doing on-the-fly compression for spooled data. */ + struct tor_compress_state_t *compress_state; + + /** What rendezvous service are we querying for? 
*/ + rend_data_t *rend_data; + + /* Hidden service connection identifier for dir connections: Used by HS + client-side code to fetch HS descriptors, and by the service-side code to + upload descriptors. */ + struct hs_ident_dir_conn_t *hs_ident; + + /** If this is a one-hop connection, tracks the state of the directory guard + * for this connection (if any). */ + struct circuit_guard_state_t *guard_state; + + char identity_digest[DIGEST_LEN]; /**< Hash of the public RSA key for + * the directory server's signing key. */ + + /** Unique ID for directory requests; this used to be in connection_t, but + * that's going away and being used on channels instead. The dirserver still + * needs this for the incoming side, so it's moved here. */ + uint64_t dirreq_id; + +#ifdef MEASUREMENTS_21206 + /** Number of RELAY_DATA cells received. */ + uint32_t data_cells_received; + + /** Number of RELAY_DATA cells sent. */ + uint32_t data_cells_sent; +#endif /* defined(MEASUREMENTS_21206) */ +}; + +#endif diff --git a/src/feature/dircommon/fp_pair.c b/src/feature/dircommon/fp_pair.c new file mode 100644 index 0000000000..1d7b751c23 --- /dev/null +++ b/src/feature/dircommon/fp_pair.c @@ -0,0 +1,315 @@ +/* Copyright (c) 2013-2018, The Tor Project, Inc. */ +/* See LICENSE for licensing information */ + +/** + * \file fp_pair.c + * + * \brief Manages data structures for associating pairs of fingerprints. Used + * to handle combinations of identity/signing-key fingerprints for + * authorities. + * + * This is a nice, simple, compact data structure module that handles a map + * from (signing key fingerprint, identity key fingerprint) to void *. The + * fingerprints here are SHA1 digests of RSA keys. + * + * This structure is used in directory.c and in routerlist.c for handling + * handling authority certificates, since we never want more than a single + * certificate for any (ID key, signing key) pair. + **/ + +#include "or/or.h" +#include "or/fp_pair.h" + +/* Define fp_pair_map_t structures */ + +struct fp_pair_map_entry_s { + HT_ENTRY(fp_pair_map_entry_s) node; + void *val; + fp_pair_t key; +}; + +struct fp_pair_map_s { + HT_HEAD(fp_pair_map_impl, fp_pair_map_entry_s) head; +}; + +/* + * Hash function and equality checker for fp_pair_map_t + */ + +/** Compare fp_pair_entry_t objects by key value. */ +static inline int +fp_pair_map_entries_eq(const fp_pair_map_entry_t *a, + const fp_pair_map_entry_t *b) +{ + return tor_memeq(&(a->key), &(b->key), sizeof(fp_pair_t)); +} + +/** Return a hash value for an fp_pair_entry_t. */ +static inline unsigned int +fp_pair_map_entry_hash(const fp_pair_map_entry_t *a) +{ + tor_assert(sizeof(a->key) == DIGEST_LEN*2); + return (unsigned) siphash24g(&a->key, DIGEST_LEN*2); +} + +/* + * Hash table functions for fp_pair_map_t + */ + +HT_PROTOTYPE(fp_pair_map_impl, fp_pair_map_entry_s, node, + fp_pair_map_entry_hash, fp_pair_map_entries_eq) +HT_GENERATE2(fp_pair_map_impl, fp_pair_map_entry_s, node, + fp_pair_map_entry_hash, fp_pair_map_entries_eq, + 0.6, tor_reallocarray_, tor_free_) + +/** Constructor to create a new empty map from fp_pair_t to void * + */ + +fp_pair_map_t * +fp_pair_map_new(void) +{ + fp_pair_map_t *result; + + result = tor_malloc(sizeof(fp_pair_map_t)); + HT_INIT(fp_pair_map_impl, &result->head); + return result; +} + +/** Set the current value for key to val; returns the previous + * value for key if one was set, or NULL if one was not. 
+ */ + +void * +fp_pair_map_set(fp_pair_map_t *map, const fp_pair_t *key, void *val) +{ + fp_pair_map_entry_t *resolve; + fp_pair_map_entry_t search; + void *oldval; + + tor_assert(map); + tor_assert(key); + tor_assert(val); + + memcpy(&(search.key), key, sizeof(*key)); + resolve = HT_FIND(fp_pair_map_impl, &(map->head), &search); + if (resolve) { + oldval = resolve->val; + resolve->val = val; + } else { + resolve = tor_malloc_zero(sizeof(fp_pair_map_entry_t)); + memcpy(&(resolve->key), key, sizeof(*key)); + resolve->val = val; + HT_INSERT(fp_pair_map_impl, &(map->head), resolve); + oldval = NULL; + } + + return oldval; +} + +/** Set the current value for the key (first, second) to val; returns + * the previous value for key if one was set, or NULL if one was not. + */ + +void * +fp_pair_map_set_by_digests(fp_pair_map_t *map, + const char *first, const char *second, + void *val) +{ + fp_pair_t k; + + tor_assert(first); + tor_assert(second); + + memcpy(k.first, first, DIGEST_LEN); + memcpy(k.second, second, DIGEST_LEN); + + return fp_pair_map_set(map, &k, val); +} + +/** Return the current value associated with key, or NULL if no value is set. + */ + +void * +fp_pair_map_get(const fp_pair_map_t *map, const fp_pair_t *key) +{ + fp_pair_map_entry_t *resolve; + fp_pair_map_entry_t search; + void *val = NULL; + + tor_assert(map); + tor_assert(key); + + memcpy(&(search.key), key, sizeof(*key)); + resolve = HT_FIND(fp_pair_map_impl, &(map->head), &search); + if (resolve) val = resolve->val; + + return val; +} + +/** Return the current value associated the key (first, second), or + * NULL if no value is set. + */ + +void * +fp_pair_map_get_by_digests(const fp_pair_map_t *map, + const char *first, const char *second) +{ + fp_pair_t k; + + tor_assert(first); + tor_assert(second); + + memcpy(k.first, first, DIGEST_LEN); + memcpy(k.second, second, DIGEST_LEN); + + return fp_pair_map_get(map, &k); +} + +/** Remove the value currently associated with key from the map. + * Return the value if one was set, or NULL if there was no entry for + * key. The caller must free any storage associated with the + * returned value. + */ + +void * +fp_pair_map_remove(fp_pair_map_t *map, const fp_pair_t *key) +{ + fp_pair_map_entry_t *resolve; + fp_pair_map_entry_t search; + void *val = NULL; + + tor_assert(map); + tor_assert(key); + + memcpy(&(search.key), key, sizeof(*key)); + resolve = HT_REMOVE(fp_pair_map_impl, &(map->head), &search); + if (resolve) { + val = resolve->val; + tor_free(resolve); + } + + return val; +} + +/** Remove all entries from map, and deallocate storage for those entries. + * If free_val is provided, it is invoked on every value in map. + */ + +void +fp_pair_map_free_(fp_pair_map_t *map, void (*free_val)(void*)) +{ + fp_pair_map_entry_t **ent, **next, *this; + + if (map) { + for (ent = HT_START(fp_pair_map_impl, &(map->head)); + ent != NULL; ent = next) { + this = *ent; + next = HT_NEXT_RMV(fp_pair_map_impl, &(map->head), ent); + if (free_val) free_val(this->val); + tor_free(this); + } + tor_assert(HT_EMPTY(&(map->head))); + HT_CLEAR(fp_pair_map_impl, &(map->head)); + tor_free(map); + } +} + +/** Return true iff map has no entries. + */ + +int +fp_pair_map_isempty(const fp_pair_map_t *map) +{ + tor_assert(map); + + return HT_EMPTY(&(map->head)); +} + +/** Return the number of items in map. + */ + +int +fp_pair_map_size(const fp_pair_map_t *map) +{ + tor_assert(map); + + return HT_SIZE(&(map->head)); +} + +/** return an iterator pointing to the start of map. 
+ */ + +fp_pair_map_iter_t * +fp_pair_map_iter_init(fp_pair_map_t *map) +{ + tor_assert(map); + + return HT_START(fp_pair_map_impl, &(map->head)); +} + +/** Advance iter a single step to the next entry of map, and return + * its new value. + */ + +fp_pair_map_iter_t * +fp_pair_map_iter_next(fp_pair_map_t *map, fp_pair_map_iter_t *iter) +{ + tor_assert(map); + tor_assert(iter); + + return HT_NEXT(fp_pair_map_impl, &(map->head), iter); +} + +/** Advance iter a single step to the next entry of map, removing the current + * entry, and return its new value. + */ + +fp_pair_map_iter_t * +fp_pair_map_iter_next_rmv(fp_pair_map_t *map, fp_pair_map_iter_t *iter) +{ + fp_pair_map_entry_t *rmv; + + tor_assert(map); + tor_assert(iter); + tor_assert(*iter); + + rmv = *iter; + iter = HT_NEXT_RMV(fp_pair_map_impl, &(map->head), iter); + tor_free(rmv); + + return iter; +} + +/** Set *key_out and *val_out to the current entry pointed to by iter. + */ + +void +fp_pair_map_iter_get(fp_pair_map_iter_t *iter, + fp_pair_t *key_out, void **val_out) +{ + tor_assert(iter); + tor_assert(*iter); + + if (key_out) memcpy(key_out, &((*iter)->key), sizeof(fp_pair_t)); + if (val_out) *val_out = (*iter)->val; +} + +/** Return true iff iter has advanced past the last entry of its map. + */ + +int +fp_pair_map_iter_done(fp_pair_map_iter_t *iter) +{ + return (iter == NULL); +} + +/** Assert if anything has gone wrong with the internal + * representation of map. + */ + +void +fp_pair_map_assert_ok(const fp_pair_map_t *map) +{ + tor_assert(!fp_pair_map_impl_HT_REP_IS_BAD_(&(map->head))); +} + diff --git a/src/feature/dircommon/fp_pair.h b/src/feature/dircommon/fp_pair.h new file mode 100644 index 0000000000..500c7c9928 --- /dev/null +++ b/src/feature/dircommon/fp_pair.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2013-2018, The Tor Project, Inc. */ +/* See LICENSE for licensing information */ + +/** + * \file fp_pair.h + * \brief Header file for fp_pair.c. 
+ **/ + +#ifndef _TOR_FP_PAIR_H +#define _TOR_FP_PAIR_H + +/** A pair of digests created by dir_split_resource_info_fingerprint_pairs() */ +typedef struct { + char first[DIGEST_LEN]; + char second[DIGEST_LEN]; +} fp_pair_t; + +/* + * Declare fp_pair_map_t functions and structs + */ + +typedef struct fp_pair_map_entry_s fp_pair_map_entry_t; +typedef struct fp_pair_map_s fp_pair_map_t; +typedef fp_pair_map_entry_t *fp_pair_map_iter_t; + +fp_pair_map_t * fp_pair_map_new(void); +void * fp_pair_map_set(fp_pair_map_t *map, const fp_pair_t *key, void *val); +void * fp_pair_map_set_by_digests(fp_pair_map_t *map, + const char *first, const char *second, + void *val); +void * fp_pair_map_get(const fp_pair_map_t *map, const fp_pair_t *key); +void * fp_pair_map_get_by_digests(const fp_pair_map_t *map, + const char *first, const char *second); +void * fp_pair_map_remove(fp_pair_map_t *map, const fp_pair_t *key); +void fp_pair_map_free_(fp_pair_map_t *map, void (*free_val)(void*)); +#define fp_pair_map_free(map, free_val) do { \ + fp_pair_map_free_((map), (free_val)); \ + (map) = NULL; \ + } while (0) + +int fp_pair_map_isempty(const fp_pair_map_t *map); +int fp_pair_map_size(const fp_pair_map_t *map); +fp_pair_map_iter_t * fp_pair_map_iter_init(fp_pair_map_t *map); +fp_pair_map_iter_t * fp_pair_map_iter_next(fp_pair_map_t *map, + fp_pair_map_iter_t *iter); +fp_pair_map_iter_t * fp_pair_map_iter_next_rmv(fp_pair_map_t *map, + fp_pair_map_iter_t *iter); +void fp_pair_map_iter_get(fp_pair_map_iter_t *iter, + fp_pair_t *key_out, void **val_out); +int fp_pair_map_iter_done(fp_pair_map_iter_t *iter); +void fp_pair_map_assert_ok(const fp_pair_map_t *map); + +#undef DECLARE_MAP_FNS + +#endif /* !defined(_TOR_FP_PAIR_H) */ + diff --git a/src/feature/dircommon/vote_timing_st.h b/src/feature/dircommon/vote_timing_st.h new file mode 100644 index 0000000000..14c13eed28 --- /dev/null +++ b/src/feature/dircommon/vote_timing_st.h @@ -0,0 +1,24 @@ +/* Copyright (c) 2001 Matej Pfajfar. + * Copyright (c) 2001-2004, Roger Dingledine. + * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. + * Copyright (c) 2007-2018, The Tor Project, Inc. */ +/* See LICENSE for licensing information */ + +#ifndef VOTE_TIMING_ST_H +#define VOTE_TIMING_ST_H + +/** Describes the schedule by which votes should be generated. */ +struct vote_timing_t { + /** Length in seconds between one consensus becoming valid and the next + * becoming valid. */ + int vote_interval; + /** For how many intervals is a consensus valid? */ + int n_intervals_valid; + /** Time in seconds allowed to propagate votes */ + int vote_delay; + /** Time in seconds allowed to propagate signatures */ + int dist_delay; +}; + +#endif + diff --git a/src/feature/dircommon/voting_schedule.c b/src/feature/dircommon/voting_schedule.c new file mode 100644 index 0000000000..6edde3f229 --- /dev/null +++ b/src/feature/dircommon/voting_schedule.c @@ -0,0 +1,194 @@ +/* Copyright (c) 2018-2018, The Tor Project, Inc. */ +/* See LICENSE for licensing information */ + +/** + * \file voting_schedule.c + * \brief This file contains functions that are from the directory authority + * subsystem related to voting specifically but used by many part of + * tor. The full feature is built as part of the dirauth module. 
+ **/ + +#define VOTING_SCHEDULE_PRIVATE +#include "or/voting_schedule.h" + +#include "or/or.h" +#include "or/config.h" +#include "or/networkstatus.h" + +#include "or/networkstatus_st.h" + +/* ===== + * Vote scheduling + * ===== */ + +/** Return the start of the next interval of size <b>interval</b> (in + * seconds) after <b>now</b>, plus <b>offset</b>. Midnight always + * starts a fresh interval, and if the last interval of a day would be + * truncated to less than half its size, it is rolled into the + * previous interval. */ +time_t +voting_schedule_get_start_of_next_interval(time_t now, int interval, + int offset) +{ + struct tm tm; + time_t midnight_today=0; + time_t midnight_tomorrow; + time_t next; + + tor_gmtime_r(&now, &tm); + tm.tm_hour = 0; + tm.tm_min = 0; + tm.tm_sec = 0; + + if (tor_timegm(&tm, &midnight_today) < 0) { + // LCOV_EXCL_START + log_warn(LD_BUG, "Ran into an invalid time when trying to find midnight."); + // LCOV_EXCL_STOP + } + midnight_tomorrow = midnight_today + (24*60*60); + + next = midnight_today + ((now-midnight_today)/interval + 1)*interval; + + /* Intervals never cross midnight. */ + if (next > midnight_tomorrow) + next = midnight_tomorrow; + + /* If the interval would only last half as long as it's supposed to, then + * skip over to the next day. */ + if (next + interval/2 > midnight_tomorrow) + next = midnight_tomorrow; + + next += offset; + if (next - interval > now) + next -= interval; + + return next; +} + +/* Populate and return a new voting_schedule_t that can be used to schedule + * voting. The object is allocated on the heap and it's the responsibility of + * the caller to free it. Can't fail. */ +static voting_schedule_t * +get_voting_schedule(const or_options_t *options, time_t now, int severity) +{ + int interval, vote_delay, dist_delay; + time_t start; + time_t end; + networkstatus_t *consensus; + voting_schedule_t *new_voting_schedule; + + new_voting_schedule = tor_malloc_zero(sizeof(voting_schedule_t)); + + consensus = networkstatus_get_live_consensus(now); + + if (consensus) { + interval = (int)( consensus->fresh_until - consensus->valid_after ); + vote_delay = consensus->vote_seconds; + dist_delay = consensus->dist_seconds; + + /* Note down the consensus valid after, so that we detect outdated voting + * schedules in case of skewed clocks etc. 
*/ + new_voting_schedule->live_consensus_valid_after = consensus->valid_after; + } else { + interval = options->TestingV3AuthInitialVotingInterval; + vote_delay = options->TestingV3AuthInitialVoteDelay; + dist_delay = options->TestingV3AuthInitialDistDelay; + } + + tor_assert(interval > 0); + + if (vote_delay + dist_delay > interval/2) + vote_delay = dist_delay = interval / 4; + + start = new_voting_schedule->interval_starts = + voting_schedule_get_start_of_next_interval(now,interval, + options->TestingV3AuthVotingStartOffset); + end = voting_schedule_get_start_of_next_interval(start+1, interval, + options->TestingV3AuthVotingStartOffset); + + tor_assert(end > start); + + new_voting_schedule->fetch_missing_signatures = start - (dist_delay/2); + new_voting_schedule->voting_ends = start - dist_delay; + new_voting_schedule->fetch_missing_votes = + start - dist_delay - (vote_delay/2); + new_voting_schedule->voting_starts = start - dist_delay - vote_delay; + + { + char tbuf[ISO_TIME_LEN+1]; + format_iso_time(tbuf, new_voting_schedule->interval_starts); + tor_log(severity, LD_DIR,"Choosing expected valid-after time as %s: " + "consensus_set=%d, interval=%d", + tbuf, consensus?1:0, interval); + } + + return new_voting_schedule; +} + +#define voting_schedule_free(s) \ + FREE_AND_NULL(voting_schedule_t, voting_schedule_free_, (s)) + +/** Frees a voting_schedule_t. This should be used instead of the generic + * tor_free. */ +static void +voting_schedule_free_(voting_schedule_t *voting_schedule_to_free) +{ + if (!voting_schedule_to_free) + return; + tor_free(voting_schedule_to_free); +} + +voting_schedule_t voting_schedule; + +/* Using the time <b>now</b>, return the next voting valid-after time. */ +time_t +voting_schedule_get_next_valid_after_time(void) +{ + time_t now = approx_time(); + bool need_to_recalculate_voting_schedule = false; + + /* This is a safe guard in order to make sure that the voting schedule + * static object is at least initialized. Using this function with a zeroed + * voting schedule can lead to bugs. */ + if (tor_mem_is_zero((const char *) &voting_schedule, + sizeof(voting_schedule))) { + need_to_recalculate_voting_schedule = true; + goto done; /* no need for next check if we have to recalculate anyway */ + } + + /* Also make sure we are not using an outdated voting schedule. If we have a + * newer consensus, make sure we recalculate the voting schedule. */ + const networkstatus_t *ns = networkstatus_get_live_consensus(now); + if (ns && ns->valid_after != voting_schedule.live_consensus_valid_after) { + log_info(LD_DIR, "Voting schedule is outdated: recalculating (%d/%d)", + (int) ns->valid_after, + (int) voting_schedule.live_consensus_valid_after); + need_to_recalculate_voting_schedule = true; + } + + done: + if (need_to_recalculate_voting_schedule) { + voting_schedule_recalculate_timing(get_options(), now); + voting_schedule.created_on_demand = 1; + } + + return voting_schedule.interval_starts; +} + +/** Set voting_schedule to hold the timing for the next vote we should be + * doing. All type of tor do that because HS subsystem needs the timing as + * well to function properly. 
 */
+void
+voting_schedule_recalculate_timing(const or_options_t *options, time_t now)
+{
+  voting_schedule_t *new_voting_schedule;
+
+  /* get the new voting schedule */
+  new_voting_schedule = get_voting_schedule(options, now, LOG_INFO);
+  tor_assert(new_voting_schedule);
+
+  /* Fill in the global static struct now */
+  memcpy(&voting_schedule, new_voting_schedule, sizeof(voting_schedule));
+  voting_schedule_free(new_voting_schedule);
+}
+
diff --git a/src/feature/dircommon/voting_schedule.h b/src/feature/dircommon/voting_schedule.h
new file mode 100644
index 0000000000..0f27d36d52
--- /dev/null
+++ b/src/feature/dircommon/voting_schedule.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2018-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file voting_schedule.h
+ * \brief Header file for voting_schedule.c.
+ **/
+
+#ifndef TOR_VOTING_SCHEDULE_H
+#define TOR_VOTING_SCHEDULE_H
+
+#include "or/or.h"
+
+/** Scheduling information for a voting interval. */
+typedef struct {
+  /** When do we generate and distribute our vote for this interval? */
+  time_t voting_starts;
+  /** When do we send an HTTP request for any votes that haven't
+   * been posted yet? */
+  time_t fetch_missing_votes;
+  /** When do we give up on getting more votes and generate a consensus? */
+  time_t voting_ends;
+  /** When do we send an HTTP request for any signatures we're expecting to
+   * see on the consensus? */
+  time_t fetch_missing_signatures;
+  /** When do we publish the consensus? */
+  time_t interval_starts;
+
+  /* True iff we have generated and distributed our vote. */
+  int have_voted;
+  /* True iff we've requested missing votes. */
+  int have_fetched_missing_votes;
+  /* True iff we have built a consensus and sent the signatures around. */
+  int have_built_consensus;
+  /* True iff we've fetched missing signatures. */
+  int have_fetched_missing_signatures;
+  /* True iff we have published our consensus. */
+  int have_published_consensus;
+
+  /* True iff this voting schedule was set on demand, meaning not through the
+   * normal vote operation of a dirauth or when a consensus is set. This only
+   * applies to a directory authority that needs to recalculate the voting
+   * timings only for the first vote, even though this object was initialized
+   * prior to voting. */
+  int created_on_demand;
+
+  /** The valid-after time of the last live consensus that filled this voting
+   * schedule. It's used to detect outdated voting schedules. */
+  time_t live_consensus_valid_after;
+} voting_schedule_t;
+
+/* Public API. */
+
+extern voting_schedule_t voting_schedule;
+
+void voting_schedule_recalculate_timing(const or_options_t *options,
+                                        time_t now);
+
+time_t voting_schedule_get_start_of_next_interval(time_t now,
+                                                  int interval,
+                                                  int offset);
+time_t voting_schedule_get_next_valid_after_time(void);
+
+#endif /* TOR_VOTING_SCHEDULE_H */
+
|
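
A minimal usage sketch for the fp_pair_map API declared in fp_pair.h above. This is hypothetical caller code, not part of the commit: the function name, the cert argument, and the digest parameters are placeholders, and it assumes the or/or.h and or/fp_pair.h headers from this tree (which provide DIGEST_LEN, tor_assert, and memcpy via the usual includes).

#include "or/or.h"
#include "or/fp_pair.h"

/* Associate an opaque value with an (identity digest, signing-key digest)
 * pair, look it up again both ways, then free the map without touching the
 * stored value. */
static void
fp_pair_map_usage_sketch(const char *id_digest, const char *sk_digest,
                         void *cert)
{
  fp_pair_map_t *map = fp_pair_map_new();
  fp_pair_t key;

  /* Insert directly from the two DIGEST_LEN-byte digests. */
  fp_pair_map_set_by_digests(map, id_digest, sk_digest, cert);

  /* Or build an explicit fp_pair_t key and look the value up with it. */
  memcpy(key.first, id_digest, DIGEST_LEN);
  memcpy(key.second, sk_digest, DIGEST_LEN);
  tor_assert(fp_pair_map_get(map, &key) == cert);
  tor_assert(fp_pair_map_get_by_digests(map, id_digest, sk_digest) == cert);
  tor_assert(fp_pair_map_size(map) == 1);

  /* The map does not own its values: pass NULL so no free_val is called. */
  fp_pair_map_free(map, NULL);
}

The fp_pair_map_free() macro also NULLs the caller's pointer, matching the FREE_AND_NULL convention used elsewhere in this reorganized tree.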
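
Iteration follows the same pattern as Tor's other map types (digestmap_t, strmap_t). Another hypothetical sketch, under the same assumptions as above plus the log_debug and hex_str helpers that or/or.h pulls in:

#include "or/or.h"
#include "or/fp_pair.h"

/* Walk every entry in a map, logging each stored pointer and the first
 * fingerprint of its key. */
static void
fp_pair_map_log_entries(fp_pair_map_t *map)
{
  fp_pair_map_iter_t *iter;

  for (iter = fp_pair_map_iter_init(map);
       !fp_pair_map_iter_done(iter);
       iter = fp_pair_map_iter_next(map, iter)) {
    fp_pair_t key;
    void *val;
    fp_pair_map_iter_get(iter, &key, &val);
    log_debug(LD_DIR, "Entry %p keyed by identity %s",
              val, hex_str(key.first, DIGEST_LEN));
  }
}

Note that fp_pair_map_iter_next_rmv() frees only the hash-table entry, not the stored value, mirroring fp_pair_map_remove(); a caller deleting entries mid-walk still owns whatever the void * points to.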
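
The interval arithmetic in voting_schedule_get_start_of_next_interval() is easiest to see with concrete numbers. The checks below are hand-computed from the code above and are not part of the commit; the epoch constant is assumed to be 2018-07-05 00:00:00 UTC, chosen only because it is a midnight boundary.

#include "or/or.h"
#include "or/voting_schedule.h"

static void
interval_arithmetic_examples(void)
{
  const time_t midnight = 1530748800; /* assumed 2018-07-05 00:00:00 UTC */

  /* Hourly interval, no offset: 12:34:56 rounds up to 13:00:00. */
  tor_assert(voting_schedule_get_start_of_next_interval(
               midnight + 12*3600 + 34*60 + 56, 3600, 0) ==
             midnight + 13*3600);

  /* A 5-minute offset shifts that boundary to 13:05:00. */
  tor_assert(voting_schedule_get_start_of_next_interval(
               midnight + 12*3600 + 34*60 + 56, 3600, 300) ==
             midnight + 13*3600 + 300);

  /* Just after 12:00, the answer is 12:05:00: the final "next -= interval"
   * step pulls the result back because the shifted 13:05:00 boundary minus
   * one interval is still in the future relative to now. */
  tor_assert(voting_schedule_get_start_of_next_interval(
               midnight + 12*3600 + 60, 3600, 300) ==
             midnight + 12*3600 + 300);
}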
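
As a worked example of the offsets computed in get_voting_schedule(), using the values the public network has typically carried in its consensus (an assumption here, not something this commit states): with interval = 3600, vote_delay = dist_delay = 300 and a next valid-after time of 13:00:00, the schedule comes out as voting_starts = 12:50:00, fetch_missing_votes = 12:52:30, voting_ends = 12:55:00, fetch_missing_signatures = 12:57:30, and interval_starts = 13:00:00. The fallback that resets both delays to interval/4 does not trigger, since vote_delay + dist_delay = 600 is well under interval/2 = 1800.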