Diffstat (limited to 'src/feature/nodelist')
-rw-r--r--  src/feature/nodelist/authority_cert_st.h           |   32
-rw-r--r--  src/feature/nodelist/desc_store_st.h               |   39
-rw-r--r--  src/feature/nodelist/document_signature_st.h       |   29
-rw-r--r--  src/feature/nodelist/extrainfo_st.h                |   30
-rw-r--r--  src/feature/nodelist/microdesc.c                   | 1059
-rw-r--r--  src/feature/nodelist/microdesc.h                   |   60
-rw-r--r--  src/feature/nodelist/microdesc_st.h                |   74
-rw-r--r--  src/feature/nodelist/networkstatus.c               | 2726
-rw-r--r--  src/feature/nodelist/networkstatus.h               |  162
-rw-r--r--  src/feature/nodelist/networkstatus_sr_info_st.h    |   23
-rw-r--r--  src/feature/nodelist/networkstatus_st.h            |  101
-rw-r--r--  src/feature/nodelist/networkstatus_voter_info_st.h |   30
-rw-r--r--  src/feature/nodelist/node_st.h                     |  102
-rw-r--r--  src/feature/nodelist/nodelist.c                    | 2513
-rw-r--r--  src/feature/nodelist/nodelist.h                    |  168
-rw-r--r--  src/feature/nodelist/parsecommon.c                 |  458
-rw-r--r--  src/feature/nodelist/parsecommon.h                 |  324
-rw-r--r--  src/feature/nodelist/routerinfo_st.h               |  108
-rw-r--r--  src/feature/nodelist/routerlist.c                  | 5848
-rw-r--r--  src/feature/nodelist/routerlist.h                  |  347
-rw-r--r--  src/feature/nodelist/routerlist_st.h               |   40
-rw-r--r--  src/feature/nodelist/routerparse.c                 | 5693
-rw-r--r--  src/feature/nodelist/routerparse.h                 |  161
-rw-r--r--  src/feature/nodelist/routerset.c                   |  463
-rw-r--r--  src/feature/nodelist/routerset.h                   |   89
-rw-r--r--  src/feature/nodelist/routerstatus_st.h             |   80
-rw-r--r--  src/feature/nodelist/signed_descriptor_st.h        |   61
-rw-r--r--  src/feature/nodelist/torcert.c                     |  725
-rw-r--r--  src/feature/nodelist/torcert.h                     |  110
-rw-r--r--  src/feature/nodelist/vote_routerstatus_st.h        |   41
30 files changed, 21696 insertions, 0 deletions
diff --git a/src/feature/nodelist/authority_cert_st.h b/src/feature/nodelist/authority_cert_st.h
new file mode 100644
index 0000000000..19c3fda2de
--- /dev/null
+++ b/src/feature/nodelist/authority_cert_st.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef AUTHORITY_CERT_ST_H
+#define AUTHORITY_CERT_ST_H
+
+#include "or/signed_descriptor_st.h"
+
+/** Certificate for v3 directory protocol: binds long-term authority identity
+ * keys to medium-term authority signing keys. */
+struct authority_cert_t {
+ /** Information relating to caching this cert on disk and looking it up. */
+ signed_descriptor_t cache_info;
+ /** This authority's long-term authority identity key. */
+ crypto_pk_t *identity_key;
+ /** This authority's medium-term signing key. */
+ crypto_pk_t *signing_key;
+ /** The digest of <b>signing_key</b> */
+ char signing_key_digest[DIGEST_LEN];
+ /** The listed expiration time of this certificate. */
+ time_t expires;
+ /** This authority's IPv4 address, in host order. */
+ uint32_t addr;
+ /** This authority's directory port. */
+ uint16_t dir_port;
+};
+
+#endif
+
diff --git a/src/feature/nodelist/desc_store_st.h b/src/feature/nodelist/desc_store_st.h
new file mode 100644
index 0000000000..168a83b230
--- /dev/null
+++ b/src/feature/nodelist/desc_store_st.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef DESC_STORE_ST_H
+#define DESC_STORE_ST_H
+
+/** Allowable types of desc_store_t. */
+typedef enum store_type_t {
+ ROUTER_STORE = 0,
+ EXTRAINFO_STORE = 1
+} store_type_t;
+
+/** A 'store' is a set of descriptors saved on disk, with accompanying
+ * journal, mmapped as needed, rebuilt as needed. */
+struct desc_store_t {
+ /** Filename (within DataDir) for the store. We append .tmp to this
+ * filename for a temporary file when rebuilding the store, and .new to this
+ * filename for the journal. */
+ const char *fname_base;
+ /** Human-readable description of what this store contains. */
+ const char *description;
+
+ tor_mmap_t *mmap; /**< A mmap for the main file in the store. */
+
+ store_type_t type; /**< What's stored in this store? */
+
+ /** The size of the router log, in bytes. */
+ size_t journal_len;
+ /** The size of the router store, in bytes. */
+ size_t store_len;
+ /** Total bytes dropped since last rebuild: this is space currently
+ * used in the cache and the journal that could be freed by a rebuild. */
+ size_t bytes_dropped;
+};
+
+#endif
diff --git a/src/feature/nodelist/document_signature_st.h b/src/feature/nodelist/document_signature_st.h
new file mode 100644
index 0000000000..0291e099bf
--- /dev/null
+++ b/src/feature/nodelist/document_signature_st.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef DOCUMENT_SIGNATURE_ST_H
+#define DOCUMENT_SIGNATURE_ST_H
+
+/** A signature of some document by an authority. */
+struct document_signature_t {
+ /** Declared SHA-1 digest of this voter's identity key */
+ char identity_digest[DIGEST_LEN];
+ /** Declared SHA-1 digest of signing key used by this voter. */
+ char signing_key_digest[DIGEST_LEN];
+ /** Algorithm used to compute the digest of the document. */
+ digest_algorithm_t alg;
+ /** Signature of the signed thing. */
+ char *signature;
+ /** Length of <b>signature</b> */
+ int signature_len;
+ unsigned int bad_signature : 1; /**< Set to true if we've tried to verify
+ * the sig, and we know it's bad. */
+ unsigned int good_signature : 1; /**< Set to true if we've verified the sig
+ * as good. */
+};
+
+#endif
+
diff --git a/src/feature/nodelist/extrainfo_st.h b/src/feature/nodelist/extrainfo_st.h
new file mode 100644
index 0000000000..f91bba7b68
--- /dev/null
+++ b/src/feature/nodelist/extrainfo_st.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef EXTRAINFO_ST_H
+#define EXTRAINFO_ST_H
+
+#include "or/signed_descriptor_st.h"
+
+/** Information needed to keep and cache a signed extra-info document. */
+struct extrainfo_t {
+ signed_descriptor_t cache_info;
+ /** SHA256 digest of this document */
+ uint8_t digest256[DIGEST256_LEN];
+ /** The router's nickname. */
+ char nickname[MAX_NICKNAME_LEN+1];
+ /** True iff we found the right key for this extra-info, verified the
+ * signature, and found it to be bad. */
+ unsigned int bad_sig : 1;
+ /** If present, we didn't have the right key to verify this extra-info,
+ * so this is a copy of the signature in the document. */
+ char *pending_sig;
+ /** Length of pending_sig. */
+ size_t pending_sig_len;
+};
+
+#endif
+
diff --git a/src/feature/nodelist/microdesc.c b/src/feature/nodelist/microdesc.c
new file mode 100644
index 0000000000..95c5e8b6f7
--- /dev/null
+++ b/src/feature/nodelist/microdesc.c
@@ -0,0 +1,1059 @@
+/* Copyright (c) 2009-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file microdesc.c
+ *
+ * \brief Implements microdescriptors -- an abbreviated description of
+ * less-frequently-changing router information.
+ */
+
+#include "or/or.h"
+
+#include "lib/fdio/fdio.h"
+
+#include "or/circuitbuild.h"
+#include "or/config.h"
+#include "or/directory.h"
+#include "or/dirserv.h"
+#include "or/entrynodes.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+
+#include "or/microdesc_st.h"
+#include "or/networkstatus_st.h"
+#include "or/node_st.h"
+#include "or/routerstatus_st.h"
+
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+/** A data structure to hold a bunch of cached microdescriptors. There are
+ * two active files in the cache: a "cache file" that we mmap, and a "journal
+ * file" that we append to. Periodically, we rebuild the cache file to hold
+ * only the microdescriptors that we want to keep */
+struct microdesc_cache_t {
+ /** Map from sha256-digest to microdesc_t for every microdesc_t in the
+ * cache. */
+ HT_HEAD(microdesc_map, microdesc_t) map;
+
+ /** Name of the cache file. */
+ char *cache_fname;
+ /** Name of the journal file. */
+ char *journal_fname;
+ /** Mmap'd contents of the cache file, or NULL if there is none. */
+ tor_mmap_t *cache_content;
+ /** Number of bytes used in the journal file. */
+ size_t journal_len;
+ /** Number of bytes in descriptors removed as too old. */
+ size_t bytes_dropped;
+
+ /** Total bytes of microdescriptor bodies we have added to this cache */
+ uint64_t total_len_seen;
+ /** Total number of microdescriptors we have added to this cache */
+ unsigned n_seen;
+
+ /** True iff we have loaded this cache from disk ever. */
+ int is_loaded;
+};
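+
+/* Illustrative sketch of the usual lifecycle of the two files above, using
+ * only functions defined later in this file. New microdescriptor text is
+ * appended to the journal as it arrives, and a rebuild periodically folds
+ * the journal into the mmap'd cache file:
+ *
+ *   microdesc_cache_t *cache = get_microdesc_cache();
+ *   smartlist_t *added = microdescs_add_to_cache(cache, body, body + len,
+ *                                                SAVED_NOWHERE, 0,
+ *                                                (time_t)-1, NULL);
+ *   smartlist_free(added);
+ *   microdesc_cache_rebuild(cache, 0);
+ *
+ * Here "body" and "len" stand for a hypothetical buffer of downloaded
+ * microdescriptor text; everything else is the API from this file. */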
+
+static microdesc_cache_t *get_microdesc_cache_noload(void);
+
+/** Helper: computes a hash of <b>md</b> to place it in a hash table. */
+static inline unsigned int
+microdesc_hash_(microdesc_t *md)
+{
+ return (unsigned) siphash24g(md->digest, sizeof(md->digest));
+}
+
+/** Helper: compares <b>a</b> and <b>b</b> for equality for hash-table
+ * purposes. */
+static inline int
+microdesc_eq_(microdesc_t *a, microdesc_t *b)
+{
+ return tor_memeq(a->digest, b->digest, DIGEST256_LEN);
+}
+
+HT_PROTOTYPE(microdesc_map, microdesc_t, node,
+ microdesc_hash_, microdesc_eq_)
+HT_GENERATE2(microdesc_map, microdesc_t, node,
+ microdesc_hash_, microdesc_eq_, 0.6,
+ tor_reallocarray_, tor_free_)
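+
+/* Sketch of how the helpers above are used: a lookup keys only off the
+ * digest field, so a stack-allocated "search" microdesc with just that field
+ * set is enough. This is exactly what microdesc_cache_lookup_by_digest256()
+ * does further down; "wanted_digest256" is a hypothetical input here:
+ *
+ *   microdesc_t search, *found;
+ *   memcpy(search.digest, wanted_digest256, DIGEST256_LEN);
+ *   found = HT_FIND(microdesc_map, &cache->map, &search);
+ */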
+
+/************************* md fetch fail cache *****************************/
+
+/* If we end up with too many outdated dirservers, something probably went
+ * wrong so clean up the list. */
+#define TOO_MANY_OUTDATED_DIRSERVERS 30
+
+/** List of dirservers with outdated microdesc information. The smartlist is
+ * filled with the hex digests of outdated dirservers. */
+static smartlist_t *outdated_dirserver_list = NULL;
+
+/** Note that we failed to fetch a microdescriptor from the relay with
+ * <b>relay_digest</b> (of size DIGEST_LEN). */
+void
+microdesc_note_outdated_dirserver(const char *relay_digest)
+{
+ char relay_hexdigest[HEX_DIGEST_LEN+1];
+
+ /* Don't register outdated dirservers if we don't have a live consensus,
+ * since we might be trying to fetch microdescriptors that are not even
+ * currently active. */
+ if (!networkstatus_get_live_consensus(approx_time())) {
+ return;
+ }
+
+ if (!outdated_dirserver_list) {
+ outdated_dirserver_list = smartlist_new();
+ }
+
+ tor_assert(outdated_dirserver_list);
+
+ /* If the list grows too big, clean it up */
+ if (BUG(smartlist_len(outdated_dirserver_list) >
+ TOO_MANY_OUTDATED_DIRSERVERS)) {
+ microdesc_reset_outdated_dirservers_list();
+ }
+
+ /* Convert the binary relay digest to hex, since smartlists have better
+ * support for strings than digests. */
+ base16_encode(relay_hexdigest,sizeof(relay_hexdigest),
+ relay_digest, DIGEST_LEN);
+
+ /* Make sure we don't add a dirauth as an outdated dirserver */
+ if (router_get_trusteddirserver_by_digest(relay_digest)) {
+ log_info(LD_GENERAL, "Auth %s gave us outdated dirinfo.", relay_hexdigest);
+ return;
+ }
+
+ /* Don't double-add outdated dirservers */
+ if (smartlist_contains_string(outdated_dirserver_list, relay_hexdigest)) {
+ return;
+ }
+
+ /* Add it to the list of outdated dirservers */
+ smartlist_add_strdup(outdated_dirserver_list, relay_hexdigest);
+
+ log_info(LD_GENERAL, "Noted %s as outdated md dirserver", relay_hexdigest);
+}
+
+/** Return True if the relay with <b>relay_digest</b> (size DIGEST_LEN) is an
+ * outdated dirserver */
+int
+microdesc_relay_is_outdated_dirserver(const char *relay_digest)
+{
+ char relay_hexdigest[HEX_DIGEST_LEN+1];
+
+ if (!outdated_dirserver_list) {
+ return 0;
+ }
+
+ /* Convert identity digest to hex digest */
+ base16_encode(relay_hexdigest, sizeof(relay_hexdigest),
+ relay_digest, DIGEST_LEN);
+
+ /* Last time we tried to fetch microdescs, was this directory mirror missing
+ * any mds we asked for? */
+ if (smartlist_contains_string(outdated_dirserver_list, relay_hexdigest)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Reset the list of outdated dirservers. */
+void
+microdesc_reset_outdated_dirservers_list(void)
+{
+ if (!outdated_dirserver_list) {
+ return;
+ }
+
+ SMARTLIST_FOREACH(outdated_dirserver_list, char *, cp, tor_free(cp));
+ smartlist_clear(outdated_dirserver_list);
+}
+
+/****************************************************************************/
+
+/** Write the body of <b>md</b> into <b>f</b>, with appropriate annotations.
+ * On success, return the total number of bytes written, and set
+ * *<b>annotation_len_out</b> to the number of bytes written as
+ * annotations. */
+static ssize_t
+dump_microdescriptor(int fd, microdesc_t *md, size_t *annotation_len_out)
+{
+ ssize_t r = 0;
+ ssize_t written;
+ if (md->body == NULL) {
+ *annotation_len_out = 0;
+ return 0;
+ }
+ /* XXXX drops unknown annotations. */
+ if (md->last_listed) {
+ char buf[ISO_TIME_LEN+1];
+ char annotation[ISO_TIME_LEN+32];
+ format_iso_time(buf, md->last_listed);
+ tor_snprintf(annotation, sizeof(annotation), "@last-listed %s\n", buf);
+ if (write_all_to_fd(fd, annotation, strlen(annotation)) < 0) {
+ log_warn(LD_DIR,
+ "Couldn't write microdescriptor annotation: %s",
+ strerror(errno));
+ return -1;
+ }
+ r += strlen(annotation);
+ *annotation_len_out = r;
+ } else {
+ *annotation_len_out = 0;
+ }
+
+ md->off = tor_fd_getpos(fd);
+ written = write_all_to_fd(fd, md->body, md->bodylen);
+ if (written != (ssize_t)md->bodylen) {
+ written = written < 0 ? 0 : written;
+ log_warn(LD_DIR,
+ "Couldn't dump microdescriptor (wrote %ld out of %lu): %s",
+ (long)written, (unsigned long)md->bodylen,
+ strerror(errno));
+ return -1;
+ }
+ r += md->bodylen;
+ return r;
+}
+
+/** Holds a pointer to the current microdesc_cache_t object, or NULL if no
+ * such object has been allocated. */
+static microdesc_cache_t *the_microdesc_cache = NULL;
+
+/** Return a pointer to the microdescriptor cache, loading it if necessary. */
+microdesc_cache_t *
+get_microdesc_cache(void)
+{
+ microdesc_cache_t *cache = get_microdesc_cache_noload();
+ if (PREDICT_UNLIKELY(cache->is_loaded == 0)) {
+ microdesc_cache_reload(cache);
+ }
+ return cache;
+}
+
+/** Return a pointer to the microdescriptor cache, creating (but not loading)
+ * it if necessary. */
+static microdesc_cache_t *
+get_microdesc_cache_noload(void)
+{
+ if (PREDICT_UNLIKELY(the_microdesc_cache==NULL)) {
+ microdesc_cache_t *cache = tor_malloc_zero(sizeof(*cache));
+ HT_INIT(microdesc_map, &cache->map);
+ cache->cache_fname = get_cachedir_fname("cached-microdescs");
+ cache->journal_fname = get_cachedir_fname("cached-microdescs.new");
+ the_microdesc_cache = cache;
+ }
+ return the_microdesc_cache;
+}
+
+/* There are three sources of microdescriptors:
+ 1) Generated by us while acting as a directory authority.
+ 2) Loaded from the cache on disk.
+ 3) Downloaded.
+*/
+
+/** Decode the microdescriptors from the string starting at <b>s</b> and
+ * ending at <b>eos</b>, and store them in <b>cache</b>. If <b>no_save</b>,
+ * mark them as non-writable to disk. If <b>where</b> is SAVED_IN_CACHE,
+ * leave their bodies as pointers to the mmap'd cache. If <b>where</b> is
+ * SAVED_NOWHERE, do not allow annotations. If listed_at is not -1,
+ * set the last_listed field of every microdesc to listed_at. If
+ * requested_digests is non-null, then it contains a list of digests we mean
+ * to allow, so we should reject any non-requested microdesc with a different
+ * digest, and alter the list to contain only the digests of those microdescs
+ * we didn't find.
+ * Return a newly allocated list of the added microdescriptors, or NULL */
+smartlist_t *
+microdescs_add_to_cache(microdesc_cache_t *cache,
+ const char *s, const char *eos, saved_location_t where,
+ int no_save, time_t listed_at,
+ smartlist_t *requested_digests256)
+{
+ void * const DIGEST_REQUESTED = (void*)1;
+ void * const DIGEST_RECEIVED = (void*)2;
+ void * const DIGEST_INVALID = (void*)3;
+
+ smartlist_t *descriptors, *added;
+ const int allow_annotations = (where != SAVED_NOWHERE);
+ smartlist_t *invalid_digests = smartlist_new();
+
+ descriptors = microdescs_parse_from_string(s, eos,
+ allow_annotations,
+ where, invalid_digests);
+ if (listed_at != (time_t)-1) {
+ SMARTLIST_FOREACH(descriptors, microdesc_t *, md,
+ md->last_listed = listed_at);
+ }
+ if (requested_digests256) {
+ digest256map_t *requested;
+ requested = digest256map_new();
+ /* Set requested[d] to DIGEST_REQUESTED for every md we requested. */
+ SMARTLIST_FOREACH(requested_digests256, const uint8_t *, cp,
+ digest256map_set(requested, cp, DIGEST_REQUESTED));
+ /* Set requested[d] to DIGEST_INVALID for every md we requested which we
+ * will never be able to parse. Remove the ones we didn't request from
+ * invalid_digests.
+ */
+ SMARTLIST_FOREACH_BEGIN(invalid_digests, uint8_t *, cp) {
+ if (digest256map_get(requested, cp)) {
+ digest256map_set(requested, cp, DIGEST_INVALID);
+ } else {
+ tor_free(cp);
+ SMARTLIST_DEL_CURRENT(invalid_digests, cp);
+ }
+ } SMARTLIST_FOREACH_END(cp);
+ /* Update requested[d] to DIGEST_RECEIVED for the mds we asked for and
+ * got. Delete the
+ * ones we never requested from the 'descriptors' smartlist.
+ */
+ SMARTLIST_FOREACH_BEGIN(descriptors, microdesc_t *, md) {
+ if (digest256map_get(requested, (const uint8_t*)md->digest)) {
+ digest256map_set(requested, (const uint8_t*)md->digest,
+ DIGEST_RECEIVED);
+ } else {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR, "Received non-requested microdesc");
+ microdesc_free(md);
+ SMARTLIST_DEL_CURRENT(descriptors, md);
+ }
+ } SMARTLIST_FOREACH_END(md);
+ /* Remove the ones we got or the invalid ones from requested_digests256.
+ */
+ SMARTLIST_FOREACH_BEGIN(requested_digests256, uint8_t *, cp) {
+ void *status = digest256map_get(requested, cp);
+ if (status == DIGEST_RECEIVED || status == DIGEST_INVALID) {
+ tor_free(cp);
+ SMARTLIST_DEL_CURRENT(requested_digests256, cp);
+ }
+ } SMARTLIST_FOREACH_END(cp);
+ digest256map_free(requested, NULL);
+ }
+
+ /* For every requested microdescriptor that was unparseable, mark it
+ * as not to be retried. */
+ if (smartlist_len(invalid_digests)) {
+ networkstatus_t *ns =
+ networkstatus_get_latest_consensus_by_flavor(FLAV_MICRODESC);
+ if (ns) {
+ SMARTLIST_FOREACH_BEGIN(invalid_digests, char *, d) {
+ routerstatus_t *rs =
+ router_get_mutable_consensus_status_by_descriptor_digest(ns, d);
+ if (rs && tor_memeq(d, rs->descriptor_digest, DIGEST256_LEN)) {
+ download_status_mark_impossible(&rs->dl_status);
+ }
+ } SMARTLIST_FOREACH_END(d);
+ }
+ }
+ SMARTLIST_FOREACH(invalid_digests, uint8_t *, d, tor_free(d));
+ smartlist_free(invalid_digests);
+
+ added = microdescs_add_list_to_cache(cache, descriptors, where, no_save);
+ smartlist_free(descriptors);
+ return added;
+}
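+
+/* Example of the <b>requested_digests256</b> contract above (the "wanted_"
+ * names and the body/len buffer are hypothetical; the rest is API from this
+ * file): pass in the digests you asked for, and after the call the list
+ * holds only the digests that were neither received nor found to be
+ * permanently unparseable, i.e. the ones still worth retrying:
+ *
+ *   smartlist_t *wanted_digests = smartlist_new();
+ *   smartlist_add(wanted_digests, tor_memdup(wanted_digest256,
+ *                                            DIGEST256_LEN));
+ *   smartlist_t *added = microdescs_add_to_cache(cache, body, body + len,
+ *                                                SAVED_NOWHERE, 0,
+ *                                                (time_t)-1, wanted_digests);
+ *   int n_still_missing = smartlist_len(wanted_digests);
+ *   smartlist_free(added);
+ */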
+
+/** As microdescs_add_to_cache, but takes a list of microdescriptors instead of
+ * a string to decode. Frees any members of <b>descriptors</b> that it does
+ * not add. */
+smartlist_t *
+microdescs_add_list_to_cache(microdesc_cache_t *cache,
+ smartlist_t *descriptors, saved_location_t where,
+ int no_save)
+{
+ smartlist_t *added;
+ open_file_t *open_file = NULL;
+ int fd = -1;
+ // int n_added = 0;
+ ssize_t size = 0;
+
+ if (where == SAVED_NOWHERE && !no_save) {
+ fd = start_writing_to_file(cache->journal_fname,
+ OPEN_FLAGS_APPEND|O_BINARY,
+ 0600, &open_file);
+ if (fd < 0) {
+ log_warn(LD_DIR, "Couldn't append to journal in %s: %s",
+ cache->journal_fname, strerror(errno));
+ }
+ }
+
+ added = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(descriptors, microdesc_t *, md) {
+ microdesc_t *md2;
+ md2 = HT_FIND(microdesc_map, &cache->map, md);
+ if (md2) {
+ /* We already had this one. */
+ if (md2->last_listed < md->last_listed)
+ md2->last_listed = md->last_listed;
+ microdesc_free(md);
+ if (where != SAVED_NOWHERE)
+ cache->bytes_dropped += size;
+ continue;
+ }
+
+ /* Okay, it's a new one. */
+ if (fd >= 0) {
+ size_t annotation_len;
+ size = dump_microdescriptor(fd, md, &annotation_len);
+ if (size < 0) {
+ /* we already warned in dump_microdescriptor */
+ abort_writing_to_file(open_file);
+ fd = -1;
+ } else {
+ md->saved_location = SAVED_IN_JOURNAL;
+ cache->journal_len += size;
+ }
+ } else {
+ md->saved_location = where;
+ }
+
+ md->no_save = no_save;
+
+ HT_INSERT(microdesc_map, &cache->map, md);
+ md->held_in_map = 1;
+ smartlist_add(added, md);
+ ++cache->n_seen;
+ cache->total_len_seen += md->bodylen;
+ } SMARTLIST_FOREACH_END(md);
+
+ if (fd >= 0) {
+ if (finish_writing_to_file(open_file) < 0) {
+ log_warn(LD_DIR, "Error appending to microdescriptor file: %s",
+ strerror(errno));
+ smartlist_clear(added);
+ return added;
+ }
+ }
+
+ {
+ networkstatus_t *ns = networkstatus_get_latest_consensus();
+ if (ns && ns->flavor == FLAV_MICRODESC)
+ SMARTLIST_FOREACH(added, microdesc_t *, md, nodelist_add_microdesc(md));
+ }
+
+ if (smartlist_len(added))
+ router_dir_info_changed();
+
+ return added;
+}
+
+/** Remove every microdescriptor in <b>cache</b>. */
+void
+microdesc_cache_clear(microdesc_cache_t *cache)
+{
+ microdesc_t **entry, **next;
+
+ for (entry = HT_START(microdesc_map, &cache->map); entry; entry = next) {
+ microdesc_t *md = *entry;
+ next = HT_NEXT_RMV(microdesc_map, &cache->map, entry);
+ md->held_in_map = 0;
+ microdesc_free(md);
+ }
+ HT_CLEAR(microdesc_map, &cache->map);
+ if (cache->cache_content) {
+ int res = tor_munmap_file(cache->cache_content);
+ if (res != 0) {
+ log_warn(LD_FS,
+ "tor_munmap_file() failed clearing microdesc cache; "
+ "we are probably about to leak memory.");
+ /* TODO something smarter? */
+ }
+ cache->cache_content = NULL;
+ }
+ cache->total_len_seen = 0;
+ cache->n_seen = 0;
+ cache->bytes_dropped = 0;
+}
+
+/** Reload the contents of <b>cache</b> from disk. If it is empty, load it
+ * for the first time. Return 0 on success, -1 on failure. */
+int
+microdesc_cache_reload(microdesc_cache_t *cache)
+{
+ struct stat st;
+ char *journal_content;
+ smartlist_t *added;
+ tor_mmap_t *mm;
+ int total = 0;
+
+ microdesc_cache_clear(cache);
+
+ cache->is_loaded = 1;
+
+ mm = cache->cache_content = tor_mmap_file(cache->cache_fname);
+ if (mm) {
+ added = microdescs_add_to_cache(cache, mm->data, mm->data+mm->size,
+ SAVED_IN_CACHE, 0, -1, NULL);
+ if (added) {
+ total += smartlist_len(added);
+ smartlist_free(added);
+ }
+ }
+
+ journal_content = read_file_to_str(cache->journal_fname,
+ RFTS_IGNORE_MISSING, &st);
+ if (journal_content) {
+ cache->journal_len = (size_t) st.st_size;
+ added = microdescs_add_to_cache(cache, journal_content,
+ journal_content+st.st_size,
+ SAVED_IN_JOURNAL, 0, -1, NULL);
+ if (added) {
+ total += smartlist_len(added);
+ smartlist_free(added);
+ }
+ tor_free(journal_content);
+ }
+ log_info(LD_DIR, "Reloaded microdescriptor cache. Found %d descriptors.",
+ total);
+
+ microdesc_cache_rebuild(cache, 0 /* don't force */);
+
+ return 0;
+}
+
+/** By default, we remove any microdescriptors that have gone at least this
+ * long without appearing in a current consensus. */
+#define TOLERATE_MICRODESC_AGE (7*24*60*60)
+
+/** Remove all microdescriptors from <b>cache</b> that haven't been listed for
+ * a long time. Does not rebuild the cache on disk. If <b>cutoff</b> is
+ * positive, specifically remove microdescriptors that have been unlisted
+ * since <b>cutoff</b>. If <b>force</b> is true, remove microdescriptors even
+ * if we have no current live microdescriptor consensus.
+ */
+void
+microdesc_cache_clean(microdesc_cache_t *cache, time_t cutoff, int force)
+{
+ microdesc_t **mdp, *victim;
+ int dropped=0, kept=0;
+ size_t bytes_dropped = 0;
+ time_t now = time(NULL);
+
+ /* If we don't know a live consensus, don't believe last_listed values: we
+ * might be starting up after being down for a while. */
+ if (! force &&
+ ! networkstatus_get_reasonably_live_consensus(now, FLAV_MICRODESC))
+ return;
+
+ if (cutoff <= 0)
+ cutoff = now - TOLERATE_MICRODESC_AGE;
+
+ for (mdp = HT_START(microdesc_map, &cache->map); mdp != NULL; ) {
+ const int is_old = (*mdp)->last_listed < cutoff;
+ const unsigned held_by_nodes = (*mdp)->held_by_nodes;
+ if (is_old && !held_by_nodes) {
+ ++dropped;
+ victim = *mdp;
+ mdp = HT_NEXT_RMV(microdesc_map, &cache->map, mdp);
+ victim->held_in_map = 0;
+ bytes_dropped += victim->bodylen;
+ microdesc_free(victim);
+ } else {
+ if (is_old) {
+ /* It's old, but it has held_by_nodes set. That's not okay. */
+ /* Let's try to diagnose and fix #7164 . */
+ smartlist_t *nodes = nodelist_find_nodes_with_microdesc(*mdp);
+ const networkstatus_t *ns = networkstatus_get_latest_consensus();
+ long networkstatus_age = -1;
+ const int ht_badness = HT_REP_IS_BAD_(microdesc_map, &cache->map);
+ if (ns) {
+ networkstatus_age = now - ns->valid_after;
+ }
+ log_warn(LD_BUG, "Microdescriptor seemed very old "
+ "(last listed %d hours ago vs %d hour cutoff), but is still "
+ "marked as being held by %d node(s). I found %d node(s) "
+ "holding it. Current networkstatus is %ld hours old. "
+ "Hashtable badness is %d.",
+ (int)((now - (*mdp)->last_listed) / 3600),
+ (int)((now - cutoff) / 3600),
+ held_by_nodes,
+ smartlist_len(nodes),
+ networkstatus_age / 3600,
+ ht_badness);
+
+ SMARTLIST_FOREACH_BEGIN(nodes, const node_t *, node) {
+ const char *rs_match = "No RS";
+ const char *rs_present = "";
+ if (node->rs) {
+ if (tor_memeq(node->rs->descriptor_digest,
+ (*mdp)->digest, DIGEST256_LEN)) {
+ rs_match = "Microdesc digest in RS matches";
+ } else {
+ rs_match = "Microdesc digest in RS does not match";
+ }
+ if (ns) {
+ /* This should be impossible, but let's see! */
+ rs_present = " RS not present in networkstatus.";
+ SMARTLIST_FOREACH(ns->routerstatus_list, routerstatus_t *,rs, {
+ if (rs == node->rs) {
+ rs_present = " RS okay in networkstatus.";
+ }
+ });
+ }
+ }
+ log_warn(LD_BUG, " [%d]: ID=%s. md=%p, rs=%p, ri=%p. %s.%s",
+ node_sl_idx,
+ hex_str(node->identity, DIGEST_LEN),
+ node->md, node->rs, node->ri, rs_match, rs_present);
+ } SMARTLIST_FOREACH_END(node);
+ smartlist_free(nodes);
+ (*mdp)->last_listed = now;
+ }
+
+ ++kept;
+ mdp = HT_NEXT(microdesc_map, &cache->map, mdp);
+ }
+ }
+
+ if (dropped) {
+ log_info(LD_DIR, "Removed %d/%d microdescriptors as old.",
+ dropped,dropped+kept);
+ cache->bytes_dropped += bytes_dropped;
+ }
+}
+
+static int
+should_rebuild_md_cache(microdesc_cache_t *cache)
+{
+ const size_t old_len =
+ cache->cache_content ? cache->cache_content->size : 0;
+ const size_t journal_len = cache->journal_len;
+ const size_t dropped = cache->bytes_dropped;
+
+ if (journal_len < 16384)
+ return 0; /* Don't bother, not enough has happened yet. */
+ if (dropped > (journal_len + old_len) / 3)
+ return 1; /* We could save 1/3 or more of the currently used space. */
+ if (journal_len > old_len / 2)
+ return 1; /* We should append to the regular file */
+
+ return 0;
+}
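+
+/* Worked example with made-up sizes: old_len = 100000, journal_len = 60000,
+ * bytes_dropped = 20000. The journal clears the 16384-byte floor; dropped
+ * (20000) is below (60000 + 100000) / 3 = 53333, so the second test fails;
+ * but journal_len (60000) exceeds old_len / 2 (50000), so the function
+ * returns 1 and the journal gets folded into the main cache file. */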
+
+/**
+ * Mark <b>md</b> as having no body, and release any storage previously held
+ * by its body.
+ */
+static void
+microdesc_wipe_body(microdesc_t *md)
+{
+ if (!md)
+ return;
+
+ if (md->saved_location != SAVED_IN_CACHE)
+ tor_free(md->body);
+
+ md->off = 0;
+ md->saved_location = SAVED_NOWHERE;
+ md->body = NULL;
+ md->bodylen = 0;
+ md->no_save = 1;
+}
+
+/** Regenerate the main cache file for <b>cache</b>, clear the journal file,
+ * and update every microdesc_t in the cache with pointers to its new
+ * location. If <b>force</b> is true, do this unconditionally. If
+ * <b>force</b> is false, do it only if we expect to save space on disk. */
+int
+microdesc_cache_rebuild(microdesc_cache_t *cache, int force)
+{
+ open_file_t *open_file;
+ int fd = -1, res;
+ microdesc_t **mdp;
+ smartlist_t *wrote;
+ ssize_t size;
+ off_t off = 0, off_real;
+ int orig_size, new_size;
+
+ if (cache == NULL) {
+ cache = the_microdesc_cache;
+ if (cache == NULL)
+ return 0;
+ }
+
+ /* Remove dead descriptors */
+ microdesc_cache_clean(cache, 0/*cutoff*/, 0/*force*/);
+
+ if (!force && !should_rebuild_md_cache(cache))
+ return 0;
+
+ log_info(LD_DIR, "Rebuilding the microdescriptor cache...");
+
+ orig_size = (int)(cache->cache_content ? cache->cache_content->size : 0);
+ orig_size += (int)cache->journal_len;
+
+ fd = start_writing_to_file(cache->cache_fname,
+ OPEN_FLAGS_REPLACE|O_BINARY,
+ 0600, &open_file);
+ if (fd < 0)
+ return -1;
+
+ wrote = smartlist_new();
+
+ HT_FOREACH(mdp, microdesc_map, &cache->map) {
+ microdesc_t *md = *mdp;
+ size_t annotation_len;
+ if (md->no_save || !md->body)
+ continue;
+
+ size = dump_microdescriptor(fd, md, &annotation_len);
+ if (size < 0) {
+ microdesc_wipe_body(md);
+
+ /* rewind, in case it was a partial write. */
+ tor_fd_setpos(fd, off);
+ continue;
+ }
+ tor_assert(((size_t)size) == annotation_len + md->bodylen);
+ md->off = off + annotation_len;
+ off += size;
+ off_real = tor_fd_getpos(fd);
+ if (off_real != off) {
+ log_warn(LD_BUG, "Discontinuity in position in microdescriptor cache. "
+ "By my count, I'm at %"PRId64
+ ", but I should be at %"PRId64,
+ (int64_t)(off), (int64_t)(off_real));
+ if (off_real >= 0)
+ off = off_real;
+ }
+ if (md->saved_location != SAVED_IN_CACHE) {
+ tor_free(md->body);
+ md->saved_location = SAVED_IN_CACHE;
+ }
+ smartlist_add(wrote, md);
+ }
+
+ /* We must do this unmap _before_ we call finish_writing_to_file(), or
+ * windows will not actually replace the file. */
+ if (cache->cache_content) {
+ res = tor_munmap_file(cache->cache_content);
+ if (res != 0) {
+ log_warn(LD_FS,
+ "Failed to unmap old microdescriptor cache while rebuilding");
+ }
+ cache->cache_content = NULL;
+ }
+
+ if (finish_writing_to_file(open_file) < 0) {
+ log_warn(LD_DIR, "Error rebuilding microdescriptor cache: %s",
+ strerror(errno));
+ /* Okay. Let's avoid making things worse elsewhere. */
+ cache->cache_content = NULL;
+ HT_FOREACH(mdp, microdesc_map, &cache->map) {
+ microdesc_t *md = *mdp;
+ if (md->saved_location == SAVED_IN_CACHE) {
+ microdesc_wipe_body(md);
+ }
+ }
+ smartlist_free(wrote);
+ return -1;
+ }
+
+ cache->cache_content = tor_mmap_file(cache->cache_fname);
+
+ if (!cache->cache_content && smartlist_len(wrote)) {
+ log_err(LD_DIR, "Couldn't map file that we just wrote to %s!",
+ cache->cache_fname);
+ smartlist_free(wrote);
+ return -1;
+ }
+ SMARTLIST_FOREACH_BEGIN(wrote, microdesc_t *, md) {
+ tor_assert(md->saved_location == SAVED_IN_CACHE);
+ md->body = (char*)cache->cache_content->data + md->off;
+ if (PREDICT_UNLIKELY(
+ md->bodylen < 9 || fast_memneq(md->body, "onion-key", 9) != 0)) {
+ /* XXXX once bug 2022 is solved, we can kill this block and turn it
+ * into just the tor_assert(fast_memeq) */
+ off_t avail = cache->cache_content->size - md->off;
+ char *bad_str;
+ tor_assert(avail >= 0);
+ bad_str = tor_strndup(md->body, MIN(128, (size_t)avail));
+ log_err(LD_BUG, "After rebuilding microdesc cache, offsets seem wrong. "
+ "At offset %d, I expected to find a microdescriptor starting "
+ "with \"onion-key\". Instead I got %s.",
+ (int)md->off, escaped(bad_str));
+ tor_free(bad_str);
+ tor_assert(fast_memeq(md->body, "onion-key", 9));
+ }
+ } SMARTLIST_FOREACH_END(md);
+
+ smartlist_free(wrote);
+
+ write_str_to_file(cache->journal_fname, "", 1);
+ cache->journal_len = 0;
+ cache->bytes_dropped = 0;
+
+ new_size = cache->cache_content ? (int)cache->cache_content->size : 0;
+ log_info(LD_DIR, "Done rebuilding microdesc cache. "
+ "Saved %d bytes; %d still used.",
+ orig_size-new_size, new_size);
+
+ return 0;
+}
+
+/** Make sure that the reference count of every microdescriptor in cache is
+ * accurate. */
+void
+microdesc_check_counts(void)
+{
+ microdesc_t **mdp;
+ if (!the_microdesc_cache)
+ return;
+
+ HT_FOREACH(mdp, microdesc_map, &the_microdesc_cache->map) {
+ microdesc_t *md = *mdp;
+ unsigned int found=0;
+ const smartlist_t *nodes = nodelist_get_list();
+ SMARTLIST_FOREACH(nodes, node_t *, node, {
+ if (node->md == md) {
+ ++found;
+ }
+ });
+ tor_assert(found == md->held_by_nodes);
+ }
+}
+
+/** Deallocate a single microdescriptor. Note: the microdescriptor MUST have
+ * previously been removed from the cache if it had ever been inserted. */
+void
+microdesc_free_(microdesc_t *md, const char *fname, int lineno)
+{
+ if (!md)
+ return;
+
+ /* Make sure that the microdesc was really removed from the appropriate data
+ structures. */
+ if (md->held_in_map) {
+ microdesc_cache_t *cache = get_microdesc_cache_noload();
+ microdesc_t *md2 = HT_FIND(microdesc_map, &cache->map, md);
+ if (md2 == md) {
+ log_warn(LD_BUG, "microdesc_free() called from %s:%d, but md was still "
+ "in microdesc_map", fname, lineno);
+ HT_REMOVE(microdesc_map, &cache->map, md);
+ } else {
+ log_warn(LD_BUG, "microdesc_free() called from %s:%d with held_in_map "
+ "set, but microdesc was not in the map.", fname, lineno);
+ }
+ tor_fragile_assert();
+ }
+ if (md->held_by_nodes) {
+ microdesc_cache_t *cache = get_microdesc_cache_noload();
+ int found=0;
+ const smartlist_t *nodes = nodelist_get_list();
+ const int ht_badness = HT_REP_IS_BAD_(microdesc_map, &cache->map);
+ SMARTLIST_FOREACH(nodes, node_t *, node, {
+ if (node->md == md) {
+ ++found;
+ node->md = NULL;
+ }
+ });
+ if (found) {
+ log_warn(LD_BUG, "microdesc_free() called from %s:%d, but md was still "
+ "referenced %d node(s); held_by_nodes == %u, ht_badness == %d",
+ fname, lineno, found, md->held_by_nodes, ht_badness);
+ } else {
+ log_warn(LD_BUG, "microdesc_free() called from %s:%d with held_by_nodes "
+ "set to %u, but md was not referenced by any nodes. "
+ "ht_badness == %d",
+ fname, lineno, md->held_by_nodes, ht_badness);
+ }
+ tor_fragile_assert();
+ }
+ //tor_assert(md->held_in_map == 0);
+ //tor_assert(md->held_by_nodes == 0);
+
+ if (md->onion_pkey)
+ crypto_pk_free(md->onion_pkey);
+ tor_free(md->onion_curve25519_pkey);
+ tor_free(md->ed25519_identity_pkey);
+ if (md->body && md->saved_location != SAVED_IN_CACHE)
+ tor_free(md->body);
+
+ if (md->family) {
+ SMARTLIST_FOREACH(md->family, char *, cp, tor_free(cp));
+ smartlist_free(md->family);
+ }
+ short_policy_free(md->exit_policy);
+ short_policy_free(md->ipv6_exit_policy);
+
+ tor_free(md);
+}
+
+/** Free all storage held in the microdesc.c module. */
+void
+microdesc_free_all(void)
+{
+ if (the_microdesc_cache) {
+ microdesc_cache_clear(the_microdesc_cache);
+ tor_free(the_microdesc_cache->cache_fname);
+ tor_free(the_microdesc_cache->journal_fname);
+ tor_free(the_microdesc_cache);
+ }
+
+ if (outdated_dirserver_list) {
+ SMARTLIST_FOREACH(outdated_dirserver_list, char *, cp, tor_free(cp));
+ smartlist_free(outdated_dirserver_list);
+ }
+}
+
+/** If there is a microdescriptor in <b>cache</b> whose sha256 digest is
+ * <b>d</b>, return it. Otherwise return NULL. */
+microdesc_t *
+microdesc_cache_lookup_by_digest256(microdesc_cache_t *cache, const char *d)
+{
+ microdesc_t *md, search;
+ if (!cache)
+ cache = get_microdesc_cache();
+ memcpy(search.digest, d, DIGEST256_LEN);
+ md = HT_FIND(microdesc_map, &cache->map, &search);
+ return md;
+}
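+
+/* Example use (same pattern as microdesc_list_missing_digest256() below):
+ * given a routerstatus <b>rs</b> from a microdesc-flavored consensus, check
+ * whether the microdescriptor it references is already cached. Passing NULL
+ * for the cache uses (and, if needed, loads) the global cache:
+ *
+ *   microdesc_t *md =
+ *     microdesc_cache_lookup_by_digest256(NULL, rs->descriptor_digest);
+ *   if (!md) {
+ *     ... schedule a download ...
+ *   }
+ */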
+
+/** Return a smartlist of all the sha256 digests of the microdescriptors that
+ * are listed in <b>ns</b> but not present in <b>cache</b>. Returns pointers
+ * to internals of <b>ns</b>; you should not free the members of the resulting
+ * smartlist. Omit all microdescriptors whose digest appear in <b>skip</b>. */
+smartlist_t *
+microdesc_list_missing_digest256(networkstatus_t *ns, microdesc_cache_t *cache,
+ int downloadable_only, digest256map_t *skip)
+{
+ smartlist_t *result = smartlist_new();
+ time_t now = time(NULL);
+ tor_assert(ns->flavor == FLAV_MICRODESC);
+ SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, routerstatus_t *, rs) {
+ if (microdesc_cache_lookup_by_digest256(cache, rs->descriptor_digest))
+ continue;
+ if (downloadable_only &&
+ !download_status_is_ready(&rs->dl_status, now))
+ continue;
+ if (skip && digest256map_get(skip, (const uint8_t*)rs->descriptor_digest))
+ continue;
+ if (tor_mem_is_zero(rs->descriptor_digest, DIGEST256_LEN))
+ continue;
+ /* XXXX Also skip if we're a noncache and wouldn't use this router.
+ * XXXX NM Microdesc
+ */
+ smartlist_add(result, rs->descriptor_digest);
+ } SMARTLIST_FOREACH_END(rs);
+ return result;
+}
+
+/** Launch download requests for microdescriptors as appropriate.
+ *
+ * Specifically, we should launch download requests if we are configured to
+ * download microdescriptors, and there are some microdescriptors listed in the
+ * current microdesc consensus that we don't have, and either we never asked
+ * for them, or we failed to download them but we're willing to retry.
+ */
+void
+update_microdesc_downloads(time_t now)
+{
+ const or_options_t *options = get_options();
+ networkstatus_t *consensus;
+ smartlist_t *missing;
+ digest256map_t *pending;
+
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+ if (directory_too_idle_to_fetch_descriptors(options, now))
+ return;
+
+ consensus = networkstatus_get_reasonably_live_consensus(now, FLAV_MICRODESC);
+ if (!consensus)
+ return;
+
+ if (!we_fetch_microdescriptors(options))
+ return;
+
+ pending = digest256map_new();
+ list_pending_microdesc_downloads(pending);
+
+ missing = microdesc_list_missing_digest256(consensus,
+ get_microdesc_cache(),
+ 1,
+ pending);
+ digest256map_free(pending, NULL);
+
+ launch_descriptor_downloads(DIR_PURPOSE_FETCH_MICRODESC,
+ missing, NULL, now);
+
+ smartlist_free(missing);
+}
+
+/** For every microdescriptor listed in the current microdescriptor consensus,
+ * update its last_listed field to be at least as recent as the publication
+ * time of the current microdescriptor consensus.
+ */
+void
+update_microdescs_from_networkstatus(time_t now)
+{
+ microdesc_cache_t *cache = get_microdesc_cache();
+ microdesc_t *md;
+ networkstatus_t *ns =
+ networkstatus_get_reasonably_live_consensus(now, FLAV_MICRODESC);
+
+ if (! ns)
+ return;
+
+ tor_assert(ns->flavor == FLAV_MICRODESC);
+
+ SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, routerstatus_t *, rs) {
+ md = microdesc_cache_lookup_by_digest256(cache, rs->descriptor_digest);
+ if (md && ns->valid_after > md->last_listed)
+ md->last_listed = ns->valid_after;
+ } SMARTLIST_FOREACH_END(rs);
+}
+
+/** Return true iff we should prefer to use microdescriptors rather than
+ * routerdescs for building circuits. */
+int
+we_use_microdescriptors_for_circuits(const or_options_t *options)
+{
+ if (options->UseMicrodescriptors == 0)
+ return 0; /* the user explicitly picked no */
+ return 1; /* yes and auto both mean yes */
+}
+
+/** Return true iff we should try to download microdescriptors at all. */
+int
+we_fetch_microdescriptors(const or_options_t *options)
+{
+ if (directory_caches_dir_info(options))
+ return 1;
+ if (options->FetchUselessDescriptors)
+ return 1;
+ return we_use_microdescriptors_for_circuits(options);
+}
+
+/** Return true iff we should try to download router descriptors at all. */
+int
+we_fetch_router_descriptors(const or_options_t *options)
+{
+ if (directory_caches_dir_info(options))
+ return 1;
+ if (options->FetchUselessDescriptors)
+ return 1;
+ return ! we_use_microdescriptors_for_circuits(options);
+}
+
+/** Return the consensus flavor we actually want to use to build circuits. */
+MOCK_IMPL(int,
+usable_consensus_flavor,(void))
+{
+ if (we_use_microdescriptors_for_circuits(get_options())) {
+ return FLAV_MICRODESC;
+ } else {
+ return FLAV_NS;
+ }
+}
diff --git a/src/feature/nodelist/microdesc.h b/src/feature/nodelist/microdesc.h
new file mode 100644
index 0000000000..f11b841cf1
--- /dev/null
+++ b/src/feature/nodelist/microdesc.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file microdesc.h
+ * \brief Header file for microdesc.c.
+ **/
+
+#ifndef TOR_MICRODESC_H
+#define TOR_MICRODESC_H
+
+microdesc_cache_t *get_microdesc_cache(void);
+
+void microdesc_check_counts(void);
+
+smartlist_t *microdescs_add_to_cache(microdesc_cache_t *cache,
+ const char *s, const char *eos, saved_location_t where,
+ int no_save, time_t listed_at,
+ smartlist_t *requested_digests256);
+smartlist_t *microdescs_add_list_to_cache(microdesc_cache_t *cache,
+ smartlist_t *descriptors, saved_location_t where,
+ int no_save);
+
+void microdesc_cache_clean(microdesc_cache_t *cache, time_t cutoff, int force);
+int microdesc_cache_rebuild(microdesc_cache_t *cache, int force);
+int microdesc_cache_reload(microdesc_cache_t *cache);
+void microdesc_cache_clear(microdesc_cache_t *cache);
+
+microdesc_t *microdesc_cache_lookup_by_digest256(microdesc_cache_t *cache,
+ const char *d);
+
+smartlist_t *microdesc_list_missing_digest256(networkstatus_t *ns,
+ microdesc_cache_t *cache,
+ int downloadable_only,
+ digest256map_t *skip);
+
+void microdesc_free_(microdesc_t *md, const char *fname, int line);
+#define microdesc_free(md) do { \
+ microdesc_free_((md), __FILE__, __LINE__); \
+ (md) = NULL; \
+ } while (0)
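+/* The wrapper macro above frees the microdescriptor and then sets the
+ * variable to NULL, so a typical call site is just:
+ *
+ *   microdesc_free(md);
+ *
+ * after which <b>md</b> is NULL and cannot be double-freed through this
+ * pointer. */
+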
+void microdesc_free_all(void);
+
+void update_microdesc_downloads(time_t now);
+void update_microdescs_from_networkstatus(time_t now);
+
+MOCK_DECL(int, usable_consensus_flavor,(void));
+int we_fetch_microdescriptors(const or_options_t *options);
+int we_fetch_router_descriptors(const or_options_t *options);
+int we_use_microdescriptors_for_circuits(const or_options_t *options);
+
+void microdesc_note_outdated_dirserver(const char *relay_digest);
+int microdesc_relay_is_outdated_dirserver(const char *relay_digest);
+void microdesc_reset_outdated_dirservers_list(void);
+
+#endif /* !defined(TOR_MICRODESC_H) */
+
diff --git a/src/feature/nodelist/microdesc_st.h b/src/feature/nodelist/microdesc_st.h
new file mode 100644
index 0000000000..e9dc3e0174
--- /dev/null
+++ b/src/feature/nodelist/microdesc_st.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef MICRODESC_ST_H
+#define MICRODESC_ST_H
+
+struct curve25519_public_key_t;
+struct ed25519_public_key_t;
+struct short_policy_t;
+
+/** A microdescriptor is the smallest amount of information needed to build a
+ * circuit through a router. They are generated by the directory authorities,
+ * using information from the uploaded routerinfo documents. They are not
+ * self-signed, but are rather authenticated by having their hash in a signed
+ * networkstatus document. */
+struct microdesc_t {
+ /** Hashtable node, used to look up the microdesc by its digest. */
+ HT_ENTRY(microdesc_t) node;
+
+ /* Cache information */
+
+ /** When was this microdescriptor last listed in a consensus document?
+ * Once a microdesc has been unlisted long enough, we can drop it.
+ */
+ time_t last_listed;
+ /** Where is this microdescriptor currently stored? */
+ saved_location_bitfield_t saved_location : 3;
+ /** If true, do not attempt to cache this microdescriptor on disk. */
+ unsigned int no_save : 1;
+ /** If true, this microdesc has an entry in the microdesc_map */
+ unsigned int held_in_map : 1;
+ /** Reference count: how many node_ts have a reference to this microdesc? */
+ unsigned int held_by_nodes;
+
+ /** If saved_location == SAVED_IN_CACHE, this field holds the offset of the
+ * microdescriptor in the cache. */
+ off_t off;
+
+ /* The string containing the microdesc. */
+
+ /** A pointer to the encoded body of the microdescriptor. If the
+ * saved_location is SAVED_IN_CACHE, then the body is a pointer into an
+ * mmap'd region. Otherwise, it is a malloc'd string. The string might not
+ * be NUL-terminated; take the length from <b>bodylen</b>. */
+ char *body;
+ /** The length of the microdescriptor in <b>body</b>. */
+ size_t bodylen;
+ /** A SHA256-digest of the microdescriptor. */
+ char digest[DIGEST256_LEN];
+
+ /* Fields in the microdescriptor. */
+
+ /** As routerinfo_t.onion_pkey */
+ crypto_pk_t *onion_pkey;
+ /** As routerinfo_t.onion_curve25519_pkey */
+ struct curve25519_public_key_t *onion_curve25519_pkey;
+ /** Ed25519 identity key, if included. */
+ struct ed25519_public_key_t *ed25519_identity_pkey;
+ /** As routerinfo_t.ipv6_addr */
+ tor_addr_t ipv6_addr;
+ /** As routerinfo_t.ipv6_orport */
+ uint16_t ipv6_orport;
+ /** As routerinfo_t.family */
+ smartlist_t *family;
+ /** IPv4 exit policy summary */
+ struct short_policy_t *exit_policy;
+ /** IPv6 exit policy summary */
+ struct short_policy_t *ipv6_exit_policy;
+};
+
+#endif
diff --git a/src/feature/nodelist/networkstatus.c b/src/feature/nodelist/networkstatus.c
new file mode 100644
index 0000000000..f91e46cdd7
--- /dev/null
+++ b/src/feature/nodelist/networkstatus.c
@@ -0,0 +1,2726 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file networkstatus.c
+ * \brief Functions and structures for handling networkstatus documents as a
+ * client or as a directory cache.
+ *
+ * A consensus networkstatus object is created by the directory
+ * authorities. It authenticates a set of network parameters--most
+ * importantly, the list of all the relays in the network. This list
+ * of relays is represented as an array of routerstatus_t objects.
+ *
+ * There are currently two flavors of consensus. With the older "NS"
+ * flavor, each relay is associated with a digest of its router
+ * descriptor. Tor instances that use this consensus keep the list of
+ * router descriptors as routerinfo_t objects stored and managed in
+ * routerlist.c. With the newer "microdesc" flavor, each relay is
+ * associated with a digest of the microdescriptor that the authorities
+ * made for it. These are stored and managed in microdesc.c. Information
+ * about the router is divided between the networkstatus and the
+ * microdescriptor according to the general rule that microdescriptors
+ * should hold information that changes much less frequently than the
+ * information in the networkstatus.
+ *
+ * Modern clients use microdescriptor networkstatuses. Directory caches
+ * need to keep both kinds of networkstatus document, so they can serve them.
+ *
+ * This module manages fetching, holding, storing, updating, and
+ * validating networkstatus objects. The download-and-validate process
+ * is slightly complicated by the fact that the keys you need to
+ * validate a consensus are stored in the authority certificates, which
+ * you might not have yet when you download the consensus.
+ */
+
+#define NETWORKSTATUS_PRIVATE
+#include "or/or.h"
+#include "or/bridges.h"
+#include "or/channel.h"
+#include "or/circuitmux.h"
+#include "or/circuitmux_ewma.h"
+#include "or/circuitstats.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/connection_or.h"
+#include "or/consdiffmgr.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/directory.h"
+#include "or/dirserv.h"
+#include "or/dos.h"
+#include "or/entrynodes.h"
+#include "or/hibernate.h"
+#include "or/main.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/protover.h"
+#include "or/relay.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "or/scheduler.h"
+#include "or/transports.h"
+#include "or/torcert.h"
+#include "or/channelpadding.h"
+#include "or/voting_schedule.h"
+
+#include "or/dirauth/dirvote.h"
+#include "or/dirauth/mode.h"
+#include "or/dirauth/shared_random.h"
+
+#include "or/authority_cert_st.h"
+#include "or/dir_connection_st.h"
+#include "or/dir_server_st.h"
+#include "or/document_signature_st.h"
+#include "or/networkstatus_st.h"
+#include "or/networkstatus_voter_info_st.h"
+#include "or/ns_detached_signatures_st.h"
+#include "or/node_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerlist_st.h"
+#include "or/vote_microdesc_hash_st.h"
+#include "or/vote_routerstatus_st.h"
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+/** Most recently received and validated v3 "ns"-flavored consensus network
+ * status. */
+STATIC networkstatus_t *current_ns_consensus = NULL;
+
+/** Most recently received and validated v3 "microdesc"-flavored consensus
+ * network status. */
+STATIC networkstatus_t *current_md_consensus = NULL;
+
+/** A v3 consensus networkstatus that we've received, but which we don't
+ * have enough certificates to be happy about. */
+typedef struct consensus_waiting_for_certs_t {
+ /** The consensus itself. */
+ networkstatus_t *consensus;
+ /** The encoded version of the consensus, nul-terminated. */
+ char *body;
+ /** When did we set the current value of consensus_waiting_for_certs? If
+ * this is too recent, we shouldn't try to fetch a new consensus for a
+ * little while, to give ourselves time to get certificates for this one. */
+ time_t set_at;
+ /** Set to 1 if we've been holding on to it for so long we should maybe
+ * treat it as being bad. */
+ int dl_failed;
+} consensus_waiting_for_certs_t;
+
+/** An array, for each flavor of consensus we might want, of consensuses that
+ * we have downloaded, but which we cannot verify due to having insufficient
+ * authority certificates. */
+static consensus_waiting_for_certs_t
+ consensus_waiting_for_certs[N_CONSENSUS_FLAVORS];
+
+/** A time before which we shouldn't try to replace the current consensus:
+ * this will be at some point after the next consensus becomes valid, but
+ * before the current consensus becomes invalid. */
+static time_t time_to_download_next_consensus[N_CONSENSUS_FLAVORS];
+/** Download status for the current consensus networkstatus. */
+static download_status_t consensus_dl_status[N_CONSENSUS_FLAVORS] =
+ {
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_FAILURE, 0, 0 },
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_FAILURE, 0, 0 },
+ };
+
+#define N_CONSENSUS_BOOTSTRAP_SCHEDULES 2
+#define CONSENSUS_BOOTSTRAP_SOURCE_AUTHORITY 0
+#define CONSENSUS_BOOTSTRAP_SOURCE_ANY_DIRSERVER 1
+
+/* Using DL_SCHED_INCREMENT_ATTEMPT on these schedules means that
+ * download_status_increment_failure won't increment these entries.
+ * However, any bootstrap connection failures that occur after we have
+ * a valid consensus will count against the failure counts on the non-bootstrap
+ * schedules. There should only be one of these, as all the others will have
+ * been cancelled. (This doesn't seem to be a significant issue.) */
+static download_status_t
+ consensus_bootstrap_dl_status[N_CONSENSUS_BOOTSTRAP_SCHEDULES] =
+ {
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_AUTHORITY,
+ DL_SCHED_INCREMENT_ATTEMPT, 0, 0 },
+ /* During bootstrap, DL_WANT_ANY_DIRSERVER means "use fallbacks". */
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_ATTEMPT, 0, 0 },
+ };
+
+/** True iff we have logged a warning about this OR's version being older than
+ * listed by the authorities. */
+static int have_warned_about_old_version = 0;
+/** True iff we have logged a warning about this OR's version being newer than
+ * listed by the authorities. */
+static int have_warned_about_new_version = 0;
+
+static void update_consensus_bootstrap_multiple_downloads(
+ time_t now,
+ const or_options_t *options);
+static int networkstatus_check_required_protocols(const networkstatus_t *ns,
+ int client_mode,
+ char **warning_out);
+
+/** Forget that we've warned about anything networkstatus-related, so we will
+ * give fresh warnings if the same behavior happens again. */
+void
+networkstatus_reset_warnings(void)
+{
+ SMARTLIST_FOREACH(nodelist_get_list(), node_t *, node,
+ node->name_lookup_warned = 0);
+
+ have_warned_about_old_version = 0;
+ have_warned_about_new_version = 0;
+}
+
+/** Reset the descriptor download failure count on all networkstatus docs, so
+ * that we can retry any long-failed documents immediately.
+ */
+void
+networkstatus_reset_download_failures(void)
+{
+ int i;
+
+ log_debug(LD_GENERAL,
+ "In networkstatus_reset_download_failures()");
+
+ for (i=0; i < N_CONSENSUS_FLAVORS; ++i)
+ download_status_reset(&consensus_dl_status[i]);
+
+ for (i=0; i < N_CONSENSUS_BOOTSTRAP_SCHEDULES; ++i)
+ download_status_reset(&consensus_bootstrap_dl_status[i]);
+}
+
+/**
+ * Read and return the cached consensus of type <b>flavorname</b>. If
+ * <b>unverified_consensus</b> is true, get the one we haven't verified yet.
+ * Return NULL if the file isn't there. */
+static char *
+networkstatus_read_cached_consensus_impl(int flav,
+ const char *flavorname,
+ int unverified_consensus)
+{
+ char buf[128];
+ const char *prefix;
+ if (unverified_consensus) {
+ prefix = "unverified";
+ } else {
+ prefix = "cached";
+ }
+ if (flav == FLAV_NS) {
+ tor_snprintf(buf, sizeof(buf), "%s-consensus", prefix);
+ } else {
+ tor_snprintf(buf, sizeof(buf), "%s-%s-consensus", prefix, flavorname);
+ }
+
+ char *filename = get_cachedir_fname(buf);
+ char *result = read_file_to_str(filename, RFTS_IGNORE_MISSING, NULL);
+ tor_free(filename);
+ return result;
+}
+
+/** Return a new string containing the current cached consensus of flavor
+ * <b>flavorname</b>. */
+char *
+networkstatus_read_cached_consensus(const char *flavorname)
+{
+ int flav = networkstatus_parse_flavor_name(flavorname);
+ if (flav < 0)
+ return NULL;
+ return networkstatus_read_cached_consensus_impl(flav, flavorname, 0);
+}
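+
+/* For example, given the filename scheme above, the "ns" flavor is read from
+ * "cached-consensus" (or "unverified-consensus"), while the microdesc flavor
+ * (flavor name "microdesc") is read from "cached-microdesc-consensus" (or
+ * "unverified-microdesc-consensus"), all relative to the cache directory. */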
+
+/** Read every cached v3 consensus networkstatus from the disk. */
+int
+router_reload_consensus_networkstatus(void)
+{
+ const unsigned int flags = NSSET_FROM_CACHE | NSSET_DONT_DOWNLOAD_CERTS;
+ int flav;
+
+ /* FFFF Suppress warnings if cached consensus is bad? */
+ for (flav = 0; flav < N_CONSENSUS_FLAVORS; ++flav) {
+ const char *flavor = networkstatus_get_flavor_name(flav);
+ char *s = networkstatus_read_cached_consensus_impl(flav, flavor, 0);
+ if (s) {
+ if (networkstatus_set_current_consensus(s, flavor, flags, NULL) < -1) {
+ log_warn(LD_FS, "Couldn't load consensus %s networkstatus from cache",
+ flavor);
+ }
+ tor_free(s);
+ }
+
+ s = networkstatus_read_cached_consensus_impl(flav, flavor, 1);
+ if (s) {
+ if (networkstatus_set_current_consensus(s, flavor,
+ flags | NSSET_WAS_WAITING_FOR_CERTS,
+ NULL)) {
+ log_info(LD_FS, "Couldn't load unverified consensus %s networkstatus "
+ "from cache", flavor);
+ }
+ tor_free(s);
+ }
+ }
+
+ update_certificate_downloads(time(NULL));
+
+ routers_update_all_from_networkstatus(time(NULL), 3);
+ update_microdescs_from_networkstatus(time(NULL));
+
+ return 0;
+}
+
+/** Free all storage held by the vote_routerstatus object <b>rs</b>. */
+void
+vote_routerstatus_free_(vote_routerstatus_t *rs)
+{
+ vote_microdesc_hash_t *h, *next;
+ if (!rs)
+ return;
+ tor_free(rs->version);
+ tor_free(rs->protocols);
+ tor_free(rs->status.exitsummary);
+ for (h = rs->microdesc; h; h = next) {
+ tor_free(h->microdesc_hash_line);
+ next = h->next;
+ tor_free(h);
+ }
+ tor_free(rs);
+}
+
+/** Free all storage held by the routerstatus object <b>rs</b>. */
+void
+routerstatus_free_(routerstatus_t *rs)
+{
+ if (!rs)
+ return;
+ tor_free(rs->exitsummary);
+ tor_free(rs);
+}
+
+/** Free all storage held in <b>sig</b> */
+void
+document_signature_free_(document_signature_t *sig)
+{
+ tor_free(sig->signature);
+ tor_free(sig);
+}
+
+/** Return a newly allocated copy of <b>sig</b> */
+document_signature_t *
+document_signature_dup(const document_signature_t *sig)
+{
+ document_signature_t *r = tor_memdup(sig, sizeof(document_signature_t));
+ if (r->signature)
+ r->signature = tor_memdup(sig->signature, sig->signature_len);
+ return r;
+}
+
+/** Free all storage held in <b>ns</b>. */
+void
+networkstatus_vote_free_(networkstatus_t *ns)
+{
+ if (!ns)
+ return;
+
+ tor_free(ns->client_versions);
+ tor_free(ns->server_versions);
+ tor_free(ns->recommended_client_protocols);
+ tor_free(ns->recommended_relay_protocols);
+ tor_free(ns->required_client_protocols);
+ tor_free(ns->required_relay_protocols);
+
+ if (ns->known_flags) {
+ SMARTLIST_FOREACH(ns->known_flags, char *, c, tor_free(c));
+ smartlist_free(ns->known_flags);
+ }
+ if (ns->weight_params) {
+ SMARTLIST_FOREACH(ns->weight_params, char *, c, tor_free(c));
+ smartlist_free(ns->weight_params);
+ }
+ if (ns->net_params) {
+ SMARTLIST_FOREACH(ns->net_params, char *, c, tor_free(c));
+ smartlist_free(ns->net_params);
+ }
+ if (ns->supported_methods) {
+ SMARTLIST_FOREACH(ns->supported_methods, char *, c, tor_free(c));
+ smartlist_free(ns->supported_methods);
+ }
+ if (ns->package_lines) {
+ SMARTLIST_FOREACH(ns->package_lines, char *, c, tor_free(c));
+ smartlist_free(ns->package_lines);
+ }
+ if (ns->voters) {
+ SMARTLIST_FOREACH_BEGIN(ns->voters, networkstatus_voter_info_t *, voter) {
+ tor_free(voter->nickname);
+ tor_free(voter->address);
+ tor_free(voter->contact);
+ if (voter->sigs) {
+ SMARTLIST_FOREACH(voter->sigs, document_signature_t *, sig,
+ document_signature_free(sig));
+ smartlist_free(voter->sigs);
+ }
+ tor_free(voter);
+ } SMARTLIST_FOREACH_END(voter);
+ smartlist_free(ns->voters);
+ }
+ authority_cert_free(ns->cert);
+
+ if (ns->routerstatus_list) {
+ if (ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_OPINION) {
+ SMARTLIST_FOREACH(ns->routerstatus_list, vote_routerstatus_t *, rs,
+ vote_routerstatus_free(rs));
+ } else {
+ SMARTLIST_FOREACH(ns->routerstatus_list, routerstatus_t *, rs,
+ routerstatus_free(rs));
+ }
+
+ smartlist_free(ns->routerstatus_list);
+ }
+
+ digestmap_free(ns->desc_digest_map, NULL);
+
+ if (ns->sr_info.commits) {
+ dirvote_clear_commits(ns);
+ }
+ tor_free(ns->sr_info.previous_srv);
+ tor_free(ns->sr_info.current_srv);
+
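+ /* Fill the structure with a nonzero poison byte (11) before freeing it, so
+ * that any stale pointer to this consensus is more likely to fail loudly
+ * instead of silently reading zeroed memory. */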
+ memwipe(ns, 11, sizeof(*ns));
+ tor_free(ns);
+}
+
+/** Return the voter info from <b>vote</b> for the voter whose identity digest
+ * is <b>identity</b>, or NULL if no such voter is associated with
+ * <b>vote</b>. */
+networkstatus_voter_info_t *
+networkstatus_get_voter_by_id(networkstatus_t *vote,
+ const char *identity)
+{
+ if (!vote || !vote->voters)
+ return NULL;
+ SMARTLIST_FOREACH(vote->voters, networkstatus_voter_info_t *, voter,
+ if (fast_memeq(voter->identity_digest, identity, DIGEST_LEN))
+ return voter);
+ return NULL;
+}
+
+/** Return the signature made by <b>voter</b> using the algorithm
+ * <b>alg</b>, or NULL if none is found. */
+document_signature_t *
+networkstatus_get_voter_sig_by_alg(const networkstatus_voter_info_t *voter,
+ digest_algorithm_t alg)
+{
+ if (!voter->sigs)
+ return NULL;
+ SMARTLIST_FOREACH(voter->sigs, document_signature_t *, sig,
+ if (sig->alg == alg)
+ return sig);
+ return NULL;
+}
+
+/** Check whether the signature <b>sig</b> is correctly signed with the
+ * signing key in <b>cert</b>. Return -1 if <b>cert</b> doesn't match the
+ * signing key; otherwise set the good_signature or bad_signature flag on
+ * <b>voter</b>, and return 0. */
+int
+networkstatus_check_document_signature(const networkstatus_t *consensus,
+ document_signature_t *sig,
+ const authority_cert_t *cert)
+{
+ char key_digest[DIGEST_LEN];
+ const int dlen = sig->alg == DIGEST_SHA1 ? DIGEST_LEN : DIGEST256_LEN;
+ char *signed_digest;
+ size_t signed_digest_len;
+
+ if (crypto_pk_get_digest(cert->signing_key, key_digest)<0)
+ return -1;
+ if (tor_memneq(sig->signing_key_digest, key_digest, DIGEST_LEN) ||
+ tor_memneq(sig->identity_digest, cert->cache_info.identity_digest,
+ DIGEST_LEN))
+ return -1;
+
+ if (authority_cert_is_blacklisted(cert)) {
+ /* We implement blacklisting for authority signing keys by treating
+ * all their signatures as always bad. That way we don't get into
+ * crazy loops of dropping and re-fetching signatures. */
+ log_warn(LD_DIR, "Ignoring a consensus signature made with deprecated"
+ " signing key %s",
+ hex_str(cert->signing_key_digest, DIGEST_LEN));
+ sig->bad_signature = 1;
+ return 0;
+ }
+
+ signed_digest_len = crypto_pk_keysize(cert->signing_key);
+ signed_digest = tor_malloc(signed_digest_len);
+ if (crypto_pk_public_checksig(cert->signing_key,
+ signed_digest,
+ signed_digest_len,
+ sig->signature,
+ sig->signature_len) < dlen ||
+ tor_memneq(signed_digest, consensus->digests.d[sig->alg], dlen)) {
+ log_warn(LD_DIR, "Got a bad signature on a networkstatus vote");
+ sig->bad_signature = 1;
+ } else {
+ sig->good_signature = 1;
+ }
+ tor_free(signed_digest);
+ return 0;
+}
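+/* Callers such as networkstatus_check_consensus_signature() below treat a
+ * -1 return as "we don't have the matching certificate yet" and schedule a
+ * certificate download; a 0 return means the verdict has already been
+ * recorded in sig->good_signature or sig->bad_signature. */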
+
+/** Given a v3 networkstatus consensus in <b>consensus</b>, check every
+ * as-yet-unchecked signature on <b>consensus</b>. Return 1 if there is a
+ * signature from every recognized authority on it, 0 if there are
+ * enough good signatures from recognized authorities on it, -1 if we might
+ * get enough good signatures by fetching missing certificates, and -2
+ * otherwise. Log messages at INFO or WARN: if <b>warn</b> is over 1, warn
+ * about every problem; if warn is at least 1, warn only if we can't get
+ * enough signatures; if warn is negative, log nothing at all. */
+int
+networkstatus_check_consensus_signature(networkstatus_t *consensus,
+ int warn)
+{
+ int n_good = 0;
+ int n_missing_key = 0, n_dl_failed_key = 0;
+ int n_bad = 0;
+ int n_unknown = 0;
+ int n_no_signature = 0;
+ int n_v3_authorities = get_n_authorities(V3_DIRINFO);
+ int n_required = n_v3_authorities/2 + 1;
+ smartlist_t *list_good = smartlist_new();
+ smartlist_t *list_no_signature = smartlist_new();
+ smartlist_t *need_certs_from = smartlist_new();
+ smartlist_t *unrecognized = smartlist_new();
+ smartlist_t *missing_authorities = smartlist_new();
+ int severity;
+ time_t now = time(NULL);
+
+ tor_assert(consensus->type == NS_TYPE_CONSENSUS);
+
+ SMARTLIST_FOREACH_BEGIN(consensus->voters, networkstatus_voter_info_t *,
+ voter) {
+ int good_here = 0;
+ int bad_here = 0;
+ int unknown_here = 0;
+ int missing_key_here = 0, dl_failed_key_here = 0;
+ SMARTLIST_FOREACH_BEGIN(voter->sigs, document_signature_t *, sig) {
+ if (!sig->good_signature && !sig->bad_signature &&
+ sig->signature) {
+ /* we can try to check the signature. */
+ int is_v3_auth = trusteddirserver_get_by_v3_auth_digest(
+ sig->identity_digest) != NULL;
+ authority_cert_t *cert =
+ authority_cert_get_by_digests(sig->identity_digest,
+ sig->signing_key_digest);
+ tor_assert(tor_memeq(sig->identity_digest, voter->identity_digest,
+ DIGEST_LEN));
+
+ if (!is_v3_auth) {
+ smartlist_add(unrecognized, voter);
+ ++unknown_here;
+ continue;
+ } else if (!cert || cert->expires < now) {
+ smartlist_add(need_certs_from, voter);
+ ++missing_key_here;
+ if (authority_cert_dl_looks_uncertain(sig->identity_digest))
+ ++dl_failed_key_here;
+ continue;
+ }
+ if (networkstatus_check_document_signature(consensus, sig, cert) < 0) {
+ smartlist_add(need_certs_from, voter);
+ ++missing_key_here;
+ if (authority_cert_dl_looks_uncertain(sig->identity_digest))
+ ++dl_failed_key_here;
+ continue;
+ }
+ }
+ if (sig->good_signature)
+ ++good_here;
+ else if (sig->bad_signature)
+ ++bad_here;
+ } SMARTLIST_FOREACH_END(sig);
+
+ if (good_here) {
+ ++n_good;
+ smartlist_add(list_good, voter->nickname);
+ } else if (bad_here) {
+ ++n_bad;
+ } else if (missing_key_here) {
+ ++n_missing_key;
+ if (dl_failed_key_here)
+ ++n_dl_failed_key;
+ } else if (unknown_here) {
+ ++n_unknown;
+ } else {
+ ++n_no_signature;
+ smartlist_add(list_no_signature, voter->nickname);
+ }
+ } SMARTLIST_FOREACH_END(voter);
+
+ /* Now see whether we're missing any voters entirely. */
+ SMARTLIST_FOREACH(router_get_trusted_dir_servers(),
+ dir_server_t *, ds,
+ {
+ if ((ds->type & V3_DIRINFO) &&
+ !networkstatus_get_voter_by_id(consensus, ds->v3_identity_digest))
+ smartlist_add(missing_authorities, ds);
+ });
+
+ if (warn > 1 || (warn >= 0 &&
+ (n_good + n_missing_key - n_dl_failed_key < n_required))) {
+ severity = LOG_WARN;
+ } else {
+ severity = LOG_INFO;
+ }
+
+ if (warn >= 0) {
+ SMARTLIST_FOREACH(unrecognized, networkstatus_voter_info_t *, voter,
+ {
+ tor_log(severity, LD_DIR, "Consensus includes unrecognized authority "
+ "'%s' at %s:%d (contact %s; identity %s)",
+ voter->nickname, voter->address, (int)voter->dir_port,
+ voter->contact?voter->contact:"n/a",
+ hex_str(voter->identity_digest, DIGEST_LEN));
+ });
+ SMARTLIST_FOREACH(need_certs_from, networkstatus_voter_info_t *, voter,
+ {
+ tor_log(severity, LD_DIR, "Looks like we need to download a new "
+ "certificate from authority '%s' at %s:%d (contact %s; "
+ "identity %s)",
+ voter->nickname, voter->address, (int)voter->dir_port,
+ voter->contact?voter->contact:"n/a",
+ hex_str(voter->identity_digest, DIGEST_LEN));
+ });
+ SMARTLIST_FOREACH(missing_authorities, dir_server_t *, ds,
+ {
+ tor_log(severity, LD_DIR, "Consensus does not include configured "
+ "authority '%s' at %s:%d (identity %s)",
+ ds->nickname, ds->address, (int)ds->dir_port,
+ hex_str(ds->v3_identity_digest, DIGEST_LEN));
+ });
+ {
+ char *joined;
+ smartlist_t *sl = smartlist_new();
+ char *tmp = smartlist_join_strings(list_good, " ", 0, NULL);
+ smartlist_add_asprintf(sl,
+ "A consensus needs %d good signatures from recognized "
+ "authorities for us to accept it. "
+ "This %s one has %d (%s).",
+ n_required,
+ networkstatus_get_flavor_name(consensus->flavor),
+ n_good, tmp);
+ tor_free(tmp);
+ if (n_no_signature) {
+ tmp = smartlist_join_strings(list_no_signature, " ", 0, NULL);
+ smartlist_add_asprintf(sl,
+ "%d (%s) of the authorities we know didn't sign it.",
+ n_no_signature, tmp);
+ tor_free(tmp);
+ }
+ if (n_unknown) {
+ smartlist_add_asprintf(sl,
+ "It has %d signatures from authorities we don't "
+ "recognize.", n_unknown);
+ }
+ if (n_bad) {
+ smartlist_add_asprintf(sl, "%d of the signatures on it didn't verify "
+ "correctly.", n_bad);
+ }
+ if (n_missing_key) {
+ smartlist_add_asprintf(sl,
+ "We were unable to check %d of the signatures, "
+ "because we were missing the keys.", n_missing_key);
+ }
+ joined = smartlist_join_strings(sl, " ", 0, NULL);
+ tor_log(severity, LD_DIR, "%s", joined);
+ tor_free(joined);
+ SMARTLIST_FOREACH(sl, char *, c, tor_free(c));
+ smartlist_free(sl);
+ }
+ }
+
+ smartlist_free(list_good);
+ smartlist_free(list_no_signature);
+ smartlist_free(unrecognized);
+ smartlist_free(need_certs_from);
+ smartlist_free(missing_authorities);
+
+ if (n_good == n_v3_authorities)
+ return 1;
+ else if (n_good >= n_required)
+ return 0;
+ else if (n_good + n_missing_key >= n_required)
+ return -1;
+ else
+ return -2;
+}
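+/* Worked example of the threshold arithmetic above: with 9 voting
+ * authorities, n_required = 9/2 + 1 = 5. A consensus signed by all 9
+ * returns 1; one with 5 to 8 good signatures returns 0; one with 3 good
+ * signatures and 2 missing certificates returns -1, since fetching those
+ * certificates might still produce enough signatures; anything worse
+ * returns -2. */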
+
+/** How far in the future do we allow a network-status to get before removing
+ * it? (seconds) */
+#define NETWORKSTATUS_ALLOW_SKEW (24*60*60)
+
+/** Helper for bsearching a list of routerstatus_t pointers: compare a
+ * digest in the key to the identity digest of a routerstatus_t. */
+int
+compare_digest_to_routerstatus_entry(const void *_key, const void **_member)
+{
+ const char *key = _key;
+ const routerstatus_t *rs = *_member;
+ return tor_memcmp(key, rs->identity_digest, DIGEST_LEN);
+}
+
+/** Helper for bsearching a list of routerstatus_t pointers: compare a
+ * digest in the key to the identity digest of a routerstatus_t. */
+int
+compare_digest_to_vote_routerstatus_entry(const void *_key,
+ const void **_member)
+{
+ const char *key = _key;
+ const vote_routerstatus_t *vrs = *_member;
+ return tor_memcmp(key, vrs->status.identity_digest, DIGEST_LEN);
+}
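+/* Both comparators assume that the routerstatus list is sorted by identity
+ * digest; the smartlist_bsearch() and smartlist_bsearch_idx() calls below
+ * rely on that ordering. */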
+
+/** As networkstatus_vote_find_entry(), but do not return a const pointer */
+routerstatus_t *
+networkstatus_vote_find_mutable_entry(networkstatus_t *ns, const char *digest)
+{
+ return smartlist_bsearch(ns->routerstatus_list, digest,
+ compare_digest_to_routerstatus_entry);
+}
+
+/** Return the entry in <b>ns</b> for the identity digest <b>digest</b>, or
+ * NULL if none was found. */
+const routerstatus_t *
+networkstatus_vote_find_entry(networkstatus_t *ns, const char *digest)
+{
+ return networkstatus_vote_find_mutable_entry(ns, digest);
+}
+
+/*XXXX MOVE make this static once functions are moved into this file. */
+/** Search the routerstatuses in <b>ns</b> for one whose identity digest is
+ * <b>digest</b>. Return value and set *<b>found_out</b> as for
+ * smartlist_bsearch_idx(). */
+int
+networkstatus_vote_find_entry_idx(networkstatus_t *ns,
+ const char *digest, int *found_out)
+{
+ return smartlist_bsearch_idx(ns->routerstatus_list, digest,
+ compare_digest_to_routerstatus_entry,
+ found_out);
+}
+
+/** As router_get_consensus_status_by_descriptor_digest, but does not return
+ * a const pointer. */
+MOCK_IMPL(routerstatus_t *,
+router_get_mutable_consensus_status_by_descriptor_digest,(
+ networkstatus_t *consensus,
+ const char *digest))
+{
+ if (!consensus)
+ consensus = networkstatus_get_latest_consensus();
+ if (!consensus)
+ return NULL;
+ if (!consensus->desc_digest_map) {
+ digestmap_t *m = consensus->desc_digest_map = digestmap_new();
+ SMARTLIST_FOREACH(consensus->routerstatus_list,
+ routerstatus_t *, rs,
+ {
+ digestmap_set(m, rs->descriptor_digest, rs);
+ });
+ }
+ return digestmap_get(consensus->desc_digest_map, digest);
+}
+
+/** Return the consensus view of the status of the router whose current
+ * <i>descriptor</i> digest in <b>consensus</b> is <b>digest</b>, or NULL if
+ * no such router is known. */
+const routerstatus_t *
+router_get_consensus_status_by_descriptor_digest(networkstatus_t *consensus,
+ const char *digest)
+{
+ return router_get_mutable_consensus_status_by_descriptor_digest(
+ consensus, digest);
+}
+
+/** Return a smartlist of all router descriptor digests in a consensus */
+static smartlist_t *
+router_get_descriptor_digests_in_consensus(networkstatus_t *consensus)
+{
+ smartlist_t *result = smartlist_new();
+ digestmap_iter_t *i;
+ const char *digest;
+ void *rs;
+ char *digest_tmp;
+
+ for (i = digestmap_iter_init(consensus->desc_digest_map);
+ !(digestmap_iter_done(i));
+ i = digestmap_iter_next(consensus->desc_digest_map, i)) {
+ digestmap_iter_get(i, &digest, &rs);
+ digest_tmp = tor_malloc(DIGEST_LEN);
+ memcpy(digest_tmp, digest, DIGEST_LEN);
+ smartlist_add(result, digest_tmp);
+ }
+
+ return result;
+}
+
+/** Return a smartlist of all router descriptor digests in the current
+ * consensus */
+MOCK_IMPL(smartlist_t *,
+router_get_descriptor_digests,(void))
+{
+ smartlist_t *result = NULL;
+
+ if (current_ns_consensus) {
+ result =
+ router_get_descriptor_digests_in_consensus(current_ns_consensus);
+ }
+
+ return result;
+}
+
+/** Given the digest of a router descriptor, return its current download
+ * status, or NULL if the digest is unrecognized. */
+MOCK_IMPL(download_status_t *,
+router_get_dl_status_by_descriptor_digest,(const char *d))
+{
+ routerstatus_t *rs;
+ if (!current_ns_consensus)
+ return NULL;
+ if ((rs = router_get_mutable_consensus_status_by_descriptor_digest(
+ current_ns_consensus, d)))
+ return &rs->dl_status;
+
+ return NULL;
+}
+
+/** As router_get_consensus_status_by_id, but do not return a const pointer */
+routerstatus_t *
+router_get_mutable_consensus_status_by_id(const char *digest)
+{
+ const networkstatus_t *ns = networkstatus_get_latest_consensus();
+ if (!ns)
+ return NULL;
+ smartlist_t *rslist = ns->routerstatus_list;
+ return smartlist_bsearch(rslist, digest,
+ compare_digest_to_routerstatus_entry);
+}
+
+/** Return the consensus view of the status of the router whose identity
+ * digest is <b>digest</b>, or NULL if we don't know about any such router. */
+const routerstatus_t *
+router_get_consensus_status_by_id(const char *digest)
+{
+ return router_get_mutable_consensus_status_by_id(digest);
+}
+
+/** How frequently do directory authorities re-download fresh networkstatus
+ * documents? */
+#define AUTHORITY_NS_CACHE_INTERVAL (10*60)
+
+/** How frequently do non-authority directory caches re-download fresh
+ * networkstatus documents? */
+#define NONAUTHORITY_NS_CACHE_INTERVAL (60*60)
+
+/** Return true iff, given the options listed in <b>options</b>, <b>flavor</b>
+ * is the flavor of a consensus networkstatus that we would like to fetch.
+ *
+ * For certificate fetches, use we_want_to_fetch_unknown_auth_certs, and
+ * for serving fetched documents, use directory_caches_dir_info. */
+int
+we_want_to_fetch_flavor(const or_options_t *options, int flavor)
+{
+ if (flavor < 0 || flavor > N_CONSENSUS_FLAVORS) {
+ /* This flavor is crazy; we don't want it */
+ /*XXXX handle unrecognized flavors later */
+ return 0;
+ }
+ if (authdir_mode_v3(options) || directory_caches_dir_info(options)) {
+ /* We want to serve all flavors to others, regardless if we would use
+ * it ourselves. */
+ return 1;
+ }
+ if (options->FetchUselessDescriptors) {
+ /* In order to get all descriptors, we need to fetch all consensuses. */
+ return 1;
+ }
+ /* Otherwise, we want the flavor only if we want to use it to build
+ * circuits. */
+ return flavor == usable_consensus_flavor();
+}
+
+/** Return true iff, given the options listed in <b>options</b>, we would like
+ * to fetch and store unknown authority certificates.
+ *
+ * For consensus and descriptor fetches, use we_want_to_fetch_flavor, and
+ * for serving fetched certificates, use directory_caches_unknown_auth_certs.
+ */
+int
+we_want_to_fetch_unknown_auth_certs(const or_options_t *options)
+{
+ if (authdir_mode_v3(options) ||
+ directory_caches_unknown_auth_certs((options))) {
+ /* We want to serve all certs to others, regardless if we would use
+ * them ourselves. */
+ return 1;
+ }
+ if (options->FetchUselessDescriptors) {
+ /* Unknown certificates are definitely useless. */
+ return 1;
+ }
+ /* Otherwise, don't fetch unknown certificates. */
+ return 0;
+}
+
+/** How long will we hang onto a possibly live consensus for which we're
+ * fetching certs before we check whether there is a better one? */
+#define DELAY_WHILE_FETCHING_CERTS (20*60)
+
+/** What is the minimum time we need to have waited fetching certs, before we
+ * increment the consensus download schedule on failure? */
+#define MIN_DELAY_FOR_FETCH_CERT_STATUS_FAILURE (1*60)
+
+/* Check if a downloaded consensus flavor should still wait for certificates
+ * to download now. If we decide not to wait, check if enough time has passed
+ * to consider the certificate download failure a separate failure. If so,
+ * fail dls.
+ * If waiting for certificates to download, return 1. If not, return 0. */
+static int
+check_consensus_waiting_for_certs(int flavor, time_t now,
+ download_status_t *dls)
+{
+ consensus_waiting_for_certs_t *waiting;
+
+ /* We should always have a known flavor, because we_want_to_fetch_flavor()
+ * filters out unknown flavors. */
+ tor_assert(flavor >= 0 && flavor < N_CONSENSUS_FLAVORS);
+
+ waiting = &consensus_waiting_for_certs[flavor];
+ if (waiting->consensus) {
+ /* XXXX make sure this doesn't delay sane downloads. */
+ if (waiting->set_at + DELAY_WHILE_FETCHING_CERTS > now &&
+ waiting->consensus->valid_until > now) {
+ return 1;
+ } else {
+ if (!waiting->dl_failed) {
+ if (waiting->set_at + MIN_DELAY_FOR_FETCH_CERT_STATUS_FAILURE < now) {
+ download_status_failed(dls, 0);
+ }
+ waiting->dl_failed=1;
+ }
+ }
+ }
+
+ return 0;
+}
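+/* Timing example for the helper above: a consensus stashed while we fetch
+ * its certificates is waited on for up to DELAY_WHILE_FETCHING_CERTS
+ * (20 minutes), as long as it is still valid. Once we stop waiting, the
+ * failure is counted against the download schedule exactly once, and only
+ * if at least MIN_DELAY_FOR_FETCH_CERT_STATUS_FAILURE (1 minute) has
+ * passed since the consensus was stashed. */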
+
+/** If we want to download a fresh consensus, launch a new download as
+ * appropriate. */
+static void
+update_consensus_networkstatus_downloads(time_t now)
+{
+ int i;
+ const or_options_t *options = get_options();
+ const int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping(
+ now);
+ const int use_multi_conn =
+ networkstatus_consensus_can_use_multiple_directories(options);
+
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+
+ for (i=0; i < N_CONSENSUS_FLAVORS; ++i) {
+ /* XXXX need some way to download unknown flavors if we are caching. */
+ const char *resource;
+ networkstatus_t *c;
+ int max_in_progress_conns = 1;
+
+ if (! we_want_to_fetch_flavor(options, i))
+ continue;
+
+ c = networkstatus_get_latest_consensus_by_flavor(i);
+ if (! (c && c->valid_after <= now && now <= c->valid_until)) {
+ /* No live consensus? Get one now!*/
+ time_to_download_next_consensus[i] = now;
+ }
+
+ if (time_to_download_next_consensus[i] > now)
+ continue; /* Wait until the current consensus is older. */
+
+ resource = networkstatus_get_flavor_name(i);
+
+ /* Check if we already have enough connections in progress */
+ if (we_are_bootstrapping && use_multi_conn) {
+ max_in_progress_conns =
+ options->ClientBootstrapConsensusMaxInProgressTries;
+ }
+ if (connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ resource)
+ >= max_in_progress_conns) {
+ continue;
+ }
+
+ /* Check if we want to launch another download for a usable consensus.
+ * Only used during bootstrap. */
+ if (we_are_bootstrapping && use_multi_conn
+ && i == usable_consensus_flavor()) {
+
+ /* Check if we're already downloading a usable consensus */
+ if (networkstatus_consensus_is_already_downloading(resource))
+ continue;
+
+ /* Make multiple connections for a bootstrap consensus download. */
+ update_consensus_bootstrap_multiple_downloads(now, options);
+ } else {
+ /* Check if we failed downloading a consensus too recently */
+
+ /* Let's make sure we remembered to update consensus_dl_status */
+ tor_assert(consensus_dl_status[i].schedule == DL_SCHED_CONSENSUS);
+
+ if (!download_status_is_ready(&consensus_dl_status[i], now)) {
+ continue;
+ }
+
+ /** Check if we're waiting for certificates to download. If we are,
+ * launch download for missing directory authority certificates. */
+ if (check_consensus_waiting_for_certs(i, now, &consensus_dl_status[i])) {
+ update_certificate_downloads(now);
+ continue;
+ }
+
+ /* Try the requested attempt */
+ log_info(LD_DIR, "Launching %s standard networkstatus consensus "
+ "download.", networkstatus_get_flavor_name(i));
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_CONSENSUS,
+ ROUTER_PURPOSE_GENERAL, resource,
+ PDS_RETRY_IF_NO_SERVERS,
+ consensus_dl_status[i].want_authority);
+ }
+ }
+}
+
+/** When we're bootstrapping, launch one or more consensus download
+ * connections, if the schedule indicates connection(s) should be made after
+ * now. If <b>want_authority</b> is DL_WANT_AUTHORITY, connect to an
+ * authority; otherwise, use a fallback directory mirror.
+ */
+static void
+update_consensus_bootstrap_attempt_downloads(
+ time_t now,
+ download_status_t *dls,
+ download_want_authority_t want_authority)
+{
+ const char *resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+
+ /* Let's make sure we remembered to update schedule */
+ tor_assert(dls->schedule == DL_SCHED_CONSENSUS);
+
+ /* Allow for multiple connections in the same second, if the schedule value
+ * is 0. */
+ while (download_status_is_ready(dls, now)) {
+ log_info(LD_DIR, "Launching %s bootstrap %s networkstatus consensus "
+ "download.", resource, (want_authority == DL_WANT_AUTHORITY
+ ? "authority"
+ : "mirror"));
+
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_CONSENSUS,
+ ROUTER_PURPOSE_GENERAL, resource,
+ PDS_RETRY_IF_NO_SERVERS, want_authority);
+ /* schedule the next attempt */
+ download_status_increment_attempt(dls, resource, now);
+ }
+}
+
+/** If we're bootstrapping, check the connection schedules and see if we want
+ * to make additional, potentially concurrent, consensus download
+ * connections.
+ * Only call when bootstrapping, and when we want to make additional
+ * connections. Only nodes that satisfy
+ * networkstatus_consensus_can_use_multiple_directories make additional
+ * connections.
+ */
+static void
+update_consensus_bootstrap_multiple_downloads(time_t now,
+ const or_options_t *options)
+{
+ const int usable_flavor = usable_consensus_flavor();
+
+ /* make sure we can use multiple connections */
+ if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+ return;
+ }
+
+ /* Launch concurrent consensus download attempt(s) based on the mirror and
+ * authority schedules. Try the mirror first - this makes it slightly more
+ * likely that we'll connect to the fallback first, and then end the
+ * authority connection attempt. */
+
+ /* If a consensus download fails because it's waiting for certificates,
+ * we'll fail both the authority and fallback schedules. This is better than
+ * failing only one of the schedules, and having the other continue
+ * unchecked.
+ */
+
+ /* If we don't have or can't use extra fallbacks, don't try them. */
+ if (networkstatus_consensus_can_use_extra_fallbacks(options)) {
+ download_status_t *dls_f =
+ &consensus_bootstrap_dl_status[CONSENSUS_BOOTSTRAP_SOURCE_ANY_DIRSERVER];
+
+ if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_f)) {
+ /* During bootstrap, DL_WANT_ANY_DIRSERVER means "use fallbacks". */
+ update_consensus_bootstrap_attempt_downloads(now, dls_f,
+ DL_WANT_ANY_DIRSERVER);
+ }
+ }
+
+ /* Now try an authority. */
+ download_status_t *dls_a =
+ &consensus_bootstrap_dl_status[CONSENSUS_BOOTSTRAP_SOURCE_AUTHORITY];
+
+ if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_a)) {
+ update_consensus_bootstrap_attempt_downloads(now, dls_a,
+ DL_WANT_AUTHORITY);
+ }
+}
+
+/** Called when an attempt to download a consensus fails: note that the
+ * failure occurred, and possibly retry. */
+void
+networkstatus_consensus_download_failed(int status_code, const char *flavname)
+{
+ int flav = networkstatus_parse_flavor_name(flavname);
+ if (flav >= 0) {
+ tor_assert(flav < N_CONSENSUS_FLAVORS);
+ /* XXXX handle unrecognized flavors */
+ download_status_failed(&consensus_dl_status[flav], status_code);
+ /* Retry immediately, if appropriate. */
+ update_consensus_networkstatus_downloads(time(NULL));
+ }
+}
+
+/** How long do we (as a cache) wait after a consensus becomes non-fresh
+ * before trying to fetch another? */
+#define CONSENSUS_MIN_SECONDS_BEFORE_CACHING 120
+
+/** Update the time at which we'll consider replacing the current
+ * consensus of flavor <b>flav</b> */
+static void
+update_consensus_networkstatus_fetch_time_impl(time_t now, int flav)
+{
+ const or_options_t *options = get_options();
+ networkstatus_t *c = networkstatus_get_latest_consensus_by_flavor(flav);
+ const char *flavor = networkstatus_get_flavor_name(flav);
+ if (! we_want_to_fetch_flavor(get_options(), flav))
+ return;
+
+ if (c && c->valid_after <= now && now <= c->valid_until) {
+ long dl_interval;
+ long interval = c->fresh_until - c->valid_after;
+ long min_sec_before_caching = CONSENSUS_MIN_SECONDS_BEFORE_CACHING;
+ time_t start;
+
+ if (min_sec_before_caching > interval/16) {
+ /* Usually we allow 2-minutes slop factor in case clocks get
+ desynchronized a little. If we're on a private network with
+ a crazy-fast voting interval, though, 2 minutes may be too
+ much. */
+ min_sec_before_caching = interval/16;
+ /* make sure we always delay by at least a second before caching */
+ if (min_sec_before_caching == 0) {
+ min_sec_before_caching = 1;
+ }
+ }
+
+ if (directory_fetches_dir_info_early(options)) {
+ /* We want to cache the next one at some point after this one
+ * is no longer fresh... */
+ start = (time_t)(c->fresh_until + min_sec_before_caching);
+ /* Some clients may need the consensus sooner than others. */
+ if (options->FetchDirInfoExtraEarly || authdir_mode_v3(options)) {
+ dl_interval = 60;
+ if (min_sec_before_caching + dl_interval > interval)
+ dl_interval = interval/2;
+ } else {
+ /* But only in the first half-interval after that. */
+ dl_interval = interval/2;
+ }
+ } else {
+ /* We're an ordinary client, a bridge, or a hidden service.
+ * Give all the caches enough time to download the consensus. */
+ start = (time_t)(c->fresh_until + (interval*3)/4);
+ /* But download the next one well before this one is expired. */
+ dl_interval = ((c->valid_until - start) * 7 )/ 8;
+
+ /* If we're a bridge user, make use of the numbers we just computed
+ * to choose the rest of the interval *after* them. */
+ if (directory_fetches_dir_info_later(options)) {
+ /* Give all the *clients* enough time to download the consensus. */
+ start = (time_t)(start + dl_interval + min_sec_before_caching);
+ /* But try to get it before ours actually expires. */
+ dl_interval = (c->valid_until - start) - min_sec_before_caching;
+ }
+ }
+ /* catch low dl_interval in crazy-fast networks */
+ if (dl_interval < 1)
+ dl_interval = 1;
+ /* catch late start in crazy-fast networks */
+ if (start+dl_interval >= c->valid_until)
+ start = c->valid_until - dl_interval - 1;
+ log_debug(LD_DIR,
+ "fresh_until: %ld start: %ld "
+ "dl_interval: %ld valid_until: %ld ",
+ (long)c->fresh_until, (long)start, dl_interval,
+ (long)c->valid_until);
+ /* We must not try to replace c while it's still fresh: */
+ tor_assert(c->fresh_until < start);
+ /* We must download the next one before c is invalid: */
+ tor_assert(start+dl_interval < c->valid_until);
+ time_to_download_next_consensus[flav] =
+ start + crypto_rand_int((int)dl_interval);
+ {
+ char tbuf1[ISO_TIME_LEN+1];
+ char tbuf2[ISO_TIME_LEN+1];
+ char tbuf3[ISO_TIME_LEN+1];
+ format_local_iso_time(tbuf1, c->fresh_until);
+ format_local_iso_time(tbuf2, c->valid_until);
+ format_local_iso_time(tbuf3, time_to_download_next_consensus[flav]);
+ log_info(LD_DIR, "Live %s consensus %s the most recent until %s and "
+ "will expire at %s; fetching the next one at %s.",
+ flavor, (c->fresh_until > now) ? "will be" : "was",
+ tbuf1, tbuf2, tbuf3);
+ }
+ } else {
+ time_to_download_next_consensus[flav] = now;
+ log_info(LD_DIR, "No live %s consensus; we should fetch one immediately.",
+ flavor);
+ }
+}
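+/* Illustration of the schedule above, assuming a typical consensus with a
+ * one-hour fresh interval and a three-hour lifetime: a directory cache
+ * (without FetchDirInfoExtraEarly) starts fetching the next consensus
+ * about two minutes after fresh_until, spread over the following half
+ * hour, while an ordinary client waits until 45 minutes past fresh_until
+ * and spreads its fetch over 7/8 of the time remaining before
+ * valid_until. */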
+
+/** Update the time at which we'll consider replacing the current
+ * consensus of each flavor we want to fetch. */
+void
+update_consensus_networkstatus_fetch_time(time_t now)
+{
+ int i;
+ for (i = 0; i < N_CONSENSUS_FLAVORS; ++i) {
+ if (we_want_to_fetch_flavor(get_options(), i))
+ update_consensus_networkstatus_fetch_time_impl(now, i);
+ }
+}
+
+/** Return 1 if there's a reason we shouldn't try any directory
+ * fetches yet (e.g. we demand bridges and none are yet known).
+ * Else return 0.
+ *
+ * If we return 1 and <b>msg_out</b> is provided, set <b>msg_out</b>
+ * to an explanation of why directory fetches are delayed. (If we
+ * return 0, we set msg_out to NULL.)
+ */
+int
+should_delay_dir_fetches(const or_options_t *options, const char **msg_out)
+{
+ if (msg_out) {
+ *msg_out = NULL;
+ }
+
+ if (options->DisableNetwork) {
+ if (msg_out) {
+ *msg_out = "DisableNetwork is set.";
+ }
+ log_info(LD_DIR, "Delaying dir fetches (DisableNetwork is set)");
+ return 1;
+ }
+
+ if (we_are_hibernating()) {
+ if (msg_out) {
+ *msg_out = "We are hibernating or shutting down.";
+ }
+ log_info(LD_DIR, "Delaying dir fetches (Hibernating or shutting down)");
+ return 1;
+ }
+
+ if (options->UseBridges) {
+ /* If we know that none of our bridges can possibly work, avoid fetching
+ * directory documents. But if some of them might work, try again. */
+ if (num_bridges_usable(1) == 0) {
+ if (msg_out) {
+ *msg_out = "No running bridges";
+ }
+ log_info(LD_DIR, "Delaying dir fetches (no running bridges known)");
+ return 1;
+ }
+
+ if (pt_proxies_configuration_pending()) {
+ if (msg_out) {
+ *msg_out = "Pluggable transport proxies still configuring";
+ }
+ log_info(LD_DIR, "Delaying dir fetches (pt proxies still configuring)");
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/** Launch requests for networkstatus documents as appropriate. This is called
+ * when we retry all the connections on a SIGHUP and periodically by a Periodic
+ * event which checks whether we want to download any networkstatus documents.
+ */
+void
+update_networkstatus_downloads(time_t now)
+{
+ const or_options_t *options = get_options();
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+ /** Launch a consensus download request, we will wait for the consensus to
+ * download and when it completes we will launch a certificate download
+ * request. */
+ update_consensus_networkstatus_downloads(now);
+}
+
+/** Launch requests as appropriate for missing directory authority
+ * certificates. */
+void
+update_certificate_downloads(time_t now)
+{
+ int i;
+ for (i = 0; i < N_CONSENSUS_FLAVORS; ++i) {
+ if (consensus_waiting_for_certs[i].consensus)
+ authority_certs_fetch_missing(consensus_waiting_for_certs[i].consensus,
+ now, NULL);
+ }
+
+ if (current_ns_consensus)
+ authority_certs_fetch_missing(current_ns_consensus, now, NULL);
+ if (current_md_consensus)
+ authority_certs_fetch_missing(current_md_consensus, now, NULL);
+}
+
+/** Return 1 if we have a consensus but we don't have enough certificates
+ * to start using it yet. */
+int
+consensus_is_waiting_for_certs(void)
+{
+ return consensus_waiting_for_certs[usable_consensus_flavor()].consensus
+ ? 1 : 0;
+}
+
+/** Look up the currently active (depending on bootstrap status) download
+ * status for this consensus flavor and return a pointer to it.
+ */
+MOCK_IMPL(download_status_t *,
+networkstatus_get_dl_status_by_flavor,(consensus_flavor_t flavor))
+{
+ download_status_t *dl = NULL;
+ const int we_are_bootstrapping =
+ networkstatus_consensus_is_bootstrapping(time(NULL));
+
+ if ((int)flavor <= N_CONSENSUS_FLAVORS) {
+ dl = &((we_are_bootstrapping ?
+ consensus_bootstrap_dl_status : consensus_dl_status)[flavor]);
+ }
+
+ return dl;
+}
+
+/** Look up the bootstrap download status for this consensus flavor
+ * and return a pointer to it. */
+MOCK_IMPL(download_status_t *,
+networkstatus_get_dl_status_by_flavor_bootstrap,(consensus_flavor_t flavor))
+{
+ download_status_t *dl = NULL;
+
+ if ((int)flavor <= N_CONSENSUS_FLAVORS) {
+ dl = &(consensus_bootstrap_dl_status[flavor]);
+ }
+
+ return dl;
+}
+
+/** Look up the running (non-bootstrap) download status for this consensus
+ * flavor and return a pointer to it. */
+MOCK_IMPL(download_status_t *,
+networkstatus_get_dl_status_by_flavor_running,(consensus_flavor_t flavor))
+{
+ download_status_t *dl = NULL;
+
+ if ((int)flavor <= N_CONSENSUS_FLAVORS) {
+ dl = &(consensus_dl_status[flavor]);
+ }
+
+ return dl;
+}
+
+/** Return the most recent consensus that we have downloaded, or NULL if we
+ * don't have one. */
+MOCK_IMPL(networkstatus_t *,
+networkstatus_get_latest_consensus,(void))
+{
+ if (we_use_microdescriptors_for_circuits(get_options()))
+ return current_md_consensus;
+ else
+ return current_ns_consensus;
+}
+
+/** Return the latest consensus we have whose flavor matches <b>f</b>, or NULL
+ * if we don't have one. */
+MOCK_IMPL(networkstatus_t *,
+networkstatus_get_latest_consensus_by_flavor,(consensus_flavor_t f))
+{
+ if (f == FLAV_NS)
+ return current_ns_consensus;
+ else if (f == FLAV_MICRODESC)
+ return current_md_consensus;
+ else {
+ tor_assert(0);
+ return NULL;
+ }
+}
+
+/** Return the most recent consensus that we have downloaded, or NULL if it is
+ * no longer live. */
+MOCK_IMPL(networkstatus_t *,
+networkstatus_get_live_consensus,(time_t now))
+{
+ networkstatus_t *ns = networkstatus_get_latest_consensus();
+ if (ns && networkstatus_is_live(ns, now))
+ return ns;
+ else
+ return NULL;
+}
+
+/** Given a consensus in <b>ns</b>, return true iff currently live and
+ * unexpired. */
+int
+networkstatus_is_live(const networkstatus_t *ns, time_t now)
+{
+ return (ns->valid_after <= now && now <= ns->valid_until);
+}
+
+/** Determine if <b>consensus</b> is valid or expired recently enough that
+ * we can still use it.
+ *
+ * Return 1 if the consensus is reasonably live, or 0 if it is too old.
+ */
+int
+networkstatus_consensus_reasonably_live(const networkstatus_t *consensus,
+ time_t now)
+{
+ if (BUG(!consensus))
+ return 0;
+
+ return networkstatus_valid_until_is_reasonably_live(consensus->valid_until,
+ now);
+}
+
+/** As networkstatus_consensus_reasonably_live, but takes a valid_until
+ * time rather than an entire consensus. */
+int
+networkstatus_valid_until_is_reasonably_live(time_t valid_until,
+ time_t now)
+{
+#define REASONABLY_LIVE_TIME (24*60*60)
+ return (now <= valid_until + REASONABLY_LIVE_TIME);
+}
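+/* In other words, a consensus counts as "reasonably live" until 24 hours
+ * (REASONABLY_LIVE_TIME) after its valid_until time. */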
+
+/* XXXX remove this in favor of get_live_consensus. But actually,
+ * leave something like it for bridge users, who need to not totally
+ * lose if they spend a while fetching a new consensus. */
+/** As networkstatus_get_live_consensus(), but is way more tolerant of expired
+ * consensuses. */
+networkstatus_t *
+networkstatus_get_reasonably_live_consensus(time_t now, int flavor)
+{
+ networkstatus_t *consensus =
+ networkstatus_get_latest_consensus_by_flavor(flavor);
+ if (consensus &&
+ consensus->valid_after <= now &&
+ networkstatus_consensus_reasonably_live(consensus, now))
+ return consensus;
+ else
+ return NULL;
+}
+
+/** Check if we need to download a consensus during tor's bootstrap phase.
+ * If we have no consensus, or our consensus is unusably old, return 1.
+ * As soon as we have received a consensus, return 0, even if we don't have
+ * enough certificates to validate it.
+ * If a fallback directory gives us a consensus we can never get certs for,
+ * check_consensus_waiting_for_certs() will wait 20 minutes before failing
+ * the cert downloads. After that, a new consensus will be fetched from a
+ * randomly chosen fallback. */
+MOCK_IMPL(int,
+networkstatus_consensus_is_bootstrapping,(time_t now))
+{
+ /* If we have a validated, reasonably live consensus, we're not
+ * bootstrapping a consensus at all. */
+ if (networkstatus_get_reasonably_live_consensus(
+ now,
+ usable_consensus_flavor())) {
+ return 0;
+ }
+
+ /* If we have a consensus, but we're waiting for certificates,
+ * we're not waiting for a consensus download while bootstrapping. */
+ if (consensus_is_waiting_for_certs()) {
+ return 0;
+ }
+
+ /* If we have no consensus, or our consensus is very old, we are
+ * bootstrapping, and we need to download a consensus. */
+ return 1;
+}
+
+/** Check if we can use multiple directories for a consensus download.
+ * Only clients (including bridge relays, which act like clients) benefit
+ * from multiple simultaneous consensus downloads. */
+int
+networkstatus_consensus_can_use_multiple_directories(
+ const or_options_t *options)
+{
+ /* If we are a client, bridge, bridge client, or hidden service */
+ return !public_server_mode(options);
+}
+
+/** Check if we can use fallback directory mirrors for a consensus download.
+ * If we have fallbacks and don't want to fetch from the authorities,
+ * we can use them. */
+MOCK_IMPL(int,
+networkstatus_consensus_can_use_extra_fallbacks,(const or_options_t *options))
+{
+ /* The list length comparisons are a quick way to check if we have any
+ * non-authority fallback directories. If we ever have any authorities that
+ * aren't fallback directories, we will need to change this code. */
+ tor_assert(smartlist_len(router_get_fallback_dir_servers())
+ >= smartlist_len(router_get_trusted_dir_servers()));
+ /* If we don't fetch from the authorities, and we have additional mirrors,
+ * we can use them. */
+ return (!directory_fetches_from_authorities(options)
+ && (smartlist_len(router_get_fallback_dir_servers())
+ > smartlist_len(router_get_trusted_dir_servers())));
+}
+
+/* Is there a consensus fetch for flavor <b>resource</b> that's far
+ * enough along to be attached to a circuit? */
+int
+networkstatus_consensus_is_already_downloading(const char *resource)
+{
+ int answer = 0;
+
+ /* First, get a list of all the dir conns that are fetching a consensus,
+ * fetching *this* consensus, and are in state "reading" (meaning they
+ * have already flushed their request onto the socks connection). */
+ smartlist_t *fetching_conns =
+ connection_dir_list_by_purpose_resource_and_state(
+ DIR_PURPOSE_FETCH_CONSENSUS, resource, DIR_CONN_STATE_CLIENT_READING);
+
+ /* Then, walk through each conn, to see if its linked socks connection
+ * is in an attached state. We have to check this separately, since with
+ * the optimistic data feature, fetches can send their request to the
+ * socks connection and go into state 'reading', even before they're
+ * attached to any circuit. */
+ SMARTLIST_FOREACH_BEGIN(fetching_conns, dir_connection_t *, dirconn) {
+ /* Do any of these other dir conns have a linked socks conn that is
+ * attached to a circuit already? */
+ connection_t *base = TO_CONN(dirconn);
+ if (base->linked_conn &&
+ base->linked_conn->type == CONN_TYPE_AP &&
+ !AP_CONN_STATE_IS_UNATTACHED(base->linked_conn->state)) {
+ answer = 1;
+ break; /* stop looping, because we know the answer will be yes */
+ }
+ } SMARTLIST_FOREACH_END(dirconn);
+ smartlist_free(fetching_conns);
+
+ return answer;
+}
+
+/* Does the current, reasonably live consensus have IPv6 addresses?
+ * Returns 1 if there is a reasonably live consensus and its consensus method
+ * includes IPv6 addresses in the consensus.
+ * Otherwise, if there is no consensus, or the method does not include IPv6
+ * addresses, returns 0. */
+int
+networkstatus_consensus_has_ipv6(const or_options_t* options)
+{
+ const networkstatus_t *cons = networkstatus_get_reasonably_live_consensus(
+ approx_time(),
+ usable_consensus_flavor());
+
+ /* If we have no consensus, we have no IPv6 in it */
+ if (!cons) {
+ return 0;
+ }
+
+ /* Different flavours of consensus gained IPv6 at different times */
+ if (we_use_microdescriptors_for_circuits(options)) {
+ return
+ cons->consensus_method >= MIN_METHOD_FOR_A_LINES_IN_MICRODESC_CONSENSUS;
+ } else {
+ return 1;
+ }
+}
+
+/** Given two router status entries for the same router identity, return 1
+ * if the contents have changed between them. Otherwise, return 0. */
+static int
+routerstatus_has_changed(const routerstatus_t *a, const routerstatus_t *b)
+{
+ tor_assert(tor_memeq(a->identity_digest, b->identity_digest, DIGEST_LEN));
+
+ return strcmp(a->nickname, b->nickname) ||
+ fast_memneq(a->descriptor_digest, b->descriptor_digest, DIGEST_LEN) ||
+ a->addr != b->addr ||
+ a->or_port != b->or_port ||
+ a->dir_port != b->dir_port ||
+ a->is_authority != b->is_authority ||
+ a->is_exit != b->is_exit ||
+ a->is_stable != b->is_stable ||
+ a->is_fast != b->is_fast ||
+ a->is_flagged_running != b->is_flagged_running ||
+ a->is_named != b->is_named ||
+ a->is_unnamed != b->is_unnamed ||
+ a->is_valid != b->is_valid ||
+ a->is_possible_guard != b->is_possible_guard ||
+ a->is_bad_exit != b->is_bad_exit ||
+ a->is_hs_dir != b->is_hs_dir;
+ // XXXX this function needs a huge refactoring; it has gotten out
+ // XXXX of sync with routerstatus_t, and it will do so again.
+}
+
+/** Notify controllers of any router status entries that changed between
+ * <b>old_c</b> and <b>new_c</b>. */
+static void
+notify_control_networkstatus_changed(const networkstatus_t *old_c,
+ const networkstatus_t *new_c)
+{
+ smartlist_t *changed;
+ if (old_c == new_c)
+ return;
+
+ /* tell the controller exactly which relays are still listed, as well
+ * as what they're listed as */
+ control_event_newconsensus(new_c);
+
+ if (!control_event_is_interesting(EVENT_NS))
+ return;
+
+ if (!old_c) {
+ control_event_networkstatus_changed(new_c->routerstatus_list);
+ return;
+ }
+ changed = smartlist_new();
+
+ SMARTLIST_FOREACH_JOIN(
+ old_c->routerstatus_list, const routerstatus_t *, rs_old,
+ new_c->routerstatus_list, const routerstatus_t *, rs_new,
+ tor_memcmp(rs_old->identity_digest,
+ rs_new->identity_digest, DIGEST_LEN),
+ smartlist_add(changed, (void*) rs_new)) {
+ if (routerstatus_has_changed(rs_old, rs_new))
+ smartlist_add(changed, (void*)rs_new);
+ } SMARTLIST_FOREACH_JOIN_END(rs_old, rs_new);
+
+ control_event_networkstatus_changed(changed);
+ smartlist_free(changed);
+}
+
+/* Called before the consensus changes from old_c to new_c. */
+static void
+notify_before_networkstatus_changes(const networkstatus_t *old_c,
+ const networkstatus_t *new_c)
+{
+ notify_control_networkstatus_changed(old_c, new_c);
+ dos_consensus_has_changed(new_c);
+ relay_consensus_has_changed(new_c);
+}
+
+/* Called after a new consensus has been put in the global state. It is safe
+ * to use the consensus getters in this function. */
+static void
+notify_after_networkstatus_changes(void)
+{
+ scheduler_notify_networkstatus_changed();
+}
+
+/** Copy all the ancillary information (like router download status and so on)
+ * from <b>old_c</b> to <b>new_c</b>. */
+static void
+networkstatus_copy_old_consensus_info(networkstatus_t *new_c,
+ const networkstatus_t *old_c)
+{
+ if (old_c == new_c)
+ return;
+ if (!old_c || !smartlist_len(old_c->routerstatus_list))
+ return;
+
+ SMARTLIST_FOREACH_JOIN(old_c->routerstatus_list, routerstatus_t *, rs_old,
+ new_c->routerstatus_list, routerstatus_t *, rs_new,
+ tor_memcmp(rs_old->identity_digest,
+ rs_new->identity_digest, DIGEST_LEN),
+ STMT_NIL) {
+ /* Okay, so we're looking at the same identity. */
+ rs_new->last_dir_503_at = rs_old->last_dir_503_at;
+
+ if (tor_memeq(rs_old->descriptor_digest, rs_new->descriptor_digest,
+ DIGEST256_LEN)) {
+ /* And the same descriptor too! */
+ memcpy(&rs_new->dl_status, &rs_old->dl_status,sizeof(download_status_t));
+ }
+ } SMARTLIST_FOREACH_JOIN_END(rs_old, rs_new);
+}
+
+#ifdef TOR_UNIT_TESTS
+/**Accept a <b>flavor</b> consensus <b>c</b> without any additional
+ * validation. This is exclusively for unit tests.
+ * We copy any ancillary information from a pre-existing consensus
+ * and then free the current one and replace it with the newly
+ * provided instance. Returns -1 on unrecognized flavor, 0 otherwise.
+ */
+int
+networkstatus_set_current_consensus_from_ns(networkstatus_t *c,
+ const char *flavor)
+{
+ int flav = networkstatus_parse_flavor_name(flavor);
+ switch (flav) {
+ case FLAV_NS:
+ if (current_ns_consensus) {
+ networkstatus_copy_old_consensus_info(c, current_ns_consensus);
+ networkstatus_vote_free(current_ns_consensus);
+ }
+ current_ns_consensus = c;
+ break;
+ case FLAV_MICRODESC:
+ if (current_md_consensus) {
+ networkstatus_copy_old_consensus_info(c, current_md_consensus);
+ networkstatus_vote_free(current_md_consensus);
+ }
+ current_md_consensus = c;
+ break;
+ }
+ return current_md_consensus ? 0 : -1;
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
+/**
+ * Helper for handle_missing_protocol_warning: handles either the
+ * client case (if <b>is_client</b> is set) or the server case otherwise.
+ */
+static void
+handle_missing_protocol_warning_impl(const networkstatus_t *c,
+ int is_client)
+{
+ char *protocol_warning = NULL;
+
+ int should_exit = networkstatus_check_required_protocols(c,
+ is_client,
+ &protocol_warning);
+ if (protocol_warning) {
+ tor_log(should_exit ? LOG_ERR : LOG_WARN,
+ LD_GENERAL,
+ "%s", protocol_warning);
+ }
+ if (should_exit) {
+ tor_assert_nonfatal(protocol_warning);
+ }
+ tor_free(protocol_warning);
+ if (should_exit)
+ exit(1); // XXXX bad exit: should return from main.
+}
+
+/** Called when we have received a networkstatus <b>c</b>. If there are
+ * any _required_ protocols we are missing, log an error and exit
+ * immediately. If there are any _recommended_ protocols we are missing,
+ * warn. */
+static void
+handle_missing_protocol_warning(const networkstatus_t *c,
+ const or_options_t *options)
+{
+ const int is_server = server_mode(options);
+ const int is_client = options_any_client_port_set(options) || !is_server;
+
+ if (is_server)
+ handle_missing_protocol_warning_impl(c, 0);
+ if (is_client)
+ handle_missing_protocol_warning_impl(c, 1);
+}
+
+/**
+ * Check whether we received a consensus that appears to be coming
+ * from the future. Because we implicitly trust the directory
+ * authorities' idea of the current time, we produce a warning if we
+ * get an early consensus.
+ *
+ * If we got a consensus that is time stamped far in the past, that
+ * could simply have come from a stale cache. Possible ways to get a
+ * consensus from the future can include:
+ *
+ * - enough directory authorities have wrong clocks
+ * - directory authorities collude to produce misleading time stamps
+ * - our own clock is wrong (this is by far the most likely)
+ *
+ * We neglect highly improbable scenarios that involve actual time
+ * travel.
+ */
+STATIC void
+warn_early_consensus(const networkstatus_t *c, const char *flavor,
+ time_t now)
+{
+ char tbuf[ISO_TIME_LEN+1];
+ char dbuf[64];
+ long delta = now - c->valid_after;
+ char *flavormsg = NULL;
+
+/** If a consensus appears more than this many seconds before it could
+ * possibly be a sufficiently-signed consensus, declare that our clock
+ * is skewed. */
+#define EARLY_CONSENSUS_NOTICE_SKEW 60
+
+ /* We assume that if a majority of dirauths have accurate clocks,
+ * the earliest that a dirauth with a skewed clock could possibly
+ * publish a sufficiently-signed consensus is (valid_after -
+ * dist_seconds). Before that time, the skewed dirauth would be
+ * unable to obtain enough authority signatures for the consensus to
+ * be valid. */
+ if (now >= c->valid_after - c->dist_seconds - EARLY_CONSENSUS_NOTICE_SKEW)
+ return;
+
+ format_iso_time(tbuf, c->valid_after);
+ format_time_interval(dbuf, sizeof(dbuf), delta);
+ log_warn(LD_GENERAL, "Our clock is %s behind the time published in the "
+ "consensus network status document (%s UTC). Tor needs an "
+ "accurate clock to work correctly. Please check your time and "
+ "date settings!", dbuf, tbuf);
+ tor_asprintf(&flavormsg, "%s flavor consensus", flavor);
+ clock_skew_warning(NULL, delta, 1, LD_GENERAL, flavormsg, "CONSENSUS");
+ tor_free(flavormsg);
+}
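+/* For example, if the consensus lists a dist-seconds value of 300, the
+ * warning above fires only when valid_after is more than six minutes
+ * (300 + 60 seconds) ahead of our clock; smaller discrepancies are treated
+ * as ordinary skew. */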
+
+/** Try to replace the current cached v3 networkstatus with the one in
+ * <b>consensus</b>. If we don't have enough certificates to validate it,
+ * store it in consensus_waiting_for_certs and launch a certificate fetch.
+ *
+ * If flags & NSSET_FROM_CACHE, this networkstatus has come from the disk
+ * cache. If flags & NSSET_WAS_WAITING_FOR_CERTS, this networkstatus was
+ * already received, but we were waiting for certificates on it. If flags &
+ * NSSET_DONT_DOWNLOAD_CERTS, do not launch certificate downloads as needed.
+ * If flags & NSSET_ACCEPT_OBSOLETE, then we should be willing to take this
+ * consensus, even if it comes from many days in the past.
+ *
+ * If source_dir is non-NULL, it's the identity digest for a directory that
+ * we've just successfully retrieved a consensus or certificates from, so try
+ * it first to fetch any missing certificates.
+ *
+ * Return 0 on success, <0 on failure. On failure, caller should increment
+ * the failure count as appropriate.
+ *
+ * We return -1 for mild failures that don't need to be reported to the
+ * user, and -2 for more serious problems.
+ */
+int
+networkstatus_set_current_consensus(const char *consensus,
+ const char *flavor,
+ unsigned flags,
+ const char *source_dir)
+{
+ networkstatus_t *c=NULL;
+ int r, result = -1;
+ time_t now = approx_time();
+ const or_options_t *options = get_options();
+ char *unverified_fname = NULL, *consensus_fname = NULL;
+ int flav = networkstatus_parse_flavor_name(flavor);
+ const unsigned from_cache = flags & NSSET_FROM_CACHE;
+ const unsigned was_waiting_for_certs = flags & NSSET_WAS_WAITING_FOR_CERTS;
+ const unsigned dl_certs = !(flags & NSSET_DONT_DOWNLOAD_CERTS);
+ const unsigned accept_obsolete = flags & NSSET_ACCEPT_OBSOLETE;
+ const unsigned require_flavor = flags & NSSET_REQUIRE_FLAVOR;
+ const common_digests_t *current_digests = NULL;
+ consensus_waiting_for_certs_t *waiting = NULL;
+ time_t current_valid_after = 0;
+ int free_consensus = 1; /* Free 'c' at the end of the function */
+ int checked_protocols_already = 0;
+
+ if (flav < 0) {
+ /* XXXX we don't handle unrecognized flavors yet. */
+ log_warn(LD_BUG, "Unrecognized consensus flavor %s", flavor);
+ return -2;
+ }
+
+ /* Make sure it's parseable. */
+ c = networkstatus_parse_vote_from_string(consensus, NULL, NS_TYPE_CONSENSUS);
+ if (!c) {
+ log_warn(LD_DIR, "Unable to parse networkstatus consensus");
+ result = -2;
+ goto done;
+ }
+
+ if (from_cache && !was_waiting_for_certs) {
+ /* We previously stored this; check _now_ to make sure that version-kills
+ * really work. This happens even before we check signatures: we did so
+ * before when we stored this to disk. This does mean an attacker who can
+ * write to the datadir can make us not start: such an attacker could
+ * already harm us by replacing our guards, which would be worse. */
+ checked_protocols_already = 1;
+ handle_missing_protocol_warning(c, options);
+ }
+
+ if ((int)c->flavor != flav) {
+ /* This wasn't the flavor we thought we were getting. */
+ if (require_flavor) {
+ log_warn(LD_DIR, "Got consensus with unexpected flavor %s (wanted %s)",
+ networkstatus_get_flavor_name(c->flavor), flavor);
+ goto done;
+ }
+ flav = c->flavor;
+ flavor = networkstatus_get_flavor_name(flav);
+ }
+
+ if (flav != usable_consensus_flavor() &&
+ !we_want_to_fetch_flavor(options, flav)) {
+ /* This consensus is totally boring to us: we won't use it, we didn't want
+ * it, and we won't serve it. Drop it. */
+ goto done;
+ }
+
+ if (from_cache && !accept_obsolete &&
+ c->valid_until < now-OLD_ROUTER_DESC_MAX_AGE) {
+ log_info(LD_DIR, "Loaded an expired consensus. Discarding.");
+ goto done;
+ }
+
+ if (!strcmp(flavor, "ns")) {
+ consensus_fname = get_cachedir_fname("cached-consensus");
+ unverified_fname = get_cachedir_fname("unverified-consensus");
+ if (current_ns_consensus) {
+ current_digests = &current_ns_consensus->digests;
+ current_valid_after = current_ns_consensus->valid_after;
+ }
+ } else if (!strcmp(flavor, "microdesc")) {
+ consensus_fname = get_cachedir_fname("cached-microdesc-consensus");
+ unverified_fname = get_cachedir_fname("unverified-microdesc-consensus");
+ if (current_md_consensus) {
+ current_digests = &current_md_consensus->digests;
+ current_valid_after = current_md_consensus->valid_after;
+ }
+ } else {
+ tor_assert_nonfatal_unreached();
+ result = -2;
+ goto done;
+ }
+
+ if (current_digests &&
+ tor_memeq(&c->digests, current_digests, sizeof(c->digests))) {
+ /* We already have this one. That's a failure. */
+ log_info(LD_DIR, "Got a %s consensus we already have", flavor);
+ goto done;
+ }
+
+ if (current_valid_after && c->valid_after <= current_valid_after) {
+ /* We have a newer one. There's no point in accepting this one,
+ * even if it's great. */
+ log_info(LD_DIR, "Got a %s consensus at least as old as the one we have",
+ flavor);
+ goto done;
+ }
+
+ /* Make sure it's signed enough. */
+ if ((r=networkstatus_check_consensus_signature(c, 1))<0) {
+ if (r == -1) {
+ /* Okay, so it _might_ be signed enough if we get more certificates. */
+ if (!was_waiting_for_certs) {
+ log_info(LD_DIR,
+ "Not enough certificates to check networkstatus consensus");
+ }
+ if (!current_valid_after ||
+ c->valid_after > current_valid_after) {
+ waiting = &consensus_waiting_for_certs[flav];
+ networkstatus_vote_free(waiting->consensus);
+ tor_free(waiting->body);
+ waiting->consensus = c;
+ free_consensus = 0;
+ waiting->body = tor_strdup(consensus);
+ waiting->set_at = now;
+ waiting->dl_failed = 0;
+ if (!from_cache) {
+ write_str_to_file(unverified_fname, consensus, 0);
+ }
+ if (dl_certs)
+ authority_certs_fetch_missing(c, now, source_dir);
+ /* This case is not a success or a failure until we get the certs
+ * or fail to get the certs. */
+ result = 0;
+ } else {
+ /* Even if we had enough signatures, we'd never use this as the
+ * latest consensus. */
+ if (was_waiting_for_certs && from_cache)
+ if (unlink(unverified_fname) != 0) {
+ log_warn(LD_FS,
+ "Failed to unlink %s: %s",
+ unverified_fname, strerror(errno));
+ }
+ }
+ goto done;
+ } else {
+ /* This can never be signed enough: Kill it. */
+ if (!was_waiting_for_certs) {
+ log_warn(LD_DIR, "Not enough good signatures on networkstatus "
+ "consensus");
+ result = -2;
+ }
+ if (was_waiting_for_certs && (r < -1) && from_cache) {
+ if (unlink(unverified_fname) != 0) {
+ log_warn(LD_FS,
+ "Failed to unlink %s: %s",
+ unverified_fname, strerror(errno));
+ }
+ }
+ goto done;
+ }
+ }
+
+ /* Signatures from the consensus are verified */
+ if (from_cache && was_waiting_for_certs) {
+ /* We check whether the consensus was loaded from the disk cache as an
+ * unverified consensus. If it was, rename it to cached-*-consensus, since
+ * it has now been verified. */
+ log_info(LD_DIR, "Unverified consensus signatures verified.");
+ tor_rename(unverified_fname, consensus_fname);
+ }
+
+ if (!from_cache && flav == usable_consensus_flavor())
+ control_event_client_status(LOG_NOTICE, "CONSENSUS_ARRIVED");
+
+ if (!checked_protocols_already) {
+ handle_missing_protocol_warning(c, options);
+ }
+
+ /* Are we missing any certificates at all? */
+ if (r != 1 && dl_certs)
+ authority_certs_fetch_missing(c, now, source_dir);
+
+ const int is_usable_flavor = flav == usable_consensus_flavor();
+
+ /* Before we switch to the new consensus, notify that we are about to change
+ * it using the old consensus and the new one. */
+ if (is_usable_flavor) {
+ notify_before_networkstatus_changes(networkstatus_get_latest_consensus(),
+ c);
+ }
+ if (flav == FLAV_NS) {
+ if (current_ns_consensus) {
+ networkstatus_copy_old_consensus_info(c, current_ns_consensus);
+ networkstatus_vote_free(current_ns_consensus);
+ /* Defensive programming : we should set current_ns_consensus very soon
+ * but we're about to call some stuff in the meantime, and leaving this
+ * dangling pointer around has proven to be trouble. */
+ current_ns_consensus = NULL;
+ }
+ current_ns_consensus = c;
+ free_consensus = 0; /* avoid free */
+ } else if (flav == FLAV_MICRODESC) {
+ if (current_md_consensus) {
+ networkstatus_copy_old_consensus_info(c, current_md_consensus);
+ networkstatus_vote_free(current_md_consensus);
+ /* more defensive programming */
+ current_md_consensus = NULL;
+ }
+ current_md_consensus = c;
+ free_consensus = 0; /* avoid free */
+ }
+
+ waiting = &consensus_waiting_for_certs[flav];
+ if (waiting->consensus &&
+ waiting->consensus->valid_after <= c->valid_after) {
+ networkstatus_vote_free(waiting->consensus);
+ waiting->consensus = NULL;
+ if (consensus != waiting->body)
+ tor_free(waiting->body);
+ else
+ waiting->body = NULL;
+ waiting->set_at = 0;
+ waiting->dl_failed = 0;
+ if (unlink(unverified_fname) != 0) {
+ log_warn(LD_FS,
+ "Failed to unlink %s: %s",
+ unverified_fname, strerror(errno));
+ }
+ }
+
+ if (is_usable_flavor) {
+ /* Notify that we just changed the consensus so the current global value
+ * can be looked at. */
+ notify_after_networkstatus_changes();
+
+ /* The "current" consensus has just been set and it is a usable flavor, so
+ * the first thing we need to do is recalculate the voting schedule static
+ * object, whose timings are needed by subsystems such as hidden services
+ * and shared random. */
+ voting_schedule_recalculate_timing(options, now);
+ reschedule_dirvote(options);
+
+ nodelist_set_consensus(c);
+
+ /* XXXXNM Microdescs: needs a non-ns variant. ???? NM*/
+ update_consensus_networkstatus_fetch_time(now);
+
+ /* Change the cell EWMA settings */
+ cmux_ewma_set_options(options, c);
+
+ /* XXXX this call might be unnecessary here: can changing the
+ * current consensus really alter our view of any OR's rate limits? */
+ connection_or_update_token_buckets(get_connection_array(), options);
+
+ circuit_build_times_new_consensus_params(
+ get_circuit_build_times_mutable(), c);
+ channelpadding_new_consensus_params(c);
+ }
+
+ /* Reset the failure count only if this consensus is actually valid. */
+ if (c->valid_after <= now && now <= c->valid_until) {
+ download_status_reset(&consensus_dl_status[flav]);
+ } else {
+ if (!from_cache)
+ download_status_failed(&consensus_dl_status[flav], 0);
+ }
+
+ if (we_want_to_fetch_flavor(options, flav)) {
+ dirserv_set_cached_consensus_networkstatus(consensus,
+ flavor,
+ &c->digests,
+ c->digest_sha3_as_signed,
+ c->valid_after);
+ if (dir_server_mode(get_options())) {
+ consdiffmgr_add_consensus(consensus, c);
+ }
+ }
+
+ if (!from_cache) {
+ write_str_to_file(consensus_fname, consensus, 0);
+ }
+
+ warn_early_consensus(c, flavor, now);
+
+ /* We got a new consensus. Reset our md fetch failure cache */
+ microdesc_reset_outdated_dirservers_list();
+
+ router_dir_info_changed();
+
+ result = 0;
+ done:
+ if (free_consensus)
+ networkstatus_vote_free(c);
+ tor_free(consensus_fname);
+ tor_free(unverified_fname);
+ return result;
+}
+
+/** Called when we have gotten more certificates: see whether we can
+ * now verify a pending consensus.
+ *
+ * If source_dir is non-NULL, it's the identity digest for a directory that
+ * we've just successfully retrieved certificates from, so try it first to
+ * fetch any missing certificates.
+ */
+void
+networkstatus_note_certs_arrived(const char *source_dir)
+{
+ int i;
+ for (i=0; i<N_CONSENSUS_FLAVORS; ++i) {
+ const char *flavor_name = networkstatus_get_flavor_name(i);
+ consensus_waiting_for_certs_t *waiting = &consensus_waiting_for_certs[i];
+ if (!waiting->consensus)
+ continue;
+ if (networkstatus_check_consensus_signature(waiting->consensus, 0)>=0) {
+ char *waiting_body = waiting->body;
+ if (!networkstatus_set_current_consensus(
+ waiting_body,
+ flavor_name,
+ NSSET_WAS_WAITING_FOR_CERTS,
+ source_dir)) {
+ tor_free(waiting_body);
+ }
+ }
+ }
+}
+
+/** If the network-status list has changed since the last time we called this
+ * function, update the status of every routerinfo from the network-status
+ * list. If <b>dir_version</b> is 2, it's a v2 networkstatus that changed.
+ * If <b>dir_version</b> is 3, it's a v3 consensus that changed.
+ */
+void
+routers_update_all_from_networkstatus(time_t now, int dir_version)
+{
+ routerlist_t *rl = router_get_routerlist();
+ networkstatus_t *consensus = networkstatus_get_reasonably_live_consensus(now,
+ FLAV_NS);
+
+ if (!consensus || dir_version < 3) /* nothing more we should do */
+ return;
+
+ /* calls router_dir_info_changed() when it's done -- more routers
+ * might be up or down now, which might affect whether there's enough
+ * directory info. */
+ routers_update_status_from_consensus_networkstatus(rl->routers, 0);
+
+ SMARTLIST_FOREACH(rl->routers, routerinfo_t *, ri,
+ ri->cache_info.routerlist_index = ri_sl_idx);
+ if (rl->old_routers)
+ signed_descs_update_status_from_consensus_networkstatus(rl->old_routers);
+
+ if (!have_warned_about_old_version) {
+ int is_server = server_mode(get_options());
+ version_status_t status;
+ const char *recommended = is_server ?
+ consensus->server_versions : consensus->client_versions;
+ status = tor_version_is_obsolete(VERSION, recommended);
+
+ if (status == VS_RECOMMENDED) {
+ log_info(LD_GENERAL, "The directory authorities say my version is ok.");
+ } else if (status == VS_EMPTY) {
+ log_info(LD_GENERAL,
+ "The directory authorities don't recommend any versions.");
+ } else if (status == VS_NEW || status == VS_NEW_IN_SERIES) {
+ if (!have_warned_about_new_version) {
+ log_notice(LD_GENERAL, "This version of Tor (%s) is newer than any "
+ "recommended version%s, according to the directory "
+ "authorities. Recommended versions are: %s",
+ VERSION,
+ status == VS_NEW_IN_SERIES ? " in its series" : "",
+ recommended);
+ have_warned_about_new_version = 1;
+ control_event_general_status(LOG_WARN, "DANGEROUS_VERSION "
+ "CURRENT=%s REASON=%s RECOMMENDED=\"%s\"",
+ VERSION, "NEW", recommended);
+ }
+ } else {
+ log_warn(LD_GENERAL, "Please upgrade! "
+ "This version of Tor (%s) is %s, according to the directory "
+ "authorities. Recommended versions are: %s",
+ VERSION,
+ status == VS_OLD ? "obsolete" : "not recommended",
+ recommended);
+ have_warned_about_old_version = 1;
+ control_event_general_status(LOG_WARN, "DANGEROUS_VERSION "
+ "CURRENT=%s REASON=%s RECOMMENDED=\"%s\"",
+ VERSION, status == VS_OLD ? "OBSOLETE" : "UNRECOMMENDED",
+ recommended);
+ }
+ }
+}
+
+/** Given a list <b>routers</b> of routerinfo_t *, update each status field
+ * according to our current consensus networkstatus. May re-order
+ * <b>routers</b>. */
+void
+routers_update_status_from_consensus_networkstatus(smartlist_t *routers,
+ int reset_failures)
+{
+ const or_options_t *options = get_options();
+ int authdir = authdir_mode_v3(options);
+ networkstatus_t *ns = networkstatus_get_latest_consensus();
+ if (!ns || !smartlist_len(ns->routerstatus_list))
+ return;
+
+ routers_sort_by_identity(routers);
+
+ SMARTLIST_FOREACH_JOIN(ns->routerstatus_list, routerstatus_t *, rs,
+ routers, routerinfo_t *, router,
+ tor_memcmp(rs->identity_digest,
+ router->cache_info.identity_digest, DIGEST_LEN),
+ {
+ }) {
+ /* Is it the same descriptor, or only the same identity? */
+ if (tor_memeq(router->cache_info.signed_descriptor_digest,
+ rs->descriptor_digest, DIGEST_LEN)) {
+ if (ns->valid_until > router->cache_info.last_listed_as_valid_until)
+ router->cache_info.last_listed_as_valid_until = ns->valid_until;
+ }
+
+ if (authdir) {
+ /* If we _are_ an authority, we should check whether this router
+ * is one that will cause us to need a reachability test. */
+ routerinfo_t *old_router =
+ router_get_mutable_by_digest(router->cache_info.identity_digest);
+ if (old_router != router) {
+ router->needs_retest_if_added =
+ dirserv_should_launch_reachability_test(router, old_router);
+ }
+ }
+ if (reset_failures) {
+ download_status_reset(&rs->dl_status);
+ }
+ } SMARTLIST_FOREACH_JOIN_END(rs, router);
+
+ router_dir_info_changed();
+}
+
+/** Given a list of signed_descriptor_t, update their fields (mainly, when
+ * they were last listed) from the most recent consensus. */
+void
+signed_descs_update_status_from_consensus_networkstatus(smartlist_t *descs)
+{
+ networkstatus_t *ns = current_ns_consensus;
+ if (!ns)
+ return;
+
+ if (!ns->desc_digest_map) {
+ char dummy[DIGEST_LEN];
+ /* instantiates the digest map. */
+ memset(dummy, 0, sizeof(dummy));
+ router_get_consensus_status_by_descriptor_digest(ns, dummy);
+ }
+ SMARTLIST_FOREACH(descs, signed_descriptor_t *, d,
+ {
+ const routerstatus_t *rs = digestmap_get(ns->desc_digest_map,
+ d->signed_descriptor_digest);
+ if (rs) {
+ if (ns->valid_until > d->last_listed_as_valid_until)
+ d->last_listed_as_valid_until = ns->valid_until;
+ }
+ });
+}
+
+/** Generate networkstatus lines for a single routerstatus_t object, and
+ * return the result in a newly allocated string. Used only by the controller
+ * interface (for now). */
+char *
+networkstatus_getinfo_helper_single(const routerstatus_t *rs)
+{
+ return routerstatus_format_entry(rs, NULL, NULL, NS_CONTROL_PORT,
+ ROUTERSTATUS_FORMAT_NO_CONSENSUS_METHOD,
+ NULL);
+}
+
+/** Alloc and return a string describing routerstatuses for the most
+ * recent info of each router we know about that is of purpose
+ * <b>purpose_string</b>. Return NULL if unrecognized purpose.
+ *
+ * Right now this function is oriented toward listing bridges (you
+ * shouldn't use this for general-purpose routers, since those
+ * should be listed from the consensus, not from the routers list). */
+char *
+networkstatus_getinfo_by_purpose(const char *purpose_string, time_t now)
+{
+ const time_t cutoff = now - ROUTER_MAX_AGE_TO_PUBLISH;
+ char *answer;
+ routerlist_t *rl = router_get_routerlist();
+ smartlist_t *statuses;
+ const uint8_t purpose = router_purpose_from_string(purpose_string);
+ routerstatus_t rs;
+ const int bridge_auth = authdir_mode_bridge(get_options());
+
+ if (purpose == ROUTER_PURPOSE_UNKNOWN) {
+ log_info(LD_DIR, "Unrecognized purpose '%s' when listing router statuses.",
+ purpose_string);
+ return NULL;
+ }
+
+ statuses = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(rl->routers, routerinfo_t *, ri) {
+ node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
+ if (!node)
+ continue;
+ if (ri->cache_info.published_on < cutoff)
+ continue;
+ if (ri->purpose != purpose)
+ continue;
+ /* TODO: modifying the running flag in a getinfo is a bad idea */
+ if (bridge_auth && ri->purpose == ROUTER_PURPOSE_BRIDGE)
+ dirserv_set_router_is_running(ri, now);
+ /* then generate and write out status lines for each of them */
+ set_routerstatus_from_routerinfo(&rs, node, ri, now, 0);
+ smartlist_add(statuses, networkstatus_getinfo_helper_single(&rs));
+ } SMARTLIST_FOREACH_END(ri);
+
+ answer = smartlist_join_strings(statuses, "", 0, NULL);
+ SMARTLIST_FOREACH(statuses, char *, cp, tor_free(cp));
+ smartlist_free(statuses);
+ return answer;
+}
+
+/** Write out router status entries for all our bridge descriptors. */
+void
+networkstatus_dump_bridge_status_to_file(time_t now)
+{
+ char *status = networkstatus_getinfo_by_purpose("bridge", now);
+ char *fname = NULL;
+ char *thresholds = NULL;
+ char *published_thresholds_and_status = NULL;
+ char published[ISO_TIME_LEN+1];
+ const routerinfo_t *me = router_get_my_routerinfo();
+ char fingerprint[FINGERPRINT_LEN+1];
+ char *fingerprint_line = NULL;
+
+ if (me && crypto_pk_get_fingerprint(me->identity_pkey,
+ fingerprint, 0) >= 0) {
+ tor_asprintf(&fingerprint_line, "fingerprint %s\n", fingerprint);
+ } else {
+ log_warn(LD_BUG, "Error computing fingerprint for bridge status.");
+ }
+ format_iso_time(published, now);
+ dirserv_compute_bridge_flag_thresholds();
+ thresholds = dirserv_get_flag_thresholds_line();
+ tor_asprintf(&published_thresholds_and_status,
+ "published %s\nflag-thresholds %s\n%s%s",
+ published, thresholds, fingerprint_line ? fingerprint_line : "",
+ status);
+ fname = get_datadir_fname("networkstatus-bridges");
+ write_str_to_file(fname,published_thresholds_and_status,0);
+ tor_free(thresholds);
+ tor_free(published_thresholds_and_status);
+ tor_free(fname);
+ tor_free(status);
+ tor_free(fingerprint_line);
+}
+
+/** Search the list of "key=value" strings in <b>net_params</b> for a
+ * parameter named <b>param_name</b>. If it is present and parses as an
+ * integer, return its value clamped to [<b>min_val</b>, <b>max_val</b>];
+ * otherwise return <b>default_val</b>. */
+static int32_t
+get_net_param_from_list(smartlist_t *net_params, const char *param_name,
+ int32_t default_val, int32_t min_val, int32_t max_val)
+{
+ int32_t res = default_val;
+ size_t name_len = strlen(param_name);
+
+ tor_assert(max_val > min_val);
+ tor_assert(min_val <= default_val);
+ tor_assert(max_val >= default_val);
+
+ SMARTLIST_FOREACH_BEGIN(net_params, const char *, p) {
+ if (!strcmpstart(p, param_name) && p[name_len] == '=') {
+ int ok=0;
+ long v = tor_parse_long(p+name_len+1, 10, INT32_MIN,
+ INT32_MAX, &ok, NULL);
+ if (ok) {
+ res = (int32_t) v;
+ break;
+ }
+ }
+ } SMARTLIST_FOREACH_END(p);
+
+ if (res < min_val) {
+ log_warn(LD_DIR, "Consensus parameter %s is too small. Got %d, raising to "
+ "%d.", param_name, res, min_val);
+ res = min_val;
+ } else if (res > max_val) {
+ log_warn(LD_DIR, "Consensus parameter %s is too large. Got %d, capping to "
+ "%d.", param_name, res, max_val);
+ res = max_val;
+ }
+
+ return res;
+}
+
+/** Return the value of an integer parameter from the networkstatus <b>ns</b>
+ * whose name is <b>param_name</b>. If <b>ns</b> is NULL, try loading the
+ * latest consensus ourselves. Return <b>default_val</b> if no latest
+ * consensus, or if it has no parameter called <b>param_name</b>.
+ * Make sure the value parsed from the consensus is at least
+ * <b>min_val</b> and at most <b>max_val</b> and raise/cap the parsed value
+ * if necessary. */
+MOCK_IMPL(int32_t,
+networkstatus_get_param, (const networkstatus_t *ns, const char *param_name,
+ int32_t default_val, int32_t min_val, int32_t max_val))
+{
+ if (!ns) /* if they pass in null, go find it ourselves */
+ ns = networkstatus_get_latest_consensus();
+
+ if (!ns || !ns->net_params)
+ return default_val;
+
+ return get_net_param_from_list(ns->net_params, param_name,
+ default_val, min_val, max_val);
+}
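+
+/* Example (illustrative sketch): a subsystem that wants a hypothetical
+ * "foo_cells" parameter, defaulting to 1000 and clamped to [100, 5000],
+ * could call:
+ *
+ *   int32_t foo = networkstatus_get_param(NULL, "foo_cells",
+ *                                         1000, 100, 5000);
+ *
+ * Passing NULL for <b>ns</b> makes the function look up the latest consensus
+ * itself; the parameter name and bounds above are made up for illustration. */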
+
+/**
+ * As networkstatus_get_param(), but check torrc_value before checking the
+ * consensus. If torrc_value is in-range, then return it instead of the
+ * value from the consensus.
+ */
+int32_t
+networkstatus_get_overridable_param(const networkstatus_t *ns,
+ int32_t torrc_value,
+ const char *param_name,
+ int32_t default_val,
+ int32_t min_val, int32_t max_val)
+{
+ if (torrc_value >= min_val && torrc_value <= max_val)
+ return torrc_value;
+ else
+ return networkstatus_get_param(
+ ns, param_name, default_val, min_val, max_val);
+}
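+
+/* Example (illustrative sketch): with the same hypothetical "foo_cells"
+ * parameter, a torrc value of 512 is inside [100, 5000] and is returned
+ * directly; an out-of-range torrc value such as 0 falls through to the
+ * consensus. Here torrc_foo_cells stands in for whatever value the torrc
+ * option parsed to:
+ *
+ *   int32_t foo = networkstatus_get_overridable_param(NULL, torrc_foo_cells,
+ *                                                     "foo_cells",
+ *                                                     1000, 100, 5000);
+ */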
+
+/**
+ * Retrieve the consensus parameter that governs the
+ * fixed-point precision of our network balancing 'bandwidth-weights'
+ * (which are themselves integer consensus values). We divide them
+ * by this value and ensure they never exceed this value.
+ */
+int
+networkstatus_get_weight_scale_param(networkstatus_t *ns)
+{
+ return networkstatus_get_param(ns, "bwweightscale",
+ BW_WEIGHT_SCALE,
+ BW_MIN_WEIGHT_SCALE,
+ BW_MAX_WEIGHT_SCALE);
+}
+
+/** Return the value of an integer bw weight parameter from the networkstatus
+ * <b>ns</b> whose name is <b>weight_name</b>. If <b>ns</b> is NULL, try
+ * loading the latest consensus ourselves. Return <b>default_val</b> if no
+ * latest consensus, or if it has no parameter called <b>weight_name</b>. */
+int32_t
+networkstatus_get_bw_weight(networkstatus_t *ns, const char *weight_name,
+ int32_t default_val)
+{
+ int32_t param;
+ int max;
+ if (!ns) /* if they pass in null, go find it ourselves */
+ ns = networkstatus_get_latest_consensus();
+
+ if (!ns || !ns->weight_params)
+ return default_val;
+
+ max = networkstatus_get_weight_scale_param(ns);
+ param = get_net_param_from_list(ns->weight_params, weight_name,
+ default_val, -1,
+ BW_MAX_WEIGHT_SCALE);
+ if (param > max) {
+ log_warn(LD_DIR, "Value of consensus weight %s was too large, capping "
+ "to %d", weight_name, max);
+ param = max;
+ }
+ return param;
+}
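+
+/* Example (illustrative sketch): path-selection code reading the "Wgg"
+ * bandwidth weight, using -1 as its "not present" default, might do:
+ *
+ *   int32_t wgg = networkstatus_get_bw_weight(NULL, "Wgg", -1);
+ *
+ * The result is capped at the bwweightscale value returned by
+ * networkstatus_get_weight_scale_param(); "Wgg" is just one example of a
+ * weight name that can appear in the consensus. */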
+
+/** Return the name of the consensus flavor <b>flav</b> as used to identify
+ * the flavor in directory documents. */
+const char *
+networkstatus_get_flavor_name(consensus_flavor_t flav)
+{
+ switch (flav) {
+ case FLAV_NS:
+ return "ns";
+ case FLAV_MICRODESC:
+ return "microdesc";
+ default:
+ tor_fragile_assert();
+ return "??";
+ }
+}
+
+/** Return the consensus_flavor_t value for the flavor called <b>flavname</b>,
+ * or -1 if the flavor is not recognized. */
+int
+networkstatus_parse_flavor_name(const char *flavname)
+{
+ if (!strcmp(flavname, "ns"))
+ return FLAV_NS;
+ else if (!strcmp(flavname, "microdesc"))
+ return FLAV_MICRODESC;
+ else
+ return -1;
+}
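+
+/* Example (illustrative): the two helpers above are inverses for known
+ * flavors, e.g.
+ *
+ *   tor_assert(networkstatus_parse_flavor_name(
+ *                networkstatus_get_flavor_name(FLAV_MICRODESC))
+ *              == FLAV_MICRODESC);
+ */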
+
+/** Return 0 if this routerstatus is obsolete, too new, isn't
+ * running, or otherwise not a descriptor that we would make any
+ * use of even if we had it. Else return 1. */
+int
+client_would_use_router(const routerstatus_t *rs, time_t now)
+{
+ if (!rs->is_flagged_running) {
+ /* If we had this router descriptor, we wouldn't even bother using it.
+ * (Whether we fetch and store it depends on we_want_to_fetch_flavor().) */
+ return 0;
+ }
+ if (rs->published_on + OLD_ROUTER_DESC_MAX_AGE < now) {
+ /* We'd drop it immediately for being too old. */
+ return 0;
+ }
+ if (!routerstatus_version_supports_extend2_cells(rs, 1)) {
+ /* We'd ignore it because it doesn't support EXTEND2 cells.
+ * If we don't know the version, download the descriptor so we can
+ * check if it supports EXTEND2 cells and ntor. */
+ return 0;
+ }
+ return 1;
+}
+
+/** If <b>question</b> is a string beginning with "ns/" in a format the
+ * control interface expects for a GETINFO question, set *<b>answer</b> to a
+ * newly-allocated string containing networkstatus lines for the appropriate
+ * ORs. Return 0 on success, -1 on unrecognized question format. */
+int
+getinfo_helper_networkstatus(control_connection_t *conn,
+ const char *question, char **answer,
+ const char **errmsg)
+{
+ const routerstatus_t *status;
+ (void) conn;
+
+ if (!networkstatus_get_latest_consensus()) {
+ *answer = tor_strdup("");
+ return 0;
+ }
+
+ if (!strcmp(question, "ns/all")) {
+ smartlist_t *statuses = smartlist_new();
+ SMARTLIST_FOREACH(networkstatus_get_latest_consensus()->routerstatus_list,
+ const routerstatus_t *, rs,
+ {
+ smartlist_add(statuses, networkstatus_getinfo_helper_single(rs));
+ });
+ *answer = smartlist_join_strings(statuses, "", 0, NULL);
+ SMARTLIST_FOREACH(statuses, char *, cp, tor_free(cp));
+ smartlist_free(statuses);
+ return 0;
+ } else if (!strcmpstart(question, "ns/id/")) {
+ char d[DIGEST_LEN];
+ const char *q = question + 6;
+ if (*q == '$')
+ ++q;
+
+ if (base16_decode(d, DIGEST_LEN, q, strlen(q)) != DIGEST_LEN) {
+ *errmsg = "Data not decodeable as hex";
+ return -1;
+ }
+ status = router_get_consensus_status_by_id(d);
+ } else if (!strcmpstart(question, "ns/name/")) {
+ const node_t *n = node_get_by_nickname(question+8, 0);
+ status = n ? n->rs : NULL;
+ } else if (!strcmpstart(question, "ns/purpose/")) {
+ *answer = networkstatus_getinfo_by_purpose(question+11, time(NULL));
+ return *answer ? 0 : -1;
+ } else if (!strcmp(question, "consensus/packages")) {
+ const networkstatus_t *ns = networkstatus_get_latest_consensus();
+ if (ns && ns->package_lines)
+ *answer = smartlist_join_strings(ns->package_lines, "\n", 0, NULL);
+ else
+ *errmsg = "No consensus available";
+ return *answer ? 0 : -1;
+ } else if (!strcmp(question, "consensus/valid-after") ||
+ !strcmp(question, "consensus/fresh-until") ||
+ !strcmp(question, "consensus/valid-until")) {
+ const networkstatus_t *ns = networkstatus_get_latest_consensus();
+ if (ns) {
+ time_t t;
+ if (!strcmp(question, "consensus/valid-after"))
+ t = ns->valid_after;
+ else if (!strcmp(question, "consensus/fresh-until"))
+ t = ns->fresh_until;
+ else
+ t = ns->valid_until;
+
+ char tbuf[ISO_TIME_LEN+1];
+ format_iso_time(tbuf, t);
+ *answer = tor_strdup(tbuf);
+ } else {
+ *errmsg = "No consensus available";
+ }
+ return *answer ? 0 : -1;
+ } else {
+ return 0;
+ }
+
+ if (status)
+ *answer = networkstatus_getinfo_helper_single(status);
+ return 0;
+}
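+
+/* Example (illustrative): controller requests such as "GETINFO ns/all",
+ * "GETINFO ns/id/<hex identity digest>", or "GETINFO consensus/valid-after"
+ * all land in this helper; the first two are answered with networkstatus
+ * lines, the last with an ISO time string like "2018-01-01 12:00:00". */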
+
+/** Check whether the networkstatus <b>ns</b> lists any protocol
+ * versions as "required" or "recommended" that we do not support. If
+ * so, set *<b>warning_out</b> to a newly allocated string describing
+ * the problem.
+ *
+ * Return 1 if we should exit, 0 if we should not. */
+int
+networkstatus_check_required_protocols(const networkstatus_t *ns,
+ int client_mode,
+ char **warning_out)
+{
+ const char *func = client_mode ? "client" : "relay";
+ const char *required, *recommended;
+ char *missing = NULL;
+
+ tor_assert(warning_out);
+
+ if (client_mode) {
+ required = ns->required_client_protocols;
+ recommended = ns->recommended_client_protocols;
+ } else {
+ required = ns->required_relay_protocols;
+ recommended = ns->recommended_relay_protocols;
+ }
+
+ if (!protover_all_supported(required, &missing)) {
+ tor_asprintf(warning_out, "At least one protocol listed as required in "
+ "the consensus is not supported by this version of Tor. "
+ "You should upgrade. This version of Tor will not work as a "
+ "%s on the Tor network. The missing protocols are: %s",
+ func, missing);
+ tor_free(missing);
+ return 1;
+ }
+
+ if (! protover_all_supported(recommended, &missing)) {
+ tor_asprintf(warning_out, "At least one protocol listed as recommended in "
+ "the consensus is not supported by this version of Tor. "
+ "You should upgrade. This version of Tor will eventually "
+ "stop working as a %s on the Tor network. The missing "
+ "protocols are: %s",
+ func, missing);
+ tor_free(missing);
+ }
+
+ tor_assert_nonfatal(missing == NULL);
+
+ return 0;
+}
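+
+/* Example (illustrative sketch): a caller deciding whether to shut down
+ * could use this along the following lines, where client_mode stands in for
+ * however the caller determines client vs. relay mode:
+ *
+ *   char *warning = NULL;
+ *   if (networkstatus_check_required_protocols(ns, client_mode, &warning)) {
+ *     log_err(LD_GENERAL, "%s", warning);
+ *     // ... shut down ...
+ *   }
+ *   tor_free(warning);
+ */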
+
+/** Release all storage held in <b>s</b>. */
+void
+ns_detached_signatures_free_(ns_detached_signatures_t *s)
+{
+ if (!s)
+ return;
+ if (s->signatures) {
+ STRMAP_FOREACH(s->signatures, flavor, smartlist_t *, sigs) {
+ SMARTLIST_FOREACH(sigs, document_signature_t *, sig,
+ document_signature_free(sig));
+ smartlist_free(sigs);
+ } STRMAP_FOREACH_END;
+ strmap_free(s->signatures, NULL);
+ strmap_free(s->digests, tor_free_);
+ }
+
+ tor_free(s);
+}
+
+/** Free all storage held locally in this module. */
+void
+networkstatus_free_all(void)
+{
+ int i;
+ networkstatus_vote_free(current_ns_consensus);
+ networkstatus_vote_free(current_md_consensus);
+ current_md_consensus = current_ns_consensus = NULL;
+
+ for (i=0; i < N_CONSENSUS_FLAVORS; ++i) {
+ consensus_waiting_for_certs_t *waiting = &consensus_waiting_for_certs[i];
+ if (waiting->consensus) {
+ networkstatus_vote_free(waiting->consensus);
+ waiting->consensus = NULL;
+ }
+ tor_free(waiting->body);
+ }
+}
diff --git a/src/feature/nodelist/networkstatus.h b/src/feature/nodelist/networkstatus.h
new file mode 100644
index 0000000000..cc6badf0b2
--- /dev/null
+++ b/src/feature/nodelist/networkstatus.h
@@ -0,0 +1,162 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file networkstatus.h
+ * \brief Header file for networkstatus.c.
+ **/
+
+#ifndef TOR_NETWORKSTATUS_H
+#define TOR_NETWORKSTATUS_H
+
+#include "lib/testsupport/testsupport.h"
+
+void networkstatus_reset_warnings(void);
+void networkstatus_reset_download_failures(void);
+char *networkstatus_read_cached_consensus(const char *flavorname);
+int router_reload_consensus_networkstatus(void);
+void routerstatus_free_(routerstatus_t *rs);
+#define routerstatus_free(rs) \
+ FREE_AND_NULL(routerstatus_t, routerstatus_free_, (rs))
+void networkstatus_vote_free_(networkstatus_t *ns);
+#define networkstatus_vote_free(ns) \
+ FREE_AND_NULL(networkstatus_t, networkstatus_vote_free_, (ns))
+void ns_detached_signatures_free_(ns_detached_signatures_t *s);
+#define ns_detached_signatures_free(s) \
+ FREE_AND_NULL(ns_detached_signatures_t, ns_detached_signatures_free_, (s))
+networkstatus_voter_info_t *networkstatus_get_voter_by_id(
+ networkstatus_t *vote,
+ const char *identity);
+document_signature_t *networkstatus_get_voter_sig_by_alg(
+ const networkstatus_voter_info_t *voter,
+ digest_algorithm_t alg);
+
+int networkstatus_check_consensus_signature(networkstatus_t *consensus,
+ int warn);
+int networkstatus_check_document_signature(const networkstatus_t *consensus,
+ document_signature_t *sig,
+ const authority_cert_t *cert);
+int compare_digest_to_routerstatus_entry(const void *_key,
+ const void **_member);
+int compare_digest_to_vote_routerstatus_entry(const void *_key,
+ const void **_member);
+const routerstatus_t *networkstatus_vote_find_entry(networkstatus_t *ns,
+ const char *digest);
+routerstatus_t *networkstatus_vote_find_mutable_entry(networkstatus_t *ns,
+ const char *digest);
+int networkstatus_vote_find_entry_idx(networkstatus_t *ns,
+ const char *digest, int *found_out);
+
+MOCK_DECL(download_status_t *,
+ networkstatus_get_dl_status_by_flavor,
+ (consensus_flavor_t flavor));
+MOCK_DECL(download_status_t *,
+ networkstatus_get_dl_status_by_flavor_bootstrap,
+ (consensus_flavor_t flavor));
+MOCK_DECL(download_status_t *,
+ networkstatus_get_dl_status_by_flavor_running,
+ (consensus_flavor_t flavor));
+
+MOCK_DECL(smartlist_t *, router_get_descriptor_digests, (void));
+MOCK_DECL(download_status_t *,router_get_dl_status_by_descriptor_digest,
+ (const char *d));
+
+const routerstatus_t *router_get_consensus_status_by_id(const char *digest);
+routerstatus_t *router_get_mutable_consensus_status_by_id(
+ const char *digest);
+const routerstatus_t *router_get_consensus_status_by_descriptor_digest(
+ networkstatus_t *consensus,
+ const char *digest);
+MOCK_DECL(routerstatus_t *,
+ router_get_mutable_consensus_status_by_descriptor_digest,
+ (networkstatus_t *consensus, const char *digest));
+int we_want_to_fetch_flavor(const or_options_t *options, int flavor);
+int we_want_to_fetch_unknown_auth_certs(const or_options_t *options);
+void networkstatus_consensus_download_failed(int status_code,
+ const char *flavname);
+void update_consensus_networkstatus_fetch_time(time_t now);
+int should_delay_dir_fetches(const or_options_t *options,const char **msg_out);
+void update_networkstatus_downloads(time_t now);
+void update_certificate_downloads(time_t now);
+int consensus_is_waiting_for_certs(void);
+int client_would_use_router(const routerstatus_t *rs, time_t now);
+MOCK_DECL(networkstatus_t *,networkstatus_get_latest_consensus,(void));
+MOCK_DECL(networkstatus_t *,networkstatus_get_latest_consensus_by_flavor,
+ (consensus_flavor_t f));
+MOCK_DECL(networkstatus_t *, networkstatus_get_live_consensus,(time_t now));
+int networkstatus_is_live(const networkstatus_t *ns, time_t now);
+int networkstatus_consensus_reasonably_live(const networkstatus_t *consensus,
+ time_t now);
+int networkstatus_valid_until_is_reasonably_live(time_t valid_until,
+ time_t now);
+networkstatus_t *networkstatus_get_reasonably_live_consensus(time_t now,
+ int flavor);
+MOCK_DECL(int, networkstatus_consensus_is_bootstrapping,(time_t now));
+int networkstatus_consensus_can_use_multiple_directories(
+ const or_options_t *options);
+MOCK_DECL(int, networkstatus_consensus_can_use_extra_fallbacks,(
+ const or_options_t *options));
+int networkstatus_consensus_is_already_downloading(const char *resource);
+int networkstatus_consensus_has_ipv6(const or_options_t* options);
+
+#define NSSET_FROM_CACHE 1
+#define NSSET_WAS_WAITING_FOR_CERTS 2
+#define NSSET_DONT_DOWNLOAD_CERTS 4
+#define NSSET_ACCEPT_OBSOLETE 8
+#define NSSET_REQUIRE_FLAVOR 16
+int networkstatus_set_current_consensus(const char *consensus,
+ const char *flavor,
+ unsigned flags,
+ const char *source_dir);
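+/* Example (illustrative): a caller reloading a consensus from the on-disk
+ * cache would pass a flag combination along the lines of
+ * (NSSET_FROM_CACHE | NSSET_ACCEPT_OBSOLETE), so the document is treated as
+ * cached data and accepted even if it is no longer live. */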
+void networkstatus_note_certs_arrived(const char *source_dir);
+void routers_update_all_from_networkstatus(time_t now, int dir_version);
+void routers_update_status_from_consensus_networkstatus(smartlist_t *routers,
+ int reset_failures);
+void signed_descs_update_status_from_consensus_networkstatus(
+ smartlist_t *descs);
+
+char *networkstatus_getinfo_helper_single(const routerstatus_t *rs);
+char *networkstatus_getinfo_by_purpose(const char *purpose_string, time_t now);
+void networkstatus_dump_bridge_status_to_file(time_t now);
+MOCK_DECL(int32_t, networkstatus_get_param,
+ (const networkstatus_t *ns, const char *param_name,
+ int32_t default_val, int32_t min_val, int32_t max_val));
+int32_t networkstatus_get_overridable_param(const networkstatus_t *ns,
+ int32_t torrc_value,
+ const char *param_name,
+ int32_t default_val,
+ int32_t min_val, int32_t max_val);
+int getinfo_helper_networkstatus(control_connection_t *conn,
+ const char *question, char **answer,
+ const char **errmsg);
+int32_t networkstatus_get_bw_weight(networkstatus_t *ns, const char *weight,
+ int32_t default_val);
+const char *networkstatus_get_flavor_name(consensus_flavor_t flav);
+int networkstatus_parse_flavor_name(const char *flavname);
+void document_signature_free_(document_signature_t *sig);
+#define document_signature_free(sig) \
+ FREE_AND_NULL(document_signature_t, document_signature_free_, (sig))
+document_signature_t *document_signature_dup(const document_signature_t *sig);
+void networkstatus_free_all(void);
+int networkstatus_get_weight_scale_param(networkstatus_t *ns);
+
+void vote_routerstatus_free_(vote_routerstatus_t *rs);
+#define vote_routerstatus_free(rs) \
+ FREE_AND_NULL(vote_routerstatus_t, vote_routerstatus_free_, (rs))
+
+#ifdef NETWORKSTATUS_PRIVATE
+#ifdef TOR_UNIT_TESTS
+STATIC int networkstatus_set_current_consensus_from_ns(networkstatus_t *c,
+ const char *flavor);
+STATIC void warn_early_consensus(const networkstatus_t *c, const char *flavor,
+ time_t now);
+extern networkstatus_t *current_ns_consensus;
+extern networkstatus_t *current_md_consensus;
+#endif /* defined(TOR_UNIT_TESTS) */
+#endif /* defined(NETWORKSTATUS_PRIVATE) */
+
+#endif /* !defined(TOR_NETWORKSTATUS_H) */
+
diff --git a/src/feature/nodelist/networkstatus_sr_info_st.h b/src/feature/nodelist/networkstatus_sr_info_st.h
new file mode 100644
index 0000000000..6c937a75f5
--- /dev/null
+++ b/src/feature/nodelist/networkstatus_sr_info_st.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef NETWORKSTATUS_SR_INFO_ST_H
+#define NETWORKSTATUS_SR_INFO_ST_H
+
+struct networkstatus_sr_info_t {
+ /* Indicates whether the dirauth participates in the SR protocol with its
+ * vote. This is tied to the SR flag in the vote. */
+ unsigned int participate:1;
+ /* Both vote and consensus: Current and previous SRV. If list is empty,
+ * this means none were found in either the consensus or vote. */
+ struct sr_srv_t *previous_srv;
+ struct sr_srv_t *current_srv;
+ /* Vote only: List of commitments. */
+ smartlist_t *commits;
+};
+
+#endif
+
diff --git a/src/feature/nodelist/networkstatus_st.h b/src/feature/nodelist/networkstatus_st.h
new file mode 100644
index 0000000000..4a193ad149
--- /dev/null
+++ b/src/feature/nodelist/networkstatus_st.h
@@ -0,0 +1,101 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef NETWORKSTATUS_ST_H
+#define NETWORKSTATUS_ST_H
+
+#include "or/networkstatus_sr_info_st.h"
+
+/** Enumerates the possible types of a networkstatus document. */
+typedef enum networkstatus_type_t {
+ NS_TYPE_VOTE,
+ NS_TYPE_CONSENSUS,
+ NS_TYPE_OPINION,
+} networkstatus_type_t;
+
+/** A common structure to hold a v3 network status vote, or a v3 network
+ * status consensus. */
+struct networkstatus_t {
+ networkstatus_type_t type; /**< Vote, consensus, or opinion? */
+ consensus_flavor_t flavor; /**< If a consensus, what kind? */
+ unsigned int has_measured_bws : 1;/**< True iff this networkstatus contains
+ * measured= bandwidth values. */
+
+ time_t published; /**< Vote only: Time when vote was written. */
+ time_t valid_after; /**< Time after which this vote or consensus applies. */
+ time_t fresh_until; /**< Time before which this is the most recent vote or
+ * consensus. */
+ time_t valid_until; /**< Time after which this vote or consensus should not
+ * be used. */
+
+ /** Consensus only: what method was used to produce this consensus? */
+ int consensus_method;
+ /** Vote only: what methods is this voter willing to use? */
+ smartlist_t *supported_methods;
+
+ /** List of 'package' lines describing hashes of downloadable packages */
+ smartlist_t *package_lines;
+
+ /** How long does this vote/consensus claim that authorities take to
+ * distribute their votes to one another? */
+ int vote_seconds;
+ /** How long does this vote/consensus claim that authorities take to
+ * distribute their consensus signatures to one another? */
+ int dist_seconds;
+
+ /** Comma-separated list of recommended client software, or NULL if this
+ * voter has no opinion. */
+ char *client_versions;
+ char *server_versions;
+
+ /** Lists of subprotocol versions which are _recommended_ for relays and
+ * clients, or which are _required_ for relays and clients. Tor shouldn't
+ * make any more network connections if a required protocol is missing.
+ */
+ char *recommended_relay_protocols;
+ char *recommended_client_protocols;
+ char *required_relay_protocols;
+ char *required_client_protocols;
+
+ /** List of flags that this vote/consensus applies to routers. If a flag is
+ * not listed here, the voter has no opinion on what its value should be. */
+ smartlist_t *known_flags;
+
+ /** List of key=value strings for the parameters in this vote or
+ * consensus, sorted by key. */
+ smartlist_t *net_params;
+
+ /** List of key=value strings for the bw weight parameters in the
+ * consensus. */
+ smartlist_t *weight_params;
+
+ /** List of networkstatus_voter_info_t. For a vote, only one element
+ * is included. For a consensus, one element is included for every voter
+ * whose vote contributed to the consensus. */
+ smartlist_t *voters;
+
+ struct authority_cert_t *cert; /**< Vote only: the voter's certificate. */
+
+ /** Digests of this document, as signed. */
+ common_digests_t digests;
+ /** A SHA3-256 digest of the document, not including signatures: used for
+ * consensus diffs */
+ uint8_t digest_sha3_as_signed[DIGEST256_LEN];
+
+ /** List of router statuses, sorted by identity digest. For a vote,
+ * the elements are vote_routerstatus_t; for a consensus, the elements
+ * are routerstatus_t. */
+ smartlist_t *routerstatus_list;
+
+ /** If present, a map from descriptor digest to elements of
+ * routerstatus_list. */
+ digestmap_t *desc_digest_map;
+
+ /** Contains the shared random protocol data from a vote or consensus. */
+ networkstatus_sr_info_t sr_info;
+};
+
+#endif
diff --git a/src/feature/nodelist/networkstatus_voter_info_st.h b/src/feature/nodelist/networkstatus_voter_info_st.h
new file mode 100644
index 0000000000..93ff3cd418
--- /dev/null
+++ b/src/feature/nodelist/networkstatus_voter_info_st.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef NETWORKSTATUS_VOTER_INFO_ST_H
+#define NETWORKSTATUS_VOTER_INFO_ST_H
+
+/** Information about a single voter in a vote or a consensus. */
+struct networkstatus_voter_info_t {
+ /** Declared SHA-1 digest of this voter's identity key */
+ char identity_digest[DIGEST_LEN];
+ char *nickname; /**< Nickname of this voter */
+ /** Digest of this voter's "legacy" identity key, if any. In vote only; for
+ * consensuses, we treat legacy keys as additional signers. */
+ char legacy_id_digest[DIGEST_LEN];
+ char *address; /**< Address of this voter, in string format. */
+ uint32_t addr; /**< Address of this voter, in IPv4, in host order. */
+ uint16_t dir_port; /**< Directory port of this voter */
+ uint16_t or_port; /**< OR port of this voter */
+ char *contact; /**< Contact information for this voter. */
+ char vote_digest[DIGEST_LEN]; /**< Digest of this voter's vote, as signed. */
+
+ /* Nothing from here on is signed. */
+ /** The signature of the document and the signature's status. */
+ smartlist_t *sigs;
+};
+
+#endif
diff --git a/src/feature/nodelist/node_st.h b/src/feature/nodelist/node_st.h
new file mode 100644
index 0000000000..d56ce27884
--- /dev/null
+++ b/src/feature/nodelist/node_st.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef NODE_ST_H
+#define NODE_ST_H
+
+#include "or/hsdir_index_st.h"
+#include "lib/crypt_ops/crypto_ed25519.h"
+
+/** A node_t represents a Tor router.
+ *
+ * Specifically, a node_t is a Tor router as we are using it: a router that
+ * we are considering for circuits, connections, and so on. A node_t is a
+ * thin wrapper around the routerstatus, routerinfo, and microdesc for a
+ * single router, and provides a consistent interface for all of them.
+ *
+ * Also, a node_t has mutable state. While a routerinfo, a routerstatus,
+ * and a microdesc have[*] only the information read from a router
+ * descriptor, a consensus entry, and a microdescriptor (respectively)...
+ * a node_t has flags based on *our own current opinion* of the node.
+ *
+ * [*] Actually, there is some leftover information in each that is mutable.
+ * We should try to excise that.
+ */
+struct node_t {
+ /* Indexing information */
+
+ /** Used to look up the node_t by its identity digest. */
+ HT_ENTRY(node_t) ht_ent;
+ /** Used to look up the node_t by its ed25519 identity digest. */
+ HT_ENTRY(node_t) ed_ht_ent;
+ /** Position of the node within the list of nodes */
+ int nodelist_idx;
+
+ /** The identity digest of this node_t. No more than one node_t per
+ * identity may exist at a time. */
+ char identity[DIGEST_LEN];
+
+ /** The ed25519 identity of this node_t. This field is nonzero iff we
+ * currently have an ed25519 identity for this node in either md or ri,
+ * _and_ this node has been inserted to the ed25519-to-node map in the
+ * nodelist.
+ */
+ ed25519_public_key_t ed25519_id;
+
+ microdesc_t *md;
+ routerinfo_t *ri;
+ routerstatus_t *rs;
+
+ /* local info: copied from routerstatus, then possibly frobbed based
+ * on experience. Authorities set this stuff directly. Note that
+ * these reflect knowledge of the primary (IPv4) OR port only. */
+
+ unsigned int is_running:1; /**< As far as we know, is this OR currently
+ * running? */
+ unsigned int is_valid:1; /**< Has a trusted dirserver validated this OR?
+ * (For Authdir: Have we validated this OR?) */
+ unsigned int is_fast:1; /**< Do we think this is a fast OR? */
+ unsigned int is_stable:1; /**< Do we think this is a stable OR? */
+ unsigned int is_possible_guard:1; /**< Do we think this is an OK guard? */
+ unsigned int is_exit:1; /**< Do we think this is an OK exit? */
+ unsigned int is_bad_exit:1; /**< Do we think this exit is censored, borked,
+ * or otherwise nasty? */
+ unsigned int is_hs_dir:1; /**< True iff this router is a hidden service
+ * directory according to the authorities. */
+
+ /* Local info: warning state. */
+
+ unsigned int name_lookup_warned:1; /**< Have we warned the user about
+ * referring
+ * to this (unnamed) router by nickname?
+ */
+
+ /** Local info: we treat this node as if it rejects everything */
+ unsigned int rejects_all:1;
+
+ /* Local info: derived. */
+
+ /** True if the IPv6 OR port is preferred over the IPv4 OR port.
+ * XX/teor - can this become out of date if the torrc changes? */
+ unsigned int ipv6_preferred:1;
+
+ /** According to the geoip db what country is this router in? */
+ /* XXXprop186 what is this supposed to mean with multiple OR ports? */
+ country_t country;
+
+ /* The below items are used only by authdirservers for
+ * reachability testing. */
+
+ /** When was the last time we could reach this OR? */
+ time_t last_reachable; /* IPv4. */
+ time_t last_reachable6; /* IPv6. */
+
+ /* Hidden service directory index data. This is used by a service or client
+ * in order to know what the hs directory index is for this node at the time
+ * the consensus is set. */
+ struct hsdir_index_t hsdir_index;
+};
+
+#endif
diff --git a/src/feature/nodelist/nodelist.c b/src/feature/nodelist/nodelist.c
new file mode 100644
index 0000000000..51fd0015df
--- /dev/null
+++ b/src/feature/nodelist/nodelist.c
@@ -0,0 +1,2513 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file nodelist.c
+ *
+ * \brief Structures and functions for tracking what we know about the routers
+ * on the Tor network, and correlating information from networkstatus,
+ * routerinfo, and microdescs.
+ *
+ * The key structure here is node_t: that's the canonical way to refer
+ * to a Tor relay that we might want to build a circuit through. Every
+ * node_t has either a routerinfo_t, or a routerstatus_t from the current
+ * networkstatus consensus. If it has a routerstatus_t, it will also
+ * need to have a microdesc_t before you can use it for circuits.
+ *
+ * The nodelist_t is a global singleton that maps identities to node_t
+ * objects. Access them with the node_get_*() functions. The nodelist_t
+ * is maintained by calls throughout the codebase.
+ *
+ * Generally, other code should not have to reach inside a node_t to
+ * see what information it has. Instead, you should call one of the
+ * many accessor functions that works on a generic node_t. If there
+ * isn't one that does what you need, it's better to make such a function,
+ * and then use it.
+ *
+ * For historical reasons, some of the functions that select a node_t
+ * from the list of all usable node_t objects are in the routerlist.c
+ * module, since they originally selected a routerinfo_t. (TODO: They
+ * should move!)
+ *
+ * (TODO: Perhaps someday we should abstract the remaining ways of
+ * talking about a relay to also be node_t instances. Those would be
+ * routerstatus_t as used for directory requests, and dir_server_t as
+ * used for authorities and fallback directories.)
+ */
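+
+/* Example (illustrative): typical callers look a relay up by its RSA identity
+ * digest and then stick to generic node_t accessors rather than reaching into
+ * node->ri or node->md directly:
+ *
+ *   const node_t *node = node_get_by_id(identity_digest);
+ *   if (node)
+ *     log_info(LD_GENERAL, "Considering relay %s", node_describe(node));
+ */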
+
+#define NODELIST_PRIVATE
+
+#include "or/or.h"
+#include "lib/net/address.h"
+#include "or/address_set.h"
+#include "or/bridges.h"
+#include "or/config.h"
+#include "or/control.h"
+#include "or/dirserv.h"
+#include "or/entrynodes.h"
+#include "or/geoip.h"
+#include "or/hs_common.h"
+#include "or/hs_client.h"
+#include "or/main.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/protover.h"
+#include "or/rendservice.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "or/routerset.h"
+#include "or/torcert.h"
+
+#include <string.h>
+
+#include "or/dirauth/mode.h"
+
+#include "or/dir_server_st.h"
+#include "or/microdesc_st.h"
+#include "or/networkstatus_st.h"
+#include "or/node_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerlist_st.h"
+#include "or/routerstatus_st.h"
+
+static void nodelist_drop_node(node_t *node, int remove_from_ht);
+#define node_free(val) \
+ FREE_AND_NULL(node_t, node_free_, (val))
+static void node_free_(node_t *node);
+
+/** count_usable_descriptors() counts descriptors that match these flag(s).
+ */
+typedef enum {
+ /* All descriptors regardless of flags */
+ USABLE_DESCRIPTOR_ALL = 0,
+ /* Only descriptors with the Exit flag */
+ USABLE_DESCRIPTOR_EXIT_ONLY = 1
+} usable_descriptor_t;
+static void count_usable_descriptors(int *num_present,
+ int *num_usable,
+ smartlist_t *descs_out,
+ const networkstatus_t *consensus,
+ time_t now,
+ routerset_t *in_set,
+ usable_descriptor_t exit_only);
+static void update_router_have_minimum_dir_info(void);
+static double get_frac_paths_needed_for_circs(const or_options_t *options,
+ const networkstatus_t *ns);
+static void node_add_to_address_set(const node_t *node);
+
+/** A nodelist_t holds a node_t object for every router we're "willing to use
+ * for something". Specifically, it should hold a node_t for every node that
+ * is currently in the routerlist, or currently in the consensus we're using.
+ */
+typedef struct nodelist_t {
+ /* A list of all the nodes. */
+ smartlist_t *nodes;
+ /* Hash table to map from node ID digest to node. */
+ HT_HEAD(nodelist_map, node_t) nodes_by_id;
+ /* Hash table to map from node Ed25519 ID to node.
+ *
+ * Whenever a node's routerinfo or microdescriptor is about to change,
+ * you should remove it from this map with node_remove_from_ed25519_map().
+ * Whenever a node's routerinfo or microdescriptor has just changed,
+ * you should add it to this map with node_add_to_ed25519_map().
+ */
+ HT_HEAD(nodelist_ed_map, node_t) nodes_by_ed_id;
+
+ /* Set of addresses that belong to nodes we believe in. */
+ address_set_t *node_addrs;
+
+ /* The valid-after time of the last live consensus that initialized the
+ * nodelist. We use this to detect outdated nodelists that need to be
+ * rebuilt using a newer consensus. */
+ time_t live_consensus_valid_after;
+} nodelist_t;
+
+static inline unsigned int
+node_id_hash(const node_t *node)
+{
+ return (unsigned) siphash24g(node->identity, DIGEST_LEN);
+}
+
+static inline unsigned int
+node_id_eq(const node_t *node1, const node_t *node2)
+{
+ return tor_memeq(node1->identity, node2->identity, DIGEST_LEN);
+}
+
+HT_PROTOTYPE(nodelist_map, node_t, ht_ent, node_id_hash, node_id_eq)
+HT_GENERATE2(nodelist_map, node_t, ht_ent, node_id_hash, node_id_eq,
+ 0.6, tor_reallocarray_, tor_free_)
+
+static inline unsigned int
+node_ed_id_hash(const node_t *node)
+{
+ return (unsigned) siphash24g(node->ed25519_id.pubkey, ED25519_PUBKEY_LEN);
+}
+
+static inline unsigned int
+node_ed_id_eq(const node_t *node1, const node_t *node2)
+{
+ return ed25519_pubkey_eq(&node1->ed25519_id, &node2->ed25519_id);
+}
+
+HT_PROTOTYPE(nodelist_ed_map, node_t, ed_ht_ent, node_ed_id_hash,
+ node_ed_id_eq)
+HT_GENERATE2(nodelist_ed_map, node_t, ed_ht_ent, node_ed_id_hash,
+ node_ed_id_eq, 0.6, tor_reallocarray_, tor_free_)
+
+/** The global nodelist. */
+static nodelist_t *the_nodelist=NULL;
+
+/** Create an empty nodelist if we haven't done so already. */
+static void
+init_nodelist(void)
+{
+ if (PREDICT_UNLIKELY(the_nodelist == NULL)) {
+ the_nodelist = tor_malloc_zero(sizeof(nodelist_t));
+ HT_INIT(nodelist_map, &the_nodelist->nodes_by_id);
+ HT_INIT(nodelist_ed_map, &the_nodelist->nodes_by_ed_id);
+ the_nodelist->nodes = smartlist_new();
+ }
+}
+
+/** As node_get_by_id, but returns a non-const pointer */
+MOCK_IMPL(node_t *,
+node_get_mutable_by_id,(const char *identity_digest))
+{
+ node_t search, *node;
+ if (PREDICT_UNLIKELY(the_nodelist == NULL))
+ return NULL;
+
+ memcpy(&search.identity, identity_digest, DIGEST_LEN);
+ node = HT_FIND(nodelist_map, &the_nodelist->nodes_by_id, &search);
+ return node;
+}
+
+/** As node_get_by_ed25519_id, but returns a non-const pointer */
+node_t *
+node_get_mutable_by_ed25519_id(const ed25519_public_key_t *ed_id)
+{
+ node_t search, *node;
+ if (PREDICT_UNLIKELY(the_nodelist == NULL))
+ return NULL;
+ if (BUG(ed_id == NULL) || BUG(ed25519_public_key_is_zero(ed_id)))
+ return NULL;
+
+ memcpy(&search.ed25519_id, ed_id, sizeof(search.ed25519_id));
+ node = HT_FIND(nodelist_ed_map, &the_nodelist->nodes_by_ed_id, &search);
+ return node;
+}
+
+/** Return the node_t whose identity is <b>identity_digest</b>, or NULL
+ * if no such node exists. */
+MOCK_IMPL(const node_t *,
+node_get_by_id,(const char *identity_digest))
+{
+ return node_get_mutable_by_id(identity_digest);
+}
+
+/** Return the node_t whose ed25519 identity is <b>ed_id</b>, or NULL
+ * if no such node exists. */
+MOCK_IMPL(const node_t *,
+node_get_by_ed25519_id,(const ed25519_public_key_t *ed_id))
+{
+ return node_get_mutable_by_ed25519_id(ed_id);
+}
+
+/** Internal: return the node_t whose identity_digest is
+ * <b>identity_digest</b>. If none exists, create a new one, add it to the
+ * nodelist, and return it.
+ *
+ * Requires that the nodelist be initialized.
+ */
+static node_t *
+node_get_or_create(const char *identity_digest)
+{
+ node_t *node;
+
+ if ((node = node_get_mutable_by_id(identity_digest)))
+ return node;
+
+ node = tor_malloc_zero(sizeof(node_t));
+ memcpy(node->identity, identity_digest, DIGEST_LEN);
+ HT_INSERT(nodelist_map, &the_nodelist->nodes_by_id, node);
+
+ smartlist_add(the_nodelist->nodes, node);
+ node->nodelist_idx = smartlist_len(the_nodelist->nodes) - 1;
+
+ node->country = -1;
+
+ return node;
+}
+
+/** Remove <b>node</b> from the ed25519 map (if it is present), and
+ * set its ed25519_id field to zero. */
+static int
+node_remove_from_ed25519_map(node_t *node)
+{
+ tor_assert(the_nodelist);
+ tor_assert(node);
+
+ if (ed25519_public_key_is_zero(&node->ed25519_id)) {
+ return 0;
+ }
+
+ int rv = 0;
+ node_t *search =
+ HT_FIND(nodelist_ed_map, &the_nodelist->nodes_by_ed_id, node);
+ if (BUG(search != node)) {
+ goto clear_and_return;
+ }
+
+ search = HT_REMOVE(nodelist_ed_map, &the_nodelist->nodes_by_ed_id, node);
+ tor_assert(search == node);
+ rv = 1;
+
+ clear_and_return:
+ memset(&node->ed25519_id, 0, sizeof(node->ed25519_id));
+ return rv;
+}
+
+/** If <b>node</b> has an ed25519 id, and it is not already in the ed25519 id
+ * map, set its ed25519_id field, and add it to the ed25519 map.
+ */
+static int
+node_add_to_ed25519_map(node_t *node)
+{
+ tor_assert(the_nodelist);
+ tor_assert(node);
+
+ if (! ed25519_public_key_is_zero(&node->ed25519_id)) {
+ return 0;
+ }
+
+ const ed25519_public_key_t *key = node_get_ed25519_id(node);
+ if (!key) {
+ return 0;
+ }
+
+ node_t *old;
+ memcpy(&node->ed25519_id, key, sizeof(node->ed25519_id));
+ old = HT_FIND(nodelist_ed_map, &the_nodelist->nodes_by_ed_id, node);
+ if (BUG(old)) {
+ /* XXXX order matters here, and this may mean that authorities aren't
+ * pinning. */
+ if (old != node)
+ memset(&node->ed25519_id, 0, sizeof(node->ed25519_id));
+ return 0;
+ }
+
+ HT_INSERT(nodelist_ed_map, &the_nodelist->nodes_by_ed_id, node);
+ return 1;
+}
+
+/* For a given <b>node</b> and the consensus <b>ns</b>, set the hsdir index
+ * for the node, both current and next if possible. This can only fail if the
+ * node_t ed25519 identity key can't be found, which would be a bug. */
+STATIC void
+node_set_hsdir_index(node_t *node, const networkstatus_t *ns)
+{
+ time_t now = approx_time();
+ const ed25519_public_key_t *node_identity_pk;
+ uint8_t *fetch_srv = NULL, *store_first_srv = NULL, *store_second_srv = NULL;
+ uint64_t next_time_period_num, current_time_period_num;
+ uint64_t fetch_tp, store_first_tp, store_second_tp;
+
+ tor_assert(node);
+ tor_assert(ns);
+
+ if (!networkstatus_is_live(ns, now)) {
+ static struct ratelim_t live_consensus_ratelim = RATELIM_INIT(30 * 60);
+ log_fn_ratelim(&live_consensus_ratelim, LOG_INFO, LD_GENERAL,
+ "Not setting hsdir index with a non-live consensus.");
+ goto done;
+ }
+
+ node_identity_pk = node_get_ed25519_id(node);
+ if (node_identity_pk == NULL) {
+ log_debug(LD_GENERAL, "ed25519 identity public key not found when "
+ "trying to build the hsdir indexes for node %s",
+ node_describe(node));
+ goto done;
+ }
+
+ /* Get the current and next time period number. */
+ current_time_period_num = hs_get_time_period_num(0);
+ next_time_period_num = hs_get_next_time_period_num(0);
+
+ /* We always use the current time period for fetching descs */
+ fetch_tp = current_time_period_num;
+
+ /* Now extract the needed SRVs and time periods for building hsdir indices */
+ if (hs_in_period_between_tp_and_srv(ns, now)) {
+ fetch_srv = hs_get_current_srv(fetch_tp, ns);
+
+ store_first_tp = hs_get_previous_time_period_num(0);
+ store_second_tp = current_time_period_num;
+ } else {
+ fetch_srv = hs_get_previous_srv(fetch_tp, ns);
+
+ store_first_tp = current_time_period_num;
+ store_second_tp = next_time_period_num;
+ }
+
+ /* We always use the old SRV for storing the first descriptor and the latest
+ * SRV for storing the second descriptor */
+ store_first_srv = hs_get_previous_srv(store_first_tp, ns);
+ store_second_srv = hs_get_current_srv(store_second_tp, ns);
+
+ /* Build the fetch index. */
+ hs_build_hsdir_index(node_identity_pk, fetch_srv, fetch_tp,
+ node->hsdir_index.fetch);
+
+ /* If we are in the time segment between SRV#N and TP#N, the fetch index is
+ the same as the first store index */
+ if (!hs_in_period_between_tp_and_srv(ns, now)) {
+ memcpy(node->hsdir_index.store_first, node->hsdir_index.fetch,
+ sizeof(node->hsdir_index.store_first));
+ } else {
+ hs_build_hsdir_index(node_identity_pk, store_first_srv, store_first_tp,
+ node->hsdir_index.store_first);
+ }
+
+ /* If we are in the time segment between TP#N and SRV#N+1, the fetch index is
+ the same as the second store index */
+ if (hs_in_period_between_tp_and_srv(ns, now)) {
+ memcpy(node->hsdir_index.store_second, node->hsdir_index.fetch,
+ sizeof(node->hsdir_index.store_second));
+ } else {
+ hs_build_hsdir_index(node_identity_pk, store_second_srv, store_second_tp,
+ node->hsdir_index.store_second);
+ }
+
+ done:
+ tor_free(fetch_srv);
+ tor_free(store_first_srv);
+ tor_free(store_second_srv);
+ return;
+}
+
+/** Called when a node's address changes. */
+static void
+node_addrs_changed(node_t *node)
+{
+ node->last_reachable = node->last_reachable6 = 0;
+ node->country = -1;
+}
+
+/** Add all address information about <b>node</b> to the current address
+ * set (if there is one).
+ */
+static void
+node_add_to_address_set(const node_t *node)
+{
+ if (!the_nodelist || !the_nodelist->node_addrs)
+ return;
+
+ /* These various address sources can be redundant, but it's likely faster
+ * to add them all than to compare them all for equality. */
+
+ if (node->rs) {
+ if (node->rs->addr)
+ address_set_add_ipv4h(the_nodelist->node_addrs, node->rs->addr);
+ if (!tor_addr_is_null(&node->rs->ipv6_addr))
+ address_set_add(the_nodelist->node_addrs, &node->rs->ipv6_addr);
+ }
+ if (node->ri) {
+ if (node->ri->addr)
+ address_set_add_ipv4h(the_nodelist->node_addrs, node->ri->addr);
+ if (!tor_addr_is_null(&node->ri->ipv6_addr))
+ address_set_add(the_nodelist->node_addrs, &node->ri->ipv6_addr);
+ }
+ if (node->md) {
+ if (!tor_addr_is_null(&node->md->ipv6_addr))
+ address_set_add(the_nodelist->node_addrs, &node->md->ipv6_addr);
+ }
+}
+
+/** Return true if <b>addr</b> is the address of some node in the nodelist.
+ * If not, probably return false. */
+int
+nodelist_probably_contains_address(const tor_addr_t *addr)
+{
+ if (BUG(!addr))
+ return 0;
+
+ if (!the_nodelist || !the_nodelist->node_addrs)
+ return 0;
+
+ return address_set_probably_contains(the_nodelist->node_addrs, addr);
+}
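+
+/* Example (illustrative): since the underlying address_set_t is a Bloom
+ * filter, a true result may be a false positive, but a false result is
+ * definitive:
+ *
+ *   if (!nodelist_probably_contains_address(&addr))
+ *     ... we can be sure no known node has this address ...
+ */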
+
+/** Add <b>ri</b> to an appropriate node in the nodelist. If we replace an
+ * old routerinfo, and <b>ri_old_out</b> is not NULL, set *<b>ri_old_out</b>
+ * to the previous routerinfo.
+ */
+node_t *
+nodelist_set_routerinfo(routerinfo_t *ri, routerinfo_t **ri_old_out)
+{
+ node_t *node;
+ const char *id_digest;
+ int had_router = 0;
+ tor_assert(ri);
+
+ init_nodelist();
+ id_digest = ri->cache_info.identity_digest;
+ node = node_get_or_create(id_digest);
+
+ node_remove_from_ed25519_map(node);
+
+ if (node->ri) {
+ if (!routers_have_same_or_addrs(node->ri, ri)) {
+ node_addrs_changed(node);
+ }
+ had_router = 1;
+ if (ri_old_out)
+ *ri_old_out = node->ri;
+ } else {
+ if (ri_old_out)
+ *ri_old_out = NULL;
+ }
+ node->ri = ri;
+
+ node_add_to_ed25519_map(node);
+
+ if (node->country == -1)
+ node_set_country(node);
+
+ if (authdir_mode(get_options()) && !had_router) {
+ const char *discard=NULL;
+ uint32_t status = dirserv_router_get_status(ri, &discard, LOG_INFO);
+ dirserv_set_node_flags_from_authoritative_status(node, status);
+ }
+
+ /* Setting the HSDir index requires the ed25519 identity key which can
+ * only be found either in the ri or md. This is why this is called here.
+ * Only nodes supporting the HSDir=2 protocol version need this index. */
+ if (node->rs && node->rs->pv.supports_v3_hsdir) {
+ node_set_hsdir_index(node,
+ networkstatus_get_latest_consensus());
+ }
+
+ node_add_to_address_set(node);
+
+ return node;
+}
+
+/** Set the appropriate node_t to use <b>md</b> as its microdescriptor.
+ *
+ * Called when a new microdesc has arrived and the usable consensus flavor
+ * is "microdesc".
+ **/
+node_t *
+nodelist_add_microdesc(microdesc_t *md)
+{
+ networkstatus_t *ns =
+ networkstatus_get_latest_consensus_by_flavor(FLAV_MICRODESC);
+ const routerstatus_t *rs;
+ node_t *node;
+ if (ns == NULL)
+ return NULL;
+ init_nodelist();
+
+ /* Microdescriptors don't carry an identity digest, so we need to figure
+ * it out by looking up the routerstatus. */
+ rs = router_get_consensus_status_by_descriptor_digest(ns, md->digest);
+ if (rs == NULL)
+ return NULL;
+ node = node_get_mutable_by_id(rs->identity_digest);
+ if (node == NULL)
+ return NULL;
+
+ node_remove_from_ed25519_map(node);
+ if (node->md)
+ node->md->held_by_nodes--;
+
+ node->md = md;
+ md->held_by_nodes++;
+ /* Setting the HSDir index requires the ed25519 identity key which can
+ * only be found either in the ri or md. This is why this is called here.
+   * Only nodes supporting the HSDir=2 protocol version need this index. */
+ if (rs->pv.supports_v3_hsdir) {
+ node_set_hsdir_index(node, ns);
+ }
+ node_add_to_ed25519_map(node);
+ node_add_to_address_set(node);
+
+ return node;
+}
+
+/* Default value. */
+#define ESTIMATED_ADDRESS_PER_NODE 2
+
+/* Return the estimated number of addresses per node_t. This is used for the
+ * size of the bloom filter in the nodelist (node_addrs). */
+MOCK_IMPL(int,
+get_estimated_address_per_node, (void))
+{
+ return ESTIMATED_ADDRESS_PER_NODE;
+}
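+
+/* Illustrative arithmetic: with the default estimate of 2 addresses per
+ * node, a consensus listing, say, 7000 relays (a hypothetical figure) leads
+ * nodelist_set_consensus() below to size the address set for roughly
+ * 7000 * 2 = 14000 entries:
+ *
+ *   int estimated = smartlist_len(ns->routerstatus_list) *
+ *                   get_estimated_address_per_node();
+ *   the_nodelist->node_addrs = address_set_new(estimated);
+ */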
+
+/** Tell the nodelist that the current usable consensus is <b>ns</b>.
+ * This makes the nodelist change all of the routerstatus entries for
+ * the nodes, drop nodes that no longer have enough info to get used,
+ * and grab microdescriptors into nodes as appropriate.
+ */
+void
+nodelist_set_consensus(networkstatus_t *ns)
+{
+ const or_options_t *options = get_options();
+ int authdir = authdir_mode_v3(options);
+
+ init_nodelist();
+ if (ns->flavor == FLAV_MICRODESC)
+ (void) get_microdesc_cache(); /* Make sure it exists first. */
+
+ SMARTLIST_FOREACH(the_nodelist->nodes, node_t *, node,
+ node->rs = NULL);
+
+ /* Conservatively estimate that every node will have 2 addresses. */
+ const int estimated_addresses = smartlist_len(ns->routerstatus_list) *
+ get_estimated_address_per_node();
+ address_set_free(the_nodelist->node_addrs);
+ the_nodelist->node_addrs = address_set_new(estimated_addresses);
+
+ SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, routerstatus_t *, rs) {
+ node_t *node = node_get_or_create(rs->identity_digest);
+ node->rs = rs;
+ if (ns->flavor == FLAV_MICRODESC) {
+ if (node->md == NULL ||
+ tor_memneq(node->md->digest,rs->descriptor_digest,DIGEST256_LEN)) {
+ node_remove_from_ed25519_map(node);
+ if (node->md)
+ node->md->held_by_nodes--;
+ node->md = microdesc_cache_lookup_by_digest256(NULL,
+ rs->descriptor_digest);
+ if (node->md)
+ node->md->held_by_nodes++;
+ node_add_to_ed25519_map(node);
+ }
+ }
+
+ if (rs->pv.supports_v3_hsdir) {
+ node_set_hsdir_index(node, ns);
+ }
+ node_set_country(node);
+
+ /* If we're not an authdir, believe others. */
+ if (!authdir) {
+ node->is_valid = rs->is_valid;
+ node->is_running = rs->is_flagged_running;
+ node->is_fast = rs->is_fast;
+ node->is_stable = rs->is_stable;
+ node->is_possible_guard = rs->is_possible_guard;
+ node->is_exit = rs->is_exit;
+ node->is_bad_exit = rs->is_bad_exit;
+ node->is_hs_dir = rs->is_hs_dir;
+ node->ipv6_preferred = 0;
+ if (fascist_firewall_prefer_ipv6_orport(options) &&
+ (tor_addr_is_null(&rs->ipv6_addr) == 0 ||
+ (node->md && tor_addr_is_null(&node->md->ipv6_addr) == 0)))
+ node->ipv6_preferred = 1;
+ }
+
+ } SMARTLIST_FOREACH_END(rs);
+
+ nodelist_purge();
+
+ /* Now add all the nodes we have to the address set. */
+ SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
+ node_add_to_address_set(node);
+ } SMARTLIST_FOREACH_END(node);
+
+ if (! authdir) {
+ SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
+ /* We have no routerstatus for this router. Clear flags so we can skip
+ * it, maybe.*/
+ if (!node->rs) {
+ tor_assert(node->ri); /* if it had only an md, or nothing, purge
+ * would have removed it. */
+ if (node->ri->purpose == ROUTER_PURPOSE_GENERAL) {
+ /* Clear all flags. */
+ node->is_valid = node->is_running = node->is_hs_dir =
+ node->is_fast = node->is_stable =
+ node->is_possible_guard = node->is_exit =
+ node->is_bad_exit = node->ipv6_preferred = 0;
+ }
+ }
+ } SMARTLIST_FOREACH_END(node);
+ }
+
+ /* If the consensus is live, note down the consensus valid-after that formed
+ * the nodelist. */
+ if (networkstatus_is_live(ns, approx_time())) {
+ the_nodelist->live_consensus_valid_after = ns->valid_after;
+ }
+}
+
+/** Return 1 iff <b>node</b> has Exit flag and no BadExit flag.
+ * Otherwise, return 0.
+ */
+int
+node_is_good_exit(const node_t *node)
+{
+ return node->is_exit && ! node->is_bad_exit;
+}
+
+/** Helper: return true iff a node has a usable amount of information. */
+static inline int
+node_is_usable(const node_t *node)
+{
+ return (node->rs) || (node->ri);
+}
+
+/** Tell the nodelist that <b>md</b> is no longer a microdescriptor for the
+ * node with <b>identity_digest</b>. */
+void
+nodelist_remove_microdesc(const char *identity_digest, microdesc_t *md)
+{
+ node_t *node = node_get_mutable_by_id(identity_digest);
+ if (node && node->md == md) {
+ node->md = NULL;
+ md->held_by_nodes--;
+ if (! node_get_ed25519_id(node)) {
+ node_remove_from_ed25519_map(node);
+ }
+ }
+}
+
+/** Tell the nodelist that <b>ri</b> is no longer in the routerlist. */
+void
+nodelist_remove_routerinfo(routerinfo_t *ri)
+{
+ node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
+ if (node && node->ri == ri) {
+ node->ri = NULL;
+ if (! node_is_usable(node)) {
+ nodelist_drop_node(node, 1);
+ node_free(node);
+ }
+ }
+}
+
+/** Remove <b>node</b> from the nodelist. (Asserts that it was there to begin
+ * with.) */
+static void
+nodelist_drop_node(node_t *node, int remove_from_ht)
+{
+ node_t *tmp;
+ int idx;
+ if (remove_from_ht) {
+ tmp = HT_REMOVE(nodelist_map, &the_nodelist->nodes_by_id, node);
+ tor_assert(tmp == node);
+ }
+ node_remove_from_ed25519_map(node);
+
+ idx = node->nodelist_idx;
+ tor_assert(idx >= 0);
+
+ tor_assert(node == smartlist_get(the_nodelist->nodes, idx));
+ smartlist_del(the_nodelist->nodes, idx);
+ if (idx < smartlist_len(the_nodelist->nodes)) {
+ tmp = smartlist_get(the_nodelist->nodes, idx);
+ tmp->nodelist_idx = idx;
+ }
+ node->nodelist_idx = -1;
+}
+
+/** Return a newly allocated smartlist of the nodes that have <b>md</b> as
+ * their microdescriptor. */
+smartlist_t *
+nodelist_find_nodes_with_microdesc(const microdesc_t *md)
+{
+ smartlist_t *result = smartlist_new();
+
+ if (the_nodelist == NULL)
+ return result;
+
+ SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
+ if (node->md == md) {
+ smartlist_add(result, node);
+ }
+ } SMARTLIST_FOREACH_END(node);
+
+ return result;
+}
+
+/** Release storage held by <b>node</b> */
+static void
+node_free_(node_t *node)
+{
+ if (!node)
+ return;
+ if (node->md)
+ node->md->held_by_nodes--;
+ tor_assert(node->nodelist_idx == -1);
+ tor_free(node);
+}
+
+/** Remove all entries from the nodelist that don't have enough info to be
+ * usable for anything. */
+void
+nodelist_purge(void)
+{
+ node_t **iter;
+ if (PREDICT_UNLIKELY(the_nodelist == NULL))
+ return;
+
+ /* Remove the non-usable nodes. */
+ for (iter = HT_START(nodelist_map, &the_nodelist->nodes_by_id); iter; ) {
+ node_t *node = *iter;
+
+ if (node->md && !node->rs) {
+ /* An md is only useful if there is an rs. */
+ node->md->held_by_nodes--;
+ node->md = NULL;
+ }
+
+ if (node_is_usable(node)) {
+ iter = HT_NEXT(nodelist_map, &the_nodelist->nodes_by_id, iter);
+ } else {
+ iter = HT_NEXT_RMV(nodelist_map, &the_nodelist->nodes_by_id, iter);
+ nodelist_drop_node(node, 0);
+ node_free(node);
+ }
+ }
+ nodelist_assert_ok();
+}
+
+/** Release all storage held by the nodelist. */
+void
+nodelist_free_all(void)
+{
+ if (PREDICT_UNLIKELY(the_nodelist == NULL))
+ return;
+
+ HT_CLEAR(nodelist_map, &the_nodelist->nodes_by_id);
+ HT_CLEAR(nodelist_ed_map, &the_nodelist->nodes_by_ed_id);
+ SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
+ node->nodelist_idx = -1;
+ node_free(node);
+ } SMARTLIST_FOREACH_END(node);
+
+ smartlist_free(the_nodelist->nodes);
+
+ address_set_free(the_nodelist->node_addrs);
+ the_nodelist->node_addrs = NULL;
+
+ tor_free(the_nodelist);
+}
+
+/** Check that the nodelist is internally consistent, and consistent with
+ * the directory info it's derived from.
+ */
+void
+nodelist_assert_ok(void)
+{
+ routerlist_t *rl = router_get_routerlist();
+ networkstatus_t *ns = networkstatus_get_latest_consensus();
+ digestmap_t *dm;
+
+ if (!the_nodelist)
+ return;
+
+ dm = digestmap_new();
+
+ /* every routerinfo in rl->routers should be in the nodelist. */
+ if (rl) {
+ SMARTLIST_FOREACH_BEGIN(rl->routers, routerinfo_t *, ri) {
+ const node_t *node = node_get_by_id(ri->cache_info.identity_digest);
+ tor_assert(node && node->ri == ri);
+ tor_assert(fast_memeq(ri->cache_info.identity_digest,
+ node->identity, DIGEST_LEN));
+ tor_assert(! digestmap_get(dm, node->identity));
+ digestmap_set(dm, node->identity, (void*)node);
+ } SMARTLIST_FOREACH_END(ri);
+ }
+
+ /* every routerstatus in ns should be in the nodelist */
+ if (ns) {
+ SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, routerstatus_t *, rs) {
+ const node_t *node = node_get_by_id(rs->identity_digest);
+ tor_assert(node && node->rs == rs);
+ tor_assert(fast_memeq(rs->identity_digest, node->identity, DIGEST_LEN));
+ digestmap_set(dm, node->identity, (void*)node);
+ if (ns->flavor == FLAV_MICRODESC) {
+ /* If it's a microdesc consensus, every entry that has a
+ * microdescriptor should be in the nodelist.
+ */
+ microdesc_t *md =
+ microdesc_cache_lookup_by_digest256(NULL, rs->descriptor_digest);
+ tor_assert(md == node->md);
+ if (md)
+ tor_assert(md->held_by_nodes >= 1);
+ }
+ } SMARTLIST_FOREACH_END(rs);
+ }
+
+ /* The nodelist should have no other entries, and its entries should be
+ * well-formed. */
+ SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
+ tor_assert(digestmap_get(dm, node->identity) != NULL);
+ tor_assert(node_sl_idx == node->nodelist_idx);
+ } SMARTLIST_FOREACH_END(node);
+
+ /* Every node listed with an ed25519 identity should be listed by that
+ * identity.
+ */
+ SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
+ if (!ed25519_public_key_is_zero(&node->ed25519_id)) {
+ tor_assert(node == node_get_by_ed25519_id(&node->ed25519_id));
+ }
+ } SMARTLIST_FOREACH_END(node);
+
+ node_t **idx;
+ HT_FOREACH(idx, nodelist_ed_map, &the_nodelist->nodes_by_ed_id) {
+ node_t *node = *idx;
+ tor_assert(node == node_get_by_ed25519_id(&node->ed25519_id));
+ }
+
+ tor_assert((long)smartlist_len(the_nodelist->nodes) ==
+ (long)HT_SIZE(&the_nodelist->nodes_by_id));
+
+ tor_assert((long)smartlist_len(the_nodelist->nodes) >=
+ (long)HT_SIZE(&the_nodelist->nodes_by_ed_id));
+
+ digestmap_free(dm, NULL);
+}
+
+/** Ensure that the nodelist has been created with the most recent consensus.
+ * If that's not the case, make it so. */
+void
+nodelist_ensure_freshness(networkstatus_t *ns)
+{
+ tor_assert(ns);
+
+ /* We don't even have a nodelist: this is a NOP. */
+ if (!the_nodelist) {
+ return;
+ }
+
+ if (the_nodelist->live_consensus_valid_after != ns->valid_after) {
+ log_info(LD_GENERAL, "Nodelist was not fresh: rebuilding. (%d / %d)",
+ (int) the_nodelist->live_consensus_valid_after,
+ (int) ns->valid_after);
+ nodelist_set_consensus(ns);
+ }
+}
+/** Return a list of a node_t * for every node we know about. The caller
+ * MUST NOT modify the list. (You can set and clear flags in the nodes if
+ * you must, but you must not add or remove nodes.) */
+MOCK_IMPL(smartlist_t *,
+nodelist_get_list,(void))
+{
+ init_nodelist();
+ return the_nodelist->nodes;
+}
+
+/** Given a hex-encoded nickname of the format DIGEST, $DIGEST, $DIGEST=name,
+ * or $DIGEST~name, return the node with the matching identity digest and
+ * nickname (if any). Return NULL if no such node exists, or if <b>hex_id</b>
+ * is not well-formed. (The <b>flags</b> argument is currently unused.) */
+const node_t *
+node_get_by_hex_id(const char *hex_id, unsigned flags)
+{
+ char digest_buf[DIGEST_LEN];
+ char nn_buf[MAX_NICKNAME_LEN+1];
+ char nn_char='\0';
+
+ (void) flags; // XXXX
+
+ if (hex_digest_nickname_decode(hex_id, digest_buf, &nn_char, nn_buf)==0) {
+ const node_t *node = node_get_by_id(digest_buf);
+ if (!node)
+ return NULL;
+ if (nn_char == '=') {
+ /* "=" indicates a Named relay, but there aren't any of those now. */
+ return NULL;
+ }
+ return node;
+ }
+
+ return NULL;
+}
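+
+/* Example (illustrative): assuming the 40-hex-digit digest below matches a
+ * node we know about, all of these spellings name the same relay:
+ *
+ *   node_get_by_hex_id("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 0);
+ *   node_get_by_hex_id("$AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 0);
+ *   node_get_by_hex_id("$AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA~nick", 0);
+ *
+ * The "$DIGEST=name" form still parses, but yields NULL, since Named relays
+ * no longer exist. */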
+
+/** Given a nickname (possibly verbose, possibly a hexadecimal digest), return
+ * the corresponding node_t, or NULL if none exists. Warn the user if they
+ * have specified a router by nickname, unless the NNF_NO_WARN_UNNAMED bit is
+ * set in <b>flags</b>. */
+MOCK_IMPL(const node_t *,
+node_get_by_nickname,(const char *nickname, unsigned flags))
+{
+ const int warn_if_unnamed = !(flags & NNF_NO_WARN_UNNAMED);
+
+ if (!the_nodelist)
+ return NULL;
+
+ /* Handle these cases: DIGEST, $DIGEST, $DIGEST=name, $DIGEST~name. */
+ {
+ const node_t *node;
+ if ((node = node_get_by_hex_id(nickname, flags)) != NULL)
+ return node;
+ }
+
+ if (!strcasecmp(nickname, UNNAMED_ROUTER_NICKNAME))
+ return NULL;
+
+ /* Okay, so the name is not canonical for anybody. */
+ {
+ smartlist_t *matches = smartlist_new();
+ const node_t *choice = NULL;
+
+ SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
+ if (!strcasecmp(node_get_nickname(node), nickname))
+ smartlist_add(matches, node);
+ } SMARTLIST_FOREACH_END(node);
+
+ if (smartlist_len(matches)>1 && warn_if_unnamed) {
+ int any_unwarned = 0;
+ SMARTLIST_FOREACH_BEGIN(matches, node_t *, node) {
+ if (!node->name_lookup_warned) {
+ node->name_lookup_warned = 1;
+ any_unwarned = 1;
+ }
+ } SMARTLIST_FOREACH_END(node);
+
+ if (any_unwarned) {
+ log_warn(LD_CONFIG, "There are multiple matches for the name %s, "
+ "but none is listed as Named in the directory consensus. "
+ "Choosing one arbitrarily.", nickname);
+ }
+ } else if (smartlist_len(matches)==1 && warn_if_unnamed) {
+ char fp[HEX_DIGEST_LEN+1];
+ node_t *node = smartlist_get(matches, 0);
+ if (! node->name_lookup_warned) {
+ base16_encode(fp, sizeof(fp), node->identity, DIGEST_LEN);
+ log_warn(LD_CONFIG,
+ "You specified a relay \"%s\" by name, but nicknames can be "
+ "used by any relay, not just the one you meant. "
+ "To make sure you get the same relay in the future, refer "
+ "to it by key, as \"$%s\".", nickname, fp);
+ node->name_lookup_warned = 1;
+ }
+ }
+
+ if (smartlist_len(matches))
+ choice = smartlist_get(matches, 0);
+
+ smartlist_free(matches);
+ return choice;
+ }
+}
+
+/** Return the Ed25519 identity key for the provided node, or NULL if it
+ * doesn't have one. */
+const ed25519_public_key_t *
+node_get_ed25519_id(const node_t *node)
+{
+ const ed25519_public_key_t *ri_pk = NULL;
+ const ed25519_public_key_t *md_pk = NULL;
+
+ if (node->ri) {
+ if (node->ri->cache_info.signing_key_cert) {
+ ri_pk = &node->ri->cache_info.signing_key_cert->signing_key;
+ /* Checking whether routerinfo ed25519 is all zero.
+ * Our descriptor parser should make sure this never happens. */
+ if (BUG(ed25519_public_key_is_zero(ri_pk)))
+ ri_pk = NULL;
+ }
+ }
+
+ if (node->md) {
+ if (node->md->ed25519_identity_pkey) {
+ md_pk = node->md->ed25519_identity_pkey;
+ /* Checking whether microdesc ed25519 is all zero.
+ * Our descriptor parser should make sure this never happens. */
+ if (BUG(ed25519_public_key_is_zero(md_pk)))
+ md_pk = NULL;
+ }
+ }
+
+ if (ri_pk && md_pk) {
+ if (ed25519_pubkey_eq(ri_pk, md_pk)) {
+ return ri_pk;
+ } else {
+      /* This can happen if the relay gets flagged NoEdConsensus, in which
+       * case it would be triggered on all relays of the network; thus we
+       * only log a protocol warning. */
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Inconsistent ed25519 identities in the nodelist");
+ return NULL;
+ }
+ } else if (ri_pk) {
+ return ri_pk;
+ } else {
+ return md_pk;
+ }
+}
+
+/** Return true iff this node's Ed25519 identity matches <b>id</b>.
+ * (An absent Ed25519 identity matches NULL or zero.) */
+int
+node_ed25519_id_matches(const node_t *node, const ed25519_public_key_t *id)
+{
+ const ed25519_public_key_t *node_id = node_get_ed25519_id(node);
+ if (node_id == NULL || ed25519_public_key_is_zero(node_id)) {
+ return id == NULL || ed25519_public_key_is_zero(id);
+ } else {
+ return id && ed25519_pubkey_eq(node_id, id);
+ }
+}
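+
+/* Example (illustrative sketch): checking whether a requested ed25519
+ * identity refers to this node; "wanted_id" is a hypothetical
+ * ed25519_public_key_t supplied by the caller.
+ *
+ *   if (node_ed25519_id_matches(node, &wanted_id)) {
+ *     // Either the identities match, or neither side has one.
+ *   }
+ */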
+
+/** Dummy object that should be unreturnable. Used to ensure that
+ * node_get_protover_summary_flags() always returns non-NULL. */
+static const protover_summary_flags_t zero_protover_flags = {
+ 0,0,0,0,0,0,0
+};
+
+/** Return the protover_summary_flags for a given node. */
+static const protover_summary_flags_t *
+node_get_protover_summary_flags(const node_t *node)
+{
+ if (node->rs) {
+ return &node->rs->pv;
+ } else if (node->ri) {
+ return &node->ri->pv;
+ } else {
+ /* This should be impossible: every node should have a routerstatus or a
+ * router descriptor or both. But just in case we've messed up somehow,
+ * return a nice empty set of flags to indicate "this node supports
+ * nothing." */
+ tor_assert_nonfatal_unreached_once();
+ return &zero_protover_flags;
+ }
+}
+
+/** Return true iff <b>node</b> supports authenticating itself
+ * by ed25519 ID during the link handshake. If <b>compatible_with_us</b>,
+ * it needs to be using a link authentication method that we understand.
+ * If not, any plausible link authentication method will do. */
+int
+node_supports_ed25519_link_authentication(const node_t *node,
+ int compatible_with_us)
+{
+ if (! node_get_ed25519_id(node))
+ return 0;
+
+ const protover_summary_flags_t *pv = node_get_protover_summary_flags(node);
+
+ if (compatible_with_us)
+ return pv->supports_ed25519_link_handshake_compat;
+ else
+ return pv->supports_ed25519_link_handshake_any;
+}
+
+/** Return true iff <b>node</b> supports the hidden service directory version
+ * 3 protocol (proposal 224). */
+int
+node_supports_v3_hsdir(const node_t *node)
+{
+ tor_assert(node);
+
+ return node_get_protover_summary_flags(node)->supports_v3_hsdir;
+}
+
+/** Return true iff <b>node</b> supports ed25519 authentication as a hidden
+ * service introduction point. */
+int
+node_supports_ed25519_hs_intro(const node_t *node)
+{
+ tor_assert(node);
+
+ return node_get_protover_summary_flags(node)->supports_ed25519_hs_intro;
+}
+
+/** Return true iff <b>node</b> can act as a rendezvous point for hidden
+ * service version 3 (HSRend=2). */
+int
+node_supports_v3_rendezvous_point(const node_t *node)
+{
+ tor_assert(node);
+
+ return node_get_protover_summary_flags(node)->supports_v3_rendezvous_point;
+}
+
+/** Return the RSA ID key's SHA1 digest for the provided node. */
+const uint8_t *
+node_get_rsa_id_digest(const node_t *node)
+{
+ tor_assert(node);
+ return (const uint8_t*)node->identity;
+}
+
+/** Return the nickname of <b>node</b>, or NULL if we can't find one. */
+const char *
+node_get_nickname(const node_t *node)
+{
+ tor_assert(node);
+ if (node->rs)
+ return node->rs->nickname;
+ else if (node->ri)
+ return node->ri->nickname;
+ else
+ return NULL;
+}
+
+/** Return true iff <b>node</b> appears to be a directory authority or
+ * directory cache */
+int
+node_is_dir(const node_t *node)
+{
+ if (node->rs) {
+ routerstatus_t * rs = node->rs;
+    /* is_v2_dir is true if the relay serves directory requests, either
+     * tunnelled over the ORPort or through a DirPort. */
+ return rs->is_v2_dir;
+ } else if (node->ri) {
+ routerinfo_t * ri = node->ri;
+    /* True if tunnelled requests are supported, or a DirPort is set. */
+ return ri->supports_tunnelled_dir_requests;
+ } else {
+ return 0;
+ }
+}
+
+/** Return true iff <b>node</b> has either kind of descriptor -- that
+ * is, a routerdescriptor or a microdescriptor.
+ *
+ * You should probably use node_has_preferred_descriptor() instead.
+ **/
+int
+node_has_any_descriptor(const node_t *node)
+{
+ return (node->ri ||
+ (node->rs && node->md));
+}
+
+/** Return true iff <b>node</b> has the kind of descriptor we would prefer to
+ * use for it, given our configuration and how we intend to use the node.
+ *
+ * If <b>for_direct_connect</b> is true, we intend to connect to the node
+ * directly, as the first hop of a circuit; otherwise, we intend to connect to
+ * it indirectly, or use it as if we were connecting to it indirectly. */
+int
+node_has_preferred_descriptor(const node_t *node,
+ int for_direct_connect)
+{
+ const int is_bridge = node_is_a_configured_bridge(node);
+ const int we_use_mds = we_use_microdescriptors_for_circuits(get_options());
+
+ if ((is_bridge && for_direct_connect) || !we_use_mds) {
+ /* We need an ri in this case. */
+ if (!node->ri)
+ return 0;
+ } else {
+ /* Otherwise we need an rs and an md. */
+ if (node->rs == NULL || node->md == NULL)
+ return 0;
+ }
+
+ return 1;
+}
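+
+/* Illustrative summary of node_has_preferred_descriptor(): for a bridge we
+ * will contact directly, or when we are not using microdescriptors, a full
+ * routerinfo (ri) is required; otherwise a routerstatus (rs) plus
+ * microdescriptor (md) is required.  A hypothetical first-hop check could
+ * read:
+ *
+ *   if (!node_has_preferred_descriptor(node, 1))
+ *     return 0;  // Not usable as a direct (first) hop yet.
+ */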
+
+/** Return the router_purpose of <b>node</b>. */
+int
+node_get_purpose(const node_t *node)
+{
+ if (node->ri)
+ return node->ri->purpose;
+ else
+ return ROUTER_PURPOSE_GENERAL;
+}
+
+/** Compute the verbose ("extended") nickname of <b>node</b> and store it
+ * into the MAX_VERBOSE_NICKNAME_LEN+1 character buffer at
+ * <b>verbose_name_out</b> */
+void
+node_get_verbose_nickname(const node_t *node,
+ char *verbose_name_out)
+{
+ const char *nickname = node_get_nickname(node);
+ verbose_name_out[0] = '$';
+ base16_encode(verbose_name_out+1, HEX_DIGEST_LEN+1, node->identity,
+ DIGEST_LEN);
+ if (!nickname)
+ return;
+ verbose_name_out[1+HEX_DIGEST_LEN] = '~';
+ strlcpy(verbose_name_out+1+HEX_DIGEST_LEN+1, nickname, MAX_NICKNAME_LEN+1);
+}
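+
+/* Illustrative sizing sketch: the verbose nickname is "$", then
+ * HEX_DIGEST_LEN hex digits, then optionally "~" and up to MAX_NICKNAME_LEN
+ * characters of nickname, plus a terminating NUL -- so a buffer of
+ * MAX_VERBOSE_NICKNAME_LEN+1 bytes is large enough:
+ *
+ *   char buf[MAX_VERBOSE_NICKNAME_LEN+1];
+ *   node_get_verbose_nickname(node, buf);
+ */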
+
+/** Compute the verbose ("extended") nickname of node with
+ * given <b>id_digest</b> and store it into the MAX_VERBOSE_NICKNAME_LEN+1
+ * character buffer at <b>verbose_name_out</b>
+ *
+ * If node_get_by_id() returns NULL, base 16 encoding of
+ * <b>id_digest</b> is returned instead. */
+void
+node_get_verbose_nickname_by_id(const char *id_digest,
+ char *verbose_name_out)
+{
+ const node_t *node = node_get_by_id(id_digest);
+ if (!node) {
+ verbose_name_out[0] = '$';
+ base16_encode(verbose_name_out+1, HEX_DIGEST_LEN+1, id_digest, DIGEST_LEN);
+ } else {
+ node_get_verbose_nickname(node, verbose_name_out);
+ }
+}
+
+/** Return true iff it seems that <b>node</b> allows circuits to exit
+ * through it directly from the client. */
+int
+node_allows_single_hop_exits(const node_t *node)
+{
+ if (node && node->ri)
+ return node->ri->allow_single_hop_exits;
+ else
+ return 0;
+}
+
+/** Return true iff it seems that <b>node</b> has an exit policy that doesn't
+ * actually permit anything to exit, or we don't know its exit policy */
+int
+node_exit_policy_rejects_all(const node_t *node)
+{
+ if (node->rejects_all)
+ return 1;
+
+ if (node->ri)
+ return node->ri->policy_is_reject_star;
+ else if (node->md)
+ return node->md->exit_policy == NULL ||
+ short_policy_is_reject_star(node->md->exit_policy);
+ else
+ return 1;
+}
+
+/** Return true iff the exit policy for <b>node</b> is such that we can treat
+ * rejecting an address of type <b>family</b> unexpectedly as a sign of that
+ * node's failure. */
+int
+node_exit_policy_is_exact(const node_t *node, sa_family_t family)
+{
+ if (family == AF_UNSPEC) {
+ return 1; /* Rejecting an address but not telling us what address
+ * is a bad sign. */
+ } else if (family == AF_INET) {
+ return node->ri != NULL;
+ } else if (family == AF_INET6) {
+ return 0;
+ }
+ tor_fragile_assert();
+ return 1;
+}
+
+/* Check if the "addr" and port_field fields from r are a valid non-listening
+ * address/port. If so, set valid to true and add a newly allocated
+ * tor_addr_port_t containing "addr" and port_field to sl.
+ * "addr" is an IPv4 host-order address and port_field is a uint16_t.
+ * r is typically a routerinfo_t or routerstatus_t.
+ */
+#define SL_ADD_NEW_IPV4_AP(r, port_field, sl, valid) \
+ STMT_BEGIN \
+ if (tor_addr_port_is_valid_ipv4h((r)->addr, (r)->port_field, 0)) { \
+ valid = 1; \
+ tor_addr_port_t *ap = tor_malloc(sizeof(tor_addr_port_t)); \
+ tor_addr_from_ipv4h(&ap->addr, (r)->addr); \
+ ap->port = (r)->port_field; \
+ smartlist_add((sl), ap); \
+ } \
+ STMT_END
+
+/* Check if the "addr" and port_field fields from r are a valid non-listening
+ * address/port. If so, set valid to true and add a newly allocated
+ * tor_addr_port_t containing "addr" and port_field to sl.
+ * "addr" is a tor_addr_t and port_field is a uint16_t.
+ * r is typically a routerinfo_t or routerstatus_t.
+ */
+#define SL_ADD_NEW_IPV6_AP(r, port_field, sl, valid) \
+ STMT_BEGIN \
+ if (tor_addr_port_is_valid(&(r)->ipv6_addr, (r)->port_field, 0)) { \
+ valid = 1; \
+ tor_addr_port_t *ap = tor_malloc(sizeof(tor_addr_port_t)); \
+ tor_addr_copy(&ap->addr, &(r)->ipv6_addr); \
+ ap->port = (r)->port_field; \
+ smartlist_add((sl), ap); \
+ } \
+ STMT_END
+
+/** Return a list of tor_addr_port_t with all OR ports (that is, IP
+ * address + TCP port) for <b>node</b>.  Caller must free all elements
+ * using tor_free() and free the list using smartlist_free().
+ *
+ * XXX this is potentially a memory fragmentation hog -- if on
+ * critical path consider the option of having the caller allocate the
+ * memory
+ */
+smartlist_t *
+node_get_all_orports(const node_t *node)
+{
+ smartlist_t *sl = smartlist_new();
+ int valid = 0;
+
+ /* Find a valid IPv4 address and port */
+ if (node->ri != NULL) {
+ SL_ADD_NEW_IPV4_AP(node->ri, or_port, sl, valid);
+ }
+
+ /* If we didn't find a valid address/port in the ri, try the rs */
+ if (!valid && node->rs != NULL) {
+ SL_ADD_NEW_IPV4_AP(node->rs, or_port, sl, valid);
+ }
+
+ /* Find a valid IPv6 address and port */
+ valid = 0;
+ if (node->ri != NULL) {
+ SL_ADD_NEW_IPV6_AP(node->ri, ipv6_orport, sl, valid);
+ }
+
+ if (!valid && node->rs != NULL) {
+ SL_ADD_NEW_IPV6_AP(node->rs, ipv6_orport, sl, valid);
+ }
+
+ if (!valid && node->md != NULL) {
+ SL_ADD_NEW_IPV6_AP(node->md, ipv6_orport, sl, valid);
+ }
+
+ return sl;
+}
+
+#undef SL_ADD_NEW_IPV4_AP
+#undef SL_ADD_NEW_IPV6_AP
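+
+/* Example (illustrative sketch): consuming the list returned by
+ * node_get_all_orports().  The elements and the list itself belong to the
+ * caller:
+ *
+ *   smartlist_t *orports = node_get_all_orports(node);
+ *   SMARTLIST_FOREACH(orports, tor_addr_port_t *, ap, {
+ *     // Use ap->addr and ap->port here.
+ *   });
+ *   SMARTLIST_FOREACH(orports, tor_addr_port_t *, ap, tor_free(ap));
+ *   smartlist_free(orports);
+ */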
+
+/** Wrapper around node_get_prim_orport for backward
+ compatibility. */
+void
+node_get_addr(const node_t *node, tor_addr_t *addr_out)
+{
+ tor_addr_port_t ap;
+ node_get_prim_orport(node, &ap);
+ tor_addr_copy(addr_out, &ap.addr);
+}
+
+/** Return the host-order IPv4 address for <b>node</b>, or 0 if it doesn't
+ * seem to have one. */
+uint32_t
+node_get_prim_addr_ipv4h(const node_t *node)
+{
+ /* Don't check the ORPort or DirPort, as this function isn't port-specific,
+ * and the node might have a valid IPv4 address, yet have a zero
+ * ORPort or DirPort.
+ */
+ if (node->ri && tor_addr_is_valid_ipv4h(node->ri->addr, 0)) {
+ return node->ri->addr;
+ } else if (node->rs && tor_addr_is_valid_ipv4h(node->rs->addr, 0)) {
+ return node->rs->addr;
+ }
+ return 0;
+}
+
+/** Copy a string representation of an IP address for <b>node</b> into
+ * the <b>len</b>-byte buffer at <b>buf</b>. */
+void
+node_get_address_string(const node_t *node, char *buf, size_t len)
+{
+ uint32_t ipv4_addr = node_get_prim_addr_ipv4h(node);
+
+ if (tor_addr_is_valid_ipv4h(ipv4_addr, 0)) {
+ tor_addr_t addr;
+ tor_addr_from_ipv4h(&addr, ipv4_addr);
+ tor_addr_to_str(buf, &addr, len, 0);
+ } else if (len > 0) {
+ buf[0] = '\0';
+ }
+}
+
+/** Return <b>node</b>'s declared uptime, or -1 if it doesn't seem to have
+ * one. */
+long
+node_get_declared_uptime(const node_t *node)
+{
+ if (node->ri)
+ return node->ri->uptime;
+ else
+ return -1;
+}
+
+/** Return <b>node</b>'s platform string, or NULL if we don't know it. */
+const char *
+node_get_platform(const node_t *node)
+{
+ /* If we wanted, we could record the version in the routerstatus_t, since
+ * the consensus lists it. We don't, though, so this function just won't
+ * work with microdescriptors. */
+ if (node->ri)
+ return node->ri->platform;
+ else
+ return NULL;
+}
+
+/** Return true iff <b>node</b> is one representing this router. */
+int
+node_is_me(const node_t *node)
+{
+ return router_digest_is_me(node->identity);
+}
+
+/** Return <b>node</b>'s declared family (as a list of names), or NULL if
+ * the node didn't declare a family. */
+const smartlist_t *
+node_get_declared_family(const node_t *node)
+{
+ if (node->ri && node->ri->declared_family)
+ return node->ri->declared_family;
+ else if (node->md && node->md->family)
+ return node->md->family;
+ else
+ return NULL;
+}
+
+/* Does this node have a valid IPv6 address?
+ * Prefer node_has_ipv6_orport() or node_has_ipv6_dirport() for
+ * checking specific ports. */
+int
+node_has_ipv6_addr(const node_t *node)
+{
+ /* Don't check the ORPort or DirPort, as this function isn't port-specific,
+ * and the node might have a valid IPv6 address, yet have a zero
+ * ORPort or DirPort.
+ */
+ if (node->ri && tor_addr_is_valid(&node->ri->ipv6_addr, 0))
+ return 1;
+ if (node->rs && tor_addr_is_valid(&node->rs->ipv6_addr, 0))
+ return 1;
+ if (node->md && tor_addr_is_valid(&node->md->ipv6_addr, 0))
+ return 1;
+
+ return 0;
+}
+
+/* Does this node have a valid IPv6 ORPort? */
+int
+node_has_ipv6_orport(const node_t *node)
+{
+ tor_addr_port_t ipv6_orport;
+ node_get_pref_ipv6_orport(node, &ipv6_orport);
+ return tor_addr_port_is_valid_ap(&ipv6_orport, 0);
+}
+
+/* Does this node have a valid IPv6 DirPort? */
+int
+node_has_ipv6_dirport(const node_t *node)
+{
+ tor_addr_port_t ipv6_dirport;
+ node_get_pref_ipv6_dirport(node, &ipv6_dirport);
+ return tor_addr_port_is_valid_ap(&ipv6_dirport, 0);
+}
+
+/** Return 1 if we prefer the IPv6 address and OR TCP port of
+ * <b>node</b>, else 0.
+ *
+ * We prefer the IPv6 address if the router has an IPv6 address,
+ * and we can use IPv6 addresses, and:
+ * i) the node_t says that it prefers IPv6
+ * or
+ * ii) the router has no IPv4 OR address.
+ *
+ * If you don't have a node, consider looking it up.
+ * If there is no node, use fascist_firewall_prefer_ipv6_orport().
+ */
+int
+node_ipv6_or_preferred(const node_t *node)
+{
+ const or_options_t *options = get_options();
+ tor_addr_port_t ipv4_addr;
+ node_assert_ok(node);
+
+ /* XX/teor - node->ipv6_preferred is set from
+ * fascist_firewall_prefer_ipv6_orport() each time the consensus is loaded.
+ */
+ node_get_prim_orport(node, &ipv4_addr);
+ if (!fascist_firewall_use_ipv6(options)) {
+ return 0;
+ } else if (node->ipv6_preferred ||
+ !tor_addr_port_is_valid_ap(&ipv4_addr, 0)) {
+ return node_has_ipv6_orport(node);
+ }
+ return 0;
+}
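+
+/* Illustrative sketch: choosing which OR address family to use for a
+ * connection to a node; this mirrors node_get_pref_orport() below.
+ *
+ *   tor_addr_port_t ap;
+ *   if (node_ipv6_or_preferred(node))
+ *     node_get_pref_ipv6_orport(node, &ap);
+ *   else
+ *     node_get_prim_orport(node, &ap); // the primary ORPort is IPv4
+ */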
+
+#define RETURN_IPV4_AP(r, port_field, ap_out) \
+ STMT_BEGIN \
+ if (r && tor_addr_port_is_valid_ipv4h((r)->addr, (r)->port_field, 0)) { \
+ tor_addr_from_ipv4h(&(ap_out)->addr, (r)->addr); \
+ (ap_out)->port = (r)->port_field; \
+ } \
+ STMT_END
+
+/** Copy the primary (IPv4) OR port (IP address and TCP port) for <b>node</b>
+ * into *<b>ap_out</b>. */
+void
+node_get_prim_orport(const node_t *node, tor_addr_port_t *ap_out)
+{
+ node_assert_ok(node);
+ tor_assert(ap_out);
+
+  /* Clear the address, as a safety precaution in case calling functions use
+   * the result without checking it. */
+ tor_addr_make_null(&ap_out->addr, AF_INET);
+ ap_out->port = 0;
+
+ /* Check ri first, because rewrite_node_address_for_bridge() updates
+ * node->ri with the configured bridge address. */
+
+ RETURN_IPV4_AP(node->ri, or_port, ap_out);
+ RETURN_IPV4_AP(node->rs, or_port, ap_out);
+ /* Microdescriptors only have an IPv6 address */
+}
+
+/** Copy the preferred OR port (IP address and TCP port) for
+ * <b>node</b> into *<b>ap_out</b>. */
+void
+node_get_pref_orport(const node_t *node, tor_addr_port_t *ap_out)
+{
+ tor_assert(ap_out);
+
+ if (node_ipv6_or_preferred(node)) {
+ node_get_pref_ipv6_orport(node, ap_out);
+ } else {
+ /* the primary ORPort is always on IPv4 */
+ node_get_prim_orport(node, ap_out);
+ }
+}
+
+/** Copy the preferred IPv6 OR port (IP address and TCP port) for
+ * <b>node</b> into *<b>ap_out</b>. */
+void
+node_get_pref_ipv6_orport(const node_t *node, tor_addr_port_t *ap_out)
+{
+ node_assert_ok(node);
+ tor_assert(ap_out);
+ memset(ap_out, 0, sizeof(*ap_out));
+
+ /* Check ri first, because rewrite_node_address_for_bridge() updates
+ * node->ri with the configured bridge address.
+ * Prefer rs over md for consistency with the fascist_firewall_* functions.
+ * Check if the address or port are valid, and try another alternative
+ * if they are not. */
+
+ if (node->ri && tor_addr_port_is_valid(&node->ri->ipv6_addr,
+ node->ri->ipv6_orport, 0)) {
+ tor_addr_copy(&ap_out->addr, &node->ri->ipv6_addr);
+ ap_out->port = node->ri->ipv6_orport;
+ } else if (node->rs && tor_addr_port_is_valid(&node->rs->ipv6_addr,
+ node->rs->ipv6_orport, 0)) {
+ tor_addr_copy(&ap_out->addr, &node->rs->ipv6_addr);
+ ap_out->port = node->rs->ipv6_orport;
+ } else if (node->md && tor_addr_port_is_valid(&node->md->ipv6_addr,
+ node->md->ipv6_orport, 0)) {
+ tor_addr_copy(&ap_out->addr, &node->md->ipv6_addr);
+ ap_out->port = node->md->ipv6_orport;
+ } else {
+ tor_addr_make_null(&ap_out->addr, AF_INET6);
+ ap_out->port = 0;
+ }
+}
+
+/** Return 1 if we prefer the IPv6 address and Dir TCP port of
+ * <b>node</b>, else 0.
+ *
+ * We prefer the IPv6 address if the router has an IPv6 address,
+ * and we can use IPv6 addresses, and:
+ * i) the router has no IPv4 Dir address.
+ * or
+ * ii) our preference is for IPv6 Dir addresses.
+ *
+ * If there is no node, use fascist_firewall_prefer_ipv6_dirport().
+ */
+int
+node_ipv6_dir_preferred(const node_t *node)
+{
+ const or_options_t *options = get_options();
+ tor_addr_port_t ipv4_addr;
+ node_assert_ok(node);
+
+ /* node->ipv6_preferred is set from fascist_firewall_prefer_ipv6_orport(),
+ * so we can't use it to determine DirPort IPv6 preference.
+ * This means that bridge clients will use IPv4 DirPorts by default.
+ */
+ node_get_prim_dirport(node, &ipv4_addr);
+ if (!fascist_firewall_use_ipv6(options)) {
+ return 0;
+ } else if (!tor_addr_port_is_valid_ap(&ipv4_addr, 0)
+ || fascist_firewall_prefer_ipv6_dirport(get_options())) {
+ return node_has_ipv6_dirport(node);
+ }
+ return 0;
+}
+
+/** Copy the primary (IPv4) Dir port (IP address and TCP port) for <b>node</b>
+ * into *<b>ap_out</b>. */
+void
+node_get_prim_dirport(const node_t *node, tor_addr_port_t *ap_out)
+{
+ node_assert_ok(node);
+ tor_assert(ap_out);
+
+  /* Clear the address, as a safety precaution in case calling functions use
+   * the result without checking it. */
+ tor_addr_make_null(&ap_out->addr, AF_INET);
+ ap_out->port = 0;
+
+ /* Check ri first, because rewrite_node_address_for_bridge() updates
+ * node->ri with the configured bridge address. */
+
+ RETURN_IPV4_AP(node->ri, dir_port, ap_out);
+ RETURN_IPV4_AP(node->rs, dir_port, ap_out);
+ /* Microdescriptors only have an IPv6 address */
+}
+
+#undef RETURN_IPV4_AP
+
+/** Copy the preferred Dir port (IP address and TCP port) for
+ * <b>node</b> into *<b>ap_out</b>. */
+void
+node_get_pref_dirport(const node_t *node, tor_addr_port_t *ap_out)
+{
+ tor_assert(ap_out);
+
+ if (node_ipv6_dir_preferred(node)) {
+ node_get_pref_ipv6_dirport(node, ap_out);
+ } else {
+ /* the primary DirPort is always on IPv4 */
+ node_get_prim_dirport(node, ap_out);
+ }
+}
+
+/** Copy the preferred IPv6 Dir port (IP address and TCP port) for
+ * <b>node</b> into *<b>ap_out</b>. */
+void
+node_get_pref_ipv6_dirport(const node_t *node, tor_addr_port_t *ap_out)
+{
+ node_assert_ok(node);
+ tor_assert(ap_out);
+
+ /* Check ri first, because rewrite_node_address_for_bridge() updates
+ * node->ri with the configured bridge address.
+ * Prefer rs over md for consistency with the fascist_firewall_* functions.
+ * Check if the address or port are valid, and try another alternative
+ * if they are not. */
+
+ /* Assume IPv4 and IPv6 dirports are the same */
+ if (node->ri && tor_addr_port_is_valid(&node->ri->ipv6_addr,
+ node->ri->dir_port, 0)) {
+ tor_addr_copy(&ap_out->addr, &node->ri->ipv6_addr);
+ ap_out->port = node->ri->dir_port;
+ } else if (node->rs && tor_addr_port_is_valid(&node->rs->ipv6_addr,
+ node->rs->dir_port, 0)) {
+ tor_addr_copy(&ap_out->addr, &node->rs->ipv6_addr);
+ ap_out->port = node->rs->dir_port;
+ } else {
+ tor_addr_make_null(&ap_out->addr, AF_INET6);
+ ap_out->port = 0;
+ }
+}
+
+/** Return true iff <b>md</b> has a curve25519 onion key.
+ * Use node_has_curve25519_onion_key() instead of calling this directly. */
+static int
+microdesc_has_curve25519_onion_key(const microdesc_t *md)
+{
+ if (!md) {
+ return 0;
+ }
+
+ if (!md->onion_curve25519_pkey) {
+ return 0;
+ }
+
+ if (tor_mem_is_zero((const char*)md->onion_curve25519_pkey->public_key,
+ CURVE25519_PUBKEY_LEN)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/** Return true iff <b>node</b> has a curve25519 onion key. */
+int
+node_has_curve25519_onion_key(const node_t *node)
+{
+ return node_get_curve25519_onion_key(node) != NULL;
+}
+
+/** Return the curve25519 key of <b>node</b>, or NULL if none. */
+const curve25519_public_key_t *
+node_get_curve25519_onion_key(const node_t *node)
+{
+ if (!node)
+ return NULL;
+ if (routerinfo_has_curve25519_onion_key(node->ri))
+ return node->ri->onion_curve25519_pkey;
+ else if (microdesc_has_curve25519_onion_key(node->md))
+ return node->md->onion_curve25519_pkey;
+ else
+ return NULL;
+}
+
+/** Refresh the country code of <b>node</b>. This function MUST be called on
+ * each router when the GeoIP database is reloaded, and on all new routers. */
+void
+node_set_country(node_t *node)
+{
+ tor_addr_t addr = TOR_ADDR_NULL;
+
+ /* XXXXipv6 */
+ if (node->rs)
+ tor_addr_from_ipv4h(&addr, node->rs->addr);
+ else if (node->ri)
+ tor_addr_from_ipv4h(&addr, node->ri->addr);
+
+ node->country = geoip_get_country_by_addr(&addr);
+}
+
+/** Set the country code of all nodes in the nodelist. */
+void
+nodelist_refresh_countries(void)
+{
+ smartlist_t *nodes = nodelist_get_list();
+ SMARTLIST_FOREACH(nodes, node_t *, node,
+ node_set_country(node));
+}
+
+/** Return true iff <b>a1</b> and <b>a2</b> are similar enough network
+ * addresses that we should treat relays at them as being in the same
+ * family. */
+int
+addrs_in_same_network_family(const tor_addr_t *a1,
+ const tor_addr_t *a2)
+{
+ return 0 == tor_addr_compare_masked(a1, a2, 16, CMP_SEMANTIC);
+}
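+
+/* Illustrative example: with the 16-bit mask above, hypothetical relays at
+ * 203.0.113.1 and 203.0.113.200 count as the same network family (both are
+ * in 203.0.0.0/16), while 203.1.0.1 does not.
+ *
+ *   tor_addr_t a, b;
+ *   tor_addr_parse(&a, "203.0.113.1");
+ *   tor_addr_parse(&b, "203.0.113.200");
+ *   int same = addrs_in_same_network_family(&a, &b); // nonzero
+ */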
+
+/** Return true if <b>node</b>'s nickname matches <b>nickname</b>
+ * (case-insensitive), or if <b>node</b>'s identity key digest
+ * matches a hexadecimal value stored in <b>nickname</b>. Return
+ * false otherwise. */
+static int
+node_nickname_matches(const node_t *node, const char *nickname)
+{
+ const char *n = node_get_nickname(node);
+ if (n && nickname[0]!='$' && !strcasecmp(n, nickname))
+ return 1;
+ return hex_digest_nickname_matches(nickname,
+ node->identity,
+ n);
+}
+
+/** Return true iff <b>node</b> is named by some nickname in <b>lst</b>. */
+static inline int
+node_in_nickname_smartlist(const smartlist_t *lst, const node_t *node)
+{
+ if (!lst) return 0;
+ SMARTLIST_FOREACH(lst, const char *, name, {
+ if (node_nickname_matches(node, name))
+ return 1;
+ });
+ return 0;
+}
+
+/** Return true iff <b>node1</b> and <b>node2</b> are in the same family, but
+ * not the same router. */
+int
+nodes_in_same_family(const node_t *node1, const node_t *node2)
+{
+ const or_options_t *options = get_options();
+
+ /* Are they in the same family because of their addresses? */
+ if (options->EnforceDistinctSubnets) {
+ tor_addr_t a1, a2;
+ node_get_addr(node1, &a1);
+ node_get_addr(node2, &a2);
+ if (addrs_in_same_network_family(&a1, &a2))
+ return 1;
+ }
+
+  /* Are they in the same family because they agree they are? */
+ {
+ const smartlist_t *f1, *f2;
+ f1 = node_get_declared_family(node1);
+ f2 = node_get_declared_family(node2);
+ if (f1 && f2 &&
+ node_in_nickname_smartlist(f1, node2) &&
+ node_in_nickname_smartlist(f2, node1))
+ return 1;
+ }
+
+  /* Are they in the same family because the user says they are? */
+ if (options->NodeFamilySets) {
+ SMARTLIST_FOREACH(options->NodeFamilySets, const routerset_t *, rs, {
+ if (routerset_contains_node(rs, node1) &&
+ routerset_contains_node(rs, node2))
+ return 1;
+ });
+ }
+
+ return 0;
+}
+
+/**
+ * Add all the family of <b>node</b>, including <b>node</b> itself, to
+ * the smartlist <b>sl</b>.
+ *
+ * This is used to make sure we don't pick siblings in a single path, or
+ * pick more than one relay from a family for our entry guard list.
+ * Note that a node may be added to <b>sl</b> more than once if it is
+ * part of <b>node</b>'s family for more than one reason.
+ */
+void
+nodelist_add_node_and_family(smartlist_t *sl, const node_t *node)
+{
+ const smartlist_t *all_nodes = nodelist_get_list();
+ const smartlist_t *declared_family;
+ const or_options_t *options = get_options();
+
+ tor_assert(node);
+
+ declared_family = node_get_declared_family(node);
+
+ /* Let's make sure that we have the node itself, if it's a real node. */
+ {
+ const node_t *real_node = node_get_by_id(node->identity);
+ if (real_node)
+ smartlist_add(sl, (node_t*)real_node);
+ }
+
+ /* First, add any nodes with similar network addresses. */
+ if (options->EnforceDistinctSubnets) {
+ tor_addr_t node_addr;
+ node_get_addr(node, &node_addr);
+
+ SMARTLIST_FOREACH_BEGIN(all_nodes, const node_t *, node2) {
+ tor_addr_t a;
+ node_get_addr(node2, &a);
+ if (addrs_in_same_network_family(&a, &node_addr))
+ smartlist_add(sl, (void*)node2);
+ } SMARTLIST_FOREACH_END(node2);
+ }
+
+ /* Now, add all nodes in the declared_family of this node, if they
+ * also declare this node to be in their family. */
+ if (declared_family) {
+    /* Add every node2 such that node2 declares family membership with node,
+     * and node declares family membership with node2. */
+ SMARTLIST_FOREACH_BEGIN(declared_family, const char *, name) {
+ const node_t *node2;
+ const smartlist_t *family2;
+ if (!(node2 = node_get_by_nickname(name, NNF_NO_WARN_UNNAMED)))
+ continue;
+ if (!(family2 = node_get_declared_family(node2)))
+ continue;
+ SMARTLIST_FOREACH_BEGIN(family2, const char *, name2) {
+ if (node_nickname_matches(node, name2)) {
+ smartlist_add(sl, (void*)node2);
+ break;
+ }
+ } SMARTLIST_FOREACH_END(name2);
+ } SMARTLIST_FOREACH_END(name);
+ }
+
+ /* If the user declared any families locally, honor those too. */
+ if (options->NodeFamilySets) {
+ SMARTLIST_FOREACH(options->NodeFamilySets, const routerset_t *, rs, {
+ if (routerset_contains_node(rs, node)) {
+ routerset_get_all_nodes(sl, rs, NULL, 0);
+ }
+ });
+ }
+}
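+
+/* Example (illustrative sketch): building an exclusion set for path
+ * selection; "chosen_exit" is a hypothetical const node_t *.  Because a node
+ * may be added more than once, treat the result as a membership list rather
+ * than a deduplicated set:
+ *
+ *   smartlist_t *excluded = smartlist_new();
+ *   nodelist_add_node_and_family(excluded, chosen_exit);
+ *   // ... skip any candidate found in "excluded" via smartlist_contains()
+ *   smartlist_free(excluded);
+ */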
+
+/** Find a router that's up, that has this IP address, and
+ * that allows exit to this address:port, or return NULL if there
+ * isn't a good one.
+ * Don't exit enclave to excluded relays -- it wouldn't actually
+ * hurt anything, but this way there are fewer confused users.
+ */
+const node_t *
+router_find_exact_exit_enclave(const char *address, uint16_t port)
+{/*XXXX MOVE*/
+ uint32_t addr;
+ struct in_addr in;
+ tor_addr_t a;
+ const or_options_t *options = get_options();
+
+ if (!tor_inet_aton(address, &in))
+ return NULL; /* it's not an IP already */
+ addr = ntohl(in.s_addr);
+
+ tor_addr_from_ipv4h(&a, addr);
+
+ SMARTLIST_FOREACH(nodelist_get_list(), const node_t *, node, {
+ if (node_get_addr_ipv4h(node) == addr &&
+ node->is_running &&
+ compare_tor_addr_to_node_policy(&a, port, node) ==
+ ADDR_POLICY_ACCEPTED &&
+ !routerset_contains_node(options->ExcludeExitNodesUnion_, node))
+ return node;
+ });
+ return NULL;
+}
+
+/** Return 1 if <b>router</b> is not suitable for these parameters, else 0.
+ * If <b>need_uptime</b> is non-zero, we require a minimum uptime.
+ * If <b>need_capacity</b> is non-zero, we require a minimum advertised
+ * bandwidth.
+ * If <b>need_guard</b>, we require that the router is a possible entry guard.
+ */
+int
+node_is_unreliable(const node_t *node, int need_uptime,
+ int need_capacity, int need_guard)
+{
+ if (need_uptime && !node->is_stable)
+ return 1;
+ if (need_capacity && !node->is_fast)
+ return 1;
+ if (need_guard && !node->is_possible_guard)
+ return 1;
+ return 0;
+}
+
+/** Return 1 if all running sufficiently-stable routers we can use will reject
+ * addr:port. Return 0 if any might accept it. */
+int
+router_exit_policy_all_nodes_reject(const tor_addr_t *addr, uint16_t port,
+ int need_uptime)
+{
+ addr_policy_result_t r;
+
+ SMARTLIST_FOREACH_BEGIN(nodelist_get_list(), const node_t *, node) {
+ if (node->is_running &&
+ !node_is_unreliable(node, need_uptime, 0, 0)) {
+
+ r = compare_tor_addr_to_node_policy(addr, port, node);
+
+ if (r != ADDR_POLICY_REJECTED && r != ADDR_POLICY_PROBABLY_REJECTED)
+ return 0; /* this one could be ok. good enough. */
+ }
+ } SMARTLIST_FOREACH_END(node);
+ return 1; /* all will reject. */
+}
+
+/** Mark the router with ID <b>digest</b> as running or non-running
+ * in our routerlist. */
+void
+router_set_status(const char *digest, int up)
+{
+ node_t *node;
+ tor_assert(digest);
+
+ SMARTLIST_FOREACH(router_get_fallback_dir_servers(),
+ dir_server_t *, d,
+ if (tor_memeq(d->digest, digest, DIGEST_LEN))
+ d->is_running = up);
+
+ SMARTLIST_FOREACH(router_get_trusted_dir_servers(),
+ dir_server_t *, d,
+ if (tor_memeq(d->digest, digest, DIGEST_LEN))
+ d->is_running = up);
+
+ node = node_get_mutable_by_id(digest);
+ if (node) {
+#if 0
+ log_debug(LD_DIR,"Marking router %s as %s.",
+ node_describe(node), up ? "up" : "down");
+#endif
+ if (!up && node_is_me(node) && !net_is_disabled())
+ log_warn(LD_NET, "We just marked ourself as down. Are your external "
+ "addresses reachable?");
+
+ if (bool_neq(node->is_running, up))
+ router_dir_info_changed();
+
+ node->is_running = up;
+ }
+}
+
+/** True iff, the last time we checked whether we had enough directory info
+ * to build circuits, the answer was "yes". If there are no exits in the
+ * consensus, we act as if we have 100% of the exit directory info. */
+static int have_min_dir_info = 0;
+
+/** Does the consensus contain nodes that can exit? */
+static consensus_path_type_t have_consensus_path = CONSENSUS_PATH_UNKNOWN;
+
+/** True iff enough has changed since the last time we checked whether we had
+ * enough directory info to build circuits that our old answer can no longer
+ * be trusted. */
+static int need_to_update_have_min_dir_info = 1;
+/** String describing what we're missing before we have enough directory
+ * info. */
+static char dir_info_status[512] = "";
+
+/** Return true iff we have enough consensus information to
+ * start building circuits. Right now, this means "a consensus that's
+ * less than a day old, and at least 60% of router descriptors (configurable),
+ * weighted by bandwidth. Treat the exit fraction as 100% if there are
+ * no exits in the consensus."
+ * To obtain the final weighted bandwidth, we multiply the
+ * weighted bandwidth fraction for each position (guard, middle, exit). */
+MOCK_IMPL(int,
+router_have_minimum_dir_info,(void))
+{
+ static int logged_delay=0;
+ const char *delay_fetches_msg = NULL;
+ if (should_delay_dir_fetches(get_options(), &delay_fetches_msg)) {
+ if (!logged_delay)
+ log_notice(LD_DIR, "Delaying directory fetches: %s", delay_fetches_msg);
+ logged_delay=1;
+ strlcpy(dir_info_status, delay_fetches_msg, sizeof(dir_info_status));
+ return 0;
+ }
+ logged_delay = 0; /* reset it if we get this far */
+
+ if (PREDICT_UNLIKELY(need_to_update_have_min_dir_info)) {
+ update_router_have_minimum_dir_info();
+ }
+
+ return have_min_dir_info;
+}
+
+/** Set to CONSENSUS_PATH_EXIT if there is at least one exit node
+ * in the consensus. We update this flag in compute_frac_paths_available if
+ * there is at least one relay that has an Exit flag in the consensus.
+ * Used to avoid building exit circuits when they will almost certainly fail.
+ * Set to CONSENSUS_PATH_INTERNAL if there are no exits in the consensus.
+ * (This situation typically occurs during bootstrap of a test network.)
+ * Set to CONSENSUS_PATH_UNKNOWN if we have never checked, or have
+ * reason to believe our last known value was invalid or has expired.
+ * If we're in a network with TestingDirAuthVoteExit set,
+ * this can cause router_have_consensus_path() to be set to
+ * CONSENSUS_PATH_EXIT, even if there are no nodes with accept exit policies.
+ */
+MOCK_IMPL(consensus_path_type_t,
+router_have_consensus_path, (void))
+{
+ return have_consensus_path;
+}
+
+/** Called when our internal view of the directory has changed. This can be
+ * when the authorities change, networkstatuses change, the list of routerdescs
+ * changes, or the number of running routers changes.
+ */
+void
+router_dir_info_changed(void)
+{
+ need_to_update_have_min_dir_info = 1;
+ rend_hsdir_routers_changed();
+ hs_service_dir_info_changed();
+ hs_client_dir_info_changed();
+}
+
+/** Return a string describing what we're missing before we have enough
+ * directory info. */
+const char *
+get_dir_info_status_string(void)
+{
+ return dir_info_status;
+}
+
+/** Iterate over the servers listed in <b>consensus</b>, and count how many of
+ * them seem like ones we'd use (store this in *<b>num_usable</b>), and how
+ * many of <em>those</em> we have descriptors for (store this in
+ * *<b>num_present</b>).
+ *
+ * If <b>in_set</b> is non-NULL, only consider those routers in <b>in_set</b>.
+ * If <b>exit_only</b> is USABLE_DESCRIPTOR_EXIT_ONLY, only consider nodes
+ * with the Exit flag.
+ * If <b>descs_out</b> is present, add a node_t for each usable descriptor
+ * to it.
+ */
+static void
+count_usable_descriptors(int *num_present, int *num_usable,
+ smartlist_t *descs_out,
+ const networkstatus_t *consensus,
+ time_t now,
+ routerset_t *in_set,
+ usable_descriptor_t exit_only)
+{
+ const int md = (consensus->flavor == FLAV_MICRODESC);
+ *num_present = 0, *num_usable = 0;
+
+ SMARTLIST_FOREACH_BEGIN(consensus->routerstatus_list, routerstatus_t *, rs)
+ {
+ const node_t *node = node_get_by_id(rs->identity_digest);
+ if (!node)
+ continue; /* This would be a bug: every entry in the consensus is
+ * supposed to have a node. */
+ if (exit_only == USABLE_DESCRIPTOR_EXIT_ONLY && ! rs->is_exit)
+ continue;
+ if (in_set && ! routerset_contains_routerstatus(in_set, rs, -1))
+ continue;
+ if (client_would_use_router(rs, now)) {
+ const char * const digest = rs->descriptor_digest;
+ int present;
+ ++*num_usable; /* the consensus says we want it. */
+ if (md)
+ present = NULL != microdesc_cache_lookup_by_digest256(NULL, digest);
+ else
+ present = NULL != router_get_by_descriptor_digest(digest);
+ if (present) {
+ /* we have the descriptor listed in the consensus. */
+ ++*num_present;
+ }
+ if (descs_out)
+ smartlist_add(descs_out, (node_t*)node);
+ }
+ }
+ SMARTLIST_FOREACH_END(rs);
+
+ log_debug(LD_DIR, "%d usable, %d present (%s%s).",
+ *num_usable, *num_present,
+ md ? "microdesc" : "desc",
+ exit_only == USABLE_DESCRIPTOR_EXIT_ONLY ? " exits" : "s");
+}
+
+/** Return an estimate of which fraction of usable paths through the Tor
+ * network we have available for use. Count how many routers seem like ones
+ * we'd use (store this in *<b>num_usable_out</b>), and how many of
+ * <em>those</em> we have descriptors for (store this in
+ * *<b>num_present_out</b>.)
+ *
+ * If <b>status_out</b> is present, allocate a new string and print the
+ * available percentages of guard, middle, and exit nodes to it, noting
+ * whether there are exits in the consensus.
+ * If there are no exits in the consensus, we treat the exit fraction as 100%,
+ * but set router_have_consensus_path() so that we can only build internal
+ * paths. */
+static double
+compute_frac_paths_available(const networkstatus_t *consensus,
+ const or_options_t *options, time_t now,
+ int *num_present_out, int *num_usable_out,
+ char **status_out)
+{
+ smartlist_t *guards = smartlist_new();
+ smartlist_t *mid = smartlist_new();
+ smartlist_t *exits = smartlist_new();
+ double f_guard, f_mid, f_exit;
+ double f_path = 0.0;
+ /* Used to determine whether there are any exits in the consensus */
+ int np = 0;
+ /* Used to determine whether there are any exits with descriptors */
+ int nu = 0;
+ const int authdir = authdir_mode_v3(options);
+
+ count_usable_descriptors(num_present_out, num_usable_out,
+ mid, consensus, now, NULL,
+ USABLE_DESCRIPTOR_ALL);
+ if (options->EntryNodes) {
+ count_usable_descriptors(&np, &nu, guards, consensus, now,
+ options->EntryNodes, USABLE_DESCRIPTOR_ALL);
+ } else {
+ SMARTLIST_FOREACH(mid, const node_t *, node, {
+ if (authdir) {
+ if (node->rs && node->rs->is_possible_guard)
+ smartlist_add(guards, (node_t*)node);
+ } else {
+ if (node->is_possible_guard)
+ smartlist_add(guards, (node_t*)node);
+ }
+ });
+ }
+
+ /* All nodes with exit flag
+ * If we're in a network with TestingDirAuthVoteExit set,
+ * this can cause false positives on have_consensus_path,
+ * incorrectly setting it to CONSENSUS_PATH_EXIT. This is
+ * an unavoidable feature of forcing authorities to declare
+ * certain nodes as exits.
+ */
+ count_usable_descriptors(&np, &nu, exits, consensus, now,
+ NULL, USABLE_DESCRIPTOR_EXIT_ONLY);
+ log_debug(LD_NET,
+ "%s: %d present, %d usable",
+ "exits",
+ np,
+ nu);
+
+ /* We need at least 1 exit present in the consensus to consider
+ * building exit paths */
+ /* Update our understanding of whether the consensus has exits */
+ consensus_path_type_t old_have_consensus_path = have_consensus_path;
+ have_consensus_path = ((nu > 0) ?
+ CONSENSUS_PATH_EXIT :
+ CONSENSUS_PATH_INTERNAL);
+
+ if (have_consensus_path == CONSENSUS_PATH_INTERNAL
+ && old_have_consensus_path != have_consensus_path) {
+ log_notice(LD_NET,
+ "The current consensus has no exit nodes. "
+ "Tor can only build internal paths, "
+ "such as paths to hidden services.");
+
+    /* However, exit nodes can still run reachability self-tests using this
+     * consensus, join the network, and appear in a later consensus. This
+     * will allow the network to build exit paths, such as paths for world
+     * wide web browsing (as distinct from hidden service web browsing). */
+ }
+
+ f_guard = frac_nodes_with_descriptors(guards, WEIGHT_FOR_GUARD, 1);
+ f_mid = frac_nodes_with_descriptors(mid, WEIGHT_FOR_MID, 0);
+ f_exit = frac_nodes_with_descriptors(exits, WEIGHT_FOR_EXIT, 0);
+
+ /* If we are using bridges and have at least one bridge with a full
+ * descriptor, assume f_guard is 1.0. */
+ if (options->UseBridges && num_bridges_usable(0) > 0)
+ f_guard = 1.0;
+
+ log_debug(LD_NET,
+ "f_guard: %.2f, f_mid: %.2f, f_exit: %.2f",
+ f_guard,
+ f_mid,
+ f_exit);
+
+ smartlist_free(guards);
+ smartlist_free(mid);
+ smartlist_free(exits);
+
+ if (options->ExitNodes) {
+ double f_myexit, f_myexit_unflagged;
+ smartlist_t *myexits= smartlist_new();
+ smartlist_t *myexits_unflagged = smartlist_new();
+
+ /* All nodes with exit flag in ExitNodes option */
+ count_usable_descriptors(&np, &nu, myexits, consensus, now,
+ options->ExitNodes, USABLE_DESCRIPTOR_EXIT_ONLY);
+ log_debug(LD_NET,
+ "%s: %d present, %d usable",
+ "myexits",
+ np,
+ nu);
+
+    /* Now count the nodes in the ExitNodes option for which we either don't
+     * know their exit policy, or know that it permits something. */
+ count_usable_descriptors(&np, &nu, myexits_unflagged,
+ consensus, now,
+ options->ExitNodes, USABLE_DESCRIPTOR_ALL);
+ log_debug(LD_NET,
+ "%s: %d present, %d usable",
+ "myexits_unflagged (initial)",
+ np,
+ nu);
+
+ SMARTLIST_FOREACH_BEGIN(myexits_unflagged, const node_t *, node) {
+ if (node_has_preferred_descriptor(node, 0) &&
+ node_exit_policy_rejects_all(node)) {
+ SMARTLIST_DEL_CURRENT(myexits_unflagged, node);
+ /* this node is not actually an exit */
+ np--;
+ /* this node is unusable as an exit */
+ nu--;
+ }
+ } SMARTLIST_FOREACH_END(node);
+
+ log_debug(LD_NET,
+ "%s: %d present, %d usable",
+ "myexits_unflagged (final)",
+ np,
+ nu);
+
+ f_myexit= frac_nodes_with_descriptors(myexits, WEIGHT_FOR_EXIT, 0);
+ f_myexit_unflagged=
+ frac_nodes_with_descriptors(myexits_unflagged,
+ WEIGHT_FOR_EXIT, 0);
+
+ log_debug(LD_NET,
+ "f_exit: %.2f, f_myexit: %.2f, f_myexit_unflagged: %.2f",
+ f_exit,
+ f_myexit,
+ f_myexit_unflagged);
+
+ /* If our ExitNodes list has eliminated every possible Exit node, and there
+ * were some possible Exit nodes, then instead consider nodes that permit
+ * exiting to some ports. */
+ if (smartlist_len(myexits) == 0 &&
+ smartlist_len(myexits_unflagged)) {
+ f_myexit = f_myexit_unflagged;
+ }
+
+ smartlist_free(myexits);
+ smartlist_free(myexits_unflagged);
+
+ /* This is a tricky point here: we don't want to make it easy for a
+ * directory to trickle exits to us until it learns which exits we have
+ * configured, so require that we have a threshold both of total exits
+ * and usable exits. */
+ if (f_myexit < f_exit)
+ f_exit = f_myexit;
+ }
+
+ /* if the consensus has no exits, treat the exit fraction as 100% */
+ if (router_have_consensus_path() != CONSENSUS_PATH_EXIT) {
+ f_exit = 1.0;
+ }
+
+ f_path = f_guard * f_mid * f_exit;
+
+ if (status_out)
+ tor_asprintf(status_out,
+ "%d%% of guards bw, "
+ "%d%% of midpoint bw, and "
+ "%d%% of exit bw%s = "
+ "%d%% of path bw",
+ (int)(f_guard*100),
+ (int)(f_mid*100),
+ (int)(f_exit*100),
+ (router_have_consensus_path() == CONSENSUS_PATH_EXIT ?
+ "" :
+ " (no exits in consensus)"),
+ (int)(f_path*100));
+
+ return f_path;
+}
+
+/** We just fetched a new set of descriptors. Compute how far through
+ * the "loading descriptors" bootstrapping phase we are, so we can inform
+ * the controller of our progress. */
+int
+count_loading_descriptors_progress(void)
+{
+ int num_present = 0, num_usable=0;
+ time_t now = time(NULL);
+ const or_options_t *options = get_options();
+ const networkstatus_t *consensus =
+ networkstatus_get_reasonably_live_consensus(now,usable_consensus_flavor());
+ double paths, fraction;
+
+ if (!consensus)
+ return 0; /* can't count descriptors if we have no list of them */
+
+ paths = compute_frac_paths_available(consensus, options, now,
+ &num_present, &num_usable,
+ NULL);
+
+ fraction = paths / get_frac_paths_needed_for_circs(options,consensus);
+ if (fraction > 1.0)
+ return 0; /* it's not the number of descriptors holding us back */
+ return BOOTSTRAP_STATUS_LOADING_DESCRIPTORS + (int)
+ (fraction*(BOOTSTRAP_STATUS_CONN_OR-1 -
+ BOOTSTRAP_STATUS_LOADING_DESCRIPTORS));
+}
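+
+/* Illustrative arithmetic (the real bootstrap constants are defined in the
+ * control code; 50 and 80 are assumed here purely for the example): with
+ * BOOTSTRAP_STATUS_LOADING_DESCRIPTORS == 50, BOOTSTRAP_STATUS_CONN_OR == 80,
+ * and fraction == 0.5, the function above returns
+ * 50 + (int)(0.5 * (79 - 50)) == 64, i.e. halfway through the "loading
+ * descriptors" span of the bootstrap progress scale. */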
+
+/** Return the fraction of paths needed before we're willing to build
+ * circuits, as configured in <b>options</b>, or in the consensus <b>ns</b>. */
+static double
+get_frac_paths_needed_for_circs(const or_options_t *options,
+ const networkstatus_t *ns)
+{
+#define DFLT_PCT_USABLE_NEEDED 60
+ if (options->PathsNeededToBuildCircuits >= 0.0) {
+ return options->PathsNeededToBuildCircuits;
+ } else {
+ return networkstatus_get_param(ns, "min_paths_for_circs_pct",
+ DFLT_PCT_USABLE_NEEDED,
+ 25, 95)/100.0;
+ }
+}
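+
+/* Behaviour sketch: when PathsNeededToBuildCircuits is negative (unset), a
+ * consensus parameter "min_paths_for_circs_pct=30" yields 0.30; if the
+ * parameter is absent, the 60% default applies, and networkstatus_get_param()
+ * clamps any value outside [25, 95] before the division by 100. */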
+
+/** Change the value of have_min_dir_info, setting it true iff we have enough
+ * network and router information to build circuits. Clear the value of
+ * need_to_update_have_min_dir_info. */
+static void
+update_router_have_minimum_dir_info(void)
+{
+ time_t now = time(NULL);
+ int res;
+ int num_present=0, num_usable=0;
+ const or_options_t *options = get_options();
+ const networkstatus_t *consensus =
+ networkstatus_get_reasonably_live_consensus(now,usable_consensus_flavor());
+ int using_md;
+
+ if (!consensus) {
+ if (!networkstatus_get_latest_consensus())
+ strlcpy(dir_info_status, "We have no usable consensus.",
+ sizeof(dir_info_status));
+ else
+ strlcpy(dir_info_status, "We have no recent usable consensus.",
+ sizeof(dir_info_status));
+ res = 0;
+ goto done;
+ }
+
+ using_md = consensus->flavor == FLAV_MICRODESC;
+
+ /* Check fraction of available paths */
+ {
+ char *status = NULL;
+ double paths = compute_frac_paths_available(consensus, options, now,
+ &num_present, &num_usable,
+ &status);
+
+ if (paths < get_frac_paths_needed_for_circs(options,consensus)) {
+ tor_snprintf(dir_info_status, sizeof(dir_info_status),
+ "We need more %sdescriptors: we have %d/%d, and "
+ "can only build %d%% of likely paths. (We have %s.)",
+ using_md?"micro":"", num_present, num_usable,
+ (int)(paths*100), status);
+ tor_free(status);
+ res = 0;
+ control_event_bootstrap(BOOTSTRAP_STATUS_REQUESTING_DESCRIPTORS, 0);
+ goto done;
+ }
+
+ tor_free(status);
+ res = 1;
+ }
+
+ { /* Check entry guard dirinfo status */
+ char *guard_error = entry_guards_get_err_str_if_dir_info_missing(using_md,
+ num_present,
+ num_usable);
+ if (guard_error) {
+ strlcpy(dir_info_status, guard_error, sizeof(dir_info_status));
+ tor_free(guard_error);
+ res = 0;
+ goto done;
+ }
+ }
+
+ done:
+
+ /* If paths have just become available in this update. */
+ if (res && !have_min_dir_info) {
+ control_event_client_status(LOG_NOTICE, "ENOUGH_DIR_INFO");
+ if (control_event_bootstrap(BOOTSTRAP_STATUS_CONN_OR, 0) == 0) {
+ log_notice(LD_DIR,
+ "We now have enough directory information to build circuits.");
+ }
+ }
+
+ /* If paths have just become unavailable in this update. */
+ if (!res && have_min_dir_info) {
+ int quiet = directory_too_idle_to_fetch_descriptors(options, now);
+ tor_log(quiet ? LOG_INFO : LOG_NOTICE, LD_DIR,
+ "Our directory information is no longer up-to-date "
+ "enough to build circuits: %s", dir_info_status);
+
+ /* a) make us log when we next complete a circuit, so we know when Tor
+ * is back up and usable, and b) disable some activities that Tor
+ * should only do while circuits are working, like reachability tests
+ * and fetching bridge descriptors only over circuits. */
+ note_that_we_maybe_cant_complete_circuits();
+ have_consensus_path = CONSENSUS_PATH_UNKNOWN;
+ control_event_client_status(LOG_NOTICE, "NOT_ENOUGH_DIR_INFO");
+ }
+ have_min_dir_info = res;
+ need_to_update_have_min_dir_info = 0;
+}
+
diff --git a/src/feature/nodelist/nodelist.h b/src/feature/nodelist/nodelist.h
new file mode 100644
index 0000000000..ed3a542971
--- /dev/null
+++ b/src/feature/nodelist/nodelist.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file nodelist.h
+ * \brief Header file for nodelist.c.
+ **/
+
+#ifndef TOR_NODELIST_H
+#define TOR_NODELIST_H
+
+struct ed25519_public_key_t;
+struct curve25519_public_key_t;
+
+#define node_assert_ok(n) STMT_BEGIN { \
+ tor_assert((n)->ri || (n)->rs); \
+ } STMT_END
+
+MOCK_DECL(node_t *, node_get_mutable_by_id,(const char *identity_digest));
+MOCK_DECL(const node_t *, node_get_by_id, (const char *identity_digest));
+node_t *node_get_mutable_by_ed25519_id(
+ const struct ed25519_public_key_t *ed_id);
+MOCK_DECL(const node_t *, node_get_by_ed25519_id,
+ (const struct ed25519_public_key_t *ed_id));
+
+#define NNF_NO_WARN_UNNAMED (1u<<0)
+
+const node_t *node_get_by_hex_id(const char *identity_digest,
+ unsigned flags);
+node_t *nodelist_set_routerinfo(routerinfo_t *ri, routerinfo_t **ri_old_out);
+node_t *nodelist_add_microdesc(microdesc_t *md);
+void nodelist_set_consensus(networkstatus_t *ns);
+void nodelist_ensure_freshness(networkstatus_t *ns);
+int nodelist_probably_contains_address(const tor_addr_t *addr);
+
+void nodelist_remove_microdesc(const char *identity_digest, microdesc_t *md);
+void nodelist_remove_routerinfo(routerinfo_t *ri);
+void nodelist_purge(void);
+smartlist_t *nodelist_find_nodes_with_microdesc(const microdesc_t *md);
+
+void nodelist_free_all(void);
+void nodelist_assert_ok(void);
+
+MOCK_DECL(const node_t *, node_get_by_nickname,
+ (const char *nickname, unsigned flags));
+void node_get_verbose_nickname(const node_t *node,
+ char *verbose_name_out);
+void node_get_verbose_nickname_by_id(const char *id_digest,
+ char *verbose_name_out);
+int node_is_dir(const node_t *node);
+int node_is_good_exit(const node_t *node);
+int node_has_any_descriptor(const node_t *node);
+int node_has_preferred_descriptor(const node_t *node,
+ int for_direct_connect);
+int node_get_purpose(const node_t *node);
+#define node_is_bridge(node) \
+ (node_get_purpose((node)) == ROUTER_PURPOSE_BRIDGE)
+int node_is_me(const node_t *node);
+int node_exit_policy_rejects_all(const node_t *node);
+int node_exit_policy_is_exact(const node_t *node, sa_family_t family);
+smartlist_t *node_get_all_orports(const node_t *node);
+int node_allows_single_hop_exits(const node_t *node);
+const char *node_get_nickname(const node_t *node);
+const char *node_get_platform(const node_t *node);
+uint32_t node_get_prim_addr_ipv4h(const node_t *node);
+void node_get_address_string(const node_t *node, char *cp, size_t len);
+long node_get_declared_uptime(const node_t *node);
+const smartlist_t *node_get_declared_family(const node_t *node);
+const struct ed25519_public_key_t *node_get_ed25519_id(const node_t *node);
+int node_ed25519_id_matches(const node_t *node,
+ const struct ed25519_public_key_t *id);
+int node_supports_ed25519_link_authentication(const node_t *node,
+ int compatible_with_us);
+int node_supports_v3_hsdir(const node_t *node);
+int node_supports_ed25519_hs_intro(const node_t *node);
+int node_supports_v3_rendezvous_point(const node_t *node);
+const uint8_t *node_get_rsa_id_digest(const node_t *node);
+
+int node_has_ipv6_addr(const node_t *node);
+int node_has_ipv6_orport(const node_t *node);
+int node_has_ipv6_dirport(const node_t *node);
+/* Deprecated - use node_ipv6_or_preferred or node_ipv6_dir_preferred */
+#define node_ipv6_preferred(node) node_ipv6_or_preferred(node)
+int node_ipv6_or_preferred(const node_t *node);
+void node_get_prim_orport(const node_t *node, tor_addr_port_t *ap_out);
+void node_get_pref_orport(const node_t *node, tor_addr_port_t *ap_out);
+void node_get_pref_ipv6_orport(const node_t *node, tor_addr_port_t *ap_out);
+int node_ipv6_dir_preferred(const node_t *node);
+void node_get_prim_dirport(const node_t *node, tor_addr_port_t *ap_out);
+void node_get_pref_dirport(const node_t *node, tor_addr_port_t *ap_out);
+void node_get_pref_ipv6_dirport(const node_t *node, tor_addr_port_t *ap_out);
+int node_has_curve25519_onion_key(const node_t *node);
+const struct curve25519_public_key_t *node_get_curve25519_onion_key(
+ const node_t *node);
+
+MOCK_DECL(smartlist_t *, nodelist_get_list, (void));
+
+/* Temporary during transition to multiple addresses. */
+void node_get_addr(const node_t *node, tor_addr_t *addr_out);
+#define node_get_addr_ipv4h(n) node_get_prim_addr_ipv4h((n))
+
+void nodelist_refresh_countries(void);
+void node_set_country(node_t *node);
+void nodelist_add_node_and_family(smartlist_t *nodes, const node_t *node);
+int nodes_in_same_family(const node_t *node1, const node_t *node2);
+
+const node_t *router_find_exact_exit_enclave(const char *address,
+ uint16_t port);
+int node_is_unreliable(const node_t *router, int need_uptime,
+ int need_capacity, int need_guard);
+int router_exit_policy_all_nodes_reject(const tor_addr_t *addr, uint16_t port,
+ int need_uptime);
+void router_set_status(const char *digest, int up);
+int addrs_in_same_network_family(const tor_addr_t *a1,
+ const tor_addr_t *a2);
+
+/** router_have_minimum_dir_info tests to see if we have enough
+ * descriptor information to create circuits.
+ * If there are exits in the consensus, we wait until we have enough
+ * info to create exit paths before creating any circuits. If there are
+ * no exits in the consensus, we wait for enough info to create internal
+ * paths, and should avoid creating exit paths, as they will simply fail.
+ * We make sure we create all available circuit types at the same time. */
+MOCK_DECL(int, router_have_minimum_dir_info,(void));
+
+/** Set to CONSENSUS_PATH_EXIT if there is at least one exit node
+ * in the consensus. We update this flag in compute_frac_paths_available if
+ * there is at least one relay that has an Exit flag in the consensus.
+ * Used to avoid building exit circuits when they will almost certainly fail.
+ * Set to CONSENSUS_PATH_INTERNAL if there are no exits in the consensus.
+ * (This situation typically occurs during bootstrap of a test network.)
+ * Set to CONSENSUS_PATH_UNKNOWN if we have never checked, or have
+ * reason to believe our last known value was invalid or has expired.
+ */
+typedef enum {
+ /* we haven't checked yet, or we have invalidated our previous check */
+ CONSENSUS_PATH_UNKNOWN = -1,
+ /* The consensus only has internal relays, and we should only
+ * create internal paths, circuits, streams, ... */
+ CONSENSUS_PATH_INTERNAL = 0,
+ /* The consensus has at least one exit, and can therefore (potentially)
+ * create exit and internal paths, circuits, streams, ... */
+ CONSENSUS_PATH_EXIT = 1
+} consensus_path_type_t;
+
+MOCK_DECL(consensus_path_type_t, router_have_consensus_path, (void));
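+
+/* Sketch of how a caller might branch on this value (illustrative only; the
+ * real checks live in the circuit-building code):
+ *
+ *   if (router_have_consensus_path() == CONSENSUS_PATH_INTERNAL) {
+ *     ... build only internal circuits; exit circuits would just fail ...
+ *   }
+ */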
+
+void router_dir_info_changed(void);
+const char *get_dir_info_status_string(void);
+int count_loading_descriptors_progress(void);
+
+#ifdef NODELIST_PRIVATE
+
+#ifdef TOR_UNIT_TESTS
+
+STATIC void
+node_set_hsdir_index(node_t *node, const networkstatus_t *ns);
+
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#endif /* defined(NODELIST_PRIVATE) */
+
+MOCK_DECL(int, get_estimated_address_per_node, (void));
+
+#endif /* !defined(TOR_NODELIST_H) */
diff --git a/src/feature/nodelist/parsecommon.c b/src/feature/nodelist/parsecommon.c
new file mode 100644
index 0000000000..4340f28225
--- /dev/null
+++ b/src/feature/nodelist/parsecommon.c
@@ -0,0 +1,458 @@
+/* Copyright (c) 2016-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file parsecommon.c
+ * \brief Common code to parse and validate various types of descriptors.
+ **/
+
+#include "or/parsecommon.h"
+#include "lib/log/torlog.h"
+#include "lib/log/util_bug.h"
+#include "lib/encoding/binascii.h"
+#include "lib/container/smartlist.h"
+#include "lib/string/util_string.h"
+#include "lib/string/printf.h"
+#include "lib/memarea/memarea.h"
+#include "lib/crypt_ops/crypto.h"
+
+#include <string.h>
+
+#define MIN_ANNOTATION A_PURPOSE
+#define MAX_ANNOTATION A_UNKNOWN_
+
+#define ALLOC_ZERO(sz) memarea_alloc_zero(area,sz)
+#define ALLOC(sz) memarea_alloc(area,sz)
+#define STRDUP(str) memarea_strdup(area,str)
+#define STRNDUP(str,n) memarea_strndup(area,(str),(n))
+
+#define RET_ERR(msg) \
+ STMT_BEGIN \
+ if (tok) token_clear(tok); \
+ tok = ALLOC_ZERO(sizeof(directory_token_t)); \
+ tok->tp = ERR_; \
+ tok->error = STRDUP(msg); \
+ goto done_tokenizing; \
+ STMT_END
+
+/** Free all resources allocated for <b>tok</b> */
+void
+token_clear(directory_token_t *tok)
+{
+ if (tok->key)
+ crypto_pk_free(tok->key);
+}
+
+/** Read all tokens from a string between <b>start</b> and <b>end</b>, and add
+ * them to <b>out</b>. Parse according to the token rules in <b>table</b>.
+ * Caller must free tokens in <b>out</b>. If <b>end</b> is NULL, use the
+ * entire string.
+ */
+int
+tokenize_string(memarea_t *area,
+ const char *start, const char *end, smartlist_t *out,
+ token_rule_t *table, int flags)
+{
+ const char **s;
+ directory_token_t *tok = NULL;
+ int counts[NIL_];
+ int i;
+ int first_nonannotation;
+ int prev_len = smartlist_len(out);
+ tor_assert(area);
+
+ s = &start;
+ if (!end) {
+ end = start+strlen(start);
+ } else {
+ /* it's only meaningful to check for nuls if we got an end-of-string ptr */
+ if (memchr(start, '\0', end-start)) {
+ log_warn(LD_DIR, "parse error: internal NUL character.");
+ return -1;
+ }
+ }
+ for (i = 0; i < NIL_; ++i)
+ counts[i] = 0;
+
+ SMARTLIST_FOREACH(out, const directory_token_t *, t, ++counts[t->tp]);
+
+ while (*s < end && (!tok || tok->tp != EOF_)) {
+ tok = get_next_token(area, s, end, table);
+ if (tok->tp == ERR_) {
+ log_warn(LD_DIR, "parse error: %s", tok->error);
+ token_clear(tok);
+ return -1;
+ }
+ ++counts[tok->tp];
+ smartlist_add(out, tok);
+ *s = eat_whitespace_eos(*s, end);
+ }
+
+ if (flags & TS_NOCHECK)
+ return 0;
+
+ if ((flags & TS_ANNOTATIONS_OK)) {
+ first_nonannotation = -1;
+ for (i = 0; i < smartlist_len(out); ++i) {
+ tok = smartlist_get(out, i);
+ if (tok->tp < MIN_ANNOTATION || tok->tp > MAX_ANNOTATION) {
+ first_nonannotation = i;
+ break;
+ }
+ }
+ if (first_nonannotation < 0) {
+ log_warn(LD_DIR, "parse error: item contains only annotations");
+ return -1;
+ }
+ for (i=first_nonannotation; i < smartlist_len(out); ++i) {
+ tok = smartlist_get(out, i);
+ if (tok->tp >= MIN_ANNOTATION && tok->tp <= MAX_ANNOTATION) {
+ log_warn(LD_DIR, "parse error: Annotations mixed with keywords");
+ return -1;
+ }
+ }
+ if ((flags & TS_NO_NEW_ANNOTATIONS)) {
+ if (first_nonannotation != prev_len) {
+ log_warn(LD_DIR, "parse error: Unexpected annotations.");
+ return -1;
+ }
+ }
+ } else {
+ for (i=0; i < smartlist_len(out); ++i) {
+ tok = smartlist_get(out, i);
+ if (tok->tp >= MIN_ANNOTATION && tok->tp <= MAX_ANNOTATION) {
+ log_warn(LD_DIR, "parse error: no annotations allowed.");
+ return -1;
+ }
+ }
+ first_nonannotation = 0;
+ }
+ for (i = 0; table[i].t; ++i) {
+ if (counts[table[i].v] < table[i].min_cnt) {
+ log_warn(LD_DIR, "Parse error: missing %s element.", table[i].t);
+ return -1;
+ }
+ if (counts[table[i].v] > table[i].max_cnt) {
+ log_warn(LD_DIR, "Parse error: too many %s elements.", table[i].t);
+ return -1;
+ }
+ if (table[i].pos & AT_START) {
+ if (smartlist_len(out) < 1 ||
+ (tok = smartlist_get(out, first_nonannotation))->tp != table[i].v) {
+ log_warn(LD_DIR, "Parse error: first item is not %s.", table[i].t);
+ return -1;
+ }
+ }
+ if (table[i].pos & AT_END) {
+ if (smartlist_len(out) < 1 ||
+ (tok = smartlist_get(out, smartlist_len(out)-1))->tp != table[i].v) {
+ log_warn(LD_DIR, "Parse error: last item is not %s.", table[i].t);
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
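+
+/* Minimal usage sketch. The table entries, keyword pairings, and the input
+ * string "body" are hypothetical, chosen only to show the calling
+ * convention:
+ *
+ *   static token_rule_t example_table[] = {
+ *     T1_START("example-header", K_ROUTER, GE(1), NO_OBJ),
+ *     T01("example-key", K_SIGNING_KEY, NO_ARGS, NEED_KEY_1024),
+ *     END_OF_TABLE
+ *   };
+ *   memarea_t *area = memarea_new();
+ *   smartlist_t *tokens = smartlist_new();
+ *   if (tokenize_string(area, body, NULL, tokens, example_table, 0) < 0)
+ *     goto err;
+ *
+ * Arguments and objects live in the memarea, but keys are heap-allocated,
+ * so each token must be cleared before the list and area are released:
+ *
+ *   SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ *   smartlist_free(tokens);
+ *   memarea_drop_all(area);
+ */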
+
+/** Helper: parse space-separated arguments from the string <b>s</b> ending at
+ * <b>eol</b>, and store them in the args field of <b>tok</b>. Store the
+ * number of parsed elements into the n_args field of <b>tok</b>. Allocate
+ * all storage in <b>area</b>. Return the number of arguments parsed, or
+ * return -1 if there was an insanely high number of arguments. */
+static inline int
+get_token_arguments(memarea_t *area, directory_token_t *tok,
+ const char *s, const char *eol)
+{
+/** Largest number of arguments we'll accept to any token, ever. */
+#define MAX_ARGS 512
+ char *mem = memarea_strndup(area, s, eol-s);
+ char *cp = mem;
+ int j = 0;
+ char *args[MAX_ARGS];
+ memset(args, 0, sizeof(args));
+ while (*cp) {
+ if (j == MAX_ARGS)
+ return -1;
+ args[j++] = cp;
+ cp = (char*)find_whitespace(cp);
+ if (!cp || !*cp)
+ break; /* End of the line. */
+ *cp++ = '\0';
+ cp = (char*)eat_whitespace(cp);
+ }
+ tok->n_args = j;
+ tok->args = memarea_memdup(area, args, j*sizeof(char*));
+ return j;
+#undef MAX_ARGS
+}
+
+/** Helper: make sure that the token <b>tok</b> with keyword <b>kwd</b> obeys
+ * the object syntax of <b>o_syn</b>. Allocate all storage in <b>area</b>.
+ * Return <b>tok</b> on success, or a new ERR_ token if the token didn't
+ * conform to the syntax we wanted.
+ **/
+static inline directory_token_t *
+token_check_object(memarea_t *area, const char *kwd,
+ directory_token_t *tok, obj_syntax o_syn)
+{
+ char ebuf[128];
+ switch (o_syn) {
+ case NO_OBJ:
+ /* No object is allowed for this token. */
+ if (tok->object_body) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Unexpected object for %s", kwd);
+ RET_ERR(ebuf);
+ }
+ if (tok->key) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Unexpected public key for %s", kwd);
+ RET_ERR(ebuf);
+ }
+ break;
+ case NEED_OBJ:
+ /* There must be a (non-key) object. */
+ if (!tok->object_body) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Missing object for %s", kwd);
+ RET_ERR(ebuf);
+ }
+ break;
+ case NEED_KEY_1024: /* There must be a 1024-bit public key. */
+ case NEED_SKEY_1024: /* There must be a 1024-bit private key. */
+ if (tok->key && crypto_pk_num_bits(tok->key) != PK_BYTES*8) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Wrong size on key for %s: %d bits",
+ kwd, crypto_pk_num_bits(tok->key));
+ RET_ERR(ebuf);
+ }
+ /* fall through */
+ case NEED_KEY: /* There must be some kind of key. */
+ if (!tok->key) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Missing public key for %s", kwd);
+ RET_ERR(ebuf);
+ }
+ if (o_syn != NEED_SKEY_1024) {
+ if (crypto_pk_key_is_private(tok->key)) {
+ tor_snprintf(ebuf, sizeof(ebuf),
+ "Private key given for %s, which wants a public key", kwd);
+ RET_ERR(ebuf);
+ }
+ } else { /* o_syn == NEED_SKEY_1024 */
+ if (!crypto_pk_key_is_private(tok->key)) {
+ tor_snprintf(ebuf, sizeof(ebuf),
+ "Public key given for %s, which wants a private key", kwd);
+ RET_ERR(ebuf);
+ }
+ }
+ break;
+ case OBJ_OK:
+ /* Anything goes with this token. */
+ break;
+ }
+
+ done_tokenizing:
+ return tok;
+}
+
+/** Helper function: read the next token from *s, advance *s to the end of the
+ * token, and return the parsed token. Parse *<b>s</b> according to the list
+ * of tokens in <b>table</b>.
+ */
+directory_token_t *
+get_next_token(memarea_t *area,
+ const char **s, const char *eos, token_rule_t *table)
+{
+ /** Reject any object at least this big; it is probably an overflow, an
+ * attack, a bug, or some other nonsense. */
+#define MAX_UNPARSED_OBJECT_SIZE (128*1024)
+ /** Reject any line at least this big; it is probably an overflow, an
+ * attack, a bug, or some other nonsense. */
+#define MAX_LINE_LENGTH (128*1024)
+
+ const char *next, *eol, *obstart;
+ size_t obname_len;
+ int i;
+ directory_token_t *tok;
+ obj_syntax o_syn = NO_OBJ;
+ char ebuf[128];
+ const char *kwd = "";
+
+ tor_assert(area);
+ tok = ALLOC_ZERO(sizeof(directory_token_t));
+ tok->tp = ERR_;
+
+ /* Set *s to first token, eol to end-of-line, next to after first token */
+ *s = eat_whitespace_eos(*s, eos); /* eat multi-line whitespace */
+ tor_assert(eos >= *s);
+ eol = memchr(*s, '\n', eos-*s);
+ if (!eol)
+ eol = eos;
+ if (eol - *s > MAX_LINE_LENGTH) {
+ RET_ERR("Line far too long");
+ }
+
+ next = find_whitespace_eos(*s, eol);
+
+ if (!strcmp_len(*s, "opt", next-*s)) {
+ /* Skip past an "opt" at the start of the line. */
+ *s = eat_whitespace_eos_no_nl(next, eol);
+ next = find_whitespace_eos(*s, eol);
+ } else if (*s == eos) { /* If no "opt", and end-of-line, line is invalid */
+ RET_ERR("Unexpected EOF");
+ }
+
+ /* Search the table for the appropriate entry. (I tried a binary search
+ * instead, but it wasn't any faster.) */
+ for (i = 0; table[i].t ; ++i) {
+ if (!strcmp_len(*s, table[i].t, next-*s)) {
+ /* We've found the keyword. */
+ kwd = table[i].t;
+ tok->tp = table[i].v;
+ o_syn = table[i].os;
+ *s = eat_whitespace_eos_no_nl(next, eol);
+ /* We go ahead whether there are arguments or not, so that tok->args is
+ * always set if we want arguments. */
+ if (table[i].concat_args) {
+ /* The keyword takes the line as a single argument */
+ tok->args = ALLOC(sizeof(char*));
+ tok->args[0] = STRNDUP(*s,eol-*s); /* Grab everything on line */
+ tok->n_args = 1;
+ } else {
+ /* This keyword takes multiple arguments. */
+ if (get_token_arguments(area, tok, *s, eol)<0) {
+ tor_snprintf(ebuf, sizeof(ebuf),"Far too many arguments to %s", kwd);
+ RET_ERR(ebuf);
+ }
+ *s = eol;
+ }
+ if (tok->n_args < table[i].min_args) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Too few arguments to %s", kwd);
+ RET_ERR(ebuf);
+ } else if (tok->n_args > table[i].max_args) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Too many arguments to %s", kwd);
+ RET_ERR(ebuf);
+ }
+ break;
+ }
+ }
+
+ if (tok->tp == ERR_) {
+    /* No keyword matched; treat it as a K_OPT or A_UNKNOWN_ token. */
+ if (*s < eol && **s == '@')
+ tok->tp = A_UNKNOWN_;
+ else
+ tok->tp = K_OPT;
+ tok->args = ALLOC(sizeof(char*));
+ tok->args[0] = STRNDUP(*s, eol-*s);
+ tok->n_args = 1;
+ o_syn = OBJ_OK;
+ }
+
+ /* Check whether there's an object present */
+ *s = eat_whitespace_eos(eol, eos); /* Scan from end of first line */
+ tor_assert(eos >= *s);
+ eol = memchr(*s, '\n', eos-*s);
+ if (!eol || eol-*s<11 || strcmpstart(*s, "-----BEGIN ")) /* No object. */
+ goto check_object;
+
+ obstart = *s; /* Set obstart to start of object spec */
+ if (*s+16 >= eol || memchr(*s+11,'\0',eol-*s-16) || /* no short lines, */
+ strcmp_len(eol-5, "-----", 5) || /* nuls or invalid endings */
+ (eol-*s) > MAX_UNPARSED_OBJECT_SIZE) { /* name too long */
+ RET_ERR("Malformed object: bad begin line");
+ }
+ tok->object_type = STRNDUP(*s+11, eol-*s-16);
+ obname_len = eol-*s-16; /* store objname length here to avoid a strlen() */
+ *s = eol+1; /* Set *s to possible start of object data (could be eos) */
+
+ /* Go to the end of the object */
+ next = tor_memstr(*s, eos-*s, "-----END ");
+ if (!next) {
+ RET_ERR("Malformed object: missing object end line");
+ }
+ tor_assert(eos >= next);
+ eol = memchr(next, '\n', eos-next);
+ if (!eol) /* end-of-line marker, or eos if there's no '\n' */
+ eol = eos;
+ /* Validate the ending tag, which should be 9 + NAME + 5 + eol */
+ if ((size_t)(eol-next) != 9+obname_len+5 ||
+ strcmp_len(next+9, tok->object_type, obname_len) ||
+ strcmp_len(eol-5, "-----", 5)) {
+ tor_snprintf(ebuf, sizeof(ebuf), "Malformed object: mismatched end tag %s",
+ tok->object_type);
+ ebuf[sizeof(ebuf)-1] = '\0';
+ RET_ERR(ebuf);
+ }
+ if (next - *s > MAX_UNPARSED_OBJECT_SIZE)
+ RET_ERR("Couldn't parse object: missing footer or object much too big.");
+
+ if (!strcmp(tok->object_type, "RSA PUBLIC KEY")) { /* If it's a public key */
+ tok->key = crypto_pk_new();
+ if (crypto_pk_read_public_key_from_string(tok->key, obstart, eol-obstart))
+ RET_ERR("Couldn't parse public key.");
+ } else if (!strcmp(tok->object_type, "RSA PRIVATE KEY")) { /* private key */
+ tok->key = crypto_pk_new();
+ if (crypto_pk_read_private_key_from_string(tok->key, obstart, eol-obstart))
+ RET_ERR("Couldn't parse private key.");
+ } else { /* If it's something else, try to base64-decode it */
+ int r;
+ tok->object_body = ALLOC(next-*s); /* really, this is too much RAM. */
+ r = base64_decode(tok->object_body, next-*s, *s, next-*s);
+ if (r<0)
+ RET_ERR("Malformed object: bad base64-encoded data");
+ tok->object_size = r;
+ }
+ *s = eol;
+
+ check_object:
+ tok = token_check_object(area, kwd, tok, o_syn);
+
+ done_tokenizing:
+ return tok;
+
+#undef RET_ERR
+#undef ALLOC
+#undef ALLOC_ZERO
+#undef STRDUP
+#undef STRNDUP
+}
+
+/** Find the first token in <b>s</b> whose keyword is <b>keyword</b>; fail
+ * with an assert if no such keyword is found.
+ */
+directory_token_t *
+find_by_keyword_(smartlist_t *s, directory_keyword keyword,
+ const char *keyword_as_string)
+{
+ directory_token_t *tok = find_opt_by_keyword(s, keyword);
+ if (PREDICT_UNLIKELY(!tok)) {
+ log_err(LD_BUG, "Missing %s [%d] in directory object that should have "
+ "been validated. Internal error.", keyword_as_string, (int)keyword);
+ tor_assert(tok);
+ }
+ return tok;
+}
+
+/** Find the first token in <b>s</b> whose keyword is <b>keyword</b>; return
+ * NULL if no such keyword is found.
+ */
+directory_token_t *
+find_opt_by_keyword(const smartlist_t *s, directory_keyword keyword)
+{
+ SMARTLIST_FOREACH(s, directory_token_t *, t, if (t->tp == keyword) return t);
+ return NULL;
+}
+
+/** If there are any directory_token_t entries in <b>s</b> whose keyword is
+ * <b>k</b>, return a newly allocated smartlist_t containing all such entries,
+ * in the same order in which they occur in <b>s</b>. Otherwise return
+ * NULL. */
+smartlist_t *
+find_all_by_keyword(const smartlist_t *s, directory_keyword k)
+{
+ smartlist_t *out = NULL;
+ SMARTLIST_FOREACH(s, directory_token_t *, t,
+ if (t->tp == k) {
+ if (!out)
+ out = smartlist_new();
+ smartlist_add(out, t);
+ });
+ return out;
+}
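+
+/* Illustrative lookup pattern on a token list produced by tokenize_string()
+ * (the keyword choices are arbitrary examples):
+ *
+ *   directory_token_t *must = find_by_keyword(tokens, K_ROUTER);
+ *   directory_token_t *maybe = find_opt_by_keyword(tokens, K_CONTACT);
+ *   smartlist_t *all = find_all_by_keyword(tokens, K_OR_ADDRESS);
+ *
+ * find_by_keyword() asserts that a matching token exists, so it should only
+ * be used once the token-table rules have guaranteed its presence; the other
+ * two may return NULL. */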
diff --git a/src/feature/nodelist/parsecommon.h b/src/feature/nodelist/parsecommon.h
new file mode 100644
index 0000000000..d0f3810c0b
--- /dev/null
+++ b/src/feature/nodelist/parsecommon.h
@@ -0,0 +1,324 @@
+/* Copyright (c) 2016-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file parsecommon.h
+ * \brief Header file for parsecommon.c
+ **/
+
+#ifndef TOR_PARSECOMMON_H
+#define TOR_PARSECOMMON_H
+
+#include <stddef.h>
+
+struct smartlist_t;
+struct crypto_pk_t;
+struct memarea_t;
+
+/** Enumeration of possible token types. The ones starting with K_ correspond
+ * to directory 'keywords'. A_ is for an annotation, R_ and C_ are related to
+ * hidden services, ERR_ is an error in the tokenizing process, EOF_ is an
+ * end-of-file marker, and NIL_ is used to encode not-a-token.
+ */
+typedef enum {
+ K_ACCEPT = 0,
+ K_ACCEPT6,
+ K_DIRECTORY_SIGNATURE,
+ K_RECOMMENDED_SOFTWARE,
+ K_REJECT,
+ K_REJECT6,
+ K_ROUTER,
+ K_SIGNED_DIRECTORY,
+ K_SIGNING_KEY,
+ K_ONION_KEY,
+ K_ONION_KEY_NTOR,
+ K_ROUTER_SIGNATURE,
+ K_PUBLISHED,
+ K_RUNNING_ROUTERS,
+ K_ROUTER_STATUS,
+ K_PLATFORM,
+ K_PROTO,
+ K_OPT,
+ K_BANDWIDTH,
+ K_CONTACT,
+ K_NETWORK_STATUS,
+ K_UPTIME,
+ K_DIR_SIGNING_KEY,
+ K_FAMILY,
+ K_FINGERPRINT,
+ K_HIBERNATING,
+ K_READ_HISTORY,
+ K_WRITE_HISTORY,
+ K_NETWORK_STATUS_VERSION,
+ K_DIR_SOURCE,
+ K_DIR_OPTIONS,
+ K_CLIENT_VERSIONS,
+ K_SERVER_VERSIONS,
+ K_RECOMMENDED_CLIENT_PROTOCOLS,
+ K_RECOMMENDED_RELAY_PROTOCOLS,
+ K_REQUIRED_CLIENT_PROTOCOLS,
+ K_REQUIRED_RELAY_PROTOCOLS,
+ K_OR_ADDRESS,
+ K_ID,
+ K_P,
+ K_P6,
+ K_R,
+ K_A,
+ K_S,
+ K_V,
+ K_W,
+ K_M,
+ K_EXTRA_INFO,
+ K_EXTRA_INFO_DIGEST,
+ K_CACHES_EXTRA_INFO,
+ K_HIDDEN_SERVICE_DIR,
+ K_ALLOW_SINGLE_HOP_EXITS,
+ K_IPV6_POLICY,
+ K_ROUTER_SIG_ED25519,
+ K_IDENTITY_ED25519,
+ K_MASTER_KEY_ED25519,
+ K_ONION_KEY_CROSSCERT,
+ K_NTOR_ONION_KEY_CROSSCERT,
+
+ K_DIRREQ_END,
+ K_DIRREQ_V2_IPS,
+ K_DIRREQ_V3_IPS,
+ K_DIRREQ_V2_REQS,
+ K_DIRREQ_V3_REQS,
+ K_DIRREQ_V2_SHARE,
+ K_DIRREQ_V3_SHARE,
+ K_DIRREQ_V2_RESP,
+ K_DIRREQ_V3_RESP,
+ K_DIRREQ_V2_DIR,
+ K_DIRREQ_V3_DIR,
+ K_DIRREQ_V2_TUN,
+ K_DIRREQ_V3_TUN,
+ K_ENTRY_END,
+ K_ENTRY_IPS,
+ K_CELL_END,
+ K_CELL_PROCESSED,
+ K_CELL_QUEUED,
+ K_CELL_TIME,
+ K_CELL_CIRCS,
+ K_EXIT_END,
+ K_EXIT_WRITTEN,
+ K_EXIT_READ,
+ K_EXIT_OPENED,
+
+ K_DIR_KEY_CERTIFICATE_VERSION,
+ K_DIR_IDENTITY_KEY,
+ K_DIR_KEY_PUBLISHED,
+ K_DIR_KEY_EXPIRES,
+ K_DIR_KEY_CERTIFICATION,
+ K_DIR_KEY_CROSSCERT,
+ K_DIR_ADDRESS,
+ K_DIR_TUNNELLED,
+
+ K_VOTE_STATUS,
+ K_VALID_AFTER,
+ K_FRESH_UNTIL,
+ K_VALID_UNTIL,
+ K_VOTING_DELAY,
+
+ K_KNOWN_FLAGS,
+ K_PARAMS,
+ K_BW_WEIGHTS,
+ K_VOTE_DIGEST,
+ K_CONSENSUS_DIGEST,
+ K_ADDITIONAL_DIGEST,
+ K_ADDITIONAL_SIGNATURE,
+ K_CONSENSUS_METHODS,
+ K_CONSENSUS_METHOD,
+ K_LEGACY_DIR_KEY,
+ K_DIRECTORY_FOOTER,
+ K_SIGNING_CERT_ED,
+ K_SR_FLAG,
+ K_COMMIT,
+ K_PREVIOUS_SRV,
+ K_CURRENT_SRV,
+ K_PACKAGE,
+
+ A_PURPOSE,
+ A_LAST_LISTED,
+ A_UNKNOWN_,
+
+ R_RENDEZVOUS_SERVICE_DESCRIPTOR,
+ R_VERSION,
+ R_PERMANENT_KEY,
+ R_SECRET_ID_PART,
+ R_PUBLICATION_TIME,
+ R_PROTOCOL_VERSIONS,
+ R_INTRODUCTION_POINTS,
+ R_SIGNATURE,
+
+ R_HS_DESCRIPTOR, /* From version 3, this MUST be generic to all future
+ descriptor versions thus making it R_. */
+ R3_DESC_LIFETIME,
+ R3_DESC_SIGNING_CERT,
+ R3_REVISION_COUNTER,
+ R3_SUPERENCRYPTED,
+ R3_SIGNATURE,
+ R3_CREATE2_FORMATS,
+ R3_INTRO_AUTH_REQUIRED,
+ R3_SINGLE_ONION_SERVICE,
+ R3_INTRODUCTION_POINT,
+ R3_INTRO_ONION_KEY,
+ R3_INTRO_AUTH_KEY,
+ R3_INTRO_ENC_KEY,
+ R3_INTRO_ENC_KEY_CERT,
+ R3_INTRO_LEGACY_KEY,
+ R3_INTRO_LEGACY_KEY_CERT,
+ R3_DESC_AUTH_TYPE,
+ R3_DESC_AUTH_KEY,
+ R3_DESC_AUTH_CLIENT,
+ R3_ENCRYPTED,
+
+ R_IPO_IDENTIFIER,
+ R_IPO_IP_ADDRESS,
+ R_IPO_ONION_PORT,
+ R_IPO_ONION_KEY,
+ R_IPO_SERVICE_KEY,
+
+ C_CLIENT_NAME,
+ C_DESCRIPTOR_COOKIE,
+ C_CLIENT_KEY,
+
+ ERR_,
+ EOF_,
+ NIL_
+} directory_keyword;
+
+/** Structure to hold a single directory token.
+ *
+ * We parse a directory by breaking it into "tokens", each consisting
+ * of a keyword, a line full of arguments, and a binary object. The
+ * arguments and object are both optional, depending on the keyword
+ * type.
+ *
+ * This structure is only allocated in memareas; do not allocate it on
+ * the heap, or token_clear() won't work.
+ */
+typedef struct directory_token_t {
+ directory_keyword tp; /**< Type of the token. */
+ int n_args:30; /**< Number of elements in args */
+ char **args; /**< Array of arguments from keyword line. */
+
+ char *object_type; /**< -----BEGIN [object_type]-----*/
+ size_t object_size; /**< Bytes in object_body */
+ char *object_body; /**< Contents of object, base64-decoded. */
+
+ struct crypto_pk_t *key; /**< For public keys only. Heap-allocated. */
+
+ char *error; /**< For ERR_ tokens only. */
+} directory_token_t;
+
+/** We use a table of rules to decide how to parse each token type. */
+
+/** Rules for whether the keyword needs an object. */
+typedef enum {
+ NO_OBJ, /**< No object, ever. */
+ NEED_OBJ, /**< Object is required. */
+ NEED_SKEY_1024,/**< Object is required, and must be a 1024 bit private key */
+ NEED_KEY_1024, /**< Object is required, and must be a 1024 bit public key */
+ NEED_KEY, /**< Object is required, and must be a public key. */
+ OBJ_OK, /**< Object is optional. */
+} obj_syntax;
+
+#define AT_START 1
+#define AT_END 2
+
+#define TS_ANNOTATIONS_OK 1
+#define TS_NOCHECK 2
+#define TS_NO_NEW_ANNOTATIONS 4
+
+/**
+ * @name macros for defining token rules
+ *
+ * Helper macros to define token tables. 's' is a string, 't' is a
+ * directory_keyword, 'a' is a trio of argument multiplicities, and 'o' is an
+ * object syntax.
+ */
+/**@{*/
+
+/** Sentinel entry that indicates the end of a table. */
+#define END_OF_TABLE { NULL, NIL_, 0,0,0, NO_OBJ, 0, INT_MAX, 0, 0 }
+/** An item with no restrictions: used for obsolete document types */
+#define T(s,t,a,o) { s, t, a, o, 0, INT_MAX, 0, 0 }
+/** An item with no restrictions on multiplicity or location. */
+#define T0N(s,t,a,o) { s, t, a, o, 0, INT_MAX, 0, 0 }
+/** An item that must appear exactly once */
+#define T1(s,t,a,o) { s, t, a, o, 1, 1, 0, 0 }
+/** An item that must appear exactly once, at the start of the document */
+#define T1_START(s,t,a,o) { s, t, a, o, 1, 1, AT_START, 0 }
+/** An item that must appear exactly once, at the end of the document */
+#define T1_END(s,t,a,o) { s, t, a, o, 1, 1, AT_END, 0 }
+/** An item that must appear one or more times */
+#define T1N(s,t,a,o) { s, t, a, o, 1, INT_MAX, 0, 0 }
+/** An item that must appear no more than once */
+#define T01(s,t,a,o) { s, t, a, o, 0, 1, 0, 0 }
+/** An annotation that must appear no more than once */
+#define A01(s,t,a,o) { s, t, a, o, 0, 1, 0, 1 }
+
+/** Argument multiplicity: any number of arguments. */
+#define ARGS 0,INT_MAX,0
+/** Argument multiplicity: no arguments. */
+#define NO_ARGS 0,0,0
+/** Argument multiplicity: concatenate all arguments. */
+#define CONCAT_ARGS 1,1,1
+/** Argument multiplicity: at least <b>n</b> arguments. */
+#define GE(n) n,INT_MAX,0
+/** Argument multiplicity: exactly <b>n</b> arguments. */
+#define EQ(n) n,n,0
+/**@}*/
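+
+/* Expansion example (a hypothetical entry, shown only to make the field
+ * order of token_rule_t below concrete):
+ *
+ *   T1("dir-key-expires", K_DIR_KEY_EXPIRES, CONCAT_ARGS, NO_OBJ)
+ *
+ * expands to
+ *
+ *   { "dir-key-expires", K_DIR_KEY_EXPIRES, 1, 1, 1, NO_OBJ, 1, 1, 0, 0 },
+ *
+ * i.e. exactly one argument (the rest of the line, concatenated), no object,
+ * and a keyword that must appear exactly once, anywhere in the document. */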
+
+/** Determines the parsing rules for a single token type. */
+typedef struct token_rule_t {
+ /** The string value of the keyword identifying the type of item. */
+ const char *t;
+ /** The corresponding directory_keyword enum. */
+ directory_keyword v;
+ /** Minimum number of arguments for this item */
+ int min_args;
+ /** Maximum number of arguments for this item */
+ int max_args;
+ /** If true, we concatenate all arguments for this item into a single
+ * string. */
+ int concat_args;
+ /** Requirements on object syntax for this item. */
+ obj_syntax os;
+ /** Lowest number of times this item may appear in a document. */
+ int min_cnt;
+ /** Highest number of times this item may appear in a document. */
+ int max_cnt;
+ /** One or more of AT_START/AT_END to limit where the item may appear in a
+ * document. */
+ int pos;
+ /** True iff this token is an annotation. */
+ int is_annotation;
+} token_rule_t;
+
+void token_clear(directory_token_t *tok);
+
+int tokenize_string(struct memarea_t *area,
+ const char *start, const char *end,
+ struct smartlist_t *out,
+ token_rule_t *table,
+ int flags);
+directory_token_t *get_next_token(struct memarea_t *area,
+ const char **s,
+ const char *eos,
+ token_rule_t *table);
+
+directory_token_t *find_by_keyword_(struct smartlist_t *s,
+ directory_keyword keyword,
+ const char *keyword_str);
+
+#define find_by_keyword(s, keyword) \
+ find_by_keyword_((s), (keyword), #keyword)
+
+directory_token_t *find_opt_by_keyword(const struct smartlist_t *s,
+ directory_keyword keyword);
+struct smartlist_t * find_all_by_keyword(const struct smartlist_t *s,
+ directory_keyword k);
+
+#endif /* !defined(TOR_PARSECOMMON_H) */
diff --git a/src/feature/nodelist/routerinfo_st.h b/src/feature/nodelist/routerinfo_st.h
new file mode 100644
index 0000000000..89a7702b30
--- /dev/null
+++ b/src/feature/nodelist/routerinfo_st.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef ROUTERINFO_ST_H
+#define ROUTERINFO_ST_H
+
+#include "or/signed_descriptor_st.h"
+
+struct curve25519_public_key_t;
+
+/** Information about another onion router in the network. */
+struct routerinfo_t {
+ signed_descriptor_t cache_info;
+ char *nickname; /**< Human-readable OR name. */
+
+ uint32_t addr; /**< IPv4 address of OR, in host order. */
+ uint16_t or_port; /**< Port for TLS connections. */
+ uint16_t dir_port; /**< Port for HTTP directory connections. */
+
+ /** A router's IPv6 address, if it has one. */
+ /* XXXXX187 Actually these should probably be part of a list of addresses,
+ * not just a special case. Use abstractions to access these; don't do it
+ * directly. */
+ tor_addr_t ipv6_addr;
+ uint16_t ipv6_orport;
+
+ crypto_pk_t *onion_pkey; /**< Public RSA key for onions. */
+ crypto_pk_t *identity_pkey; /**< Public RSA key for signing. */
+ /** Public curve25519 key for onions */
+ struct curve25519_public_key_t *onion_curve25519_pkey;
+ /** What's the earliest expiration time on all the certs in this
+ * routerinfo? */
+ time_t cert_expiration_time;
+
+ char *platform; /**< What software/operating system is this OR using? */
+
+ char *protocol_list; /**< Encoded list of subprotocol versions supported
+ * by this OR */
+
+ /* link info */
+ uint32_t bandwidthrate; /**< How many bytes does this OR add to its token
+ * bucket per second? */
+ uint32_t bandwidthburst; /**< How large is this OR's token bucket? */
+ /** How many bytes/s is this router known to handle? */
+ uint32_t bandwidthcapacity;
+ smartlist_t *exit_policy; /**< What streams will this OR permit
+ * to exit on IPv4? NULL for 'reject *:*'. */
+ /** What streams will this OR permit to exit on IPv6?
+ * NULL for 'reject *:*' */
+ struct short_policy_t *ipv6_exit_policy;
+ long uptime; /**< How many seconds the router claims to have been up */
+  smartlist_t *declared_family; /**< Nicknames of routers which this router
+ * claims are its family. */
+ char *contact_info; /**< Declared contact info for this router. */
+ unsigned int is_hibernating:1; /**< Whether the router claims to be
+ * hibernating */
+ unsigned int caches_extra_info:1; /**< Whether the router says it caches and
+ * serves extrainfo documents. */
+ unsigned int allow_single_hop_exits:1; /**< Whether the router says
+ * it allows single hop exits. */
+
+ unsigned int wants_to_be_hs_dir:1; /**< True iff this router claims to be
+ * a hidden service directory. */
+ unsigned int policy_is_reject_star:1; /**< True iff the exit policy for this
+ * router rejects everything. */
+ /** True if, after we have added this router, we should re-launch
+ * tests for it. */
+ unsigned int needs_retest_if_added:1;
+
+ /** True iff this router included "tunnelled-dir-server" in its descriptor,
+ * implying it accepts tunnelled directory requests, or it advertised
+ * dir_port > 0. */
+ unsigned int supports_tunnelled_dir_requests:1;
+
+  /** Used only during voting: true iff we should not include an entry for
+   * this routerinfo in the vote we are computing. */
+ unsigned int omit_from_vote:1;
+
+ /** Flags to summarize the protocol versions for this routerinfo_t. */
+ protover_summary_flags_t pv;
+
+/** Tor can use this router for general positions in circuits; we got it
+ * from a directory server as usual, or we're an authority and a server
+ * uploaded it. */
+#define ROUTER_PURPOSE_GENERAL 0
+/** Tor should avoid using this router for circuit-building: we got it
+ * from a controller. If the controller wants to use it, it'll have to
+ * ask for it by identity. */
+#define ROUTER_PURPOSE_CONTROLLER 1
+/** Tor should use this router only for bridge positions in circuits: we got
+ * it via a directory request from the bridge itself, or a bridge
+ * authority. */
+#define ROUTER_PURPOSE_BRIDGE 2
+/** Tor should not use this router; it was marked in cached-descriptors with
+ * a purpose we didn't recognize. */
+#define ROUTER_PURPOSE_UNKNOWN 255
+
+ /** In what way did we find out about this router? One of ROUTER_PURPOSE_*.
+ * Routers of different purposes are kept segregated and used for different
+ * things; see notes on ROUTER_PURPOSE_* macros above.
+ */
+ uint8_t purpose;
+};
+
+#endif
diff --git a/src/feature/nodelist/routerlist.c b/src/feature/nodelist/routerlist.c
new file mode 100644
index 0000000000..76a236ff20
--- /dev/null
+++ b/src/feature/nodelist/routerlist.c
@@ -0,0 +1,5848 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file routerlist.c
+ * \brief Code to maintain and access the global list of routerinfos for
+ *   known servers.
+ *
+ * A "routerinfo_t" object represents a single self-signed router
+ * descriptor, as generated by a Tor relay in order to tell the rest of
+ * the world about its keys, address, and capabilities. An
+ * "extrainfo_t" object represents an adjunct "extra-info" object,
+ * certified by a corresponding router descriptor, reporting additional
+ * information about the relay that most users do not need.
+ *
+ * Most users will not use router descriptors for most relays. Instead,
+ * they use the information in microdescriptors and in the consensus
+ * networkstatus.
+ *
+ * Right now, routerinfo_t objects are used in these ways:
+ * <ul>
+ * <li>By clients, in order to learn about bridge keys and capabilities.
+ * (Bridges aren't listed in the consensus networkstatus, so they
+ * can't have microdescriptors.)
+ * <li>By relays, since relays want more information about other relays
+ * than they can learn from microdescriptors. (TODO: Is this still true?)
+ * <li>By authorities, which receive them and use them to generate the
+ * consensus and the microdescriptors.
+ * <li>By all directory caches, which download them in case somebody
+ * else wants them.
+ * </ul>
+ *
+ * Routerinfos are mostly created by parsing them from a string, in
+ * routerparse.c. We store them to disk on receiving them, and
+ * periodically discard the ones we don't need. On restarting, we
+ * re-read them from disk. (This also applies to extrainfo documents, if
+ * we are configured to fetch them.)
+ *
+ * In order to keep our list of routerinfos up-to-date, we periodically
+ * check whether there are any listed in the latest consensus (or in the
+ * votes from other authorities, if we are an authority) that we don't
+ * have. (This also applies to extrainfo documents, if we are
+ * configured to fetch them.)
+ *
+ * Almost nothing in Tor should use a routerinfo_t to refer directly to
+ * a relay; instead, almost everything should use node_t (implemented in
+ * nodelist.c), which provides a common interface to routerinfo_t,
+ * routerstatus_t, and microdescriptor_t.
+ *
+ * <br>
+ *
+ * This module also has some of the functions used for choosing random
+ * nodes according to different rules and weights. Historically, they
+ * were all in this module. Now, they are spread across this module,
+ * nodelist.c, and networkstatus.c. (TODO: Fix that.)
+ *
+ * <br>
+ *
+ * (For historical reasons) this module also contains code for handling
+ * the list of fallback directories, the list of directory authorities,
+ * and the list of authority certificates.
+ *
+ * For the directory authorities, we have a list containing the public
+ * identity key, and contact points, for each authority. The
+ * authorities receive descriptors from relays, and publish consensuses,
+ * descriptors, and microdescriptors. This list is pre-configured.
+ *
+ * Fallback directories are well-known, stable, but untrusted directory
+ * caches that clients which have not yet bootstrapped can use to get
+ * their first networkstatus consensus, in order to find out where the
+ * Tor network really is. This list is pre-configured in
+ * fallback_dirs.inc. Every authority also serves as a fallback.
+ *
+ * Both fallback directories and directory authorities are represented by
+ * a dir_server_t.
+ *
+ * Authority certificates are signed with authority identity keys; they
+ * are used to authenticate shorter-term authority signing keys. We
+ * fetch them when we find a consensus or a vote that has been signed
+ * with a signing key we don't recognize. We cache them on disk and
+ * load them on startup. Authority operators generate them with the
+ * "tor-gencert" utility.
+ *
+ * TODO: Authority certificates should be a separate module.
+ *
+ * TODO: dir_server_t stuff should be in a separate module.
+ **/
+
+#define ROUTERLIST_PRIVATE
+#include "or/or.h"
+#include "lib/err/backtrace.h"
+#include "or/bridges.h"
+#include "lib/crypt_ops/crypto_ed25519.h"
+#include "lib/crypt_ops/crypto_format.h"
+#include "or/circuitstats.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/directory.h"
+#include "or/dirserv.h"
+#include "or/entrynodes.h"
+#include "or/fp_pair.h"
+#include "or/geoip.h"
+#include "or/hibernate.h"
+#include "or/main.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/reasons.h"
+#include "or/rendcommon.h"
+#include "or/rendservice.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "or/routerset.h"
+#include "lib/sandbox/sandbox.h"
+#include "or/torcert.h"
+#include "lib/math/fp.h"
+
+#include "or/dirauth/dirvote.h"
+#include "or/dirauth/mode.h"
+
+#include "or/authority_cert_st.h"
+#include "or/dir_connection_st.h"
+#include "or/dir_server_st.h"
+#include "or/document_signature_st.h"
+#include "or/extrainfo_st.h"
+#include "or/networkstatus_st.h"
+#include "or/networkstatus_voter_info_st.h"
+#include "or/node_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerlist_st.h"
+#include "or/vote_routerstatus_st.h"
+
+#include "lib/crypt_ops/digestset.h"
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+// #define DEBUG_ROUTERLIST
+
+/****************************************************************************/
+
+/* Typed wrappers for different digestmap types; used to avoid type
+ * confusion. */
+
+DECLARE_TYPED_DIGESTMAP_FNS(sdmap_, digest_sd_map_t, signed_descriptor_t)
+DECLARE_TYPED_DIGESTMAP_FNS(rimap_, digest_ri_map_t, routerinfo_t)
+DECLARE_TYPED_DIGESTMAP_FNS(eimap_, digest_ei_map_t, extrainfo_t)
+DECLARE_TYPED_DIGESTMAP_FNS(dsmap_, digest_ds_map_t, download_status_t)
+#define SDMAP_FOREACH(map, keyvar, valvar) \
+ DIGESTMAP_FOREACH(sdmap_to_digestmap(map), keyvar, signed_descriptor_t *, \
+ valvar)
+#define RIMAP_FOREACH(map, keyvar, valvar) \
+ DIGESTMAP_FOREACH(rimap_to_digestmap(map), keyvar, routerinfo_t *, valvar)
+#define EIMAP_FOREACH(map, keyvar, valvar) \
+ DIGESTMAP_FOREACH(eimap_to_digestmap(map), keyvar, extrainfo_t *, valvar)
+#define DSMAP_FOREACH(map, keyvar, valvar) \
+ DIGESTMAP_FOREACH(dsmap_to_digestmap(map), keyvar, download_status_t *, \
+ valvar)
+#define eimap_free(map, fn) MAP_FREE_AND_NULL(eimap, (map), (fn))
+#define rimap_free(map, fn) MAP_FREE_AND_NULL(rimap, (map), (fn))
+#define dsmap_free(map, fn) MAP_FREE_AND_NULL(dsmap, (map), (fn))
+#define sdmap_free(map, fn) MAP_FREE_AND_NULL(sdmap, (map), (fn))
+
+/* Forward declaration for cert_list_t */
+typedef struct cert_list_t cert_list_t;
+
+/* static function prototypes */
+static int compute_weighted_bandwidths(const smartlist_t *sl,
+ bandwidth_weight_rule_t rule,
+ double **bandwidths_out,
+ double *total_bandwidth_out);
+static const routerstatus_t *router_pick_trusteddirserver_impl(
+ const smartlist_t *sourcelist, dirinfo_type_t auth,
+ int flags, int *n_busy_out);
+static const routerstatus_t *router_pick_dirserver_generic(
+ smartlist_t *sourcelist,
+ dirinfo_type_t type, int flags);
+static void mark_all_dirservers_up(smartlist_t *server_list);
+static int signed_desc_digest_is_recognized(signed_descriptor_t *desc);
+static const char *signed_descriptor_get_body_impl(
+ const signed_descriptor_t *desc,
+ int with_annotations);
+static void list_pending_downloads(digestmap_t *result,
+ digest256map_t *result256,
+ int purpose, const char *prefix);
+static void list_pending_fpsk_downloads(fp_pair_map_t *result);
+static void launch_dummy_descriptor_download_as_needed(time_t now,
+ const or_options_t *options);
+static void download_status_reset_by_sk_in_cl(cert_list_t *cl,
+ const char *digest);
+static int download_status_is_ready_by_sk_in_cl(cert_list_t *cl,
+ const char *digest,
+ time_t now);
+
+/****************************************************************************/
+
+/** Global list containing one dir_server_t object for each directory
+ * authority. */
+static smartlist_t *trusted_dir_servers = NULL;
+/** Global list of dir_server_t objects for all directory authorities
+ * and all fallback directory servers. */
+static smartlist_t *fallback_dir_servers = NULL;
+
+/** List of certificates for a single authority, and download status for
+ * latest certificate.
+ */
+struct cert_list_t {
+ /*
+ * The keys of download status map are cert->signing_key_digest for pending
+ * downloads by (identity digest/signing key digest) pair; functions such
+ * as authority_cert_get_by_digest() already assume these are unique.
+ */
+ struct digest_ds_map_t *dl_status_map;
+ /* There is also a dlstatus for the download by identity key only */
+ download_status_t dl_status_by_id;
+ smartlist_t *certs;
+};
+/** Map from v3 identity key digest to cert_list_t. */
+static digestmap_t *trusted_dir_certs = NULL;
+/** True iff any key certificate in at least one member of
+ * <b>trusted_dir_certs</b> has changed since we last flushed the
+ * certificates to disk. */
+static int trusted_dir_servers_certs_changed = 0;
+
+/** Global list of all of the routers that we know about. */
+static routerlist_t *routerlist = NULL;
+
+/** List of strings for nicknames we've already warned about and that are
+ * still unknown / unavailable. */
+static smartlist_t *warned_nicknames = NULL;
+
+/** The last time we tried to download any routerdesc, or 0 for "never". We
+ * use this to rate-limit download attempts when the number of routerdescs to
+ * download is low. */
+static time_t last_descriptor_download_attempted = 0;
+
+/** Return the number of directory authorities whose type matches some bit set
+ * in <b>type</b> */
+int
+get_n_authorities(dirinfo_type_t type)
+{
+ int n = 0;
+ if (!trusted_dir_servers)
+ return 0;
+ SMARTLIST_FOREACH(trusted_dir_servers, dir_server_t *, ds,
+ if (ds->type & type)
+ ++n);
+ return n;
+}
+
+/** Initialise schedule, want_authority, and increment_on in the download
+ * status dlstatus, then call download_status_reset() on it.
+ * It is safe to call this function or download_status_reset() multiple times
+ * on a new dlstatus. But it should *not* be called after a dlstatus has been
+ * used to count download attempts or failures. */
+static void
+download_status_cert_init(download_status_t *dlstatus)
+{
+ dlstatus->schedule = DL_SCHED_CONSENSUS;
+ dlstatus->want_authority = DL_WANT_ANY_DIRSERVER;
+ dlstatus->increment_on = DL_SCHED_INCREMENT_FAILURE;
+ dlstatus->last_backoff_position = 0;
+ dlstatus->last_delay_used = 0;
+
+ /* Use the new schedule to set next_attempt_at */
+ download_status_reset(dlstatus);
+}
+
+/** Reset the download status of a specified element in a dsmap */
+static void
+download_status_reset_by_sk_in_cl(cert_list_t *cl, const char *digest)
+{
+ download_status_t *dlstatus = NULL;
+
+ tor_assert(cl);
+ tor_assert(digest);
+
+ /* Make sure we have a dsmap */
+ if (!(cl->dl_status_map)) {
+ cl->dl_status_map = dsmap_new();
+ }
+ /* Look for a download_status_t in the map with this digest */
+ dlstatus = dsmap_get(cl->dl_status_map, digest);
+ /* Got one? */
+ if (!dlstatus) {
+ /* Insert before we reset */
+ dlstatus = tor_malloc_zero(sizeof(*dlstatus));
+ dsmap_set(cl->dl_status_map, digest, dlstatus);
+ download_status_cert_init(dlstatus);
+ }
+ tor_assert(dlstatus);
+ /* Go ahead and reset it */
+ download_status_reset(dlstatus);
+}
+
+/**
+ * Return true if the download for this signing key digest in cl is ready
+ * to be re-attempted.
+ */
+static int
+download_status_is_ready_by_sk_in_cl(cert_list_t *cl,
+ const char *digest,
+ time_t now)
+{
+ int rv = 0;
+ download_status_t *dlstatus = NULL;
+
+ tor_assert(cl);
+ tor_assert(digest);
+
+ /* Make sure we have a dsmap */
+ if (!(cl->dl_status_map)) {
+ cl->dl_status_map = dsmap_new();
+ }
+ /* Look for a download_status_t in the map with this digest */
+ dlstatus = dsmap_get(cl->dl_status_map, digest);
+ /* Got one? */
+ if (dlstatus) {
+ /* Use download_status_is_ready() */
+ rv = download_status_is_ready(dlstatus, now);
+ } else {
+ /*
+ * If we don't know anything about it, return 1, since we haven't
+ * tried this one before. We need to create a new entry here,
+ * too.
+ */
+ dlstatus = tor_malloc_zero(sizeof(*dlstatus));
+ download_status_cert_init(dlstatus);
+ dsmap_set(cl->dl_status_map, digest, dlstatus);
+ rv = 1;
+ }
+
+ return rv;
+}
+
+/** Helper: Return the cert_list_t for an authority whose authority ID is
+ * <b>id_digest</b>, allocating a new list if necessary. */
+static cert_list_t *
+get_cert_list(const char *id_digest)
+{
+ cert_list_t *cl;
+ if (!trusted_dir_certs)
+ trusted_dir_certs = digestmap_new();
+ cl = digestmap_get(trusted_dir_certs, id_digest);
+ if (!cl) {
+ cl = tor_malloc_zero(sizeof(cert_list_t));
+ download_status_cert_init(&cl->dl_status_by_id);
+ cl->certs = smartlist_new();
+ cl->dl_status_map = dsmap_new();
+ digestmap_set(trusted_dir_certs, id_digest, cl);
+ }
+ return cl;
+}
+
+/** Return a list of authority ID digests with potentially enumerable lists
+ * of download_status_t objects; used by controller GETINFO queries.
+ */
+
+MOCK_IMPL(smartlist_t *,
+list_authority_ids_with_downloads, (void))
+{
+ smartlist_t *ids = smartlist_new();
+ digestmap_iter_t *i;
+ const char *digest;
+ char *tmp;
+ void *cl;
+
+ if (trusted_dir_certs) {
+ for (i = digestmap_iter_init(trusted_dir_certs);
+ !(digestmap_iter_done(i));
+ i = digestmap_iter_next(trusted_dir_certs, i)) {
+ /*
+ * We always have at least dl_status_by_id to query, so no need to
+ * probe deeper than the existence of a cert_list_t.
+ */
+ digestmap_iter_get(i, &digest, &cl);
+ tmp = tor_malloc(DIGEST_LEN);
+ memcpy(tmp, digest, DIGEST_LEN);
+ smartlist_add(ids, tmp);
+ }
+ }
+ /* else definitely no downloads going since nothing even has a cert list */
+
+ return ids;
+}
+
+/** Given an authority ID digest, return a pointer to the default download
+ * status, or NULL if there is no such entry in trusted_dir_certs */
+
+MOCK_IMPL(download_status_t *,
+id_only_download_status_for_authority_id, (const char *digest))
+{
+ download_status_t *dl = NULL;
+ cert_list_t *cl;
+
+ if (trusted_dir_certs) {
+ cl = digestmap_get(trusted_dir_certs, digest);
+ if (cl) {
+ dl = &(cl->dl_status_by_id);
+ }
+ }
+
+ return dl;
+}
+
+/** Given an authority ID digest, return a smartlist of signing key digests
+ * for which download_status_t is potentially queryable, or NULL if no such
+ * authority ID digest is known. */
+
+MOCK_IMPL(smartlist_t *,
+list_sk_digests_for_authority_id, (const char *digest))
+{
+ smartlist_t *sks = NULL;
+ cert_list_t *cl;
+ dsmap_iter_t *i;
+ const char *sk_digest;
+ char *tmp;
+ download_status_t *dl;
+
+ if (trusted_dir_certs) {
+ cl = digestmap_get(trusted_dir_certs, digest);
+ if (cl) {
+ sks = smartlist_new();
+ if (cl->dl_status_map) {
+ for (i = dsmap_iter_init(cl->dl_status_map);
+ !(dsmap_iter_done(i));
+ i = dsmap_iter_next(cl->dl_status_map, i)) {
+ /* Pull the digest out and add it to the list */
+ dsmap_iter_get(i, &sk_digest, &dl);
+ tmp = tor_malloc(DIGEST_LEN);
+ memcpy(tmp, sk_digest, DIGEST_LEN);
+ smartlist_add(sks, tmp);
+ }
+ }
+ }
+ }
+
+ return sks;
+}
+
+/** Given an authority ID digest and a signing key digest, return the
+ * download_status_t or NULL if none exists. */
+
+MOCK_IMPL(download_status_t *,
+download_status_for_authority_id_and_sk,(const char *id_digest,
+ const char *sk_digest))
+{
+ download_status_t *dl = NULL;
+ cert_list_t *cl = NULL;
+
+ if (trusted_dir_certs) {
+ cl = digestmap_get(trusted_dir_certs, id_digest);
+ if (cl && cl->dl_status_map) {
+ dl = dsmap_get(cl->dl_status_map, sk_digest);
+ }
+ }
+
+ return dl;
+}
+
+#define cert_list_free(val) \
+ FREE_AND_NULL(cert_list_t, cert_list_free_, (val))
+
+/** Release all space held by a cert_list_t */
+static void
+cert_list_free_(cert_list_t *cl)
+{
+ if (!cl)
+ return;
+
+ SMARTLIST_FOREACH(cl->certs, authority_cert_t *, cert,
+ authority_cert_free(cert));
+ smartlist_free(cl->certs);
+ dsmap_free(cl->dl_status_map, tor_free_);
+ tor_free(cl);
+}
+
+/** Wrapper for cert_list_free so we can pass it to digestmap_free */
+static void
+cert_list_free_void(void *cl)
+{
+ cert_list_free_(cl);
+}
+
+/** Reload the cached v3 key certificates from the cached-certs file in
+ * the cache directory. Return 0 on success, -1 on failure. */
+int
+trusted_dirs_reload_certs(void)
+{
+ char *filename;
+ char *contents;
+ int r;
+
+ filename = get_cachedir_fname("cached-certs");
+ contents = read_file_to_str(filename, RFTS_IGNORE_MISSING, NULL);
+ tor_free(filename);
+ if (!contents)
+ return 0;
+ r = trusted_dirs_load_certs_from_string(
+ contents,
+ TRUSTED_DIRS_CERTS_SRC_FROM_STORE, 1, NULL);
+ tor_free(contents);
+ return r;
+}
+
+/** Helper: return true iff we already have loaded the exact cert
+ * <b>cert</b>. */
+static inline int
+already_have_cert(authority_cert_t *cert)
+{
+ cert_list_t *cl = get_cert_list(cert->cache_info.identity_digest);
+
+ SMARTLIST_FOREACH(cl->certs, authority_cert_t *, c,
+ {
+ if (tor_memeq(c->cache_info.signed_descriptor_digest,
+ cert->cache_info.signed_descriptor_digest,
+ DIGEST_LEN))
+ return 1;
+ });
+ return 0;
+}
+
+/** Load a bunch of new key certificates from the string <b>contents</b>. If
+ * <b>source</b> is TRUSTED_DIRS_CERTS_SRC_FROM_STORE, the certificates are
+ * from the cache, and we don't need to flush them to disk. If we are a
+ * dirauth loading our own cert, source is TRUSTED_DIRS_CERTS_SRC_SELF.
+ * Otherwise, source is download type: TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST
+ * or TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_SK_DIGEST. If <b>flush</b> is true, we
+ * need to flush any changed certificates to disk now. Return 0 on success,
+ * -1 if any certs fail to parse.
+ *
+ * If source_dir is non-NULL, it's the identity digest for a directory that
+ * we've just successfully retrieved certificates from, so try it first to
+ * fetch any missing certificates.
+ */
+int
+trusted_dirs_load_certs_from_string(const char *contents, int source,
+ int flush, const char *source_dir)
+{
+ dir_server_t *ds;
+ const char *s, *eos;
+ int failure_code = 0;
+ int from_store = (source == TRUSTED_DIRS_CERTS_SRC_FROM_STORE);
+ int added_trusted_cert = 0;
+
+ for (s = contents; *s; s = eos) {
+ authority_cert_t *cert = authority_cert_parse_from_string(s, &eos);
+ cert_list_t *cl;
+ if (!cert) {
+ failure_code = -1;
+ break;
+ }
+ ds = trusteddirserver_get_by_v3_auth_digest(
+ cert->cache_info.identity_digest);
+ log_debug(LD_DIR, "Parsed certificate for %s",
+ ds ? ds->nickname : "unknown authority");
+
+ if (already_have_cert(cert)) {
+ /* we already have this one. continue. */
+ log_info(LD_DIR, "Skipping %s certificate for %s that we "
+ "already have.",
+ from_store ? "cached" : "downloaded",
+ ds ? ds->nickname : "an old or new authority");
+
+ /*
+ * A duplicate on download should be treated as a failure, so we call
+ * authority_cert_dl_failed() to record the failure and back off before
+ * trying again. Since we've implemented the fp-sk mechanism
+ * to download certs by signing key, this should be much rarer than it
+ * was, and is perhaps cause for concern.
+ */
+ if (!from_store) {
+ if (authdir_mode(get_options())) {
+ log_warn(LD_DIR,
+ "Got a certificate for %s, but we already have it. "
+ "Maybe they haven't updated it. Waiting for a while.",
+ ds ? ds->nickname : "an old or new authority");
+ } else {
+ log_info(LD_DIR,
+ "Got a certificate for %s, but we already have it. "
+ "Maybe they haven't updated it. Waiting for a while.",
+ ds ? ds->nickname : "an old or new authority");
+ }
+
+ /*
+ * This is where we care about the source; authority_cert_dl_failed()
+ * needs to know whether the download was by fp or (fp,sk) pair to
+ * twiddle the right bit in the download map.
+ */
+ if (source == TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST) {
+ authority_cert_dl_failed(cert->cache_info.identity_digest,
+ NULL, 404);
+ } else if (source == TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_SK_DIGEST) {
+ authority_cert_dl_failed(cert->cache_info.identity_digest,
+ cert->signing_key_digest, 404);
+ }
+ }
+
+ authority_cert_free(cert);
+ continue;
+ }
+
+ if (ds) {
+ added_trusted_cert = 1;
+ log_info(LD_DIR, "Adding %s certificate for directory authority %s with "
+ "signing key %s", from_store ? "cached" : "downloaded",
+ ds->nickname, hex_str(cert->signing_key_digest,DIGEST_LEN));
+ } else {
+ int adding = we_want_to_fetch_unknown_auth_certs(get_options());
+ log_info(LD_DIR, "%s %s certificate for unrecognized directory "
+ "authority with signing key %s",
+ adding ? "Adding" : "Not adding",
+ from_store ? "cached" : "downloaded",
+ hex_str(cert->signing_key_digest,DIGEST_LEN));
+ if (!adding) {
+ authority_cert_free(cert);
+ continue;
+ }
+ }
+
+ cl = get_cert_list(cert->cache_info.identity_digest);
+ smartlist_add(cl->certs, cert);
+ if (ds && cert->cache_info.published_on > ds->addr_current_at) {
+ /* Check to see whether we should update our view of the authority's
+ * address. */
+ if (cert->addr && cert->dir_port &&
+ (ds->addr != cert->addr ||
+ ds->dir_port != cert->dir_port)) {
+ char *a = tor_dup_ip(cert->addr);
+ log_notice(LD_DIR, "Updating address for directory authority %s "
+ "from %s:%d to %s:%d based on certificate.",
+ ds->nickname, ds->address, (int)ds->dir_port,
+ a, cert->dir_port);
+ tor_free(a);
+ ds->addr = cert->addr;
+ ds->dir_port = cert->dir_port;
+ }
+ ds->addr_current_at = cert->cache_info.published_on;
+ }
+
+ if (!from_store)
+ trusted_dir_servers_certs_changed = 1;
+ }
+
+ if (flush)
+ trusted_dirs_flush_certs_to_disk();
+
+ /* call this even if failure_code is <0, since some certs might have
+ * succeeded, but only pass source_dir if there were no failures,
+ * and at least one more authority certificate was added to the store.
+ * This avoids retrying a directory that's serving bad or entirely duplicate
+ * certificates. */
+ if (failure_code == 0 && added_trusted_cert) {
+ networkstatus_note_certs_arrived(source_dir);
+ } else {
+ networkstatus_note_certs_arrived(NULL);
+ }
+
+ return failure_code;
+}
+
+/** Save all v3 key certificates to the cached-certs file. */
+void
+trusted_dirs_flush_certs_to_disk(void)
+{
+ char *filename;
+ smartlist_t *chunks;
+
+ if (!trusted_dir_servers_certs_changed || !trusted_dir_certs)
+ return;
+
+ chunks = smartlist_new();
+ DIGESTMAP_FOREACH(trusted_dir_certs, key, cert_list_t *, cl) {
+ SMARTLIST_FOREACH(cl->certs, authority_cert_t *, cert,
+ {
+ sized_chunk_t *c = tor_malloc(sizeof(sized_chunk_t));
+ c->bytes = cert->cache_info.signed_descriptor_body;
+ c->len = cert->cache_info.signed_descriptor_len;
+ smartlist_add(chunks, c);
+ });
+ } DIGESTMAP_FOREACH_END;
+
+ filename = get_cachedir_fname("cached-certs");
+ if (write_chunks_to_file(filename, chunks, 0, 0)) {
+ log_warn(LD_FS, "Error writing certificates to disk.");
+ }
+ tor_free(filename);
+ SMARTLIST_FOREACH(chunks, sized_chunk_t *, c, tor_free(c));
+ smartlist_free(chunks);
+
+ trusted_dir_servers_certs_changed = 0;
+}
+
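+/** Sorting helper: compare two authority_cert_t by their published_on times,
+ * so that the earliest-published certificate sorts first. */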
+static int
+compare_certs_by_pubdates(const void **_a, const void **_b)
+{
+ const authority_cert_t *cert1 = *_a, *cert2=*_b;
+
+ if (cert1->cache_info.published_on < cert2->cache_info.published_on)
+ return -1;
+ else if (cert1->cache_info.published_on > cert2->cache_info.published_on)
+ return 1;
+ else
+ return 0;
+}
+
+/** Remove any v3 authority certificate that has been expired for more than
+ * 48 hours, or that has been superseded by a newer certificate for more than
+ * 48 hours. (If the most recent cert was published more than 48 hours ago,
+ * then we aren't going to get any consensuses signed with the older
+ * keys.) */
+static void
+trusted_dirs_remove_old_certs(void)
+{
+ time_t now = time(NULL);
+#define DEAD_CERT_LIFETIME (2*24*60*60)
+#define SUPERSEDED_CERT_LIFETIME (2*24*60*60)
+ if (!trusted_dir_certs)
+ return;
+
+ DIGESTMAP_FOREACH(trusted_dir_certs, key, cert_list_t *, cl) {
+ /* Sort the list from first-published to last-published */
+ smartlist_sort(cl->certs, compare_certs_by_pubdates);
+
+ SMARTLIST_FOREACH_BEGIN(cl->certs, authority_cert_t *, cert) {
+ if (cert_sl_idx == smartlist_len(cl->certs) - 1) {
+ /* This is the most recently published cert. Keep it. */
+ continue;
+ }
+ authority_cert_t *next_cert = smartlist_get(cl->certs, cert_sl_idx+1);
+ const time_t next_cert_published = next_cert->cache_info.published_on;
+ if (next_cert_published > now) {
+ /* All later certs are published in the future. Keep everything
+ * we didn't discard. */
+ break;
+ }
+ int should_remove = 0;
+ if (cert->expires + DEAD_CERT_LIFETIME < now) {
+ /* Certificate has been expired for at least DEAD_CERT_LIFETIME.
+ * Remove it. */
+ should_remove = 1;
+ } else if (next_cert_published + SUPERSEDED_CERT_LIFETIME < now) {
+ /* Certificate has been superseded for at least SUPERSEDED_CERT_LIFETIME.
+ * Remove it.
+ */
+ should_remove = 1;
+ }
+ if (should_remove) {
+ SMARTLIST_DEL_CURRENT_KEEPORDER(cl->certs, cert);
+ authority_cert_free(cert);
+ trusted_dir_servers_certs_changed = 1;
+ }
+ } SMARTLIST_FOREACH_END(cert);
+
+ } DIGESTMAP_FOREACH_END;
+#undef DEAD_CERT_LIFETIME
+#undef SUPERSEDED_CERT_LIFETIME
+
+ trusted_dirs_flush_certs_to_disk();
+}
+
+/** Return the newest v3 authority certificate whose v3 authority identity key
+ * has digest <b>id_digest</b>. Return NULL if no such authority is known,
+ * or it has no certificate. */
+authority_cert_t *
+authority_cert_get_newest_by_id(const char *id_digest)
+{
+ cert_list_t *cl;
+ authority_cert_t *best = NULL;
+ if (!trusted_dir_certs ||
+ !(cl = digestmap_get(trusted_dir_certs, id_digest)))
+ return NULL;
+
+ SMARTLIST_FOREACH(cl->certs, authority_cert_t *, cert,
+ {
+ if (!best || cert->cache_info.published_on > best->cache_info.published_on)
+ best = cert;
+ });
+ return best;
+}
+
+/** Return the newest v3 authority certificate whose directory signing key has
+ * digest <b>sk_digest</b>. Return NULL if no such certificate is known.
+ */
+authority_cert_t *
+authority_cert_get_by_sk_digest(const char *sk_digest)
+{
+ authority_cert_t *c;
+ if (!trusted_dir_certs)
+ return NULL;
+
+ if ((c = get_my_v3_authority_cert()) &&
+ tor_memeq(c->signing_key_digest, sk_digest, DIGEST_LEN))
+ return c;
+ if ((c = get_my_v3_legacy_cert()) &&
+ tor_memeq(c->signing_key_digest, sk_digest, DIGEST_LEN))
+ return c;
+
+ DIGESTMAP_FOREACH(trusted_dir_certs, key, cert_list_t *, cl) {
+ SMARTLIST_FOREACH(cl->certs, authority_cert_t *, cert,
+ {
+ if (tor_memeq(cert->signing_key_digest, sk_digest, DIGEST_LEN))
+ return cert;
+ });
+ } DIGESTMAP_FOREACH_END;
+ return NULL;
+}
+
+/** Return the v3 authority certificate with signing key matching
+ * <b>sk_digest</b>, for the authority with identity digest <b>id_digest</b>.
+ * Return NULL if no such authority is known. */
+authority_cert_t *
+authority_cert_get_by_digests(const char *id_digest,
+ const char *sk_digest)
+{
+ cert_list_t *cl;
+ if (!trusted_dir_certs ||
+ !(cl = digestmap_get(trusted_dir_certs, id_digest)))
+ return NULL;
+ SMARTLIST_FOREACH(cl->certs, authority_cert_t *, cert,
+ if (tor_memeq(cert->signing_key_digest, sk_digest, DIGEST_LEN))
+ return cert; );
+
+ return NULL;
+}
+
+/** Add every known authority_cert_t to <b>certs_out</b>. */
+void
+authority_cert_get_all(smartlist_t *certs_out)
+{
+ tor_assert(certs_out);
+ if (!trusted_dir_certs)
+ return;
+
+ DIGESTMAP_FOREACH(trusted_dir_certs, key, cert_list_t *, cl) {
+ SMARTLIST_FOREACH(cl->certs, authority_cert_t *, c,
+ smartlist_add(certs_out, c));
+ } DIGESTMAP_FOREACH_END;
+}
+
+/** Called when an attempt to download a certificate for the authority with
+ * ID <b>id_digest</b> (and, if not NULL, signed with key
+ * <b>signing_key_digest</b>) fails with HTTP response code <b>status</b>:
+ * remember the failure, so we don't try again immediately. */
+void
+authority_cert_dl_failed(const char *id_digest,
+ const char *signing_key_digest, int status)
+{
+ cert_list_t *cl;
+ download_status_t *dlstatus = NULL;
+ char id_digest_str[2*DIGEST_LEN+1];
+ char sk_digest_str[2*DIGEST_LEN+1];
+
+ if (!trusted_dir_certs ||
+ !(cl = digestmap_get(trusted_dir_certs, id_digest)))
+ return;
+
+ /*
+ * Are we noting a failed download of the latest cert for the id digest,
+ * or of a download by (id, signing key) digest pair?
+ */
+ if (!signing_key_digest) {
+ /* Just by id digest */
+ download_status_failed(&cl->dl_status_by_id, status);
+ } else {
+ /* Reset by (id, signing key) digest pair
+ *
+ * Look for a download_status_t in the map with this digest
+ */
+ dlstatus = dsmap_get(cl->dl_status_map, signing_key_digest);
+ /* Got one? */
+ if (dlstatus) {
+ download_status_failed(dlstatus, status);
+ } else {
+ /*
+ * Do this rather than hex_str(), since hex_str clobbers
+ * old results and we call twice in the param list.
+ */
+ base16_encode(id_digest_str, sizeof(id_digest_str),
+ id_digest, DIGEST_LEN);
+ base16_encode(sk_digest_str, sizeof(sk_digest_str),
+ signing_key_digest, DIGEST_LEN);
+ log_warn(LD_BUG,
+ "Got failure for cert fetch with (fp,sk) = (%s,%s), with "
+ "status %d, but knew nothing about the download.",
+ id_digest_str, sk_digest_str, status);
+ }
+ }
+}
+
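+/** Hex-encoded digests of authority signing keys that should never be
+ * trusted, because of the OpenSSL Heartbleed vulnerability (CVE-2014-0160);
+ * see authority_cert_is_blacklisted() below. */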
+static const char *BAD_SIGNING_KEYS[] = {
+ "09CD84F751FD6E955E0F8ADB497D5401470D697E", // Expires 2015-01-11 16:26:31
+ "0E7E9C07F0969D0468AD741E172A6109DC289F3C", // Expires 2014-08-12 10:18:26
+ "57B85409891D3FB32137F642FDEDF8B7F8CDFDCD", // Expires 2015-02-11 17:19:09
+ "87326329007AF781F587AF5B594E540B2B6C7630", // Expires 2014-07-17 11:10:09
+ "98CC82342DE8D298CF99D3F1A396475901E0D38E", // Expires 2014-11-10 13:18:56
+ "9904B52336713A5ADCB13E4FB14DC919E0D45571", // Expires 2014-04-20 20:01:01
+ "9DCD8E3F1DD1597E2AD476BBA28A1A89F3095227", // Expires 2015-01-16 03:52:30
+ "A61682F34B9BB9694AC98491FE1ABBFE61923941", // Expires 2014-06-11 09:25:09
+ "B59F6E99C575113650C99F1C425BA7B20A8C071D", // Expires 2014-07-31 13:22:10
+ "D27178388FA75B96D37FA36E0B015227DDDBDA51", // Expires 2014-08-04 04:01:57
+ NULL,
+};
+
+/** Return true iff <b>cert</b> authenticates some authority signing key
+ * which, because of the old OpenSSL Heartbleed vulnerability, should
+ * never be trusted. */
+int
+authority_cert_is_blacklisted(const authority_cert_t *cert)
+{
+ char hex_digest[HEX_DIGEST_LEN+1];
+ int i;
+ base16_encode(hex_digest, sizeof(hex_digest),
+ cert->signing_key_digest, sizeof(cert->signing_key_digest));
+
+ for (i = 0; BAD_SIGNING_KEYS[i]; ++i) {
+ if (!strcasecmp(hex_digest, BAD_SIGNING_KEYS[i])) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/** Return true iff we have seen enough failures trying to download the
+ * certificate with ID digest <b>id_digest</b> that we're willing to start
+ * bugging the user about it. */
+int
+authority_cert_dl_looks_uncertain(const char *id_digest)
+{
+#define N_AUTH_CERT_DL_FAILURES_TO_BUG_USER 2
+ cert_list_t *cl;
+ int n_failures;
+ if (!trusted_dir_certs ||
+ !(cl = digestmap_get(trusted_dir_certs, id_digest)))
+ return 0;
+
+ n_failures = download_status_get_n_failures(&cl->dl_status_by_id);
+ return n_failures >= N_AUTH_CERT_DL_FAILURES_TO_BUG_USER;
+}
+
+/* Fetch the authority certificates specified in resource.
+ * If we are a bridge client, and node is a configured bridge, fetch from node
+ * using dir_hint as the fingerprint. Otherwise, if rs is not NULL, fetch from
+ * rs. Otherwise, fetch from a random directory mirror. */
+static void
+authority_certs_fetch_resource_impl(const char *resource,
+ const char *dir_hint,
+ const node_t *node,
+ const routerstatus_t *rs)
+{
+ const or_options_t *options = get_options();
+ int get_via_tor = purpose_needs_anonymity(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
+ resource);
+
+ /* Make sure bridge clients never connect to anything but a bridge */
+ if (options->UseBridges) {
+ if (node && !node_is_a_configured_bridge(node)) {
+ /* If we're using bridges, and node is not a bridge, use a 3-hop path. */
+ get_via_tor = 1;
+ } else if (!node) {
+ /* If we're using bridges, and there's no node, use a 3-hop path. */
+ get_via_tor = 1;
+ }
+ }
+
+ const dir_indirection_t indirection = get_via_tor ? DIRIND_ANONYMOUS
+ : DIRIND_ONEHOP;
+
+ directory_request_t *req = NULL;
+ /* If we've just downloaded a consensus from a bridge, re-use that
+ * bridge */
+ if (options->UseBridges && node && node->ri && !get_via_tor) {
+ /* clients always make OR connections to bridges */
+ tor_addr_port_t or_ap;
+ /* we are willing to use a non-preferred address if we need to */
+ fascist_firewall_choose_address_node(node, FIREWALL_OR_CONNECTION, 0,
+ &or_ap);
+
+ req = directory_request_new(DIR_PURPOSE_FETCH_CERTIFICATE);
+ directory_request_set_or_addr_port(req, &or_ap);
+ if (dir_hint)
+ directory_request_set_directory_id_digest(req, dir_hint);
+ } else if (rs) {
+ /* And if we've just downloaded a consensus from a directory, re-use that
+ * directory */
+ req = directory_request_new(DIR_PURPOSE_FETCH_CERTIFICATE);
+ directory_request_set_routerstatus(req, rs);
+ }
+
+ if (req) {
+ /* We've set up a request object -- fill in the other request fields, and
+ * send the request. */
+ directory_request_set_indirection(req, indirection);
+ directory_request_set_resource(req, resource);
+ directory_initiate_request(req);
+ directory_request_free(req);
+ return;
+ }
+
+ /* Otherwise, we want certs from a random fallback or directory
+ * mirror, because they will almost always succeed. */
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
+ resource, PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_ANY_DIRSERVER);
+}
+
+/** Try to download any v3 authority certificates that we may be missing. If
+ * <b>status</b> is provided, try to get all the ones that were used to sign
+ * <b>status</b>. Additionally, try to have a non-expired certificate for
+ * every V3 authority in trusted_dir_servers. Don't fetch certificates we
+ * already have.
+ *
+ * If dir_hint is non-NULL, it's the identity digest for a directory that
+ * we've just successfully retrieved a consensus or certificates from, so try
+ * it first to fetch any missing certificates.
+ **/
+void
+authority_certs_fetch_missing(networkstatus_t *status, time_t now,
+ const char *dir_hint)
+{
+ /*
+ * The pending_id digestmap tracks pending certificate downloads by
+ * identity digest; the pending_cert digestmap tracks pending downloads
+ * by (identity digest, signing key digest) pairs.
+ */
+ digestmap_t *pending_id;
+ fp_pair_map_t *pending_cert;
+ /*
+ * The missing_id_digests smartlist will hold a list of id digests
+ * we want to fetch the newest cert for; the missing_cert_digests
+ * smartlist will hold a list of fp_pair_t with an identity and
+ * signing key digest.
+ */
+ smartlist_t *missing_cert_digests, *missing_id_digests;
+ char *resource = NULL;
+ cert_list_t *cl;
+ const or_options_t *options = get_options();
+ const int keep_unknown = we_want_to_fetch_unknown_auth_certs(options);
+ fp_pair_t *fp_tmp = NULL;
+ char id_digest_str[2*DIGEST_LEN+1];
+ char sk_digest_str[2*DIGEST_LEN+1];
+
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+
+ pending_cert = fp_pair_map_new();
+ pending_id = digestmap_new();
+ missing_cert_digests = smartlist_new();
+ missing_id_digests = smartlist_new();
+
+ /*
+ * First, we get the lists of already pending downloads so we don't
+ * duplicate effort.
+ */
+ list_pending_downloads(pending_id, NULL,
+ DIR_PURPOSE_FETCH_CERTIFICATE, "fp/");
+ list_pending_fpsk_downloads(pending_cert);
+
+ /*
+ * Now, we download any trusted authority certs we don't have by
+ * identity digest only. This gets the latest cert for that authority.
+ */
+ SMARTLIST_FOREACH_BEGIN(trusted_dir_servers, dir_server_t *, ds) {
+ int found = 0;
+ if (!(ds->type & V3_DIRINFO))
+ continue;
+ if (smartlist_contains_digest(missing_id_digests,
+ ds->v3_identity_digest))
+ continue;
+ cl = get_cert_list(ds->v3_identity_digest);
+ SMARTLIST_FOREACH_BEGIN(cl->certs, authority_cert_t *, cert) {
+ if (now < cert->expires) {
+ /* It's not expired, and we weren't looking for something to
+ * verify a consensus with. Call it done. */
+ download_status_reset(&(cl->dl_status_by_id));
+ /* No sense trying to download it specifically by signing key hash */
+ download_status_reset_by_sk_in_cl(cl, cert->signing_key_digest);
+ found = 1;
+ break;
+ }
+ } SMARTLIST_FOREACH_END(cert);
+ if (!found &&
+ download_status_is_ready(&(cl->dl_status_by_id), now) &&
+ !digestmap_get(pending_id, ds->v3_identity_digest)) {
+ log_info(LD_DIR,
+ "No current certificate known for authority %s "
+ "(ID digest %s); launching request.",
+ ds->nickname, hex_str(ds->v3_identity_digest, DIGEST_LEN));
+ smartlist_add(missing_id_digests, ds->v3_identity_digest);
+ }
+ } SMARTLIST_FOREACH_END(ds);
+
+ /*
+ * Next, if we have a consensus, scan through it and look for anything
+ * signed with a key from a cert we don't have. Those get downloaded
+ * by (fp,sk) pair, but if we don't know any certs at all for the fp
+ * (identity digest), and it's one of the trusted dir server certs
+ * we started off above or a pending download in pending_id, don't
+ * try to get it yet. Most likely, the one we'll get for that will
+ * have the right signing key too, and we'd just be downloading
+ * redundantly.
+ */
+ if (status) {
+ SMARTLIST_FOREACH_BEGIN(status->voters, networkstatus_voter_info_t *,
+ voter) {
+ if (!smartlist_len(voter->sigs))
+ continue; /* This authority never signed this consensus, so don't
+ * go looking for a cert with key digest 0000000000. */
+ if (!keep_unknown &&
+ !trusteddirserver_get_by_v3_auth_digest(voter->identity_digest))
+ continue; /* We don't want unknown certs, and we don't know this
+ * authority.*/
+
+ /*
+ * If we don't know *any* cert for this authority, and a download by ID
+ * is pending or we added it to missing_id_digests above, skip this
+ * one for now to avoid duplicate downloads.
+ */
+ cl = get_cert_list(voter->identity_digest);
+ if (smartlist_len(cl->certs) == 0) {
+ /* We have no certs at all for this one */
+
+ /* Do we have a download of one pending? */
+ if (digestmap_get(pending_id, voter->identity_digest))
+ continue;
+
+ /*
+ * Are we about to launch a download of one due to the trusted
+ * dir server check above?
+ */
+ if (smartlist_contains_digest(missing_id_digests,
+ voter->identity_digest))
+ continue;
+ }
+
+ SMARTLIST_FOREACH_BEGIN(voter->sigs, document_signature_t *, sig) {
+ authority_cert_t *cert =
+ authority_cert_get_by_digests(voter->identity_digest,
+ sig->signing_key_digest);
+ if (cert) {
+ if (now < cert->expires)
+ download_status_reset_by_sk_in_cl(cl, sig->signing_key_digest);
+ continue;
+ }
+ if (download_status_is_ready_by_sk_in_cl(
+ cl, sig->signing_key_digest, now) &&
+ !fp_pair_map_get_by_digests(pending_cert,
+ voter->identity_digest,
+ sig->signing_key_digest)) {
+ /*
+ * Do this rather than hex_str(), since hex_str clobbers
+ * old results and we call twice in the param list.
+ */
+ base16_encode(id_digest_str, sizeof(id_digest_str),
+ voter->identity_digest, DIGEST_LEN);
+ base16_encode(sk_digest_str, sizeof(sk_digest_str),
+ sig->signing_key_digest, DIGEST_LEN);
+
+ if (voter->nickname) {
+ log_info(LD_DIR,
+ "We're missing a certificate from authority %s "
+ "(ID digest %s) with signing key %s: "
+ "launching request.",
+ voter->nickname, id_digest_str, sk_digest_str);
+ } else {
+ log_info(LD_DIR,
+ "We're missing a certificate from authority ID digest "
+ "%s with signing key %s: launching request.",
+ id_digest_str, sk_digest_str);
+ }
+
+ /* Allocate a new fp_pair_t to append */
+ fp_tmp = tor_malloc(sizeof(*fp_tmp));
+ memcpy(fp_tmp->first, voter->identity_digest, sizeof(fp_tmp->first));
+ memcpy(fp_tmp->second, sig->signing_key_digest,
+ sizeof(fp_tmp->second));
+ smartlist_add(missing_cert_digests, fp_tmp);
+ }
+ } SMARTLIST_FOREACH_END(sig);
+ } SMARTLIST_FOREACH_END(voter);
+ }
+
+ /* Bridge clients look up the node for the dir_hint */
+ const node_t *node = NULL;
+ /* All clients, including bridge clients, look up the routerstatus for the
+ * dir_hint */
+ const routerstatus_t *rs = NULL;
+
+ /* If we still need certificates, try the directory that just successfully
+ * served us a consensus or certificates.
+ * As soon as the directory fails to provide additional certificates, we try
+ * another, randomly selected directory. This avoids continual retries.
+ * (We only ever have one outstanding request per certificate.)
+ */
+ if (dir_hint) {
+ if (options->UseBridges) {
+ /* Bridge clients try the nodelist. If the dir_hint is from an authority,
+ * or something else fetched over tor, we won't find the node here, but
+ * we will find the rs. */
+ node = node_get_by_id(dir_hint);
+ }
+
+ /* All clients try the consensus routerstatus, then the fallback
+ * routerstatus */
+ rs = router_get_consensus_status_by_id(dir_hint);
+ if (!rs) {
+ /* This will also find authorities */
+ const dir_server_t *ds = router_get_fallback_dirserver_by_digest(
+ dir_hint);
+ if (ds) {
+ rs = &ds->fake_status;
+ }
+ }
+
+ if (!node && !rs) {
+ log_warn(LD_BUG, "Directory %s delivered a consensus, but %s"
+ "no routerstatus could be found for it.",
+ hex_str(dir_hint, DIGEST_LEN),
+ options->UseBridges ? "no node and " : "");
+ }
+ }
+
+ /* Do downloads by identity digest */
+ if (smartlist_len(missing_id_digests) > 0) {
+ int need_plus = 0;
+ smartlist_t *fps = smartlist_new();
+
+ smartlist_add_strdup(fps, "fp/");
+
+ SMARTLIST_FOREACH_BEGIN(missing_id_digests, const char *, d) {
+ char *fp = NULL;
+
+ if (digestmap_get(pending_id, d))
+ continue;
+
+ base16_encode(id_digest_str, sizeof(id_digest_str),
+ d, DIGEST_LEN);
+
+ if (need_plus) {
+ tor_asprintf(&fp, "+%s", id_digest_str);
+ } else {
+ /* No need for tor_asprintf() in this case; first one gets no '+' */
+ fp = tor_strdup(id_digest_str);
+ need_plus = 1;
+ }
+
+ smartlist_add(fps, fp);
+ } SMARTLIST_FOREACH_END(d);
+
+ if (smartlist_len(fps) > 1) {
+ resource = smartlist_join_strings(fps, "", 0, NULL);
+ /* node and rs are directories that just gave us a consensus or
+ * certificates */
+ authority_certs_fetch_resource_impl(resource, dir_hint, node, rs);
+ tor_free(resource);
+ }
+ /* else we didn't add any: they were all pending */
+
+ SMARTLIST_FOREACH(fps, char *, cp, tor_free(cp));
+ smartlist_free(fps);
+ }
+
+ /* Do downloads by identity digest/signing key pair */
+ if (smartlist_len(missing_cert_digests) > 0) {
+ int need_plus = 0;
+ smartlist_t *fp_pairs = smartlist_new();
+
+ smartlist_add_strdup(fp_pairs, "fp-sk/");
+
+ SMARTLIST_FOREACH_BEGIN(missing_cert_digests, const fp_pair_t *, d) {
+ char *fp_pair = NULL;
+
+ if (fp_pair_map_get(pending_cert, d))
+ continue;
+
+ /* Construct string encodings of the digests */
+ base16_encode(id_digest_str, sizeof(id_digest_str),
+ d->first, DIGEST_LEN);
+ base16_encode(sk_digest_str, sizeof(sk_digest_str),
+ d->second, DIGEST_LEN);
+
+ /* Now tor_asprintf() */
+ if (need_plus) {
+ tor_asprintf(&fp_pair, "+%s-%s", id_digest_str, sk_digest_str);
+ } else {
+ /* First one in the list doesn't get a '+' */
+ tor_asprintf(&fp_pair, "%s-%s", id_digest_str, sk_digest_str);
+ need_plus = 1;
+ }
+
+ /* Add it to the list of pairs to request */
+ smartlist_add(fp_pairs, fp_pair);
+ } SMARTLIST_FOREACH_END(d);
+
+ if (smartlist_len(fp_pairs) > 1) {
+ resource = smartlist_join_strings(fp_pairs, "", 0, NULL);
+ /* node and rs are directories that just gave us a consensus or
+ * certificates */
+ authority_certs_fetch_resource_impl(resource, dir_hint, node, rs);
+ tor_free(resource);
+ }
+ /* else they were all pending */
+
+ SMARTLIST_FOREACH(fp_pairs, char *, p, tor_free(p));
+ smartlist_free(fp_pairs);
+ }
+
+ smartlist_free(missing_id_digests);
+ SMARTLIST_FOREACH(missing_cert_digests, fp_pair_t *, p, tor_free(p));
+ smartlist_free(missing_cert_digests);
+ digestmap_free(pending_id, NULL);
+ fp_pair_map_free(pending_cert, NULL);
+}
+
+/* Router descriptor storage.
+ *
+ * Routerdescs are stored in a big file, named "cached-descriptors". As new
+ * routerdescs arrive, we append them to a journal file named
+ * "cached-descriptors.new".
+ *
+ * From time to time, we replace "cached-descriptors" with a new file
+ * containing only the live, non-superseded descriptors, and clear
+ * cached-descriptors.new.
+ *
+ * On startup, we read both files.
+ */
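+
+/* In outline: signed_desc_append_to_journal() appends each newly arrived
+ * descriptor to the ".new" journal; router_rebuild_store() folds the current
+ * routerlist back into the main store file and truncates the journal when
+ * the journal grows too large; and router_reload_router_list_impl() mmaps
+ * the store and reads the journal back in at startup. */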
+
+/** Helper: return 1 iff the journal of <b>store</b> (or its count of
+ * dropped bytes) has grown large enough that we want to rebuild the
+ * store. */
+static int
+router_should_rebuild_store(desc_store_t *store)
+{
+ if (store->store_len > (1<<16))
+ return (store->journal_len > store->store_len / 2 ||
+ store->bytes_dropped > store->store_len / 2);
+ else
+ return store->journal_len > (1<<15);
+}
+
+/** Return the desc_store_t in <b>rl</b> that should be used to store
+ * <b>sd</b>. */
+static inline desc_store_t *
+desc_get_store(routerlist_t *rl, const signed_descriptor_t *sd)
+{
+ if (sd->is_extrainfo)
+ return &rl->extrainfo_store;
+ else
+ return &rl->desc_store;
+}
+
+/** Append the signed_descriptor_t in <b>desc</b> to the journal of
+ * <b>store</b>; change its saved_location to SAVED_IN_JOURNAL and set its
+ * offset appropriately. Return 0 on success, -1 on failure. */
+static int
+signed_desc_append_to_journal(signed_descriptor_t *desc,
+ desc_store_t *store)
+{
+ char *fname = get_cachedir_fname_suffix(store->fname_base, ".new");
+ const char *body = signed_descriptor_get_body_impl(desc,1);
+ size_t len = desc->signed_descriptor_len + desc->annotations_len;
+
+ if (append_bytes_to_file(fname, body, len, 1)) {
+ log_warn(LD_FS, "Unable to store router descriptor");
+ tor_free(fname);
+ return -1;
+ }
+ desc->saved_location = SAVED_IN_JOURNAL;
+ tor_free(fname);
+
+ desc->saved_offset = store->journal_len;
+ store->journal_len += len;
+
+ return 0;
+}
+
+/** Sorting helper: return &lt;0, 0, or &gt;0 depending on whether the
+ * signed_descriptor_t* in *<b>a</b> is older, the same age as, or newer than
+ * the signed_descriptor_t* in *<b>b</b>. */
+static int
+compare_signed_descriptors_by_age_(const void **_a, const void **_b)
+{
+ const signed_descriptor_t *r1 = *_a, *r2 = *_b;
+ return (int)(r1->published_on - r2->published_on);
+}
+
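+/* Flags for router_rebuild_store(): RRS_FORCE forces a rebuild even when the
+ * journal is small; RRS_DONT_REMOVE_OLD skips removing expired routers
+ * before the rebuild. */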
+#define RRS_FORCE 1
+#define RRS_DONT_REMOVE_OLD 2
+
+/** If the journal of <b>store</b> is too long, or if RRS_FORCE is set in
+ * <b>flags</b>, then atomically replace the saved router store with the
+ * routers currently in our routerlist, and clear the journal. Unless
+ * RRS_DONT_REMOVE_OLD is set in <b>flags</b>, delete expired routers before
+ * rebuilding the store. Return 0 on success, -1 on failure.
+ */
+static int
+router_rebuild_store(int flags, desc_store_t *store)
+{
+ smartlist_t *chunk_list = NULL;
+ char *fname = NULL, *fname_tmp = NULL;
+ int r = -1;
+ off_t offset = 0;
+ smartlist_t *signed_descriptors = NULL;
+ int nocache=0;
+ size_t total_expected_len = 0;
+ int had_any;
+ int force = flags & RRS_FORCE;
+
+ if (!force && !router_should_rebuild_store(store)) {
+ r = 0;
+ goto done;
+ }
+ if (!routerlist) {
+ r = 0;
+ goto done;
+ }
+
+ if (store->type == EXTRAINFO_STORE)
+ had_any = !eimap_isempty(routerlist->extra_info_map);
+ else
+ had_any = (smartlist_len(routerlist->routers)+
+ smartlist_len(routerlist->old_routers))>0;
+
+ /* Don't save deadweight. */
+ if (!(flags & RRS_DONT_REMOVE_OLD))
+ routerlist_remove_old_routers();
+
+ log_info(LD_DIR, "Rebuilding %s cache", store->description);
+
+ fname = get_cachedir_fname(store->fname_base);
+ fname_tmp = get_cachedir_fname_suffix(store->fname_base, ".tmp");
+
+ chunk_list = smartlist_new();
+
+ /* We sort the routers by age to enhance locality on disk. */
+ signed_descriptors = smartlist_new();
+ if (store->type == EXTRAINFO_STORE) {
+ eimap_iter_t *iter;
+ for (iter = eimap_iter_init(routerlist->extra_info_map);
+ !eimap_iter_done(iter);
+ iter = eimap_iter_next(routerlist->extra_info_map, iter)) {
+ const char *key;
+ extrainfo_t *ei;
+ eimap_iter_get(iter, &key, &ei);
+ smartlist_add(signed_descriptors, &ei->cache_info);
+ }
+ } else {
+ SMARTLIST_FOREACH(routerlist->old_routers, signed_descriptor_t *, sd,
+ smartlist_add(signed_descriptors, sd));
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, ri,
+ smartlist_add(signed_descriptors, &ri->cache_info));
+ }
+
+ smartlist_sort(signed_descriptors, compare_signed_descriptors_by_age_);
+
+ /* Now, add the appropriate members to chunk_list */
+ SMARTLIST_FOREACH_BEGIN(signed_descriptors, signed_descriptor_t *, sd) {
+ sized_chunk_t *c;
+ const char *body = signed_descriptor_get_body_impl(sd, 1);
+ if (!body) {
+ log_warn(LD_BUG, "No descriptor available for router.");
+ goto done;
+ }
+ if (sd->do_not_cache) {
+ ++nocache;
+ continue;
+ }
+ c = tor_malloc(sizeof(sized_chunk_t));
+ c->bytes = body;
+ c->len = sd->signed_descriptor_len + sd->annotations_len;
+ total_expected_len += c->len;
+ smartlist_add(chunk_list, c);
+ } SMARTLIST_FOREACH_END(sd);
+
+ if (write_chunks_to_file(fname_tmp, chunk_list, 1, 1)<0) {
+ log_warn(LD_FS, "Error writing router store to disk.");
+ goto done;
+ }
+
+ /* Our mmap is now invalid. */
+ if (store->mmap) {
+ int res = tor_munmap_file(store->mmap);
+ store->mmap = NULL;
+ if (res != 0) {
+ log_warn(LD_FS, "Unable to munmap route store in %s", fname);
+ }
+ }
+
+ if (replace_file(fname_tmp, fname)<0) {
+ log_warn(LD_FS, "Error replacing old router store: %s", strerror(errno));
+ goto done;
+ }
+
+ errno = 0;
+ store->mmap = tor_mmap_file(fname);
+ if (! store->mmap) {
+ if (errno == ERANGE) {
+ /* empty store. */
+ if (total_expected_len) {
+ log_warn(LD_FS, "We wrote some bytes to a new descriptor file at '%s',"
+ " but when we went to mmap it, it was empty!", fname);
+ } else if (had_any) {
+ log_info(LD_FS, "We just removed every descriptor in '%s'. This is "
+ "okay if we're just starting up after a long time. "
+ "Otherwise, it's a bug.", fname);
+ }
+ } else {
+ log_warn(LD_FS, "Unable to mmap new descriptor file at '%s'.",fname);
+ }
+ }
+
+ log_info(LD_DIR, "Reconstructing pointers into cache");
+
+ offset = 0;
+ SMARTLIST_FOREACH_BEGIN(signed_descriptors, signed_descriptor_t *, sd) {
+ if (sd->do_not_cache)
+ continue;
+ sd->saved_location = SAVED_IN_CACHE;
+ if (store->mmap) {
+ tor_free(sd->signed_descriptor_body); // sets it to null
+ sd->saved_offset = offset;
+ }
+ offset += sd->signed_descriptor_len + sd->annotations_len;
+ signed_descriptor_get_body(sd); /* reconstruct and assert */
+ } SMARTLIST_FOREACH_END(sd);
+
+ tor_free(fname);
+ fname = get_cachedir_fname_suffix(store->fname_base, ".new");
+ write_str_to_file(fname, "", 1);
+
+ r = 0;
+ store->store_len = (size_t) offset;
+ store->journal_len = 0;
+ store->bytes_dropped = 0;
+ done:
+ smartlist_free(signed_descriptors);
+ tor_free(fname);
+ tor_free(fname_tmp);
+ if (chunk_list) {
+ SMARTLIST_FOREACH(chunk_list, sized_chunk_t *, c, tor_free(c));
+ smartlist_free(chunk_list);
+ }
+
+ return r;
+}
+
+/** Helper: Reload a cache file and its associated journal, setting metadata
+ * appropriately. If <b>store</b> is an extra-info store, reload extra-info
+ * documents; else reload router descriptors. Return 0 on success, -1 on
+ * failure. */
+static int
+router_reload_router_list_impl(desc_store_t *store)
+{
+ char *fname = NULL, *contents = NULL;
+ struct stat st;
+ int extrainfo = (store->type == EXTRAINFO_STORE);
+ store->journal_len = store->store_len = 0;
+
+ fname = get_cachedir_fname(store->fname_base);
+
+ if (store->mmap) {
+ /* get rid of it first */
+ int res = tor_munmap_file(store->mmap);
+ store->mmap = NULL;
+ if (res != 0) {
+ log_warn(LD_FS, "Failed to munmap %s", fname);
+ tor_free(fname);
+ return -1;
+ }
+ }
+
+ store->mmap = tor_mmap_file(fname);
+ if (store->mmap) {
+ store->store_len = store->mmap->size;
+ if (extrainfo)
+ router_load_extrainfo_from_string(store->mmap->data,
+ store->mmap->data+store->mmap->size,
+ SAVED_IN_CACHE, NULL, 0);
+ else
+ router_load_routers_from_string(store->mmap->data,
+ store->mmap->data+store->mmap->size,
+ SAVED_IN_CACHE, NULL, 0, NULL);
+ }
+
+ tor_free(fname);
+ fname = get_cachedir_fname_suffix(store->fname_base, ".new");
+ /* don't load empty files - we wouldn't get any data, even if we tried */
+ if (file_status(fname) == FN_FILE)
+ contents = read_file_to_str(fname, RFTS_BIN|RFTS_IGNORE_MISSING, &st);
+ if (contents) {
+ if (extrainfo)
+ router_load_extrainfo_from_string(contents, NULL,SAVED_IN_JOURNAL,
+ NULL, 0);
+ else
+ router_load_routers_from_string(contents, NULL, SAVED_IN_JOURNAL,
+ NULL, 0, NULL);
+ store->journal_len = (size_t) st.st_size;
+ tor_free(contents);
+ }
+
+ tor_free(fname);
+
+ if (store->journal_len) {
+ /* Always clear the journal on startup.*/
+ router_rebuild_store(RRS_FORCE, store);
+ } else if (!extrainfo) {
+ /* Don't cache expired routers. (This is in an else because
+ * router_rebuild_store() also calls routerlist_remove_old_routers().) */
+ routerlist_remove_old_routers();
+ }
+
+ return 0;
+}
+
+/** Load all cached router descriptors and extra-info documents from the
+ * store. Return 0 on success and -1 on failure.
+ */
+int
+router_reload_router_list(void)
+{
+ routerlist_t *rl = router_get_routerlist();
+ if (router_reload_router_list_impl(&rl->desc_store))
+ return -1;
+ if (router_reload_router_list_impl(&rl->extrainfo_store))
+ return -1;
+ return 0;
+}
+
+/** Return a smartlist containing a list of dir_server_t * for all
+ * known trusted dirservers. Callers must not modify the list or its
+ * contents.
+ */
+const smartlist_t *
+router_get_trusted_dir_servers(void)
+{
+ if (!trusted_dir_servers)
+ trusted_dir_servers = smartlist_new();
+
+ return trusted_dir_servers;
+}
+
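+/** Return a smartlist containing a list of dir_server_t * for all known
+ * fallback directories, including the authorities. Callers must not modify
+ * the list or its contents. */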
+const smartlist_t *
+router_get_fallback_dir_servers(void)
+{
+ if (!fallback_dir_servers)
+ fallback_dir_servers = smartlist_new();
+
+ return fallback_dir_servers;
+}
+
+/** Try to find a running dirserver that supports operations of <b>type</b>.
+ *
+ * If there are no running dirservers in our routerlist and the
+ * <b>PDS_RETRY_IF_NO_SERVERS</b> flag is set, set all the fallback ones
+ * (including authorities) as running again, and pick one.
+ *
+ * If the <b>PDS_IGNORE_FASCISTFIREWALL</b> flag is set, then include
+ * dirservers that we can't reach.
+ *
+ * If the <b>PDS_ALLOW_SELF</b> flag is not set, then don't include ourself
+ * (if we're a dirserver).
+ *
+ * Don't pick a fallback directory mirror if any non-fallback is viable
+ * (the fallback directory mirrors include the authorities), and try to
+ * avoid using servers that have returned a 503 recently.
+ */
+const routerstatus_t *
+router_pick_directory_server(dirinfo_type_t type, int flags)
+{
+ int busy = 0;
+ const routerstatus_t *choice;
+
+ if (!routerlist)
+ return NULL;
+
+ choice = router_pick_directory_server_impl(type, flags, &busy);
+ if (choice || !(flags & PDS_RETRY_IF_NO_SERVERS))
+ return choice;
+
+ if (busy) {
+ /* If the reason that we got no server is that servers are "busy",
+ * we must be excluding good servers because we already have serverdesc
+ * fetches with them. Do not mark the down servers as up because of this. */
+ tor_assert((flags & (PDS_NO_EXISTING_SERVERDESC_FETCH|
+ PDS_NO_EXISTING_MICRODESC_FETCH)));
+ return NULL;
+ }
+
+ log_info(LD_DIR,
+ "No reachable router entries for dirservers. "
+ "Trying them all again.");
+ /* mark all fallback directory mirrors as up again */
+ mark_all_dirservers_up(fallback_dir_servers);
+ /* try again */
+ choice = router_pick_directory_server_impl(type, flags, NULL);
+ return choice;
+}
+
+/** Return the dir_server_t for the directory authority whose identity
+ * key hashes to <b>digest</b>, or NULL if no such authority is known.
+ */
+dir_server_t *
+router_get_trusteddirserver_by_digest(const char *digest)
+{
+ if (!trusted_dir_servers)
+ return NULL;
+
+ SMARTLIST_FOREACH(trusted_dir_servers, dir_server_t *, ds,
+ {
+ if (tor_memeq(ds->digest, digest, DIGEST_LEN))
+ return ds;
+ });
+
+ return NULL;
+}
+
+/** Return the dir_server_t for the fallback dirserver whose identity
+ * key hashes to <b>digest</b>, or NULL if no such fallback is in the list of
+ * fallback_dir_servers. (fallback_dir_servers is affected by the FallbackDir
+ * and UseDefaultFallbackDirs torrc options.)
+ * The list of fallback directories includes the list of authorities.
+ */
+dir_server_t *
+router_get_fallback_dirserver_by_digest(const char *digest)
+{
+ if (!fallback_dir_servers)
+ return NULL;
+
+ if (!digest)
+ return NULL;
+
+ SMARTLIST_FOREACH(fallback_dir_servers, dir_server_t *, ds,
+ {
+ if (tor_memeq(ds->digest, digest, DIGEST_LEN))
+ return ds;
+ });
+
+ return NULL;
+}
+
+/** Return 1 if any fallback dirserver's identity key hashes to <b>digest</b>,
+ * or 0 if no such fallback is in the list of fallback_dir_servers.
+ * (fallback_dir_servers is affected by the FallbackDir and
+ * UseDefaultFallbackDirs torrc options.)
+ * The list of fallback directories includes the list of authorities.
+ */
+int
+router_digest_is_fallback_dir(const char *digest)
+{
+ return (router_get_fallback_dirserver_by_digest(digest) != NULL);
+}
+
+/** Return the dir_server_t for the directory authority whose
+ * v3 identity key hashes to <b>digest</b>, or NULL if no such authority
+ * is known.
+ */
+MOCK_IMPL(dir_server_t *,
+trusteddirserver_get_by_v3_auth_digest, (const char *digest))
+{
+ if (!trusted_dir_servers)
+ return NULL;
+
+ SMARTLIST_FOREACH(trusted_dir_servers, dir_server_t *, ds,
+ {
+ if (tor_memeq(ds->v3_identity_digest, digest, DIGEST_LEN) &&
+ (ds->type & V3_DIRINFO))
+ return ds;
+ });
+
+ return NULL;
+}
+
+/** Try to find a running directory authority. Flags are as for
+ * router_pick_directory_server.
+ */
+const routerstatus_t *
+router_pick_trusteddirserver(dirinfo_type_t type, int flags)
+{
+ return router_pick_dirserver_generic(trusted_dir_servers, type, flags);
+}
+
+/** Try to find a running fallback directory. Flags are as for
+ * router_pick_directory_server.
+ */
+const routerstatus_t *
+router_pick_fallback_dirserver(dirinfo_type_t type, int flags)
+{
+ return router_pick_dirserver_generic(fallback_dir_servers, type, flags);
+}
+
+/** Try to find a running fallback directory. Flags are as for
+ * router_pick_directory_server.
+ */
+static const routerstatus_t *
+router_pick_dirserver_generic(smartlist_t *sourcelist,
+ dirinfo_type_t type, int flags)
+{
+ const routerstatus_t *choice;
+ int busy = 0;
+
+ choice = router_pick_trusteddirserver_impl(sourcelist, type, flags, &busy);
+ if (choice || !(flags & PDS_RETRY_IF_NO_SERVERS))
+ return choice;
+ if (busy) {
+ /* If the reason that we got no server is that servers are "busy",
+ * we must be excluding good servers because we already have serverdesc
+ * fetches with them. Do not mark the down servers as up because of this. */
+ tor_assert((flags & (PDS_NO_EXISTING_SERVERDESC_FETCH|
+ PDS_NO_EXISTING_MICRODESC_FETCH)));
+ return NULL;
+ }
+
+ log_info(LD_DIR,
+ "No dirservers are reachable. Trying them all again.");
+ mark_all_dirservers_up(sourcelist);
+ return router_pick_trusteddirserver_impl(sourcelist, type, flags, NULL);
+}
+
+/* Check if we already have a directory fetch from ap, for serverdesc
+ * (including extrainfo) or microdesc documents.
+ * If so, return 1, if not, return 0.
+ * Also returns 0 if ap is NULL, ap->addr is the null address, or ap->port
+ * is 0.
+ */
+STATIC int
+router_is_already_dir_fetching(const tor_addr_port_t *ap, int serverdesc,
+ int microdesc)
+{
+ if (!ap || tor_addr_is_null(&ap->addr) || !ap->port) {
+ return 0;
+ }
+
+ /* XX/teor - we're not checking tunnel connections here, see #17848
+ */
+ if (serverdesc && (
+ connection_get_by_type_addr_port_purpose(
+ CONN_TYPE_DIR, &ap->addr, ap->port, DIR_PURPOSE_FETCH_SERVERDESC)
+ || connection_get_by_type_addr_port_purpose(
+ CONN_TYPE_DIR, &ap->addr, ap->port, DIR_PURPOSE_FETCH_EXTRAINFO))) {
+ return 1;
+ }
+
+ if (microdesc && (
+ connection_get_by_type_addr_port_purpose(
+ CONN_TYPE_DIR, &ap->addr, ap->port, DIR_PURPOSE_FETCH_MICRODESC))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Check if we already have a directory fetch from the ipv4 or ipv6
+ * router, for serverdesc (including extrainfo) or microdesc documents.
+ * If so, return 1, if not, return 0.
+ */
+static int
+router_is_already_dir_fetching_(uint32_t ipv4_addr,
+ const tor_addr_t *ipv6_addr,
+ uint16_t dir_port,
+ int serverdesc,
+ int microdesc)
+{
+ tor_addr_port_t ipv4_dir_ap, ipv6_dir_ap;
+
+ /* Assume IPv6 DirPort is the same as IPv4 DirPort */
+ tor_addr_from_ipv4h(&ipv4_dir_ap.addr, ipv4_addr);
+ ipv4_dir_ap.port = dir_port;
+ tor_addr_copy(&ipv6_dir_ap.addr, ipv6_addr);
+ ipv6_dir_ap.port = dir_port;
+
+ return (router_is_already_dir_fetching(&ipv4_dir_ap, serverdesc, microdesc)
+ || router_is_already_dir_fetching(&ipv6_dir_ap, serverdesc, microdesc));
+}
+
+#ifndef LOG_FALSE_POSITIVES_DURING_BOOTSTRAP
+#define LOG_FALSE_POSITIVES_DURING_BOOTSTRAP 0
+#endif
+
+/* Log a message if we couldn't find a directory (rs is NULL), or if the one
+ * we found doesn't have a preferred address. */
+static void
+router_picked_poor_directory_log(const routerstatus_t *rs)
+{
+ const networkstatus_t *usable_consensus;
+ usable_consensus = networkstatus_get_reasonably_live_consensus(time(NULL),
+ usable_consensus_flavor());
+
+#if !LOG_FALSE_POSITIVES_DURING_BOOTSTRAP
+ /* Don't log early in the bootstrap process, it's normal to pick from a
+ * small pool of nodes. Of course, this won't help if we're trying to
+ * diagnose bootstrap issues. */
+ if (!smartlist_len(nodelist_get_list()) || !usable_consensus
+ || !router_have_minimum_dir_info()) {
+ return;
+ }
+#endif /* !LOG_FALSE_POSITIVES_DURING_BOOTSTRAP */
+
+ /* We couldn't find a node, or the one we have doesn't fit our preferences.
+ * Sometimes this is normal, sometimes it can be a reachability issue. */
+ if (!rs) {
+ /* This happens a lot, so it's at debug level */
+ log_debug(LD_DIR, "Wanted to make an outgoing directory connection, but "
+ "we couldn't find a directory that fit our criteria. "
+ "Perhaps we will succeed next time with less strict criteria.");
+ } else if (!fascist_firewall_allows_rs(rs, FIREWALL_OR_CONNECTION, 1)
+ && !fascist_firewall_allows_rs(rs, FIREWALL_DIR_CONNECTION, 1)
+ ) {
+ /* This is rare, and might be interesting to users trying to diagnose
+ * connection issues on dual-stack machines. */
+ log_info(LD_DIR, "Selected a directory %s with non-preferred OR and Dir "
+ "addresses for launching an outgoing connection: "
+ "IPv4 %s OR %d Dir %d IPv6 %s OR %d Dir %d",
+ routerstatus_describe(rs),
+ fmt_addr32(rs->addr), rs->or_port,
+ rs->dir_port, fmt_addr(&rs->ipv6_addr),
+ rs->ipv6_orport, rs->dir_port);
+ }
+}
+
+#undef LOG_FALSE_POSITIVES_DURING_BOOTSTRAP
+
+/** How long do we avoid using a directory server after it's given us a 503? */
+#define DIR_503_TIMEOUT (60*60)
+
+/* Common retry code for router_pick_directory_server_impl and
+ * router_pick_trusteddirserver_impl. Retry with the non-preferred IP version.
+ * Must be called before RETRY_WITHOUT_EXCLUDE().
+ *
+ * If we got no result, and we are applying IP preferences, and we are a
+ * client that could use an alternate IP version, try again with the
+ * opposite preferences. */
+#define RETRY_ALTERNATE_IP_VERSION(retry_label) \
+ STMT_BEGIN \
+ if (result == NULL && try_ip_pref && options->ClientUseIPv4 \
+ && fascist_firewall_use_ipv6(options) && !server_mode(options) \
+ && !n_busy) { \
+ n_excluded = 0; \
+ n_busy = 0; \
+ try_ip_pref = 0; \
+ goto retry_label; \
+ } \
+ STMT_END \
+
+/* Common retry code for router_pick_directory_server_impl and
+ * router_pick_trusteddirserver_impl. Retry without excluding nodes, but with
+ * the preferred IP version. Must be called after RETRY_ALTERNATE_IP_VERSION().
+ *
+ * If we got no result, and we are excluding nodes, and StrictNodes is
+ * not set, try again without excluding nodes. */
+#define RETRY_WITHOUT_EXCLUDE(retry_label) \
+ STMT_BEGIN \
+ if (result == NULL && try_excluding && !options->StrictNodes \
+ && n_excluded && !n_busy) { \
+ try_excluding = 0; \
+ n_excluded = 0; \
+ n_busy = 0; \
+ try_ip_pref = 1; \
+ goto retry_label; \
+ } \
+ STMT_END
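+
+/* Note: router_pick_directory_server_impl() and
+ * router_pick_trusteddirserver_impl() below invoke these macros at the end
+ * of their search, in this order:
+ *
+ *   RETRY_ALTERNATE_IP_VERSION(retry_search);
+ *   RETRY_WITHOUT_EXCLUDE(retry_search);
+ *
+ * so the IP-version preference is relaxed before ExcludeNodes is dropped. */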
+
+/* Common code used in the loop within router_pick_directory_server_impl and
+ * router_pick_trusteddirserver_impl.
+ *
+ * Check if the given <b>identity</b> supports extrainfo. If not, skip further
+ * checks.
+ */
+#define SKIP_MISSING_TRUSTED_EXTRAINFO(type, identity) \
+ STMT_BEGIN \
+ int is_trusted_extrainfo = router_digest_is_trusted_dir_type( \
+ (identity), EXTRAINFO_DIRINFO); \
+ if (((type) & EXTRAINFO_DIRINFO) && \
+ !router_supports_extrainfo((identity), is_trusted_extrainfo)) \
+ continue; \
+ STMT_END
+
+/* When iterating through the routerlist, can OR address/port preference
+ * and reachability checks be skipped?
+ */
+int
+router_skip_or_reachability(const or_options_t *options, int try_ip_pref)
+{
+ /* Servers always have and prefer IPv4.
+ * And if clients are checking against the firewall for reachability only,
+ * but there's no firewall, don't bother checking */
+ return server_mode(options) || (!try_ip_pref && !firewall_is_fascist_or());
+}
+
+/* When iterating through the routerlist, can Dir address/port preference
+ * and reachability checks be skipped?
+ */
+static int
+router_skip_dir_reachability(const or_options_t *options, int try_ip_pref)
+{
+ /* Servers always have and prefer IPv4.
+ * And if clients are checking against the firewall for reachability only,
+ * but there's no firewall, don't bother checking */
+ return server_mode(options) || (!try_ip_pref && !firewall_is_fascist_dir());
+}
+
+/** Pick a random running valid directory server/mirror from our
+ * routerlist. Arguments are as for router_pick_directory_server(), except:
+ *
+ * If <b>n_busy_out</b> is provided, set *<b>n_busy_out</b> to the number of
+ * directories that we excluded for no other reason than
+ * PDS_NO_EXISTING_SERVERDESC_FETCH or PDS_NO_EXISTING_MICRODESC_FETCH.
+ */
+STATIC const routerstatus_t *
+router_pick_directory_server_impl(dirinfo_type_t type, int flags,
+ int *n_busy_out)
+{
+ const or_options_t *options = get_options();
+ const node_t *result;
+ smartlist_t *direct, *tunnel;
+ smartlist_t *trusted_direct, *trusted_tunnel;
+ smartlist_t *overloaded_direct, *overloaded_tunnel;
+ time_t now = time(NULL);
+ const networkstatus_t *consensus = networkstatus_get_latest_consensus();
+ const int requireother = ! (flags & PDS_ALLOW_SELF);
+ const int fascistfirewall = ! (flags & PDS_IGNORE_FASCISTFIREWALL);
+ const int no_serverdesc_fetching =(flags & PDS_NO_EXISTING_SERVERDESC_FETCH);
+ const int no_microdesc_fetching = (flags & PDS_NO_EXISTING_MICRODESC_FETCH);
+ int try_excluding = 1, n_excluded = 0, n_busy = 0;
+ int try_ip_pref = 1;
+
+ if (!consensus)
+ return NULL;
+
+ retry_search:
+
+ direct = smartlist_new();
+ tunnel = smartlist_new();
+ trusted_direct = smartlist_new();
+ trusted_tunnel = smartlist_new();
+ overloaded_direct = smartlist_new();
+ overloaded_tunnel = smartlist_new();
+
+ const int skip_or_fw = router_skip_or_reachability(options, try_ip_pref);
+ const int skip_dir_fw = router_skip_dir_reachability(options, try_ip_pref);
+ const int must_have_or = directory_must_use_begindir(options);
+
+ /* Find all the running dirservers we know about. */
+ SMARTLIST_FOREACH_BEGIN(nodelist_get_list(), const node_t *, node) {
+ int is_trusted;
+ int is_overloaded;
+ const routerstatus_t *status = node->rs;
+ const country_t country = node->country;
+ if (!status)
+ continue;
+
+ if (!node->is_running || !node_is_dir(node) || !node->is_valid)
+ continue;
+ if (requireother && router_digest_is_me(node->identity))
+ continue;
+
+ SKIP_MISSING_TRUSTED_EXTRAINFO(type, node->identity);
+
+ if (try_excluding &&
+ routerset_contains_routerstatus(options->ExcludeNodes, status,
+ country)) {
+ ++n_excluded;
+ continue;
+ }
+
+ if (router_is_already_dir_fetching_(status->addr,
+ &status->ipv6_addr,
+ status->dir_port,
+ no_serverdesc_fetching,
+ no_microdesc_fetching)) {
+ ++n_busy;
+ continue;
+ }
+
+ is_overloaded = status->last_dir_503_at + DIR_503_TIMEOUT > now;
+ is_trusted = router_digest_is_trusted_dir(node->identity);
+
+ /* Clients use IPv6 addresses if the server has one and the client
+ * prefers IPv6.
+ * Add the router if its preferred address and port are reachable.
+ * If we don't get any routers, we'll try again with the non-preferred
+ * address for each router (if any). (To ensure correct load-balancing
+ * we try routers that only have one address both times.)
+ */
+ if (!fascistfirewall || skip_or_fw ||
+ fascist_firewall_allows_node(node, FIREWALL_OR_CONNECTION,
+ try_ip_pref))
+ smartlist_add(is_trusted ? trusted_tunnel :
+ is_overloaded ? overloaded_tunnel : tunnel, (void*)node);
+ else if (!must_have_or && (skip_dir_fw ||
+ fascist_firewall_allows_node(node, FIREWALL_DIR_CONNECTION,
+ try_ip_pref)))
+ smartlist_add(is_trusted ? trusted_direct :
+ is_overloaded ? overloaded_direct : direct, (void*)node);
+ } SMARTLIST_FOREACH_END(node);
+
+ if (smartlist_len(tunnel)) {
+ result = node_sl_choose_by_bandwidth(tunnel, WEIGHT_FOR_DIR);
+ } else if (smartlist_len(overloaded_tunnel)) {
+ result = node_sl_choose_by_bandwidth(overloaded_tunnel,
+ WEIGHT_FOR_DIR);
+ } else if (smartlist_len(trusted_tunnel)) {
+ /* FFFF We don't distinguish between trusteds and overloaded trusteds
+ * yet. Maybe one day we should. */
+ /* FFFF We also don't load balance over authorities yet. I think this
+ * is a feature, but it could easily be a bug. -RD */
+ result = smartlist_choose(trusted_tunnel);
+ } else if (smartlist_len(direct)) {
+ result = node_sl_choose_by_bandwidth(direct, WEIGHT_FOR_DIR);
+ } else if (smartlist_len(overloaded_direct)) {
+ result = node_sl_choose_by_bandwidth(overloaded_direct,
+ WEIGHT_FOR_DIR);
+ } else {
+ result = smartlist_choose(trusted_direct);
+ }
+ smartlist_free(direct);
+ smartlist_free(tunnel);
+ smartlist_free(trusted_direct);
+ smartlist_free(trusted_tunnel);
+ smartlist_free(overloaded_direct);
+ smartlist_free(overloaded_tunnel);
+
+ RETRY_ALTERNATE_IP_VERSION(retry_search);
+
+ RETRY_WITHOUT_EXCLUDE(retry_search);
+
+ if (n_busy_out)
+ *n_busy_out = n_busy;
+
+ router_picked_poor_directory_log(result ? result->rs : NULL);
+
+ return result ? result->rs : NULL;
+}
+
+/** Pick a random element from a list of dir_server_t, weighting by their
+ * <b>weight</b> field. */
+static const dir_server_t *
+dirserver_choose_by_weight(const smartlist_t *servers, double authority_weight)
+{
+ int n = smartlist_len(servers);
+ int i;
+ double *weights_dbl;
+ uint64_t *weights_u64;
+ const dir_server_t *ds;
+
+ weights_dbl = tor_calloc(n, sizeof(double));
+ weights_u64 = tor_calloc(n, sizeof(uint64_t));
+ for (i = 0; i < n; ++i) {
+ ds = smartlist_get(servers, i);
+ weights_dbl[i] = ds->weight;
+ if (ds->is_authority)
+ weights_dbl[i] *= authority_weight;
+ }
+
+ scale_array_elements_to_u64(weights_u64, weights_dbl, n, NULL);
+ i = choose_array_element_by_weight(weights_u64, n);
+ tor_free(weights_dbl);
+ tor_free(weights_u64);
+ return (i < 0) ? NULL : smartlist_get(servers, i);
+}
+
+/** Choose randomly from among the dir_server_ts in sourcelist that
+ * are up. Flags are as for router_pick_directory_server_impl().
+ */
+static const routerstatus_t *
+router_pick_trusteddirserver_impl(const smartlist_t *sourcelist,
+ dirinfo_type_t type, int flags,
+ int *n_busy_out)
+{
+ const or_options_t *options = get_options();
+ smartlist_t *direct, *tunnel;
+ smartlist_t *overloaded_direct, *overloaded_tunnel;
+ const routerinfo_t *me = router_get_my_routerinfo();
+ const routerstatus_t *result = NULL;
+ time_t now = time(NULL);
+ const int requireother = ! (flags & PDS_ALLOW_SELF);
+ const int fascistfirewall = ! (flags & PDS_IGNORE_FASCISTFIREWALL);
+ const int no_serverdesc_fetching =(flags & PDS_NO_EXISTING_SERVERDESC_FETCH);
+ const int no_microdesc_fetching =(flags & PDS_NO_EXISTING_MICRODESC_FETCH);
+ const double auth_weight = (sourcelist == fallback_dir_servers) ?
+ options->DirAuthorityFallbackRate : 1.0;
+ smartlist_t *pick_from;
+ int n_busy = 0;
+ int try_excluding = 1, n_excluded = 0;
+ int try_ip_pref = 1;
+
+ if (!sourcelist)
+ return NULL;
+
+ retry_search:
+
+ direct = smartlist_new();
+ tunnel = smartlist_new();
+ overloaded_direct = smartlist_new();
+ overloaded_tunnel = smartlist_new();
+
+ const int skip_or_fw = router_skip_or_reachability(options, try_ip_pref);
+ const int skip_dir_fw = router_skip_dir_reachability(options, try_ip_pref);
+ const int must_have_or = directory_must_use_begindir(options);
+
+ SMARTLIST_FOREACH_BEGIN(sourcelist, const dir_server_t *, d)
+ {
+ int is_overloaded =
+ d->fake_status.last_dir_503_at + DIR_503_TIMEOUT > now;
+ if (!d->is_running) continue;
+ if ((type & d->type) == 0)
+ continue;
+
+ SKIP_MISSING_TRUSTED_EXTRAINFO(type, d->digest);
+
+ if (requireother && me && router_digest_is_me(d->digest))
+ continue;
+ if (try_excluding &&
+ routerset_contains_routerstatus(options->ExcludeNodes,
+ &d->fake_status, -1)) {
+ ++n_excluded;
+ continue;
+ }
+
+ if (router_is_already_dir_fetching_(d->addr,
+ &d->ipv6_addr,
+ d->dir_port,
+ no_serverdesc_fetching,
+ no_microdesc_fetching)) {
+ ++n_busy;
+ continue;
+ }
+
+ /* Clients use IPv6 addresses if the server has one and the client
+ * prefers IPv6.
+ * Add the router if its preferred address and port are reachable.
+ * If we don't get any routers, we'll try again with the non-preferred
+ * address for each router (if any). (To ensure correct load-balancing
+ * we try routers that only have one address both times.)
+ */
+ if (!fascistfirewall || skip_or_fw ||
+ fascist_firewall_allows_dir_server(d, FIREWALL_OR_CONNECTION,
+ try_ip_pref))
+ smartlist_add(is_overloaded ? overloaded_tunnel : tunnel, (void*)d);
+ else if (!must_have_or && (skip_dir_fw ||
+ fascist_firewall_allows_dir_server(d, FIREWALL_DIR_CONNECTION,
+ try_ip_pref)))
+ smartlist_add(is_overloaded ? overloaded_direct : direct, (void*)d);
+ }
+ SMARTLIST_FOREACH_END(d);
+
+ if (smartlist_len(tunnel)) {
+ pick_from = tunnel;
+ } else if (smartlist_len(overloaded_tunnel)) {
+ pick_from = overloaded_tunnel;
+ } else if (smartlist_len(direct)) {
+ pick_from = direct;
+ } else {
+ pick_from = overloaded_direct;
+ }
+
+ {
+ const dir_server_t *selection =
+ dirserver_choose_by_weight(pick_from, auth_weight);
+
+ if (selection)
+ result = &selection->fake_status;
+ }
+
+ smartlist_free(direct);
+ smartlist_free(tunnel);
+ smartlist_free(overloaded_direct);
+ smartlist_free(overloaded_tunnel);
+
+ RETRY_ALTERNATE_IP_VERSION(retry_search);
+
+ RETRY_WITHOUT_EXCLUDE(retry_search);
+
+ router_picked_poor_directory_log(result);
+
+ if (n_busy_out)
+ *n_busy_out = n_busy;
+ return result;
+}
+
+/** Mark as running every dir_server_t in <b>server_list</b>. */
+static void
+mark_all_dirservers_up(smartlist_t *server_list)
+{
+ if (server_list) {
+ SMARTLIST_FOREACH_BEGIN(server_list, dir_server_t *, dir) {
+ routerstatus_t *rs;
+ node_t *node;
+ dir->is_running = 1;
+ node = node_get_mutable_by_id(dir->digest);
+ if (node)
+ node->is_running = 1;
+ rs = router_get_mutable_consensus_status_by_id(dir->digest);
+ if (rs) {
+ rs->last_dir_503_at = 0;
+ control_event_networkstatus_changed_single(rs);
+ }
+ } SMARTLIST_FOREACH_END(dir);
+ }
+ router_dir_info_changed();
+}
+
+/** Return true iff r1 and r2 have the same IPv4 and IPv6 addresses and
+ * OR ports. */
+int
+routers_have_same_or_addrs(const routerinfo_t *r1, const routerinfo_t *r2)
+{
+ return r1->addr == r2->addr && r1->or_port == r2->or_port &&
+ tor_addr_eq(&r1->ipv6_addr, &r2->ipv6_addr) &&
+ r1->ipv6_orport == r2->ipv6_orport;
+}
+
+/** Reset all internal variables used to count failed downloads of network
+ * status objects. */
+void
+router_reset_status_download_failures(void)
+{
+ mark_all_dirservers_up(fallback_dir_servers);
+}
+
+/** Given a <b>router</b>, add every node_t in its family (including the
+ * node itself!) to <b>sl</b>.
+ *
+ * Note the type mismatch: This function takes a routerinfo, but adds nodes
+ * to the smartlist!
+ */
+static void
+routerlist_add_node_and_family(smartlist_t *sl, const routerinfo_t *router)
+{
+ /* XXXX MOVE ? */
+ node_t fake_node;
+ const node_t *node = node_get_by_id(router->cache_info.identity_digest);
+ if (node == NULL) {
+ memset(&fake_node, 0, sizeof(fake_node));
+ fake_node.ri = (routerinfo_t *)router;
+ memcpy(fake_node.identity, router->cache_info.identity_digest, DIGEST_LEN);
+ node = &fake_node;
+ }
+ nodelist_add_node_and_family(sl, node);
+}
+
+/** Add every suitable node from our nodelist to <b>sl</b>, so that
+ * we can pick a node for a circuit.
+ */
+void
+router_add_running_nodes_to_smartlist(smartlist_t *sl, int need_uptime,
+ int need_capacity, int need_guard,
+ int need_desc, int pref_addr,
+ int direct_conn)
+{
+ const int check_reach = !router_skip_or_reachability(get_options(),
+ pref_addr);
+ /* XXXX MOVE */
+ SMARTLIST_FOREACH_BEGIN(nodelist_get_list(), const node_t *, node) {
+ if (!node->is_running || !node->is_valid)
+ continue;
+ if (need_desc && !node_has_preferred_descriptor(node, direct_conn))
+ continue;
+ if (node->ri && node->ri->purpose != ROUTER_PURPOSE_GENERAL)
+ continue;
+ if (node_is_unreliable(node, need_uptime, need_capacity, need_guard))
+ continue;
+ /* Don't choose nodes if we are certain they can't do EXTEND2 cells */
+ if (node->rs && !routerstatus_version_supports_extend2_cells(node->rs, 1))
+ continue;
+ /* Don't choose nodes if we are certain they can't do ntor. */
+ if ((node->ri || node->md) && !node_has_curve25519_onion_key(node))
+ continue;
+ /* Choose a node with an OR address that matches the firewall rules */
+ if (direct_conn && check_reach &&
+ !fascist_firewall_allows_node(node,
+ FIREWALL_OR_CONNECTION,
+ pref_addr))
+ continue;
+
+ smartlist_add(sl, (void *)node);
+ } SMARTLIST_FOREACH_END(node);
+}
+
+/** Look through the routerlist until we find a router that has my key.
+ Return it. */
+const routerinfo_t *
+routerlist_find_my_routerinfo(void)
+{
+ if (!routerlist)
+ return NULL;
+
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, router,
+ {
+ if (router_is_me(router))
+ return router;
+ });
+ return NULL;
+}
+
+/** Return the smaller of the router's configured BandwidthRate
+ * and its advertised capacity. */
+uint32_t
+router_get_advertised_bandwidth(const routerinfo_t *router)
+{
+ if (router->bandwidthcapacity < router->bandwidthrate)
+ return router->bandwidthcapacity;
+ return router->bandwidthrate;
+}
+
+/** Do not weight any declared bandwidth more than this much when picking
+ * routers by bandwidth. */
+#define DEFAULT_MAX_BELIEVABLE_BANDWIDTH 10000000 /* 10 MB/sec */
+
+/** Return the smaller of the router's configured BandwidthRate
+ * and its advertised capacity, capped at
+ * DEFAULT_MAX_BELIEVABLE_BANDWIDTH. */
+uint32_t
+router_get_advertised_bandwidth_capped(const routerinfo_t *router)
+{
+ uint32_t result = router->bandwidthcapacity;
+ if (result > router->bandwidthrate)
+ result = router->bandwidthrate;
+ if (result > DEFAULT_MAX_BELIEVABLE_BANDWIDTH)
+ result = DEFAULT_MAX_BELIEVABLE_BANDWIDTH;
+ return result;
+}
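+
+/* Worked example for the helper above (illustrative numbers only): a router
+ * with BandwidthRate 5 MB/s but an advertised capacity of 2 MB/s counts as
+ * 2 MB/s, while a router advertising 80 MB/s in both fields is capped at
+ * DEFAULT_MAX_BELIEVABLE_BANDWIDTH, i.e. 10 MB/s. */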
+
+/** Given an array of doubles in <b>entries_in</b>, convert them to uint64_t
+ * values in <b>entries_out</b>, scaling them linearly so as to use as much
+ * of the range of uint64_t as is safely possible. If <b>total_out</b> is
+ * provided, set it to the sum of all elements in the array _before_
+ * scaling. */
+STATIC void
+scale_array_elements_to_u64(uint64_t *entries_out, const double *entries_in,
+ int n_entries,
+ uint64_t *total_out)
+{
+ double total = 0.0;
+ double scale_factor = 0.0;
+ int i;
+
+ for (i = 0; i < n_entries; ++i)
+ total += entries_in[i];
+
+ if (total > 0.0) {
+ scale_factor = ((double)INT64_MAX) / total;
+ scale_factor /= 4.0; /* make sure we're very far away from overflowing */
+ }
+
+ for (i = 0; i < n_entries; ++i)
+ entries_out[i] = tor_llround(entries_in[i] * scale_factor);
+
+ if (total_out)
+ *total_out = (uint64_t) total;
+}
+
+/** Pick a random element of <b>n_entries</b>-element array <b>entries</b>,
+ * choosing each element with a probability proportional to its (uint64_t)
+ * value, and return the index of that element. If all elements are 0, choose
+ * an index at random. Return -1 on error.
+ */
+STATIC int
+choose_array_element_by_weight(const uint64_t *entries, int n_entries)
+{
+ int i;
+ uint64_t rand_val;
+ uint64_t total = 0;
+
+ for (i = 0; i < n_entries; ++i)
+ total += entries[i];
+
+ if (n_entries < 1)
+ return -1;
+
+ if (total == 0)
+ return crypto_rand_int(n_entries);
+
+ tor_assert(total < INT64_MAX);
+
+ rand_val = crypto_rand_uint64(total);
+
+ return select_array_member_cumulative_timei(
+ entries, n_entries, total, rand_val);
+}
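+
+/* A minimal usage sketch for the two helpers above (illustrative only; the
+ * variable names are hypothetical):
+ *
+ *   double weights_dbl[3] = { 1.0, 2.0, 5.0 };
+ *   uint64_t weights_u64[3];
+ *   scale_array_elements_to_u64(weights_u64, weights_dbl, 3, NULL);
+ *   int idx = choose_array_element_by_weight(weights_u64, 3);
+ *
+ * Here idx is 2 with probability about 5/8, 1 with probability about 2/8,
+ * 0 with probability about 1/8, and -1 only on error. */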
+
+/** When weighting bridges, enforce these values as lower and upper
+ * bounds for believable bandwidth, because we currently have no way
+ * to verify a bridge's bandwidth. */
+#define BRIDGE_MIN_BELIEVABLE_BANDWIDTH 20000 /* 20 kB/sec */
+#define BRIDGE_MAX_BELIEVABLE_BANDWIDTH 100000 /* 100 kB/sec */
+
+/** Return the smaller of the router's configured BandwidthRate
+ * and its advertised capacity, clamped to the interval between
+ * BRIDGE_MIN_BELIEVABLE_BANDWIDTH and
+ * BRIDGE_MAX_BELIEVABLE_BANDWIDTH. */
+static uint32_t
+bridge_get_advertised_bandwidth_bounded(routerinfo_t *router)
+{
+ uint32_t result = router->bandwidthcapacity;
+ if (result > router->bandwidthrate)
+ result = router->bandwidthrate;
+ if (result > BRIDGE_MAX_BELIEVABLE_BANDWIDTH)
+ result = BRIDGE_MAX_BELIEVABLE_BANDWIDTH;
+ else if (result < BRIDGE_MIN_BELIEVABLE_BANDWIDTH)
+ result = BRIDGE_MIN_BELIEVABLE_BANDWIDTH;
+ return result;
+}
+
+/** Return bw*1000, unless bw*1000 would overflow, in which case return
+ * INT32_MAX. */
+static inline int32_t
+kb_to_bytes(uint32_t bw)
+{
+ return (bw > (INT32_MAX/1000)) ? INT32_MAX : bw*1000;
+}
+
+/** Helper function:
+ * choose a random element of smartlist <b>sl</b> of nodes, weighted by
+ * the advertised bandwidth of each element using the consensus
+ * bandwidth weights.
+ *
+ * If <b>rule</b>==WEIGHT_FOR_EXIT, we're picking an exit node: consider all
+ * nodes' bandwidth equally regardless of their Exit status, since there may
+ * be some in the list because they exit to obscure ports. If
+ * <b>rule</b>==NO_WEIGHTING, we're picking a non-exit node: weight
+ * exit nodes' bandwidth less depending on the smallness of the fraction of
+ * Exit-to-total bandwidth. If <b>rule</b>==WEIGHT_FOR_GUARD, we're picking a
+ * guard node: consider all guards' bandwidth equally. Otherwise, weight
+ * guards proportionally less.
+ */
+static const node_t *
+smartlist_choose_node_by_bandwidth_weights(const smartlist_t *sl,
+ bandwidth_weight_rule_t rule)
+{
+ double *bandwidths_dbl=NULL;
+ uint64_t *bandwidths_u64=NULL;
+
+ if (compute_weighted_bandwidths(sl, rule, &bandwidths_dbl, NULL) < 0)
+ return NULL;
+
+ bandwidths_u64 = tor_calloc(smartlist_len(sl), sizeof(uint64_t));
+ scale_array_elements_to_u64(bandwidths_u64, bandwidths_dbl,
+ smartlist_len(sl), NULL);
+
+ {
+ int idx = choose_array_element_by_weight(bandwidths_u64,
+ smartlist_len(sl));
+ tor_free(bandwidths_dbl);
+ tor_free(bandwidths_u64);
+ return idx < 0 ? NULL : smartlist_get(sl, idx);
+ }
+}
+
+/** Given a list of routers and a weighting rule as in
+ * smartlist_choose_node_by_bandwidth_weights, compute weighted bandwidth
+ * values for each node and store them in a freshly allocated array
+ * *<b>bandwidths_out</b> of the same length as <b>sl</b>, holding the results
+ * as doubles. If <b>total_bandwidth_out</b> is non-NULL, set it to the total
+ * of all the bandwidths.
+ * Return 0 on success, -1 on failure. */
+static int
+compute_weighted_bandwidths(const smartlist_t *sl,
+ bandwidth_weight_rule_t rule,
+ double **bandwidths_out,
+ double *total_bandwidth_out)
+{
+ int64_t weight_scale;
+ double Wg = -1, Wm = -1, We = -1, Wd = -1;
+ double Wgb = -1, Wmb = -1, Web = -1, Wdb = -1;
+ guardfraction_bandwidth_t guardfraction_bw;
+ double *bandwidths = NULL;
+ double total_bandwidth = 0.0;
+
+ tor_assert(sl);
+ tor_assert(bandwidths_out);
+
+ /* Can't choose exit and guard at same time */
+ tor_assert(rule == NO_WEIGHTING ||
+ rule == WEIGHT_FOR_EXIT ||
+ rule == WEIGHT_FOR_GUARD ||
+ rule == WEIGHT_FOR_MID ||
+ rule == WEIGHT_FOR_DIR);
+
+ *bandwidths_out = NULL;
+
+ if (total_bandwidth_out) {
+ *total_bandwidth_out = 0.0;
+ }
+
+ if (smartlist_len(sl) == 0) {
+ log_info(LD_CIRC,
+ "Empty routerlist passed in to consensus weight node "
+ "selection for rule %s",
+ bandwidth_weight_rule_to_string(rule));
+ return -1;
+ }
+
+ weight_scale = networkstatus_get_weight_scale_param(NULL);
+
+ if (rule == WEIGHT_FOR_GUARD) {
+ Wg = networkstatus_get_bw_weight(NULL, "Wgg", -1);
+ Wm = networkstatus_get_bw_weight(NULL, "Wgm", -1); /* Bridges */
+ We = 0;
+ Wd = networkstatus_get_bw_weight(NULL, "Wgd", -1);
+
+ Wgb = networkstatus_get_bw_weight(NULL, "Wgb", -1);
+ Wmb = networkstatus_get_bw_weight(NULL, "Wmb", -1);
+ Web = networkstatus_get_bw_weight(NULL, "Web", -1);
+ Wdb = networkstatus_get_bw_weight(NULL, "Wdb", -1);
+ } else if (rule == WEIGHT_FOR_MID) {
+ Wg = networkstatus_get_bw_weight(NULL, "Wmg", -1);
+ Wm = networkstatus_get_bw_weight(NULL, "Wmm", -1);
+ We = networkstatus_get_bw_weight(NULL, "Wme", -1);
+ Wd = networkstatus_get_bw_weight(NULL, "Wmd", -1);
+
+ Wgb = networkstatus_get_bw_weight(NULL, "Wgb", -1);
+ Wmb = networkstatus_get_bw_weight(NULL, "Wmb", -1);
+ Web = networkstatus_get_bw_weight(NULL, "Web", -1);
+ Wdb = networkstatus_get_bw_weight(NULL, "Wdb", -1);
+ } else if (rule == WEIGHT_FOR_EXIT) {
+ // Guards CAN be exits if they have unusual exit policies; such
+ // guard+exit nodes fall into the 'd' (dual) weight class here.
+ We = networkstatus_get_bw_weight(NULL, "Wee", -1);
+ Wm = networkstatus_get_bw_weight(NULL, "Wem", -1); /* Odd exit policies */
+ Wd = networkstatus_get_bw_weight(NULL, "Wed", -1);
+ Wg = networkstatus_get_bw_weight(NULL, "Weg", -1); /* Odd exit policies */
+
+ Wgb = networkstatus_get_bw_weight(NULL, "Wgb", -1);
+ Wmb = networkstatus_get_bw_weight(NULL, "Wmb", -1);
+ Web = networkstatus_get_bw_weight(NULL, "Web", -1);
+ Wdb = networkstatus_get_bw_weight(NULL, "Wdb", -1);
+ } else if (rule == WEIGHT_FOR_DIR) {
+ We = networkstatus_get_bw_weight(NULL, "Wbe", -1);
+ Wm = networkstatus_get_bw_weight(NULL, "Wbm", -1);
+ Wd = networkstatus_get_bw_weight(NULL, "Wbd", -1);
+ Wg = networkstatus_get_bw_weight(NULL, "Wbg", -1);
+
+ Wgb = Wmb = Web = Wdb = weight_scale;
+ } else if (rule == NO_WEIGHTING) {
+ Wg = Wm = We = Wd = weight_scale;
+ Wgb = Wmb = Web = Wdb = weight_scale;
+ }
+
+ if (Wg < 0 || Wm < 0 || We < 0 || Wd < 0 || Wgb < 0 || Wmb < 0 || Wdb < 0
+ || Web < 0) {
+ log_debug(LD_CIRC,
+ "Got negative bandwidth weights. Defaulting to naive selection"
+ " algorithm.");
+ Wg = Wm = We = Wd = weight_scale;
+ Wgb = Wmb = Web = Wdb = weight_scale;
+ }
+
+ Wg /= weight_scale;
+ Wm /= weight_scale;
+ We /= weight_scale;
+ Wd /= weight_scale;
+
+ Wgb /= weight_scale;
+ Wmb /= weight_scale;
+ Web /= weight_scale;
+ Wdb /= weight_scale;
+
+ bandwidths = tor_calloc(smartlist_len(sl), sizeof(double));
+
+ // Cycle through smartlist and total the bandwidth.
+ static int warned_missing_bw = 0;
+ SMARTLIST_FOREACH_BEGIN(sl, const node_t *, node) {
+ int is_exit = 0, is_guard = 0, is_dir = 0, this_bw = 0;
+ double weight = 1;
+ double weight_without_guard_flag = 0; /* Used for guardfraction */
+ double final_weight = 0;
+ is_exit = node->is_exit && ! node->is_bad_exit;
+ is_guard = node->is_possible_guard;
+ is_dir = node_is_dir(node);
+ if (node->rs) {
+ if (!node->rs->has_bandwidth) {
+ /* This should never happen, unless all the authorities downgrade
+ * to 0.2.0 or rogue routerstatuses get inserted into our consensus. */
+ if (! warned_missing_bw) {
+ log_warn(LD_BUG,
+ "Consensus is missing some bandwidths. Using a naive "
+ "router selection algorithm");
+ warned_missing_bw = 1;
+ }
+ this_bw = 30000; /* Chosen arbitrarily */
+ } else {
+ this_bw = kb_to_bytes(node->rs->bandwidth_kb);
+ }
+ } else if (node->ri) {
+ /* bridge or other descriptor not in our consensus */
+ this_bw = bridge_get_advertised_bandwidth_bounded(node->ri);
+ } else {
+ /* We can't use this one. */
+ continue;
+ }
+
+ if (is_guard && is_exit) {
+ weight = (is_dir ? Wdb*Wd : Wd);
+ weight_without_guard_flag = (is_dir ? Web*We : We);
+ } else if (is_guard) {
+ weight = (is_dir ? Wgb*Wg : Wg);
+ weight_without_guard_flag = (is_dir ? Wmb*Wm : Wm);
+ } else if (is_exit) {
+ weight = (is_dir ? Web*We : We);
+ } else { // middle
+ weight = (is_dir ? Wmb*Wm : Wm);
+ }
+ /* These should be impossible; but overflows here would be bad, so let's
+ * make sure. */
+ if (this_bw < 0)
+ this_bw = 0;
+ if (weight < 0.0)
+ weight = 0.0;
+ if (weight_without_guard_flag < 0.0)
+ weight_without_guard_flag = 0.0;
+
+ /* If guardfraction information is available in the consensus, we
+ * want to calculate this router's bandwidth according to its
+ * guardfraction. Quoting from proposal236:
+ *
+ * Let Wpf denote the weight from the 'bandwidth-weights' line a
+ * client would apply to N for position p if it had the guard
+ * flag, Wpn the weight if it did not have the guard flag, and B the
+ * measured bandwidth of N in the consensus. Then instead of choosing
+ * N for position p proportionally to Wpf*B or Wpn*B, clients should
+ * choose N proportionally to F*Wpf*B + (1-F)*Wpn*B.
+ */
+ if (node->rs && node->rs->has_guardfraction && rule != WEIGHT_FOR_GUARD) {
+ /* XXX The assert should actually check for is_guard. However,
+ * that crashes dirauths because of #13297. This should be
+ * equivalent: */
+ tor_assert(node->rs->is_possible_guard);
+
+ guard_get_guardfraction_bandwidth(&guardfraction_bw,
+ this_bw,
+ node->rs->guardfraction_percentage);
+
+ /* Calculate final_weight = F*Wpf*B + (1-F)*Wpn*B */
+ final_weight =
+ guardfraction_bw.guard_bw * weight +
+ guardfraction_bw.non_guard_bw * weight_without_guard_flag;
+
+ log_debug(LD_GENERAL, "%s: Guardfraction weight %f instead of %f (%s)",
+ node->rs->nickname, final_weight, weight*this_bw,
+ bandwidth_weight_rule_to_string(rule));
+ } else { /* no guardfraction information. calculate the weight normally. */
+ final_weight = weight*this_bw;
+ }
+
+ bandwidths[node_sl_idx] = final_weight;
+ total_bandwidth += final_weight;
+ } SMARTLIST_FOREACH_END(node);
+
+ log_debug(LD_CIRC, "Generated weighted bandwidths for rule %s based "
+ "on weights "
+ "Wg=%f Wm=%f We=%f Wd=%f with total bw %f",
+ bandwidth_weight_rule_to_string(rule),
+ Wg, Wm, We, Wd, total_bandwidth);
+
+ *bandwidths_out = bandwidths;
+
+ if (total_bandwidth_out) {
+ *total_bandwidth_out = total_bandwidth;
+ }
+
+ return 0;
+}
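+
+/* Worked example of the proposal-236 guardfraction combination used above
+ * (illustrative numbers only): with measured bandwidth B = 1000000 bytes/s,
+ * guardfraction F = 25%, Wpf = 0.5, and Wpn = 1.0, the node's weight for
+ * this position becomes
+ *   0.25 * 0.5 * 1000000 + 0.75 * 1.0 * 1000000 = 875000,
+ * so a node that is "mostly not a guard" keeps most of its non-guard
+ * weight. */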
+
+/** For all nodes in <b>sl</b>, return the fraction of those nodes, weighted
+ * by their weighted bandwidths with rule <b>rule</b>, for which we have
+ * descriptors.
+ *
+ * If <b>for_direct_conn</b> is true, we intend to connect to the node
+ * directly, as the first hop of a circuit; otherwise, we intend to connect
+ * to it indirectly, or use it as if we were connecting to it indirectly. */
+double
+frac_nodes_with_descriptors(const smartlist_t *sl,
+ bandwidth_weight_rule_t rule,
+ int for_direct_conn)
+{
+ double *bandwidths = NULL;
+ double total, present;
+
+ if (smartlist_len(sl) == 0)
+ return 0.0;
+
+ if (compute_weighted_bandwidths(sl, rule, &bandwidths, &total) < 0 ||
+ total <= 0.0) {
+ int n_with_descs = 0;
+ SMARTLIST_FOREACH(sl, const node_t *, node, {
+ if (node_has_preferred_descriptor(node, for_direct_conn))
+ n_with_descs++;
+ });
+ tor_free(bandwidths);
+ return ((double)n_with_descs) / smartlist_len(sl);
+ }
+
+ present = 0.0;
+ SMARTLIST_FOREACH_BEGIN(sl, const node_t *, node) {
+ if (node_has_preferred_descriptor(node, for_direct_conn))
+ present += bandwidths[node_sl_idx];
+ } SMARTLIST_FOREACH_END(node);
+
+ tor_free(bandwidths);
+
+ return present / total;
+}
+
+/** Choose a random element of status list <b>sl</b>, weighted by
+ * the advertised bandwidth of each node */
+const node_t *
+node_sl_choose_by_bandwidth(const smartlist_t *sl,
+ bandwidth_weight_rule_t rule)
+{ /*XXXX MOVE */
+ return smartlist_choose_node_by_bandwidth_weights(sl, rule);
+}
+
+/** Return a random running node from the nodelist. Never
+ * pick a node that is in
+ * <b>excludedsmartlist</b>, or which matches <b>excludedset</b>,
+ * even if they are the only nodes available.
+ * If <b>CRN_NEED_UPTIME</b> is set in flags and any router has more than
+ * a minimum uptime, return one of those.
+ * If <b>CRN_NEED_CAPACITY</b> is set in flags, weight your choice by the
+ * advertised capacity of each router.
+ * If <b>CRN_NEED_GUARD</b> is set in flags, consider only Guard routers.
+ * If <b>CRN_WEIGHT_AS_EXIT</b> is set in flags, we weight bandwidths as if
+ * picking an exit node, otherwise we weight bandwidths for picking a relay
+ * node (that is, possibly discounting exit nodes).
+ * If <b>CRN_NEED_DESC</b> is set in flags, we only consider nodes that
+ * have a routerinfo or microdescriptor -- that is, enough info to be
+ * used to build a circuit.
+ * If <b>CRN_PREF_ADDR</b> is set in flags, we only consider nodes that
+ * have an address that is preferred by the ClientPreferIPv6ORPort setting
+ * (regardless of this flag, we exclude nodes that aren't allowed by the
+ * firewall, including ClientUseIPv4 0 and fascist_firewall_use_ipv6() == 0).
+ */
+const node_t *
+router_choose_random_node(smartlist_t *excludedsmartlist,
+ routerset_t *excludedset,
+ router_crn_flags_t flags)
+{ /* XXXX MOVE */
+ const int need_uptime = (flags & CRN_NEED_UPTIME) != 0;
+ const int need_capacity = (flags & CRN_NEED_CAPACITY) != 0;
+ const int need_guard = (flags & CRN_NEED_GUARD) != 0;
+ const int weight_for_exit = (flags & CRN_WEIGHT_AS_EXIT) != 0;
+ const int need_desc = (flags & CRN_NEED_DESC) != 0;
+ const int pref_addr = (flags & CRN_PREF_ADDR) != 0;
+ const int direct_conn = (flags & CRN_DIRECT_CONN) != 0;
+ const int rendezvous_v3 = (flags & CRN_RENDEZVOUS_V3) != 0;
+
+ smartlist_t *sl=smartlist_new(),
+ *excludednodes=smartlist_new();
+ const node_t *choice = NULL;
+ const routerinfo_t *r;
+ bandwidth_weight_rule_t rule;
+
+ tor_assert(!(weight_for_exit && need_guard));
+ rule = weight_for_exit ? WEIGHT_FOR_EXIT :
+ (need_guard ? WEIGHT_FOR_GUARD : WEIGHT_FOR_MID);
+
+ SMARTLIST_FOREACH_BEGIN(nodelist_get_list(), node_t *, node) {
+ if (node_allows_single_hop_exits(node)) {
+ /* Exclude relays that allow single hop exit circuits. This is an
+ * obsolete option since 0.2.9.2-alpha and done by default in
+ * 0.3.1.0-alpha. */
+ smartlist_add(excludednodes, node);
+ } else if (rendezvous_v3 &&
+ !node_supports_v3_rendezvous_point(node)) {
+ /* Exclude relays that do not support to rendezvous for a hidden service
+ * version 3. */
+ smartlist_add(excludednodes, node);
+ }
+ } SMARTLIST_FOREACH_END(node);
+
+ /* If the node_t is not found we won't be able to exclude ourselves, but we
+ * won't be able to pick ourselves in router_choose_random_node() either,
+ * so it is fine to at least try with our routerinfo_t object. */
+ if ((r = router_get_my_routerinfo()))
+ routerlist_add_node_and_family(excludednodes, r);
+
+ router_add_running_nodes_to_smartlist(sl, need_uptime, need_capacity,
+ need_guard, need_desc, pref_addr,
+ direct_conn);
+ log_debug(LD_CIRC,
+ "We found %d running nodes.",
+ smartlist_len(sl));
+
+ smartlist_subtract(sl,excludednodes);
+ log_debug(LD_CIRC,
+ "We removed %d excludednodes, leaving %d nodes.",
+ smartlist_len(excludednodes),
+ smartlist_len(sl));
+
+ if (excludedsmartlist) {
+ smartlist_subtract(sl,excludedsmartlist);
+ log_debug(LD_CIRC,
+ "We removed %d excludedsmartlist, leaving %d nodes.",
+ smartlist_len(excludedsmartlist),
+ smartlist_len(sl));
+ }
+ if (excludedset) {
+ routerset_subtract_nodes(sl,excludedset);
+ log_debug(LD_CIRC,
+ "We removed excludedset, leaving %d nodes.",
+ smartlist_len(sl));
+ }
+
+ // Always weight by bandwidth
+ choice = node_sl_choose_by_bandwidth(sl, rule);
+
+ smartlist_free(sl);
+ if (!choice && (need_uptime || need_capacity || need_guard || pref_addr)) {
+ /* try once more -- recurse but with fewer restrictions. */
+ log_info(LD_CIRC,
+ "We couldn't find any live%s%s%s routers; falling back "
+ "to list of all routers.",
+ need_capacity?", fast":"",
+ need_uptime?", stable":"",
+ need_guard?", guard":"");
+ flags &= ~ (CRN_NEED_UPTIME|CRN_NEED_CAPACITY|CRN_NEED_GUARD|
+ CRN_PREF_ADDR);
+ choice = router_choose_random_node(
+ excludedsmartlist, excludedset, flags);
+ }
+ smartlist_free(excludednodes);
+ if (!choice) {
+ log_warn(LD_CIRC,
+ "No available nodes when trying to choose node. Failing.");
+ }
+ return choice;
+}
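+
+/* A minimal caller sketch for router_choose_random_node() (illustrative
+ * only): pick a stable, fast node with the Guard flag and a usable
+ * descriptor, with no exclusions beyond the defaults.
+ *
+ *   const node_t *guard =
+ *     router_choose_random_node(NULL, NULL,
+ *                               CRN_NEED_UPTIME | CRN_NEED_CAPACITY |
+ *                               CRN_NEED_GUARD | CRN_NEED_DESC);
+ *   if (!guard)
+ *     log_warn(LD_CIRC, "No suitable node found.");
+ */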
+
+/** Helper: given an extended nickname in <b>hexdigest</b>, try to decode it.
+ * Return 0 on success, -1 on failure. Store the result into the
+ * DIGEST_LEN-byte buffer at <b>digest_out</b>, the single character at
+ * <b>nickname_qualifier_char_out</b>, and the MAX_NICKNAME_LEN+1-byte buffer
+ * at <b>nickname_out</b>.
+ *
+ * The recognized format is:
+ * HexName = Dollar? HexDigest NamePart?
+ * Dollar = '$'
+ * HexDigest = HexChar*40
+ * HexChar = 'a'..'f' | 'A'..'F' | '0'..'9'
+ * NamePart = QualChar Name
+ * QualChar = '=' | '~'
+ * Name = NameChar*(1..MAX_NICKNAME_LEN)
+ * NameChar = Any ASCII alphanumeric character
+ */
+int
+hex_digest_nickname_decode(const char *hexdigest,
+ char *digest_out,
+ char *nickname_qualifier_char_out,
+ char *nickname_out)
+{
+ size_t len;
+
+ tor_assert(hexdigest);
+ if (hexdigest[0] == '$')
+ ++hexdigest;
+
+ len = strlen(hexdigest);
+ if (len < HEX_DIGEST_LEN) {
+ return -1;
+ } else if (len > HEX_DIGEST_LEN && (hexdigest[HEX_DIGEST_LEN] == '=' ||
+ hexdigest[HEX_DIGEST_LEN] == '~') &&
+ len <= HEX_DIGEST_LEN+1+MAX_NICKNAME_LEN) {
+ *nickname_qualifier_char_out = hexdigest[HEX_DIGEST_LEN];
+ strlcpy(nickname_out, hexdigest+HEX_DIGEST_LEN+1 , MAX_NICKNAME_LEN+1);
+ } else if (len == HEX_DIGEST_LEN) {
+ ;
+ } else {
+ return -1;
+ }
+
+ if (base16_decode(digest_out, DIGEST_LEN,
+ hexdigest, HEX_DIGEST_LEN) != DIGEST_LEN)
+ return -1;
+ return 0;
+}
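+
+/* Example inputs accepted by hex_digest_nickname_decode() (hex digests are
+ * shortened here for readability; a real one is 40 hex characters, the
+ * leading '$' is optional, and "SomeNickname" is just a placeholder):
+ *   "$AB12..EF90"               -> digest only
+ *   "$AB12..EF90~SomeNickname"  -> digest, qualifier '~', nickname
+ *   "$AB12..EF90=SomeNickname"  -> digest, qualifier '=', nickname */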
+
+/** Helper: Return true iff the <b>identity_digest</b> and <b>nickname</b>
+ * combination of a router, encoded in hexadecimal, matches <b>hexdigest</b>
+ * (which is optionally prefixed with a single dollar sign). Return false if
+ * <b>hexdigest</b> is malformed, or it doesn't match. */
+int
+hex_digest_nickname_matches(const char *hexdigest, const char *identity_digest,
+ const char *nickname)
+{
+ char digest[DIGEST_LEN];
+ char nn_char='\0';
+ char nn_buf[MAX_NICKNAME_LEN+1];
+
+ if (hex_digest_nickname_decode(hexdigest, digest, &nn_char, nn_buf) == -1)
+ return 0;
+
+ if (nn_char == '=') {
+ return 0;
+ }
+
+ if (nn_char == '~') {
+ if (!nickname) // XXX This seems wrong. -NM
+ return 0;
+ if (strcasecmp(nn_buf, nickname))
+ return 0;
+ }
+
+ return tor_memeq(digest, identity_digest, DIGEST_LEN);
+}
+
+/** Return true iff <b>digest</b> is the digest of the identity key of a
+ * trusted directory matching at least one bit of <b>type</b>. If <b>type</b>
+ * is zero (NO_DIRINFO), or ALL_DIRINFO, any authority is okay. */
+int
+router_digest_is_trusted_dir_type(const char *digest, dirinfo_type_t type)
+{
+ if (!trusted_dir_servers)
+ return 0;
+ if (authdir_mode(get_options()) && router_digest_is_me(digest))
+ return 1;
+ SMARTLIST_FOREACH(trusted_dir_servers, dir_server_t *, ent,
+ if (tor_memeq(digest, ent->digest, DIGEST_LEN)) {
+ return (!type) || ((type & ent->type) != 0);
+ });
+ return 0;
+}
+
+/** If hexdigest is correctly formed, base16_decode it into
+ * digest, which must have DIGEST_LEN space in it.
+ * Return 0 on success, -1 on failure.
+ */
+int
+hexdigest_to_digest(const char *hexdigest, char *digest)
+{
+ if (hexdigest[0]=='$')
+ ++hexdigest;
+ if (strlen(hexdigest) < HEX_DIGEST_LEN ||
+ base16_decode(digest,DIGEST_LEN,hexdigest,HEX_DIGEST_LEN) != DIGEST_LEN)
+ return -1;
+ return 0;
+}
+
+/** As router_get_by_id_digest(), but return a pointer that you're allowed
+ * to modify. */
+routerinfo_t *
+router_get_mutable_by_digest(const char *digest)
+{
+ tor_assert(digest);
+
+ if (!routerlist) return NULL;
+
+ // routerlist_assert_ok(routerlist);
+
+ return rimap_get(routerlist->identity_map, digest);
+}
+
+/** Return the router in our routerlist whose 20-byte key digest
+ * is <b>digest</b>. Return NULL if no such router is known. */
+const routerinfo_t *
+router_get_by_id_digest(const char *digest)
+{
+ return router_get_mutable_by_digest(digest);
+}
+
+/** Return the signed descriptor for the router in our routerlist whose
+ * 20-byte descriptor digest is <b>digest</b>. Return NULL if no such router
+ * is known. */
+signed_descriptor_t *
+router_get_by_descriptor_digest(const char *digest)
+{
+ tor_assert(digest);
+
+ if (!routerlist) return NULL;
+
+ return sdmap_get(routerlist->desc_digest_map, digest);
+}
+
+/** Return the signed descriptor for the router in our routerlist whose
+ * 20-byte extra-info digest is <b>digest</b>. Return NULL if no such router
+ * is known. */
+MOCK_IMPL(signed_descriptor_t *,
+router_get_by_extrainfo_digest,(const char *digest))
+{
+ tor_assert(digest);
+
+ if (!routerlist) return NULL;
+
+ return sdmap_get(routerlist->desc_by_eid_map, digest);
+}
+
+/** Return the signed descriptor for the extrainfo_t in our routerlist whose
+ * extra-info-digest is <b>digest</b>. Return NULL if no such extra-info
+ * document is known. */
+MOCK_IMPL(signed_descriptor_t *,
+extrainfo_get_by_descriptor_digest,(const char *digest))
+{
+ extrainfo_t *ei;
+ tor_assert(digest);
+ if (!routerlist) return NULL;
+ ei = eimap_get(routerlist->extra_info_map, digest);
+ return ei ? &ei->cache_info : NULL;
+}
+
+/** Return a pointer to the signed textual representation of a descriptor.
+ * The returned string is not guaranteed to be NUL-terminated: the string's
+ * length will be in desc-\>signed_descriptor_len.
+ *
+ * If <b>with_annotations</b> is set, the returned string will include
+ * the annotations
+ * (if any) preceding the descriptor. This will increase the length of the
+ * string by desc-\>annotations_len.
+ *
+ * The caller must not free the string returned.
+ */
+static const char *
+signed_descriptor_get_body_impl(const signed_descriptor_t *desc,
+ int with_annotations)
+{
+ const char *r = NULL;
+ size_t len = desc->signed_descriptor_len;
+ off_t offset = desc->saved_offset;
+ if (with_annotations)
+ len += desc->annotations_len;
+ else
+ offset += desc->annotations_len;
+
+ tor_assert(len > 32);
+ if (desc->saved_location == SAVED_IN_CACHE && routerlist) {
+ desc_store_t *store = desc_get_store(router_get_routerlist(), desc);
+ if (store && store->mmap) {
+ tor_assert(desc->saved_offset + len <= store->mmap->size);
+ r = store->mmap->data + offset;
+ } else if (store) {
+ log_err(LD_DIR, "We couldn't read a descriptor that is supposedly "
+ "mmaped in our cache. Is another process running in our data "
+ "directory? Exiting.");
+ exit(1); // XXXX bad exit: should recover.
+ }
+ }
+ if (!r) /* no mmap, or not in cache. */
+ r = desc->signed_descriptor_body +
+ (with_annotations ? 0 : desc->annotations_len);
+
+ tor_assert(r);
+ if (!with_annotations) {
+ if (fast_memcmp("router ", r, 7) && fast_memcmp("extra-info ", r, 11)) {
+ char *cp = tor_strndup(r, 64);
+ log_err(LD_DIR, "descriptor at %p begins with unexpected string %s. "
+ "Is another process running in our data directory? Exiting.",
+ desc, escaped(cp));
+ exit(1); // XXXX bad exit: should recover.
+ }
+ }
+
+ return r;
+}
+
+/** Return a pointer to the signed textual representation of a descriptor.
+ * The returned string is not guaranteed to be NUL-terminated: the string's
+ * length will be in desc-\>signed_descriptor_len.
+ *
+ * The caller must not free the string returned.
+ */
+const char *
+signed_descriptor_get_body(const signed_descriptor_t *desc)
+{
+ return signed_descriptor_get_body_impl(desc, 0);
+}
+
+/** As signed_descriptor_get_body(), but points to the beginning of the
+ * annotations section rather than the beginning of the descriptor. */
+const char *
+signed_descriptor_get_annotations(const signed_descriptor_t *desc)
+{
+ return signed_descriptor_get_body_impl(desc, 1);
+}
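+
+/* Layout sketch for the two accessors above (descriptive only): for a
+ * descriptor stored with annotations, signed_descriptor_get_annotations()
+ * points at the first annotation byte, and signed_descriptor_get_body()
+ * points annotations_len bytes later, at the "router " (or "extra-info ")
+ * keyword; the body then runs for signed_descriptor_len bytes and is not
+ * necessarily NUL-terminated. */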
+
+/** Return the current list of all known routers. */
+routerlist_t *
+router_get_routerlist(void)
+{
+ if (PREDICT_UNLIKELY(!routerlist)) {
+ routerlist = tor_malloc_zero(sizeof(routerlist_t));
+ routerlist->routers = smartlist_new();
+ routerlist->old_routers = smartlist_new();
+ routerlist->identity_map = rimap_new();
+ routerlist->desc_digest_map = sdmap_new();
+ routerlist->desc_by_eid_map = sdmap_new();
+ routerlist->extra_info_map = eimap_new();
+
+ routerlist->desc_store.fname_base = "cached-descriptors";
+ routerlist->extrainfo_store.fname_base = "cached-extrainfo";
+
+ routerlist->desc_store.type = ROUTER_STORE;
+ routerlist->extrainfo_store.type = EXTRAINFO_STORE;
+
+ routerlist->desc_store.description = "router descriptors";
+ routerlist->extrainfo_store.description = "extra-info documents";
+ }
+ return routerlist;
+}
+
+/** Free all storage held by <b>router</b>. */
+void
+routerinfo_free_(routerinfo_t *router)
+{
+ if (!router)
+ return;
+
+ tor_free(router->cache_info.signed_descriptor_body);
+ tor_free(router->nickname);
+ tor_free(router->platform);
+ tor_free(router->protocol_list);
+ tor_free(router->contact_info);
+ if (router->onion_pkey)
+ crypto_pk_free(router->onion_pkey);
+ tor_free(router->onion_curve25519_pkey);
+ if (router->identity_pkey)
+ crypto_pk_free(router->identity_pkey);
+ tor_cert_free(router->cache_info.signing_key_cert);
+ if (router->declared_family) {
+ SMARTLIST_FOREACH(router->declared_family, char *, s, tor_free(s));
+ smartlist_free(router->declared_family);
+ }
+ addr_policy_list_free(router->exit_policy);
+ short_policy_free(router->ipv6_exit_policy);
+
+ memset(router, 77, sizeof(routerinfo_t));
+
+ tor_free(router);
+}
+
+/** Release all storage held by <b>extrainfo</b> */
+void
+extrainfo_free_(extrainfo_t *extrainfo)
+{
+ if (!extrainfo)
+ return;
+ tor_cert_free(extrainfo->cache_info.signing_key_cert);
+ tor_free(extrainfo->cache_info.signed_descriptor_body);
+ tor_free(extrainfo->pending_sig);
+
+ memset(extrainfo, 88, sizeof(extrainfo_t)); /* debug bad memory usage */
+ tor_free(extrainfo);
+}
+
+#define signed_descriptor_free(val) \
+ FREE_AND_NULL(signed_descriptor_t, signed_descriptor_free_, (val))
+
+/** Release storage held by <b>sd</b>. */
+static void
+signed_descriptor_free_(signed_descriptor_t *sd)
+{
+ if (!sd)
+ return;
+
+ tor_free(sd->signed_descriptor_body);
+ tor_cert_free(sd->signing_key_cert);
+
+ memset(sd, 99, sizeof(signed_descriptor_t)); /* Debug bad mem usage */
+ tor_free(sd);
+}
+
+/** Reset the given signed descriptor <b>sd</b> by freeing the allocated
+ * memory inside the object and by zeroing its content. */
+static void
+signed_descriptor_reset(signed_descriptor_t *sd)
+{
+ tor_assert(sd);
+ tor_free(sd->signed_descriptor_body);
+ tor_cert_free(sd->signing_key_cert);
+ memset(sd, 0, sizeof(*sd));
+}
+
+/** Copy src into dest, and steal all references inside src so that when
+ * we free src, we don't mess up dest. */
+static void
+signed_descriptor_move(signed_descriptor_t *dest,
+ signed_descriptor_t *src)
+{
+ tor_assert(dest != src);
+ /* Cleanup destination object before overwriting it.*/
+ signed_descriptor_reset(dest);
+ memcpy(dest, src, sizeof(signed_descriptor_t));
+ src->signed_descriptor_body = NULL;
+ src->signing_key_cert = NULL;
+ dest->routerlist_index = -1;
+}
+
+/** Extract a signed_descriptor_t from a general routerinfo, and free the
+ * routerinfo.
+ */
+static signed_descriptor_t *
+signed_descriptor_from_routerinfo(routerinfo_t *ri)
+{
+ signed_descriptor_t *sd;
+ tor_assert(ri->purpose == ROUTER_PURPOSE_GENERAL);
+ sd = tor_malloc_zero(sizeof(signed_descriptor_t));
+ signed_descriptor_move(sd, &ri->cache_info);
+ routerinfo_free(ri);
+ return sd;
+}
+
+/** Helper: free the storage held by the extrainfo_t in <b>e</b>. */
+static void
+extrainfo_free_void(void *e)
+{
+ extrainfo_free_(e);
+}
+
+/** Free all storage held by a routerlist <b>rl</b>. */
+void
+routerlist_free_(routerlist_t *rl)
+{
+ if (!rl)
+ return;
+ rimap_free(rl->identity_map, NULL);
+ sdmap_free(rl->desc_digest_map, NULL);
+ sdmap_free(rl->desc_by_eid_map, NULL);
+ eimap_free(rl->extra_info_map, extrainfo_free_void);
+ SMARTLIST_FOREACH(rl->routers, routerinfo_t *, r,
+ routerinfo_free(r));
+ SMARTLIST_FOREACH(rl->old_routers, signed_descriptor_t *, sd,
+ signed_descriptor_free(sd));
+ smartlist_free(rl->routers);
+ smartlist_free(rl->old_routers);
+ if (rl->desc_store.mmap) {
+ int res = tor_munmap_file(routerlist->desc_store.mmap);
+ if (res != 0) {
+ log_warn(LD_FS, "Failed to munmap routerlist->desc_store.mmap");
+ }
+ }
+ if (rl->extrainfo_store.mmap) {
+ int res = tor_munmap_file(routerlist->extrainfo_store.mmap);
+ if (res != 0) {
+ log_warn(LD_FS, "Failed to munmap routerlist->extrainfo_store.mmap");
+ }
+ }
+ tor_free(rl);
+
+ router_dir_info_changed();
+}
+
+/** Log information about how much memory is being used for routerlist,
+ * at log level <b>severity</b>. */
+void
+dump_routerlist_mem_usage(int severity)
+{
+ uint64_t livedescs = 0;
+ uint64_t olddescs = 0;
+ if (!routerlist)
+ return;
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, r,
+ livedescs += r->cache_info.signed_descriptor_len);
+ SMARTLIST_FOREACH(routerlist->old_routers, signed_descriptor_t *, sd,
+ olddescs += sd->signed_descriptor_len);
+
+ tor_log(severity, LD_DIR,
+ "In %d live descriptors: %"PRIu64" bytes. "
+ "In %d old descriptors: %"PRIu64" bytes.",
+ smartlist_len(routerlist->routers), (livedescs),
+ smartlist_len(routerlist->old_routers), (olddescs));
+}
+
+/** Debugging helper: If <b>idx</b> is nonnegative, assert that <b>ri</b> is
+ * in <b>sl</b> at position <b>idx</b>. Otherwise, search <b>sl</b> for
+ * <b>ri</b>. Return the index of <b>ri</b> in <b>sl</b>, or -1 if <b>ri</b>
+ * is not in <b>sl</b>. */
+static inline int
+routerlist_find_elt_(smartlist_t *sl, void *ri, int idx)
+{
+ if (idx < 0) {
+ idx = -1;
+ SMARTLIST_FOREACH(sl, routerinfo_t *, r,
+ if (r == ri) {
+ idx = r_sl_idx;
+ break;
+ });
+ } else {
+ tor_assert(idx < smartlist_len(sl));
+ tor_assert(smartlist_get(sl, idx) == ri);
+ }
+ return idx;
+}
+
+/** Insert an item <b>ri</b> into the routerlist <b>rl</b>, updating indices
+ * as needed. There must be no previous member of <b>rl</b> with the same
+ * identity digest as <b>ri</b>: If there is, call routerlist_replace
+ * instead.
+ */
+static void
+routerlist_insert(routerlist_t *rl, routerinfo_t *ri)
+{
+ routerinfo_t *ri_old;
+ signed_descriptor_t *sd_old;
+ {
+ const routerinfo_t *ri_generated = router_get_my_routerinfo();
+ tor_assert(ri_generated != ri);
+ }
+ tor_assert(ri->cache_info.routerlist_index == -1);
+
+ ri_old = rimap_set(rl->identity_map, ri->cache_info.identity_digest, ri);
+ tor_assert(!ri_old);
+
+ sd_old = sdmap_set(rl->desc_digest_map,
+ ri->cache_info.signed_descriptor_digest,
+ &(ri->cache_info));
+ if (sd_old) {
+ int idx = sd_old->routerlist_index;
+ sd_old->routerlist_index = -1;
+ smartlist_del(rl->old_routers, idx);
+ if (idx < smartlist_len(rl->old_routers)) {
+ signed_descriptor_t *d = smartlist_get(rl->old_routers, idx);
+ d->routerlist_index = idx;
+ }
+ rl->desc_store.bytes_dropped += sd_old->signed_descriptor_len;
+ sdmap_remove(rl->desc_by_eid_map, sd_old->extra_info_digest);
+ signed_descriptor_free(sd_old);
+ }
+
+ if (!tor_digest_is_zero(ri->cache_info.extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, ri->cache_info.extra_info_digest,
+ &ri->cache_info);
+ smartlist_add(rl->routers, ri);
+ ri->cache_info.routerlist_index = smartlist_len(rl->routers) - 1;
+ nodelist_set_routerinfo(ri, NULL);
+ router_dir_info_changed();
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Adds the extrainfo_t <b>ei</b> to the routerlist <b>rl</b>, if there is a
+ * corresponding router in rl-\>routers or rl-\>old_routers. Return the status
+ * of inserting <b>ei</b>. Free <b>ei</b> if it isn't inserted. */
+MOCK_IMPL(STATIC was_router_added_t,
+extrainfo_insert,(routerlist_t *rl, extrainfo_t *ei, int warn_if_incompatible))
+{
+ was_router_added_t r;
+ const char *compatibility_error_msg;
+ routerinfo_t *ri = rimap_get(rl->identity_map,
+ ei->cache_info.identity_digest);
+ signed_descriptor_t *sd =
+ sdmap_get(rl->desc_by_eid_map, ei->cache_info.signed_descriptor_digest);
+ extrainfo_t *ei_tmp;
+ const int severity = warn_if_incompatible ? LOG_WARN : LOG_INFO;
+
+ {
+ extrainfo_t *ei_generated = router_get_my_extrainfo();
+ tor_assert(ei_generated != ei);
+ }
+
+ if (!ri) {
+ /* This router is unknown; we can't even verify the signature. Give up.*/
+ r = ROUTER_NOT_IN_CONSENSUS;
+ goto done;
+ }
+ if (! sd) {
+ /* The extrainfo doesn't have a known router descriptor to attach it to.
+ * This just won't work. */
+ static ratelim_t no_sd_ratelim = RATELIM_INIT(1800);
+ r = ROUTER_BAD_EI;
+ log_fn_ratelim(&no_sd_ratelim, severity, LD_BUG,
+ "No entry found in extrainfo map.");
+ goto done;
+ }
+ if (tor_memneq(ei->cache_info.signed_descriptor_digest,
+ sd->extra_info_digest, DIGEST_LEN)) {
+ static ratelim_t digest_mismatch_ratelim = RATELIM_INIT(1800);
+ /* The sd we got from the map doesn't match the digest we used to look
+ * it up. This makes no sense. */
+ r = ROUTER_BAD_EI;
+ log_fn_ratelim(&digest_mismatch_ratelim, severity, LD_BUG,
+ "Mismatch in digest in extrainfo map.");
+ goto done;
+ }
+ if (routerinfo_incompatible_with_extrainfo(ri->identity_pkey, ei, sd,
+ &compatibility_error_msg)) {
+ char d1[HEX_DIGEST_LEN+1], d2[HEX_DIGEST_LEN+1];
+ r = (ri->cache_info.extrainfo_is_bogus) ?
+ ROUTER_BAD_EI : ROUTER_NOT_IN_CONSENSUS;
+
+ base16_encode(d1, sizeof(d1), ri->cache_info.identity_digest, DIGEST_LEN);
+ base16_encode(d2, sizeof(d2), ei->cache_info.identity_digest, DIGEST_LEN);
+
+ log_fn(severity,LD_DIR,
+ "router info incompatible with extra info (ri id: %s, ei id %s, "
+ "reason: %s)", d1, d2, compatibility_error_msg);
+
+ goto done;
+ }
+
+ /* Okay, if we make it here, we definitely have a router corresponding to
+ * this extrainfo. */
+
+ ei_tmp = eimap_set(rl->extra_info_map,
+ ei->cache_info.signed_descriptor_digest,
+ ei);
+ r = ROUTER_ADDED_SUCCESSFULLY;
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+
+ done:
+ if (r != ROUTER_ADDED_SUCCESSFULLY)
+ extrainfo_free(ei);
+
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+ return r;
+}
+
+#define should_cache_old_descriptors() \
+ directory_caches_dir_info(get_options())
+
+/** If we're a directory cache and routerlist <b>rl</b> doesn't have
+ * a copy of router <b>ri</b> yet, add it to the list of old (not
+ * recommended but still served) descriptors. Else free it. */
+static void
+routerlist_insert_old(routerlist_t *rl, routerinfo_t *ri)
+{
+ {
+ const routerinfo_t *ri_generated = router_get_my_routerinfo();
+ tor_assert(ri_generated != ri);
+ }
+ tor_assert(ri->cache_info.routerlist_index == -1);
+
+ if (should_cache_old_descriptors() &&
+ ri->purpose == ROUTER_PURPOSE_GENERAL &&
+ !sdmap_get(rl->desc_digest_map,
+ ri->cache_info.signed_descriptor_digest)) {
+ signed_descriptor_t *sd = signed_descriptor_from_routerinfo(ri);
+ sdmap_set(rl->desc_digest_map, sd->signed_descriptor_digest, sd);
+ smartlist_add(rl->old_routers, sd);
+ sd->routerlist_index = smartlist_len(rl->old_routers)-1;
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, sd->extra_info_digest, sd);
+ } else {
+ routerinfo_free(ri);
+ }
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Remove an item <b>ri</b> from the routerlist <b>rl</b>, updating indices
+ * as needed. If <b>idx</b> is nonnegative and smartlist_get(rl-&gt;routers,
+ * idx) == ri, we don't need to do a linear search over the list to decide
+ * which to remove. We fill the gap in rl-&gt;routers with a later element in
+ * the list, if any exists. <b>ri</b> is freed.
+ *
+ * If <b>make_old</b> is true, instead of deleting the router, we try adding
+ * it to rl-&gt;old_routers. */
+void
+routerlist_remove(routerlist_t *rl, routerinfo_t *ri, int make_old, time_t now)
+{
+ routerinfo_t *ri_tmp;
+ extrainfo_t *ei_tmp;
+ int idx = ri->cache_info.routerlist_index;
+ tor_assert(0 <= idx && idx < smartlist_len(rl->routers));
+ tor_assert(smartlist_get(rl->routers, idx) == ri);
+
+ nodelist_remove_routerinfo(ri);
+
+ /* make sure the rephist module knows that it's not running */
+ rep_hist_note_router_unreachable(ri->cache_info.identity_digest, now);
+
+ ri->cache_info.routerlist_index = -1;
+ smartlist_del(rl->routers, idx);
+ if (idx < smartlist_len(rl->routers)) {
+ routerinfo_t *r = smartlist_get(rl->routers, idx);
+ r->cache_info.routerlist_index = idx;
+ }
+
+ ri_tmp = rimap_remove(rl->identity_map, ri->cache_info.identity_digest);
+ router_dir_info_changed();
+ tor_assert(ri_tmp == ri);
+
+ if (make_old && should_cache_old_descriptors() &&
+ ri->purpose == ROUTER_PURPOSE_GENERAL) {
+ signed_descriptor_t *sd;
+ sd = signed_descriptor_from_routerinfo(ri);
+ smartlist_add(rl->old_routers, sd);
+ sd->routerlist_index = smartlist_len(rl->old_routers)-1;
+ sdmap_set(rl->desc_digest_map, sd->signed_descriptor_digest, sd);
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, sd->extra_info_digest, sd);
+ } else {
+ signed_descriptor_t *sd_tmp;
+ sd_tmp = sdmap_remove(rl->desc_digest_map,
+ ri->cache_info.signed_descriptor_digest);
+ tor_assert(sd_tmp == &(ri->cache_info));
+ rl->desc_store.bytes_dropped += ri->cache_info.signed_descriptor_len;
+ ei_tmp = eimap_remove(rl->extra_info_map,
+ ri->cache_info.extra_info_digest);
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+ if (!tor_digest_is_zero(ri->cache_info.extra_info_digest))
+ sdmap_remove(rl->desc_by_eid_map, ri->cache_info.extra_info_digest);
+ routerinfo_free(ri);
+ }
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Remove a signed_descriptor_t <b>sd</b> from <b>rl</b>-\>old_routers, and
+ * adjust <b>rl</b> as appropriate. <b>idx</b> is -1, or the index of
+ * <b>sd</b>. */
+static void
+routerlist_remove_old(routerlist_t *rl, signed_descriptor_t *sd, int idx)
+{
+ signed_descriptor_t *sd_tmp;
+ extrainfo_t *ei_tmp;
+ desc_store_t *store;
+ if (idx == -1) {
+ idx = sd->routerlist_index;
+ }
+ tor_assert(0 <= idx && idx < smartlist_len(rl->old_routers));
+ /* XXXX edmanm's bridge relay triggered the following assert while
+ * running 0.2.0.12-alpha. If anybody triggers this again, see if we
+ * can get a backtrace. */
+ tor_assert(smartlist_get(rl->old_routers, idx) == sd);
+ tor_assert(idx == sd->routerlist_index);
+
+ sd->routerlist_index = -1;
+ smartlist_del(rl->old_routers, idx);
+ if (idx < smartlist_len(rl->old_routers)) {
+ signed_descriptor_t *d = smartlist_get(rl->old_routers, idx);
+ d->routerlist_index = idx;
+ }
+ sd_tmp = sdmap_remove(rl->desc_digest_map,
+ sd->signed_descriptor_digest);
+ tor_assert(sd_tmp == sd);
+ store = desc_get_store(rl, sd);
+ if (store)
+ store->bytes_dropped += sd->signed_descriptor_len;
+
+ ei_tmp = eimap_remove(rl->extra_info_map,
+ sd->extra_info_digest);
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_remove(rl->desc_by_eid_map, sd->extra_info_digest);
+
+ signed_descriptor_free(sd);
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Remove <b>ri_old</b> from the routerlist <b>rl</b>, and replace it with
+ * <b>ri_new</b>, updating all index info. We put <b>ri_new</b> in the same
+ * index as <b>ri_old</b>, if possible. <b>ri_old</b> is freed as appropriate.
+ *
+ * If should_cache_old_descriptors() is true, instead of deleting
+ * <b>ri_old</b>, we add it to rl-&gt;old_routers. */
+static void
+routerlist_replace(routerlist_t *rl, routerinfo_t *ri_old,
+ routerinfo_t *ri_new)
+{
+ int idx;
+ int same_descriptors;
+
+ routerinfo_t *ri_tmp;
+ extrainfo_t *ei_tmp;
+ {
+ const routerinfo_t *ri_generated = router_get_my_routerinfo();
+ tor_assert(ri_generated != ri_new);
+ }
+ tor_assert(ri_old != ri_new);
+ tor_assert(ri_new->cache_info.routerlist_index == -1);
+
+ idx = ri_old->cache_info.routerlist_index;
+ tor_assert(0 <= idx && idx < smartlist_len(rl->routers));
+ tor_assert(smartlist_get(rl->routers, idx) == ri_old);
+
+ {
+ routerinfo_t *ri_old_tmp=NULL;
+ nodelist_set_routerinfo(ri_new, &ri_old_tmp);
+ tor_assert(ri_old == ri_old_tmp);
+ }
+
+ router_dir_info_changed();
+ if (idx >= 0) {
+ smartlist_set(rl->routers, idx, ri_new);
+ ri_old->cache_info.routerlist_index = -1;
+ ri_new->cache_info.routerlist_index = idx;
+ /* Check that ri_old is not in rl->routers anymore: */
+ tor_assert( routerlist_find_elt_(rl->routers, ri_old, -1) == -1 );
+ } else {
+ log_warn(LD_BUG, "Appending entry from routerlist_replace.");
+ routerlist_insert(rl, ri_new);
+ return;
+ }
+ if (tor_memneq(ri_old->cache_info.identity_digest,
+ ri_new->cache_info.identity_digest, DIGEST_LEN)) {
+ /* digests don't match; digestmap_set won't replace */
+ rimap_remove(rl->identity_map, ri_old->cache_info.identity_digest);
+ }
+ ri_tmp = rimap_set(rl->identity_map,
+ ri_new->cache_info.identity_digest, ri_new);
+ tor_assert(!ri_tmp || ri_tmp == ri_old);
+ sdmap_set(rl->desc_digest_map,
+ ri_new->cache_info.signed_descriptor_digest,
+ &(ri_new->cache_info));
+
+ if (!tor_digest_is_zero(ri_new->cache_info.extra_info_digest)) {
+ sdmap_set(rl->desc_by_eid_map, ri_new->cache_info.extra_info_digest,
+ &ri_new->cache_info);
+ }
+
+ same_descriptors = tor_memeq(ri_old->cache_info.signed_descriptor_digest,
+ ri_new->cache_info.signed_descriptor_digest,
+ DIGEST_LEN);
+
+ if (should_cache_old_descriptors() &&
+ ri_old->purpose == ROUTER_PURPOSE_GENERAL &&
+ !same_descriptors) {
+ /* ri_old is going to become a signed_descriptor_t and go into
+ * old_routers */
+ signed_descriptor_t *sd = signed_descriptor_from_routerinfo(ri_old);
+ smartlist_add(rl->old_routers, sd);
+ sd->routerlist_index = smartlist_len(rl->old_routers)-1;
+ sdmap_set(rl->desc_digest_map, sd->signed_descriptor_digest, sd);
+ if (!tor_digest_is_zero(sd->extra_info_digest))
+ sdmap_set(rl->desc_by_eid_map, sd->extra_info_digest, sd);
+ } else {
+ /* We're dropping ri_old. */
+ if (!same_descriptors) {
+ /* digests don't match; The sdmap_set above didn't replace */
+ sdmap_remove(rl->desc_digest_map,
+ ri_old->cache_info.signed_descriptor_digest);
+
+ if (tor_memneq(ri_old->cache_info.extra_info_digest,
+ ri_new->cache_info.extra_info_digest, DIGEST_LEN)) {
+ ei_tmp = eimap_remove(rl->extra_info_map,
+ ri_old->cache_info.extra_info_digest);
+ if (ei_tmp) {
+ rl->extrainfo_store.bytes_dropped +=
+ ei_tmp->cache_info.signed_descriptor_len;
+ extrainfo_free(ei_tmp);
+ }
+ }
+
+ if (!tor_digest_is_zero(ri_old->cache_info.extra_info_digest)) {
+ sdmap_remove(rl->desc_by_eid_map,
+ ri_old->cache_info.extra_info_digest);
+ }
+ }
+ rl->desc_store.bytes_dropped += ri_old->cache_info.signed_descriptor_len;
+ routerinfo_free(ri_old);
+ }
+#ifdef DEBUG_ROUTERLIST
+ routerlist_assert_ok(rl);
+#endif
+}
+
+/** Extract the descriptor <b>sd</b> from <b>rl</b>'s list of old
+ * descriptors, re-parse it as a fresh routerinfo_t, and remove it from
+ * rl-&gt;old_routers. */
+static routerinfo_t *
+routerlist_reparse_old(routerlist_t *rl, signed_descriptor_t *sd)
+{
+ routerinfo_t *ri;
+ const char *body;
+
+ body = signed_descriptor_get_annotations(sd);
+
+ ri = router_parse_entry_from_string(body,
+ body+sd->signed_descriptor_len+sd->annotations_len,
+ 0, 1, NULL, NULL);
+ if (!ri)
+ return NULL;
+ signed_descriptor_move(&ri->cache_info, sd);
+
+ routerlist_remove_old(rl, sd, -1);
+
+ return ri;
+}
+
+/** Free all memory held by the routerlist module.
+ * Note: Calling routerlist_free_all() should always be paired with
+ * a call to nodelist_free_all(). These should only be called during
+ * cleanup.
+ */
+void
+routerlist_free_all(void)
+{
+ routerlist_free(routerlist);
+ routerlist = NULL;
+ if (warned_nicknames) {
+ SMARTLIST_FOREACH(warned_nicknames, char *, cp, tor_free(cp));
+ smartlist_free(warned_nicknames);
+ warned_nicknames = NULL;
+ }
+ clear_dir_servers();
+ smartlist_free(trusted_dir_servers);
+ smartlist_free(fallback_dir_servers);
+ trusted_dir_servers = fallback_dir_servers = NULL;
+ if (trusted_dir_certs) {
+ digestmap_free(trusted_dir_certs, cert_list_free_void);
+ trusted_dir_certs = NULL;
+ }
+}
+
+/** Forget that we have issued any router-related warnings, so that we'll
+ * warn again if we see the same errors. */
+void
+routerlist_reset_warnings(void)
+{
+ if (!warned_nicknames)
+ warned_nicknames = smartlist_new();
+ SMARTLIST_FOREACH(warned_nicknames, char *, cp, tor_free(cp));
+ smartlist_clear(warned_nicknames); /* now the list is empty. */
+
+ networkstatus_reset_warnings();
+}
+
+/** Return 1 if the signed descriptor of this router is older than
+ * <b>seconds</b> seconds. Otherwise return 0. */
+MOCK_IMPL(int,
+router_descriptor_is_older_than,(const routerinfo_t *router, int seconds))
+{
+ return router->cache_info.published_on < approx_time() - seconds;
+}
+
+/** Add <b>router</b> to the routerlist, if we don't already have it. Replace
+ * older entries (if any) with the same key. Note: Callers should not hold
+ * their pointers to <b>router</b> if this function fails; <b>router</b>
+ * will either be inserted into the routerlist or freed. Similarly, even
+ * if this call succeeds, they should not hold their pointers to
+ * <b>router</b> after subsequent calls with other routerinfo's -- they
+ * might cause the original routerinfo to get freed.
+ *
+ * Returns the status for the operation. Might set *<b>msg</b> if it wants
+ * the poster of the router to know something.
+ *
+ * If <b>from_cache</b>, this descriptor came from our disk cache. If
+ * <b>from_fetch</b>, we received it in response to a request we made.
+ * (If both are false, that means it was uploaded to us as an auth dir
+ * server or via the controller.)
+ *
+ * This function should be called *after*
+ * routers_update_status_from_consensus_networkstatus; subsequently, you
+ * should call router_rebuild_store and routerlist_descriptors_added.
+ */
+was_router_added_t
+router_add_to_routerlist(routerinfo_t *router, const char **msg,
+ int from_cache, int from_fetch)
+{
+ const char *id_digest;
+ const or_options_t *options = get_options();
+ int authdir = authdir_mode_handles_descs(options, router->purpose);
+ int authdir_believes_valid = 0;
+ routerinfo_t *old_router;
+ networkstatus_t *consensus =
+ networkstatus_get_latest_consensus_by_flavor(FLAV_NS);
+ int in_consensus = 0;
+
+ tor_assert(msg);
+
+ if (!routerlist)
+ router_get_routerlist();
+
+ id_digest = router->cache_info.identity_digest;
+
+ old_router = router_get_mutable_by_digest(id_digest);
+
+ /* Make sure that it isn't expired. */
+ if (router->cert_expiration_time < approx_time()) {
+ routerinfo_free(router);
+ *msg = "Some certs on this router are expired.";
+ return ROUTER_CERTS_EXPIRED;
+ }
+
+ /* Make sure that we haven't already got this exact descriptor. */
+ if (sdmap_get(routerlist->desc_digest_map,
+ router->cache_info.signed_descriptor_digest)) {
+ /* If we have this descriptor already and the new descriptor is a bridge
+ * descriptor, replace it. If we had a bridge descriptor before and the
+ * new one is not a bridge descriptor, don't replace it. */
+
+ /* Only members of routerlist->identity_map can be bridges; we don't
+ * put bridges in old_routers. */
+ const int was_bridge = old_router &&
+ old_router->purpose == ROUTER_PURPOSE_BRIDGE;
+
+ if (routerinfo_is_a_configured_bridge(router) &&
+ router->purpose == ROUTER_PURPOSE_BRIDGE &&
+ !was_bridge) {
+ log_info(LD_DIR, "Replacing non-bridge descriptor with bridge "
+ "descriptor for router %s",
+ router_describe(router));
+ } else {
+ log_info(LD_DIR,
+ "Dropping descriptor that we already have for router %s",
+ router_describe(router));
+ *msg = "Router descriptor was not new.";
+ routerinfo_free(router);
+ return ROUTER_IS_ALREADY_KNOWN;
+ }
+ }
+
+ if (authdir) {
+ if (authdir_wants_to_reject_router(router, msg,
+ !from_cache && !from_fetch,
+ &authdir_believes_valid)) {
+ tor_assert(*msg);
+ routerinfo_free(router);
+ return ROUTER_AUTHDIR_REJECTS;
+ }
+ } else if (from_fetch) {
+ /* Only check the descriptor digest against the network statuses when
+ * we are receiving in response to a fetch. */
+
+ if (!signed_desc_digest_is_recognized(&router->cache_info) &&
+ !routerinfo_is_a_configured_bridge(router)) {
+ /* We asked for it, so some networkstatus must have listed it when we
+ * did. Save it if we're a cache in case somebody else asks for it. */
+ log_info(LD_DIR,
+ "Received a no-longer-recognized descriptor for router %s",
+ router_describe(router));
+ *msg = "Router descriptor is not referenced by any network-status.";
+
+ /* Only journal this desc if we want to keep old descriptors */
+ if (!from_cache && should_cache_old_descriptors())
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ routerlist_insert_old(routerlist, router);
+ return ROUTER_NOT_IN_CONSENSUS_OR_NETWORKSTATUS;
+ }
+ }
+
+ /* Check whether the descriptor we just got is the one that the current
+ * consensus lists for this router. */
+ if (consensus) {
+ routerstatus_t *rs = networkstatus_vote_find_mutable_entry(
+ consensus, id_digest);
+ if (rs && tor_memeq(rs->descriptor_digest,
+ router->cache_info.signed_descriptor_digest,
+ DIGEST_LEN)) {
+ in_consensus = 1;
+ }
+ }
+
+ if (router->purpose == ROUTER_PURPOSE_GENERAL &&
+ consensus && !in_consensus && !authdir) {
+ /* If it's a general router not listed in the consensus, then don't
+ * consider replacing the latest router with it. */
+ if (!from_cache && should_cache_old_descriptors())
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ routerlist_insert_old(routerlist, router);
+ *msg = "Skipping router descriptor: not in consensus.";
+ return ROUTER_NOT_IN_CONSENSUS;
+ }
+
+ /* If we're reading a bridge descriptor from our cache, and we don't
+ * recognize it as one of our currently configured bridges, drop the
+ * descriptor. Otherwise we could end up using it as one of our entry
+ * guards even if it isn't in our Bridge config lines. */
+ if (router->purpose == ROUTER_PURPOSE_BRIDGE && from_cache &&
+ !authdir_mode_bridge(options) &&
+ !routerinfo_is_a_configured_bridge(router)) {
+ log_info(LD_DIR, "Dropping bridge descriptor for %s because we have "
+ "no bridge configured at that address.",
+ safe_str_client(router_describe(router)));
+ *msg = "Router descriptor was not a configured bridge.";
+ routerinfo_free(router);
+ return ROUTER_WAS_NOT_WANTED;
+ }
+
+ /* If we have a router with the same identity key, choose the newer one. */
+ if (old_router) {
+ if (!in_consensus && (router->cache_info.published_on <=
+ old_router->cache_info.published_on)) {
+ /* Same key, but old. This one is not listed in the consensus. */
+ log_debug(LD_DIR, "Not-new descriptor for router %s",
+ router_describe(router));
+ /* Only journal this desc if we'll be serving it. */
+ if (!from_cache && should_cache_old_descriptors())
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ routerlist_insert_old(routerlist, router);
+ *msg = "Router descriptor was not new.";
+ return ROUTER_IS_ALREADY_KNOWN;
+ } else {
+ /* Same key, and either new, or listed in the consensus. */
+ log_debug(LD_DIR, "Replacing entry for router %s",
+ router_describe(router));
+ routerlist_replace(routerlist, old_router, router);
+ if (!from_cache) {
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ }
+ *msg = authdir_believes_valid ? "Valid server updated" :
+ ("Invalid server updated. (This dirserver is marking your "
+ "server as unapproved.)");
+ return ROUTER_ADDED_SUCCESSFULLY;
+ }
+ }
+
+ if (!in_consensus && from_cache &&
+ router_descriptor_is_older_than(router, OLD_ROUTER_DESC_MAX_AGE)) {
+ *msg = "Router descriptor was really old.";
+ routerinfo_free(router);
+ return ROUTER_WAS_TOO_OLD;
+ }
+
+ /* We haven't seen a router with this identity before. Add it to the end of
+ * the list. */
+ routerlist_insert(routerlist, router);
+ if (!from_cache) {
+ signed_desc_append_to_journal(&router->cache_info,
+ &routerlist->desc_store);
+ }
+ return ROUTER_ADDED_SUCCESSFULLY;
+}
+
+/** Insert <b>ei</b> into the routerlist, or free it. Other arguments are
+ * as for router_add_to_routerlist(). Return ROUTER_ADDED_SUCCESSFULLY iff
+ * we actually inserted it, ROUTER_BAD_EI otherwise.
+ */
+was_router_added_t
+router_add_extrainfo_to_routerlist(extrainfo_t *ei, const char **msg,
+ int from_cache, int from_fetch)
+{
+ was_router_added_t inserted;
+ (void)from_fetch;
+ if (msg) *msg = NULL;
+ /*XXXX Do something with msg */
+
+ inserted = extrainfo_insert(router_get_routerlist(), ei, !from_cache);
+
+ if (WRA_WAS_ADDED(inserted) && !from_cache)
+ signed_desc_append_to_journal(&ei->cache_info,
+ &routerlist->extrainfo_store);
+
+ return inserted;
+}
+
+/** Sorting helper: return &lt;0, 0, or &gt;0 depending on whether the
+ * signed_descriptor_t* in *<b>a</b> has an identity digest preceding, equal
+ * to, or later than that of *<b>b</b>; break ties by publication time, so
+ * that older descriptors sort first. */
+static int
+compare_old_routers_by_identity_(const void **_a, const void **_b)
+{
+ int i;
+ const signed_descriptor_t *r1 = *_a, *r2 = *_b;
+ if ((i = fast_memcmp(r1->identity_digest, r2->identity_digest, DIGEST_LEN)))
+ return i;
+ return (int)(r1->published_on - r2->published_on);
+}
+
+/** Internal type used to represent how long an old descriptor was valid,
+ * where it appeared in the list of old descriptors, and whether it's extra
+ * old. Used only by routerlist_remove_old_cached_routers_with_id(). */
+struct duration_idx_t {
+ /** Seconds this descriptor stayed current (INT_MAX for the newest one). */
+ int duration;
+ /** Index of this descriptor in routerlist->old_routers. */
+ int idx;
+ /** True iff we're removing it for being published before the cutoff. */
+ int old;
+};
+
+/** Sorting helper: compare two duration_idx_t by their duration. */
+static int
+compare_duration_idx_(const void *_d1, const void *_d2)
+{
+ const struct duration_idx_t *d1 = _d1;
+ const struct duration_idx_t *d2 = _d2;
+ return d1->duration - d2->duration;
+}
+
+/** The range <b>lo</b> through <b>hi</b> inclusive of routerlist->old_routers
+ * must contain signed_descriptor_t with the same identity and with
+ * publication time in ascending order. Remove members from this range until
+ * there are no more than max_descriptors_per_router() remaining. Start by
+ * removing the oldest members from before <b>cutoff</b>, then remove members
+ * which were current for the lowest amount of time. The order of members of
+ * old_routers at indices <b>lo</b> or higher may be changed.
+ */
+static void
+routerlist_remove_old_cached_routers_with_id(time_t now,
+ time_t cutoff, int lo, int hi,
+ digestset_t *retain)
+{
+ int i, n = hi-lo+1;
+ unsigned n_extra, n_rmv = 0;
+ struct duration_idx_t *lifespans;
+ uint8_t *rmv, *must_keep;
+ smartlist_t *lst = routerlist->old_routers;
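+ /* Sanity check (always compiled in): every descriptor in the range
+ * [lo, hi] must share the identity digest of the entry at lo. */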
+#if 1
+ const char *ident;
+ tor_assert(hi < smartlist_len(lst));
+ tor_assert(lo <= hi);
+ ident = ((signed_descriptor_t*)smartlist_get(lst, lo))->identity_digest;
+ for (i = lo+1; i <= hi; ++i) {
+ signed_descriptor_t *r = smartlist_get(lst, i);
+ tor_assert(tor_memeq(ident, r->identity_digest, DIGEST_LEN));
+ }
+#endif /* 1 */
+ /* Check whether we need to do anything at all. */
+ {
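+ /* Directory caches keep up to two descriptors per router here; everyone
+ * else keeps at most one. */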
+ int mdpr = directory_caches_dir_info(get_options()) ? 2 : 1;
+ if (n <= mdpr)
+ return;
+ n_extra = n - mdpr;
+ }
+
+ lifespans = tor_calloc(n, sizeof(struct duration_idx_t));
+ rmv = tor_calloc(n, sizeof(uint8_t));
+ must_keep = tor_calloc(n, sizeof(uint8_t));
+ /* Set lifespans to contain the lifespan and index of each server. */
+ /* Set rmv[i-lo]=1 if we're going to remove a server for being too old. */
+ for (i = lo; i <= hi; ++i) {
+ signed_descriptor_t *r = smartlist_get(lst, i);
+ signed_descriptor_t *r_next;
+ lifespans[i-lo].idx = i;
+ if (r->last_listed_as_valid_until >= now ||
+ (retain && digestset_probably_contains(retain,
+ r->signed_descriptor_digest))) {
+ must_keep[i-lo] = 1;
+ }
+ if (i < hi) {
+ r_next = smartlist_get(lst, i+1);
+ tor_assert(r->published_on <= r_next->published_on);
+ lifespans[i-lo].duration = (int)(r_next->published_on - r->published_on);
+ } else {
+ r_next = NULL;
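+ /* The newest descriptor has no successor, so treat its lifespan as
+ * unbounded; that way we prefer to keep it. */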
+ lifespans[i-lo].duration = INT_MAX;
+ }
+ if (!must_keep[i-lo] && r->published_on < cutoff && n_rmv < n_extra) {
+ ++n_rmv;
+ lifespans[i-lo].old = 1;
+ rmv[i-lo] = 1;
+ }
+ }
+
+ if (n_rmv < n_extra) {
+ /* We aren't removing enough servers for being old. Sort the lifespans by
+ * duration, and remove the shortest-lived descriptors that we aren't
+ * already removing. */
+ qsort(lifespans, n, sizeof(struct duration_idx_t), compare_duration_idx_);
+ for (i = 0; i < n && n_rmv < n_extra; ++i) {
+ if (!must_keep[lifespans[i].idx-lo] && !lifespans[i].old) {
+ rmv[lifespans[i].idx-lo] = 1;
+ ++n_rmv;
+ }
+ }
+ }
+
+ i = hi;
+ do {
+ if (rmv[i-lo])
+ routerlist_remove_old(routerlist, smartlist_get(lst, i), i);
+ } while (--i >= lo);
+ tor_free(must_keep);
+ tor_free(rmv);
+ tor_free(lifespans);
+}
+
+/** Deactivate any routers from the routerlist that are more than
+ * ROUTER_MAX_AGE seconds old and not recommended by any networkstatuses;
+ * remove old routers from the list of cached routers if we have too many.
+ */
+void
+routerlist_remove_old_routers(void)
+{
+ int i, hi=-1;
+ const char *cur_id = NULL;
+ time_t now = time(NULL);
+ time_t cutoff;
+ routerinfo_t *router;
+ signed_descriptor_t *sd;
+ digestset_t *retain;
+ const networkstatus_t *consensus = networkstatus_get_latest_consensus();
+
+ trusted_dirs_remove_old_certs();
+
+ if (!routerlist || !consensus)
+ return;
+
+ // routerlist_assert_ok(routerlist);
+
+ /* We need to guess how many router descriptors we will wind up wanting to
+ retain, so that we can be sure to allocate a large enough Bloom filter
+ to hold the digest set. Overestimating is fine; underestimating is bad.
+ */
+ {
+ /* We'll probably retain everything in the consensus. */
+ int n_max_retain = smartlist_len(consensus->routerstatus_list);
+ retain = digestset_new(n_max_retain);
+ }
+
+ cutoff = now - OLD_ROUTER_DESC_MAX_AGE;
+ /* Retain anything listed in the consensus. */
+ if (consensus) {
+ SMARTLIST_FOREACH(consensus->routerstatus_list, routerstatus_t *, rs,
+ if (rs->published_on >= cutoff)
+ digestset_add(retain, rs->descriptor_digest));
+ }
+
+ /* If we have a consensus, we should consider pruning current routers that
+ * are too old and that nobody recommends. (If we don't have a consensus,
+ * then we should get one before we decide to kill routers.) */
+
+ if (consensus) {
+ cutoff = now - ROUTER_MAX_AGE;
+ /* Remove too-old unrecommended members of routerlist->routers. */
+ for (i = 0; i < smartlist_len(routerlist->routers); ++i) {
+ router = smartlist_get(routerlist->routers, i);
+ if (router->cache_info.published_on <= cutoff &&
+ router->cache_info.last_listed_as_valid_until < now &&
+ !digestset_probably_contains(retain,
+ router->cache_info.signed_descriptor_digest)) {
+ /* Too old: remove it. (If we're a cache, just move it into
+ * old_routers.) */
+ log_info(LD_DIR,
+ "Forgetting obsolete (too old) routerinfo for router %s",
+ router_describe(router));
+ routerlist_remove(routerlist, router, 1, now);
+ i--;
+ }
+ }
+ }
+
+ //routerlist_assert_ok(routerlist);
+
+ /* Remove far-too-old members of routerlist->old_routers. */
+ cutoff = now - OLD_ROUTER_DESC_MAX_AGE;
+ for (i = 0; i < smartlist_len(routerlist->old_routers); ++i) {
+ sd = smartlist_get(routerlist->old_routers, i);
+ if (sd->published_on <= cutoff &&
+ sd->last_listed_as_valid_until < now &&
+ !digestset_probably_contains(retain, sd->signed_descriptor_digest)) {
+ /* Too old. Remove it. */
+ routerlist_remove_old(routerlist, sd, i--);
+ }
+ }
+
+ //routerlist_assert_ok(routerlist);
+
+ log_info(LD_DIR, "We have %d live routers and %d old router descriptors.",
+ smartlist_len(routerlist->routers),
+ smartlist_len(routerlist->old_routers));
+
+ /* Now we might have to look at routerlist->old_routers for extraneous
+ * members. (We'd keep all the members if we could, but we need to save
+ * space.) First, check whether we have too many router descriptors, total.
+ * We're okay with having too many for some given router, so long as the
+ * total number doesn't approach max_descriptors_per_router()*len(routers).
+ */
+ if (smartlist_len(routerlist->old_routers) <
+ smartlist_len(routerlist->routers))
+ goto done;
+
+ /* Sort by identity, then fix indices. */
+ smartlist_sort(routerlist->old_routers, compare_old_routers_by_identity_);
+ /* Fix indices. */
+ for (i = 0; i < smartlist_len(routerlist->old_routers); ++i) {
+ signed_descriptor_t *r = smartlist_get(routerlist->old_routers, i);
+ r->routerlist_index = i;
+ }
+
+ /* Iterate through the list from back to front, so when we remove descriptors
+ * we don't mess up groups we haven't gotten to. */
+ for (i = smartlist_len(routerlist->old_routers)-1; i >= 0; --i) {
+ signed_descriptor_t *r = smartlist_get(routerlist->old_routers, i);
+ if (!cur_id) {
+ cur_id = r->identity_digest;
+ hi = i;
+ }
+ if (tor_memneq(cur_id, r->identity_digest, DIGEST_LEN)) {
+ routerlist_remove_old_cached_routers_with_id(now,
+ cutoff, i+1, hi, retain);
+ cur_id = r->identity_digest;
+ hi = i;
+ }
+ }
+ if (hi>=0)
+ routerlist_remove_old_cached_routers_with_id(now, cutoff, 0, hi, retain);
+ //routerlist_assert_ok(routerlist);
+
+ done:
+ digestset_free(retain);
+ router_rebuild_store(RRS_DONT_REMOVE_OLD, &routerlist->desc_store);
+ router_rebuild_store(RRS_DONT_REMOVE_OLD, &routerlist->extrainfo_store);
+}
+
+/** We just added a new set of descriptors. Take whatever extra steps
+ * we need. */
+void
+routerlist_descriptors_added(smartlist_t *sl, int from_cache)
+{
+ tor_assert(sl);
+ control_event_descriptors_changed(sl);
+ SMARTLIST_FOREACH_BEGIN(sl, routerinfo_t *, ri) {
+ if (ri->purpose == ROUTER_PURPOSE_BRIDGE)
+ learned_bridge_descriptor(ri, from_cache);
+ if (ri->needs_retest_if_added) {
+ ri->needs_retest_if_added = 0;
+ dirserv_single_reachability_test(approx_time(), ri);
+ }
+ } SMARTLIST_FOREACH_END(ri);
+}
+
+/**
+ * Parse the single router descriptor in <b>s</b> and insert it into the
+ * routerlist. Return -1 if the descriptor was ill-formed; 0 if the
+ * descriptor was well-formed but could not be added; and 1 if the
+ * descriptor was added.
+ *
+ * If we don't add it and <b>msg</b> is not NULL, then assign to
+ * *<b>msg</b> a static string describing the reason for refusing the
+ * descriptor.
+ *
+ * This is used only by the controller.
+ */
+int
+router_load_single_router(const char *s, uint8_t purpose, int cache,
+ const char **msg)
+{
+ routerinfo_t *ri;
+ was_router_added_t r;
+ smartlist_t *lst;
+ char annotation_buf[ROUTER_ANNOTATION_BUF_LEN];
+ tor_assert(msg);
+ *msg = NULL;
+
+ tor_snprintf(annotation_buf, sizeof(annotation_buf),
+ "@source controller\n"
+ "@purpose %s\n", router_purpose_to_string(purpose));
+
+ if (!(ri = router_parse_entry_from_string(s, NULL, 1, 0,
+ annotation_buf, NULL))) {
+ log_warn(LD_DIR, "Error parsing router descriptor; dropping.");
+ *msg = "Couldn't parse router descriptor.";
+ return -1;
+ }
+ tor_assert(ri->purpose == purpose);
+ if (router_is_me(ri)) {
+ log_warn(LD_DIR, "Router's identity key matches mine; dropping.");
+ *msg = "Router's identity key matches mine.";
+ routerinfo_free(ri);
+ return 0;
+ }
+
+ if (!cache) /* obey the preference of the controller */
+ ri->cache_info.do_not_cache = 1;
+
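+ /* Wrap the single descriptor in a list so we can reuse the helpers below
+ * that operate on lists of descriptors. */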
+ lst = smartlist_new();
+ smartlist_add(lst, ri);
+ routers_update_status_from_consensus_networkstatus(lst, 0);
+
+ r = router_add_to_routerlist(ri, msg, 0, 0);
+ if (!WRA_WAS_ADDED(r)) {
+ /* we've already assigned to *msg now, and ri is already freed */
+ tor_assert(*msg);
+ if (r == ROUTER_AUTHDIR_REJECTS)
+ log_warn(LD_DIR, "Couldn't add router to list: %s Dropping.", *msg);
+ smartlist_free(lst);
+ return 0;
+ } else {
+ routerlist_descriptors_added(lst, 0);
+ smartlist_free(lst);
+ log_debug(LD_DIR, "Added router to list");
+ return 1;
+ }
+}
+
+/** Given a string <b>s</b> containing some routerdescs, parse it and put the
+ * routers into our directory. If saved_location is SAVED_NOWHERE, the routers
+ * are in response to a query to the network: cache them by adding them to
+ * the journal.
+ *
+ * Return the number of routers actually added.
+ *
+ * If <b>requested_fingerprints</b> is provided, it must contain a list of
+ * uppercased fingerprints. Do not update any router whose
+ * fingerprint is not on the list; after updating a router, remove its
+ * fingerprint from the list.
+ *
+ * If <b>descriptor_digests</b> is non-zero, then the requested_fingerprints
+ * are descriptor digests. Otherwise they are identity digests.
+ */
+int
+router_load_routers_from_string(const char *s, const char *eos,
+ saved_location_t saved_location,
+ smartlist_t *requested_fingerprints,
+ int descriptor_digests,
+ const char *prepend_annotations)
+{
+ smartlist_t *routers = smartlist_new(), *changed = smartlist_new();
+ char fp[HEX_DIGEST_LEN+1];
+ const char *msg;
+ int from_cache = (saved_location != SAVED_NOWHERE);
+ int allow_annotations = (saved_location != SAVED_NOWHERE);
+ int any_changed = 0;
+ smartlist_t *invalid_digests = smartlist_new();
+
+ router_parse_list_from_string(&s, eos, routers, saved_location, 0,
+ allow_annotations, prepend_annotations,
+ invalid_digests);
+
+ routers_update_status_from_consensus_networkstatus(routers, !from_cache);
+
+ log_info(LD_DIR, "%d elements to add", smartlist_len(routers));
+
+ SMARTLIST_FOREACH_BEGIN(routers, routerinfo_t *, ri) {
+ was_router_added_t r;
+ char d[DIGEST_LEN];
+ if (requested_fingerprints) {
+ base16_encode(fp, sizeof(fp), descriptor_digests ?
+ ri->cache_info.signed_descriptor_digest :
+ ri->cache_info.identity_digest,
+ DIGEST_LEN);
+ if (smartlist_contains_string(requested_fingerprints, fp)) {
+ smartlist_string_remove(requested_fingerprints, fp);
+ } else {
+ char *requested =
+ smartlist_join_strings(requested_fingerprints," ",0,NULL);
+ log_warn(LD_DIR,
+ "We received a router descriptor with a fingerprint (%s) "
+ "that we never requested. (We asked for: %s.) Dropping.",
+ fp, requested);
+ tor_free(requested);
+ routerinfo_free(ri);
+ continue;
+ }
+ }
+
+ memcpy(d, ri->cache_info.signed_descriptor_digest, DIGEST_LEN);
+ r = router_add_to_routerlist(ri, &msg, from_cache, !from_cache);
+ if (WRA_WAS_ADDED(r)) {
+ any_changed++;
+ smartlist_add(changed, ri);
+ routerlist_descriptors_added(changed, from_cache);
+ smartlist_clear(changed);
+ } else if (WRA_NEVER_DOWNLOADABLE(r)) {
+ download_status_t *dl_status;
+ dl_status = router_get_dl_status_by_descriptor_digest(d);
+ if (dl_status) {
+ log_info(LD_GENERAL, "Marking router %s as never downloadable",
+ hex_str(d, DIGEST_LEN));
+ download_status_mark_impossible(dl_status);
+ }
+ }
+ } SMARTLIST_FOREACH_END(ri);
+
+ SMARTLIST_FOREACH_BEGIN(invalid_digests, const uint8_t *, bad_digest) {
+ /* This digest is never going to be parseable. */
+ base16_encode(fp, sizeof(fp), (char*)bad_digest, DIGEST_LEN);
+ if (requested_fingerprints && descriptor_digests) {
+ if (! smartlist_contains_string(requested_fingerprints, fp)) {
+ /* But we didn't ask for it, so we should assume shenanigans. */
+ continue;
+ }
+ smartlist_string_remove(requested_fingerprints, fp);
+ }
+ download_status_t *dls;
+ dls = router_get_dl_status_by_descriptor_digest((char*)bad_digest);
+ if (dls) {
+ log_info(LD_GENERAL, "Marking router with descriptor %s as unparseable, "
+ "and therefore undownloadable", fp);
+ download_status_mark_impossible(dls);
+ }
+ } SMARTLIST_FOREACH_END(bad_digest);
+ SMARTLIST_FOREACH(invalid_digests, uint8_t *, d, tor_free(d));
+ smartlist_free(invalid_digests);
+
+ routerlist_assert_ok(routerlist);
+
+ if (any_changed)
+ router_rebuild_store(0, &routerlist->desc_store);
+
+ smartlist_free(routers);
+ smartlist_free(changed);
+
+ return any_changed;
+}
+
+/** Parse one or more extrainfos from <b>s</b> (ending immediately before
+ * <b>eos</b> if <b>eos</b> is present). Other arguments are as for
+ * router_load_routers_from_string(). */
+void
+router_load_extrainfo_from_string(const char *s, const char *eos,
+ saved_location_t saved_location,
+ smartlist_t *requested_fingerprints,
+ int descriptor_digests)
+{
+ smartlist_t *extrainfo_list = smartlist_new();
+ const char *msg;
+ int from_cache = (saved_location != SAVED_NOWHERE);
+ smartlist_t *invalid_digests = smartlist_new();
+
+ router_parse_list_from_string(&s, eos, extrainfo_list, saved_location, 1, 0,
+ NULL, invalid_digests);
+
+ log_info(LD_DIR, "%d elements to add", smartlist_len(extrainfo_list));
+
+ SMARTLIST_FOREACH_BEGIN(extrainfo_list, extrainfo_t *, ei) {
+ uint8_t d[DIGEST_LEN];
+ memcpy(d, ei->cache_info.signed_descriptor_digest, DIGEST_LEN);
+ was_router_added_t added =
+ router_add_extrainfo_to_routerlist(ei, &msg, from_cache, !from_cache);
+ if (WRA_WAS_ADDED(added) && requested_fingerprints) {
+ char fp[HEX_DIGEST_LEN+1];
+ base16_encode(fp, sizeof(fp), descriptor_digests ?
+ ei->cache_info.signed_descriptor_digest :
+ ei->cache_info.identity_digest,
+ DIGEST_LEN);
+ smartlist_string_remove(requested_fingerprints, fp);
+ /* We silently let relays stuff us with extrainfos we didn't ask for,
+ * so long as we would have wanted them anyway. Since we always fetch
+ * all the extrainfos we want, and we never actually act on them
+ * inside Tor, this should be harmless. */
+ } else if (WRA_NEVER_DOWNLOADABLE(added)) {
+ signed_descriptor_t *sd = router_get_by_extrainfo_digest((char*)d);
+ if (sd) {
+ log_info(LD_GENERAL, "Marking extrainfo with descriptor %s as "
+ "unparseable, and therefore undownloadable",
+ hex_str((char*)d,DIGEST_LEN));
+ download_status_mark_impossible(&sd->ei_dl_status);
+ }
+ }
+ } SMARTLIST_FOREACH_END(ei);
+
+ SMARTLIST_FOREACH_BEGIN(invalid_digests, const uint8_t *, bad_digest) {
+ /* This digest is never going to be parseable. */
+ char fp[HEX_DIGEST_LEN+1];
+ base16_encode(fp, sizeof(fp), (char*)bad_digest, DIGEST_LEN);
+ if (requested_fingerprints) {
+ if (! smartlist_contains_string(requested_fingerprints, fp)) {
+ /* But we didn't ask for it, so we should assume shenanigans. */
+ continue;
+ }
+ smartlist_string_remove(requested_fingerprints, fp);
+ }
+ signed_descriptor_t *sd =
+ router_get_by_extrainfo_digest((char*)bad_digest);
+ if (sd) {
+ log_info(LD_GENERAL, "Marking extrainfo with descriptor %s as "
+ "unparseable, and therefore undownloadable", fp);
+ download_status_mark_impossible(&sd->ei_dl_status);
+ }
+ } SMARTLIST_FOREACH_END(bad_digest);
+ SMARTLIST_FOREACH(invalid_digests, uint8_t *, d, tor_free(d));
+ smartlist_free(invalid_digests);
+
+ routerlist_assert_ok(routerlist);
+ router_rebuild_store(0, &router_get_routerlist()->extrainfo_store);
+
+ smartlist_free(extrainfo_list);
+}
+
+/** Return true iff the latest ns-flavored consensus includes a descriptor
+ * whose digest is that of <b>desc</b>. */
+static int
+signed_desc_digest_is_recognized(signed_descriptor_t *desc)
+{
+ const routerstatus_t *rs;
+ networkstatus_t *consensus = networkstatus_get_latest_consensus_by_flavor(
+ FLAV_NS);
+
+ if (consensus) {
+ rs = networkstatus_vote_find_entry(consensus, desc->identity_digest);
+ if (rs && tor_memeq(rs->descriptor_digest,
+ desc->signed_descriptor_digest, DIGEST_LEN))
+ return 1;
+ }
+ return 0;
+}
+
+/** Update downloads for router descriptors and/or microdescriptors as
+ * appropriate. */
+void
+update_all_descriptor_downloads(time_t now)
+{
+ if (should_delay_dir_fetches(get_options(), NULL))
+ return;
+ update_router_descriptor_downloads(now);
+ update_microdesc_downloads(now);
+ launch_dummy_descriptor_download_as_needed(now, get_options());
+}
+
+/** Clear all our timeouts for fetching v3 directory stuff, and then
+ * give it all a try again. */
+void
+routerlist_retry_directory_downloads(time_t now)
+{
+ (void)now;
+
+ log_debug(LD_GENERAL,
+ "In routerlist_retry_directory_downloads()");
+
+ router_reset_status_download_failures();
+ router_reset_descriptor_download_failures();
+ reschedule_directory_downloads();
+}
+
+/** Return true iff <b>router</b> does not permit exit streams.
+ */
+int
+router_exit_policy_rejects_all(const routerinfo_t *router)
+{
+ return router->policy_is_reject_star;
+}
+
+/** Create a directory server entry for the server at the IPv4 address
+ * <b>addr</b>, with directory port <b>dir_port</b>, OR port <b>or_port</b>,
+ * and OR identity key <b>digest</b> (which has DIGEST_LEN bytes). Use
+ * <b>hostname</b> as its address string, or the text form of <b>addr</b> if
+ * <b>hostname</b> is NULL. If <b>is_authority</b>, this is a directory
+ * authority. Return the new directory server entry on success or NULL on
+ * failure. */
+static dir_server_t *
+dir_server_new(int is_authority,
+ const char *nickname,
+ const tor_addr_t *addr,
+ const char *hostname,
+ uint16_t dir_port, uint16_t or_port,
+ const tor_addr_port_t *addrport_ipv6,
+ const char *digest, const char *v3_auth_digest,
+ dirinfo_type_t type,
+ double weight)
+{
+ dir_server_t *ent;
+ uint32_t a;
+ char *hostname_ = NULL;
+
+ tor_assert(digest);
+
+ if (weight < 0)
+ return NULL;
+
+ if (tor_addr_family(addr) == AF_INET)
+ a = tor_addr_to_ipv4h(addr);
+ else
+ return NULL;
+
+ if (!hostname)
+ hostname_ = tor_addr_to_str_dup(addr);
+ else
+ hostname_ = tor_strdup(hostname);
+
+ ent = tor_malloc_zero(sizeof(dir_server_t));
+ ent->nickname = nickname ? tor_strdup(nickname) : NULL;
+ ent->address = hostname_;
+ ent->addr = a;
+ ent->dir_port = dir_port;
+ ent->or_port = or_port;
+ ent->is_running = 1;
+ ent->is_authority = is_authority;
+ ent->type = type;
+ ent->weight = weight;
+ if (addrport_ipv6) {
+ if (tor_addr_family(&addrport_ipv6->addr) != AF_INET6) {
+ log_warn(LD_BUG, "Hey, I got a non-ipv6 addr as addrport_ipv6.");
+ tor_addr_make_unspec(&ent->ipv6_addr);
+ } else {
+ tor_addr_copy(&ent->ipv6_addr, &addrport_ipv6->addr);
+ ent->ipv6_orport = addrport_ipv6->port;
+ }
+ } else {
+ tor_addr_make_unspec(&ent->ipv6_addr);
+ }
+
+ memcpy(ent->digest, digest, DIGEST_LEN);
+ if (v3_auth_digest && (type & V3_DIRINFO))
+ memcpy(ent->v3_identity_digest, v3_auth_digest, DIGEST_LEN);
+
+ if (nickname)
+ tor_asprintf(&ent->description, "directory server \"%s\" at %s:%d",
+ nickname, hostname_, (int)dir_port);
+ else
+ tor_asprintf(&ent->description, "directory server at %s:%d",
+ hostname_, (int)dir_port);
+
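+ /* Pre-fill the synthetic routerstatus_t for this server, so that code
+ * which expects a routerstatus (for example, when directing a request at
+ * this server) can use ent->fake_status directly. */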
+ ent->fake_status.addr = ent->addr;
+ tor_addr_copy(&ent->fake_status.ipv6_addr, &ent->ipv6_addr);
+ memcpy(ent->fake_status.identity_digest, digest, DIGEST_LEN);
+ if (nickname)
+ strlcpy(ent->fake_status.nickname, nickname,
+ sizeof(ent->fake_status.nickname));
+ else
+ ent->fake_status.nickname[0] = '\0';
+ ent->fake_status.dir_port = ent->dir_port;
+ ent->fake_status.or_port = ent->or_port;
+ ent->fake_status.ipv6_orport = ent->ipv6_orport;
+
+ return ent;
+}
+
+/** Create an authoritative directory server at
+ * <b>address</b>:<b>dir_port</b>, with identity key <b>digest</b>. If
+ * <b>address</b> is NULL, add ourself. Return the new trusted directory
+ * server entry on success or NULL if we couldn't add it. */
+dir_server_t *
+trusted_dir_server_new(const char *nickname, const char *address,
+ uint16_t dir_port, uint16_t or_port,
+ const tor_addr_port_t *ipv6_addrport,
+ const char *digest, const char *v3_auth_digest,
+ dirinfo_type_t type, double weight)
+{
+ uint32_t a;
+ tor_addr_t addr;
+ char *hostname=NULL;
+ dir_server_t *result;
+
+ if (!address) { /* The address is us; we should guess. */
+ if (resolve_my_address(LOG_WARN, get_options(),
+ &a, NULL, &hostname) < 0) {
+ log_warn(LD_CONFIG,
+ "Couldn't find a suitable address when adding ourself as a "
+ "trusted directory server.");
+ return NULL;
+ }
+ if (!hostname)
+ hostname = tor_dup_ip(a);
+ } else {
+ if (tor_lookup_hostname(address, &a)) {
+ log_warn(LD_CONFIG,
+ "Unable to lookup address for directory server at '%s'",
+ address);
+ return NULL;
+ }
+ hostname = tor_strdup(address);
+ }
+ tor_addr_from_ipv4h(&addr, a);
+
+ result = dir_server_new(1, nickname, &addr, hostname,
+ dir_port, or_port,
+ ipv6_addrport,
+ digest,
+ v3_auth_digest, type, weight);
+ tor_free(hostname);
+ return result;
+}
+
+/** Return a new dir_server_t for a fallback directory server at
+ * <b>addr</b>:<b>or_port</b>/<b>dir_port</b>, with identity key digest
+ * <b>id_digest</b> */
+dir_server_t *
+fallback_dir_server_new(const tor_addr_t *addr,
+ uint16_t dir_port, uint16_t or_port,
+ const tor_addr_port_t *addrport_ipv6,
+ const char *id_digest, double weight)
+{
+ return dir_server_new(0, NULL, addr, NULL, dir_port, or_port,
+ addrport_ipv6,
+ id_digest,
+ NULL, ALL_DIRINFO, weight);
+}
+
+/** Add a directory server to the global list(s). */
+void
+dir_server_add(dir_server_t *ent)
+{
+ if (!trusted_dir_servers)
+ trusted_dir_servers = smartlist_new();
+ if (!fallback_dir_servers)
+ fallback_dir_servers = smartlist_new();
+
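+ /* Authorities go on both lists: fallback_dir_servers holds every known
+ * directory server, while trusted_dir_servers holds only authorities. */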
+ if (ent->is_authority)
+ smartlist_add(trusted_dir_servers, ent);
+
+ smartlist_add(fallback_dir_servers, ent);
+ router_dir_info_changed();
+}
+
+/** Free storage held in <b>cert</b>. */
+void
+authority_cert_free_(authority_cert_t *cert)
+{
+ if (!cert)
+ return;
+
+ tor_free(cert->cache_info.signed_descriptor_body);
+ crypto_pk_free(cert->signing_key);
+ crypto_pk_free(cert->identity_key);
+
+ tor_free(cert);
+}
+
+#define dir_server_free(val) \
+ FREE_AND_NULL(dir_server_t, dir_server_free_, (val))
+
+/** Free storage held in <b>ds</b>. */
+static void
+dir_server_free_(dir_server_t *ds)
+{
+ if (!ds)
+ return;
+
+ tor_free(ds->nickname);
+ tor_free(ds->description);
+ tor_free(ds->address);
+ tor_free(ds);
+}
+
+/** Remove all members from the list of dir servers. */
+void
+clear_dir_servers(void)
+{
+ if (fallback_dir_servers) {
+ SMARTLIST_FOREACH(fallback_dir_servers, dir_server_t *, ent,
+ dir_server_free(ent));
+ smartlist_clear(fallback_dir_servers);
+ } else {
+ fallback_dir_servers = smartlist_new();
+ }
+ if (trusted_dir_servers) {
+ smartlist_clear(trusted_dir_servers);
+ } else {
+ trusted_dir_servers = smartlist_new();
+ }
+ router_dir_info_changed();
+}
+
+/** For every current directory connection whose purpose is <b>purpose</b>,
+ * and where the resource being downloaded begins with <b>prefix</b>, split
+ * the rest of the resource into base16 fingerprints (or base64 fingerprints
+ * if purpose==DIR_PURPOSE_FETCH_MICRODESC), decode them, and set the
+ * corresponding elements of <b>result</b> (or <b>result256</b> for
+ * microdescriptors) to a nonzero value.
+ */
+static void
+list_pending_downloads(digestmap_t *result, digest256map_t *result256,
+ int purpose, const char *prefix)
+{
+ const size_t p_len = strlen(prefix);
+ smartlist_t *tmp = smartlist_new();
+ smartlist_t *conns = get_connection_array();
+ int flags = DSR_HEX;
+ if (purpose == DIR_PURPOSE_FETCH_MICRODESC)
+ flags = DSR_DIGEST256|DSR_BASE64;
+
+ tor_assert(result || result256);
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
+ if (conn->type == CONN_TYPE_DIR &&
+ conn->purpose == purpose &&
+ !conn->marked_for_close) {
+ const char *resource = TO_DIR_CONN(conn)->requested_resource;
+ if (!strcmpstart(resource, prefix))
+ dir_split_resource_into_fingerprints(resource + p_len,
+ tmp, NULL, flags);
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ if (result) {
+ SMARTLIST_FOREACH(tmp, char *, d,
+ {
+ digestmap_set(result, d, (void*)1);
+ tor_free(d);
+ });
+ } else if (result256) {
+ SMARTLIST_FOREACH(tmp, uint8_t *, d,
+ {
+ digest256map_set(result256, d, (void*)1);
+ tor_free(d);
+ });
+ }
+ smartlist_free(tmp);
+}
+
+/** For every router descriptor (or extra-info document if <b>extrainfo</b> is
+ * true) we are currently downloading by descriptor digest, set result[d] to
+ * (void*)1. */
+static void
+list_pending_descriptor_downloads(digestmap_t *result, int extrainfo)
+{
+ int purpose =
+ extrainfo ? DIR_PURPOSE_FETCH_EXTRAINFO : DIR_PURPOSE_FETCH_SERVERDESC;
+ list_pending_downloads(result, NULL, purpose, "d/");
+}
+
+/** For every microdescriptor we are currently downloading by descriptor
+ * digest, set result[d] to (void*)1.
+ */
+void
+list_pending_microdesc_downloads(digest256map_t *result)
+{
+ list_pending_downloads(NULL, result, DIR_PURPOSE_FETCH_MICRODESC, "d/");
+}
+
+/** For every certificate we are currently downloading by (identity digest,
+ * signing key digest) pair, set result[fp_pair] to (void*)1.
+ */
+static void
+list_pending_fpsk_downloads(fp_pair_map_t *result)
+{
+ const char *pfx = "fp-sk/";
+ smartlist_t *tmp;
+ smartlist_t *conns;
+ const char *resource;
+
+ tor_assert(result);
+
+ tmp = smartlist_new();
+ conns = get_connection_array();
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
+ if (conn->type == CONN_TYPE_DIR &&
+ conn->purpose == DIR_PURPOSE_FETCH_CERTIFICATE &&
+ !conn->marked_for_close) {
+ resource = TO_DIR_CONN(conn)->requested_resource;
+ if (!strcmpstart(resource, pfx))
+ dir_split_resource_into_fingerprint_pairs(resource + strlen(pfx),
+ tmp);
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ SMARTLIST_FOREACH_BEGIN(tmp, fp_pair_t *, fp) {
+ fp_pair_map_set(result, fp, (void*)1);
+ tor_free(fp);
+ } SMARTLIST_FOREACH_END(fp);
+
+ smartlist_free(tmp);
+}
+
+/** Launch downloads for all the descriptors whose digests or digests256
+ * are listed as digests[i] for lo <= i < hi. (Lo and hi may be out of
+ * range.) If <b>source</b> is given, download from <b>source</b>;
+ * otherwise, download from an appropriate random directory server.
+ */
+MOCK_IMPL(STATIC void,
+initiate_descriptor_downloads,(const routerstatus_t *source,
+ int purpose, smartlist_t *digests,
+ int lo, int hi, int pds_flags))
+{
+ char *resource, *cp;
+ int digest_len, enc_digest_len;
+ const char *sep;
+ int b64_256;
+ smartlist_t *tmp;
+
+ if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
+ /* Microdescriptors are downloaded by "-"-separated base64-encoded
+ * 256-bit digests. */
+ digest_len = DIGEST256_LEN;
+ enc_digest_len = BASE64_DIGEST256_LEN + 1;
+ sep = "-";
+ b64_256 = 1;
+ } else {
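+ /* Router descriptors and extra-info documents are downloaded by
+ * "+"-separated base16-encoded digests. */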
+ digest_len = DIGEST_LEN;
+ enc_digest_len = HEX_DIGEST_LEN + 1;
+ sep = "+";
+ b64_256 = 0;
+ }
+
+ if (lo < 0)
+ lo = 0;
+ if (hi > smartlist_len(digests))
+ hi = smartlist_len(digests);
+
+ if (hi-lo <= 0)
+ return;
+
+ tmp = smartlist_new();
+
+ for (; lo < hi; ++lo) {
+ cp = tor_malloc(enc_digest_len);
+ if (b64_256) {
+ digest256_to_base64(cp, smartlist_get(digests, lo));
+ } else {
+ base16_encode(cp, enc_digest_len, smartlist_get(digests, lo),
+ digest_len);
+ }
+ smartlist_add(tmp, cp);
+ }
+
+ cp = smartlist_join_strings(tmp, sep, 0, NULL);
+ tor_asprintf(&resource, "d/%s.z", cp);
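+ /* The resource now looks like e.g. "d/<hex1>+<hex2>+....z" for server
+ * descriptors, or "d/<b64-1>-<b64-2>-....z" for microdescriptors. */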
+
+ SMARTLIST_FOREACH(tmp, char *, cp1, tor_free(cp1));
+ smartlist_free(tmp);
+ tor_free(cp);
+
+ if (source) {
+ /* We know which authority or directory mirror we want. */
+ directory_request_t *req = directory_request_new(purpose);
+ directory_request_set_routerstatus(req, source);
+ directory_request_set_resource(req, resource);
+ directory_initiate_request(req);
+ directory_request_free(req);
+ } else {
+ directory_get_from_dirserver(purpose, ROUTER_PURPOSE_GENERAL, resource,
+ pds_flags, DL_WANT_ANY_DIRSERVER);
+ }
+ tor_free(resource);
+}
+
+/** Return the max number of hashes to put in a URL for a given request.
+ */
+static int
+max_dl_per_request(const or_options_t *options, int purpose)
+{
+ /* Since squid does not like URLs >= 4096 bytes, we limit the number of
+ * hashes per request to 96.
+ * 4096 - strlen(http://[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]:65535
+ * /tor/server/d/.z) == 4026
+ * 4026/41 (40 for the hash and 1 for the + that separates them) => 98
+ * So use 96 because it's a nice number.
+ *
+ * For microdescriptors, the calculation is
+ * 4096 - strlen(http://[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]:65535
+ * /tor/micro/d/.z) == 4027
+ * 4027/44 (43 for the hash and 1 for the - that separates them) => 91
+ * So use 90 because it's a nice number.
+ */
+ int max = 96;
+ if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
+ max = 90;
+ }
+ /* If we're going to tunnel our connections, we can ask for a lot more
+ * in a request. */
+ if (directory_must_use_begindir(options)) {
+ max = 500;
+ }
+ return max;
+}
+
+/** Don't split our requests so finely that we are requesting fewer than
+ * this number per server. (Grouping more than this at once leads to
+ * diminishing returns.) */
+#define MIN_DL_PER_REQUEST 32
+/** To prevent a single screwy cache from confusing us by selective reply,
+ * try to split our requests into at least this many requests. */
+#define MIN_REQUESTS 3
+/** If we want fewer than this many descriptors, wait until we
+ * want more, or until TestingClientMaxIntervalWithoutRequest has passed. */
+#define MAX_DL_TO_DELAY 16
+
+/** Given a <b>purpose</b> (FETCH_MICRODESC or FETCH_SERVERDESC) and a list of
+ * router descriptor digests or microdescriptor digest256s in
+ * <b>downloadable</b>, decide whether to delay fetching until we have more.
+ * If we don't want to delay, launch one or more requests to the appropriate
+ * directory authorities.
+ */
+void
+launch_descriptor_downloads(int purpose,
+ smartlist_t *downloadable,
+ const routerstatus_t *source, time_t now)
+{
+ const or_options_t *options = get_options();
+ const char *descname;
+ const int fetch_microdesc = (purpose == DIR_PURPOSE_FETCH_MICRODESC);
+ int n_downloadable = smartlist_len(downloadable);
+
+ int i, n_per_request, max_dl_per_req;
+ const char *req_plural = "", *rtr_plural = "";
+ int pds_flags = PDS_RETRY_IF_NO_SERVERS;
+
+ tor_assert(fetch_microdesc || purpose == DIR_PURPOSE_FETCH_SERVERDESC);
+ descname = fetch_microdesc ? "microdesc" : "routerdesc";
+
+ if (!n_downloadable)
+ return;
+
+ if (!directory_fetches_dir_info_early(options)) {
+ if (n_downloadable >= MAX_DL_TO_DELAY) {
+ log_debug(LD_DIR,
+ "There are enough downloadable %ss to launch requests.",
+ descname);
+ } else if (! router_have_minimum_dir_info()) {
+ log_debug(LD_DIR,
+ "We are only missing %d %ss, but we'll fetch anyway, since "
+ "we don't yet have enough directory info.",
+ n_downloadable, descname);
+ } else {
+
+ /* should delay */
+ if ((last_descriptor_download_attempted +
+ options->TestingClientMaxIntervalWithoutRequest) > now)
+ return;
+
+ if (last_descriptor_download_attempted) {
+ log_info(LD_DIR,
+ "There are not many downloadable %ss, but we've "
+ "been waiting long enough (%d seconds). Downloading.",
+ descname,
+ (int)(now-last_descriptor_download_attempted));
+ } else {
+ log_info(LD_DIR,
+ "There are not many downloadable %ss, but we haven't "
+ "tried downloading descriptors recently. Downloading.",
+ descname);
+ }
+ }
+ }
+
+ if (!authdir_mode(options)) {
+ /* If we wind up going to the authorities, we want to only open one
+ * connection to each authority at a time, so that we don't overload
+ * them. We do this by setting PDS_NO_EXISTING_SERVERDESC_FETCH
+ * regardless of whether we're a cache or not.
+ *
+ * Setting this flag can make initiate_descriptor_downloads() ignore
+ * requests. We need to make sure that we do in fact call
+ * update_router_descriptor_downloads() later on, once the connections
+ * have succeeded or failed.
+ */
+ pds_flags |= fetch_microdesc ?
+ PDS_NO_EXISTING_MICRODESC_FETCH :
+ PDS_NO_EXISTING_SERVERDESC_FETCH;
+ }
+
+ n_per_request = CEIL_DIV(n_downloadable, MIN_REQUESTS);
+ max_dl_per_req = max_dl_per_request(options, purpose);
+
+ if (n_per_request > max_dl_per_req)
+ n_per_request = max_dl_per_req;
+
+ if (n_per_request < MIN_DL_PER_REQUEST) {
+ n_per_request = MIN(MIN_DL_PER_REQUEST, n_downloadable);
+ }
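+ /* For example, with 500 downloadable server descriptors and an untunneled
+ * cap of 96 per request, CEIL_DIV(500, 3) = 167 is clamped to 96, so we
+ * end up launching CEIL_DIV(500, 96) = 6 requests. */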
+
+ if (n_downloadable > n_per_request)
+ req_plural = rtr_plural = "s";
+ else if (n_downloadable > 1)
+ rtr_plural = "s";
+
+ log_info(LD_DIR,
+ "Launching %d request%s for %d %s%s, %d at a time",
+ CEIL_DIV(n_downloadable, n_per_request), req_plural,
+ n_downloadable, descname, rtr_plural, n_per_request);
+ smartlist_sort_digests(downloadable);
+ for (i=0; i < n_downloadable; i += n_per_request) {
+ initiate_descriptor_downloads(source, purpose,
+ downloadable, i, i+n_per_request,
+ pds_flags);
+ }
+ last_descriptor_download_attempted = now;
+}
+
+/** For any descriptor that we want that's currently listed in
+ * <b>consensus</b>, download it as appropriate. */
+void
+update_consensus_router_descriptor_downloads(time_t now, int is_vote,
+ networkstatus_t *consensus)
+{
+ const or_options_t *options = get_options();
+ digestmap_t *map = NULL;
+ smartlist_t *no_longer_old = smartlist_new();
+ smartlist_t *downloadable = smartlist_new();
+ routerstatus_t *source = NULL;
+ int authdir = authdir_mode(options);
+ int n_delayed=0, n_have=0, n_would_reject=0, n_wouldnt_use=0,
+ n_inprogress=0, n_in_oldrouters=0;
+
+ if (directory_too_idle_to_fetch_descriptors(options, now))
+ goto done;
+ if (!consensus)
+ goto done;
+
+ if (is_vote) {
+ /* Figure out where the vote came from, so we know whom to ask for
+ * descriptors. */
+ dir_server_t *ds;
+ networkstatus_voter_info_t *voter = smartlist_get(consensus->voters, 0);
+ tor_assert(voter);
+ ds = trusteddirserver_get_by_v3_auth_digest(voter->identity_digest);
+ if (ds)
+ source = &(ds->fake_status);
+ else
+ log_warn(LD_DIR, "couldn't lookup source from vote?");
+ }
+
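+ /* Collect the descriptor digests we are already downloading, so we can
+ * skip them below. */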
+ map = digestmap_new();
+ list_pending_descriptor_downloads(map, 0);
+ SMARTLIST_FOREACH_BEGIN(consensus->routerstatus_list, void *, rsp) {
+ routerstatus_t *rs =
+ is_vote ? &(((vote_routerstatus_t *)rsp)->status) : rsp;
+ signed_descriptor_t *sd;
+ if ((sd = router_get_by_descriptor_digest(rs->descriptor_digest))) {
+ const routerinfo_t *ri;
+ ++n_have;
+ if (!(ri = router_get_by_id_digest(rs->identity_digest)) ||
+ tor_memneq(ri->cache_info.signed_descriptor_digest,
+ sd->signed_descriptor_digest, DIGEST_LEN)) {
+ /* We have a descriptor with this digest, but either there is no
+ * entry in routerlist with the same ID (!ri), or there is one whose
+ * current descriptor digest differs (memneq) from the one the consensus
+ * lists; either way, the copy we hold is in old_routers.
+ */
+ smartlist_add(no_longer_old, sd);
+ ++n_in_oldrouters; /* We have it in old_routers. */
+ }
+ continue; /* We have it already. */
+ }
+ if (digestmap_get(map, rs->descriptor_digest)) {
+ ++n_inprogress;
+ continue; /* We have an in-progress download. */
+ }
+ if (!download_status_is_ready(&rs->dl_status, now)) {
+ ++n_delayed; /* Not ready for retry. */
+ continue;
+ }
+ if (authdir && dirserv_would_reject_router(rs)) {
+ ++n_would_reject;
+ continue; /* We would throw it out immediately. */
+ }
+ if (!we_want_to_fetch_flavor(options, consensus->flavor) &&
+ !client_would_use_router(rs, now)) {
+ ++n_wouldnt_use;
+ continue; /* We would never use it ourself. */
+ }
+ if (is_vote && source) {
+ char time_bufnew[ISO_TIME_LEN+1];
+ char time_bufold[ISO_TIME_LEN+1];
+ const routerinfo_t *oldrouter;
+ oldrouter = router_get_by_id_digest(rs->identity_digest);
+ format_iso_time(time_bufnew, rs->published_on);
+ if (oldrouter)
+ format_iso_time(time_bufold, oldrouter->cache_info.published_on);
+ log_info(LD_DIR, "Learned about %s (%s vs %s) from %s's vote (%s)",
+ routerstatus_describe(rs),
+ time_bufnew,
+ oldrouter ? time_bufold : "none",
+ source->nickname, oldrouter ? "known" : "unknown");
+ }
+ smartlist_add(downloadable, rs->descriptor_digest);
+ } SMARTLIST_FOREACH_END(rsp);
+
+ if (!authdir_mode_v3(options)
+ && smartlist_len(no_longer_old)) {
+ routerlist_t *rl = router_get_routerlist();
+ log_info(LD_DIR, "%d router descriptors listed in consensus are "
+ "currently in old_routers; making them current.",
+ smartlist_len(no_longer_old));
+ SMARTLIST_FOREACH_BEGIN(no_longer_old, signed_descriptor_t *, sd) {
+ const char *msg;
+ was_router_added_t r;
+ time_t tmp_cert_expiration_time;
+ routerinfo_t *ri = routerlist_reparse_old(rl, sd);
+ if (!ri) {
+ log_warn(LD_BUG, "Failed to re-parse a router.");
+ continue;
+ }
+ /* need to remember for below, since add_to_routerlist may free. */
+ tmp_cert_expiration_time = ri->cert_expiration_time;
+
+ r = router_add_to_routerlist(ri, &msg, 1, 0);
+ if (WRA_WAS_OUTDATED(r)) {
+ log_warn(LD_DIR, "Couldn't add re-parsed router: %s. This isn't "
+ "usually a big deal, but you should make sure that your "
+ "clock and timezone are set correctly.",
+ msg?msg:"???");
+ if (r == ROUTER_CERTS_EXPIRED) {
+ char time_cons[ISO_TIME_LEN+1];
+ char time_cert_expires[ISO_TIME_LEN+1];
+ format_iso_time(time_cons, consensus->valid_after);
+ format_iso_time(time_cert_expires, tmp_cert_expiration_time);
+ log_warn(LD_DIR, " (I'm looking at a consensus from %s; This "
+ "router's certificates began expiring at %s.)",
+ time_cons, time_cert_expires);
+ }
+ }
+ } SMARTLIST_FOREACH_END(sd);
+ routerlist_assert_ok(rl);
+ }
+
+ log_info(LD_DIR,
+ "%d router descriptors downloadable. %d delayed; %d present "
+ "(%d of those were in old_routers); %d would_reject; "
+ "%d wouldnt_use; %d in progress.",
+ smartlist_len(downloadable), n_delayed, n_have, n_in_oldrouters,
+ n_would_reject, n_wouldnt_use, n_inprogress);
+
+ launch_descriptor_downloads(DIR_PURPOSE_FETCH_SERVERDESC,
+ downloadable, source, now);
+
+ digestmap_free(map, NULL);
+ done:
+ smartlist_free(downloadable);
+ smartlist_free(no_longer_old);
+}
+
+/** How often should we launch a server/authority request to be sure of getting
+ * a guess for our IP? */
+/*XXXX+ this info should come from netinfo cells or something, or we should
+ * do this only when we aren't seeing incoming data. see bug 652. */
+#define DUMMY_DOWNLOAD_INTERVAL (20*60)
+
+/** As needed, launch a dummy router descriptor fetch to see if our
+ * address has changed. */
+static void
+launch_dummy_descriptor_download_as_needed(time_t now,
+ const or_options_t *options)
+{
+ static time_t last_dummy_download = 0;
+ /* XXXX+ we could be smarter here; see notes on bug 652. */
+ /* If we're a server that doesn't have a configured address, we rely on
+ * directory fetches to learn when our address changes. So if we haven't
+ * tried to get any routerdescs in a long time, try a dummy fetch now. */
+ if (!options->Address &&
+ server_mode(options) &&
+ last_descriptor_download_attempted + DUMMY_DOWNLOAD_INTERVAL < now &&
+ last_dummy_download + DUMMY_DOWNLOAD_INTERVAL < now) {
+ last_dummy_download = now;
+ /* XX/teor - do we want an authority here, because they are less likely
+ * to give us the wrong address? (See #17782)
+ * I'm leaving the previous behaviour intact, because I don't like
+ * the idea of some relays contacting an authority every 20 minutes. */
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
+ ROUTER_PURPOSE_GENERAL, "authority.z",
+ PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_ANY_DIRSERVER);
+ }
+}
+
+/** Launch downloads for router status as needed. */
+void
+update_router_descriptor_downloads(time_t now)
+{
+ const or_options_t *options = get_options();
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+ if (!we_fetch_router_descriptors(options))
+ return;
+
+ update_consensus_router_descriptor_downloads(now, 0,
+ networkstatus_get_reasonably_live_consensus(now, FLAV_NS));
+}
+
+/** Launch extrainfo downloads as needed. */
+void
+update_extrainfo_downloads(time_t now)
+{
+ const or_options_t *options = get_options();
+ routerlist_t *rl;
+ smartlist_t *wanted;
+ digestmap_t *pending;
+ int old_routers, i, max_dl_per_req;
+ int n_no_ei = 0, n_pending = 0, n_have = 0, n_delay = 0, n_bogus[2] = {0,0};
+ if (! options->DownloadExtraInfo)
+ return;
+ if (should_delay_dir_fetches(options, NULL))
+ return;
+ if (!router_have_minimum_dir_info())
+ return;
+
+ pending = digestmap_new();
+ list_pending_descriptor_downloads(pending, 1);
+ rl = router_get_routerlist();
+ wanted = smartlist_new();
+ for (old_routers = 0; old_routers < 2; ++old_routers) {
+ smartlist_t *lst = old_routers ? rl->old_routers : rl->routers;
+ for (i = 0; i < smartlist_len(lst); ++i) {
+ signed_descriptor_t *sd;
+ char *d;
+ if (old_routers)
+ sd = smartlist_get(lst, i);
+ else
+ sd = &((routerinfo_t*)smartlist_get(lst, i))->cache_info;
+ if (sd->is_extrainfo)
+ continue; /* This should never happen. */
+ if (old_routers && !router_get_by_id_digest(sd->identity_digest))
+ continue; /* Couldn't check the signature if we got it. */
+ if (sd->extrainfo_is_bogus)
+ continue;
+ d = sd->extra_info_digest;
+ if (tor_digest_is_zero(d)) {
+ ++n_no_ei;
+ continue;
+ }
+ if (eimap_get(rl->extra_info_map, d)) {
+ ++n_have;
+ continue;
+ }
+ if (!download_status_is_ready(&sd->ei_dl_status, now)) {
+ ++n_delay;
+ continue;
+ }
+ if (digestmap_get(pending, d)) {
+ ++n_pending;
+ continue;
+ }
+
+ const signed_descriptor_t *sd2 = router_get_by_extrainfo_digest(d);
+ if (sd2 != sd) {
+ if (sd2 != NULL) {
+ char d1[HEX_DIGEST_LEN+1], d2[HEX_DIGEST_LEN+1];
+ char d3[HEX_DIGEST_LEN+1], d4[HEX_DIGEST_LEN+1];
+ base16_encode(d1, sizeof(d1), sd->identity_digest, DIGEST_LEN);
+ base16_encode(d2, sizeof(d2), sd2->identity_digest, DIGEST_LEN);
+ base16_encode(d3, sizeof(d3), d, DIGEST_LEN);
+ base16_encode(d4, sizeof(d4), sd2->extra_info_digest, DIGEST_LEN);
+
+ log_info(LD_DIR, "Found an entry in %s with mismatched "
+ "router_get_by_extrainfo_digest() value. This has ID %s "
+ "but the entry in the map has ID %s. This has EI digest "
+ "%s and the entry in the map has EI digest %s.",
+ old_routers?"old_routers":"routers",
+ d1, d2, d3, d4);
+ } else {
+ char d1[HEX_DIGEST_LEN+1], d2[HEX_DIGEST_LEN+1];
+ base16_encode(d1, sizeof(d1), sd->identity_digest, DIGEST_LEN);
+ base16_encode(d2, sizeof(d2), d, DIGEST_LEN);
+
+ log_info(LD_DIR, "Found an entry in %s with NULL "
+ "router_get_by_extrainfo_digest() value. This has ID %s "
+ "and EI digest %s.",
+ old_routers?"old_routers":"routers",
+ d1, d2);
+ }
+ ++n_bogus[old_routers];
+ continue;
+ }
+ smartlist_add(wanted, d);
+ }
+ }
+ digestmap_free(pending, NULL);
+
+ log_info(LD_DIR, "Extrainfo download status: %d router with no ei, %d "
+ "with present ei, %d delaying, %d pending, %d downloadable, %d "
+ "bogus in routers, %d bogus in old_routers",
+ n_no_ei, n_have, n_delay, n_pending, smartlist_len(wanted),
+ n_bogus[0], n_bogus[1]);
+
+ smartlist_shuffle(wanted);
+
+ max_dl_per_req = max_dl_per_request(options, DIR_PURPOSE_FETCH_EXTRAINFO);
+ for (i = 0; i < smartlist_len(wanted); i += max_dl_per_req) {
+ initiate_descriptor_downloads(NULL, DIR_PURPOSE_FETCH_EXTRAINFO,
+ wanted, i, i+max_dl_per_req,
+ PDS_RETRY_IF_NO_SERVERS|PDS_NO_EXISTING_SERVERDESC_FETCH);
+ }
+
+ smartlist_free(wanted);
+}
+
+/** Reset the consensus and extra-info download failure count on all routers.
+ * When we get a new consensus,
+ * routers_update_status_from_consensus_networkstatus() will reset the
+ * download statuses on the descriptors in that consensus.
+ */
+void
+router_reset_descriptor_download_failures(void)
+{
+ log_debug(LD_GENERAL,
+ "In router_reset_descriptor_download_failures()");
+
+ networkstatus_reset_download_failures();
+ last_descriptor_download_attempted = 0;
+ if (!routerlist)
+ return;
+ /* We want to download *all* extra-info descriptors, not just those in
+ * the consensus we currently have (or are about to have) */
+ SMARTLIST_FOREACH(routerlist->routers, routerinfo_t *, ri,
+ {
+ download_status_reset(&ri->cache_info.ei_dl_status);
+ });
+ SMARTLIST_FOREACH(routerlist->old_routers, signed_descriptor_t *, sd,
+ {
+ download_status_reset(&sd->ei_dl_status);
+ });
+}
+
+/** Any changes in a router descriptor's publication time larger than this are
+ * automatically non-cosmetic. */
+#define ROUTER_MAX_COSMETIC_TIME_DIFFERENCE (2*60*60)
+
+/** We allow a router's reported uptime to differ from the value we would
+ * expect by up to this much. */
+#define ROUTER_ALLOW_UPTIME_DRIFT (6*60*60)
+
+/** Return true iff the only differences between r1 and r2 are ones that
+ * would not cause a recent (post 0.1.1.6) dirserver to republish.
+ */
+int
+router_differences_are_cosmetic(const routerinfo_t *r1, const routerinfo_t *r2)
+{
+ time_t r1pub, r2pub;
+ long time_difference;
+ tor_assert(r1 && r2);
+
+ /* r1 should be the one that was published first. */
+ if (r1->cache_info.published_on > r2->cache_info.published_on) {
+ const routerinfo_t *ri_tmp = r2;
+ r2 = r1;
+ r1 = ri_tmp;
+ }
+
+ /* If any key fields differ, they're different. */
+ if (r1->addr != r2->addr ||
+ strcasecmp(r1->nickname, r2->nickname) ||
+ r1->or_port != r2->or_port ||
+ !tor_addr_eq(&r1->ipv6_addr, &r2->ipv6_addr) ||
+ r1->ipv6_orport != r2->ipv6_orport ||
+ r1->dir_port != r2->dir_port ||
+ r1->purpose != r2->purpose ||
+ !crypto_pk_eq_keys(r1->onion_pkey, r2->onion_pkey) ||
+ !crypto_pk_eq_keys(r1->identity_pkey, r2->identity_pkey) ||
+ strcasecmp(r1->platform, r2->platform) ||
+ (r1->contact_info && !r2->contact_info) || /* contact_info is optional */
+ (!r1->contact_info && r2->contact_info) ||
+ (r1->contact_info && r2->contact_info &&
+ strcasecmp(r1->contact_info, r2->contact_info)) ||
+ r1->is_hibernating != r2->is_hibernating ||
+ ! addr_policies_eq(r1->exit_policy, r2->exit_policy) ||
+ (r1->supports_tunnelled_dir_requests !=
+ r2->supports_tunnelled_dir_requests))
+ return 0;
+ if ((r1->declared_family == NULL) != (r2->declared_family == NULL))
+ return 0;
+ if (r1->declared_family && r2->declared_family) {
+ int i, n;
+ if (smartlist_len(r1->declared_family)!=smartlist_len(r2->declared_family))
+ return 0;
+ n = smartlist_len(r1->declared_family);
+ for (i=0; i < n; ++i) {
+ if (strcasecmp(smartlist_get(r1->declared_family, i),
+ smartlist_get(r2->declared_family, i)))
+ return 0;
+ }
+ }
+
+ /* Did bandwidth change a lot? */
+ if ((r1->bandwidthcapacity < r2->bandwidthcapacity/2) ||
+ (r2->bandwidthcapacity < r1->bandwidthcapacity/2))
+ return 0;
+
+ /* Did the bandwidthrate or bandwidthburst change? */
+ if ((r1->bandwidthrate != r2->bandwidthrate) ||
+ (r1->bandwidthburst != r2->bandwidthburst))
+ return 0;
+
+ /* Did more than 12 hours pass? */
+ if (r1->cache_info.published_on + ROUTER_MAX_COSMETIC_TIME_DIFFERENCE
+ < r2->cache_info.published_on)
+ return 0;
+
+ /* Did uptime fail to increase by approximately the amount we would think,
+ * give or take some slop? */
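+ /* (For example, if r2 was published three hours after r1, we expect r2's
+ * uptime to be about r1's uptime plus three hours; a gap larger than
+ * ROUTER_ALLOW_UPTIME_DRIFT and larger than 5% of both uptimes is treated
+ * as non-cosmetic.) */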
+ r1pub = r1->cache_info.published_on;
+ r2pub = r2->cache_info.published_on;
+ time_difference = labs(r2->uptime - (r1->uptime + (r2pub - r1pub)));
+ if (time_difference > ROUTER_ALLOW_UPTIME_DRIFT &&
+ time_difference > r1->uptime * .05 &&
+ time_difference > r2->uptime * .05)
+ return 0;
+
+ /* Otherwise, the difference is cosmetic. */
+ return 1;
+}
+
+/** Check whether <b>sd</b> describes a router descriptor compatible with the
+ * extrainfo document <b>ei</b>.
+ *
+ * <b>identity_pkey</b> (which must also be provided) is the RSA1024 identity
+ * key for the router. We use it to check the signature of the extrainfo
+ * document, if it has not already been checked.
+ *
+ * If no router is compatible with <b>ei</b>, <b>ei</b> should be
+ * dropped. Return 0 for "compatible"; return 1 for "reject, and inform
+ * whoever uploaded <b>ei</b>"; and return -1 for "reject silently". If
+ * <b>msg</b> is present, set *<b>msg</b> to a description of the
+ * incompatibility (if any).
+ *
+ * Set the extrainfo_is_bogus field in <b>sd</b> if the digests matched
+ * but the extrainfo was nonetheless incompatible.
+ **/
+int
+routerinfo_incompatible_with_extrainfo(const crypto_pk_t *identity_pkey,
+ extrainfo_t *ei,
+ signed_descriptor_t *sd,
+ const char **msg)
+{
+ int digest_matches, digest256_matches, r=1;
+ tor_assert(identity_pkey);
+ tor_assert(sd);
+ tor_assert(ei);
+
+ if (ei->bad_sig) {
+ if (msg) *msg = "Extrainfo signature was bad, or signed with wrong key.";
+ return 1;
+ }
+
+ digest_matches = tor_memeq(ei->cache_info.signed_descriptor_digest,
+ sd->extra_info_digest, DIGEST_LEN);
+ /* Set digest256_matches to 1 if the digest is correct, or if no
+ * digest256 was in the ri. */
+ digest256_matches = tor_memeq(ei->digest256,
+ sd->extra_info_digest256, DIGEST256_LEN);
+ digest256_matches |=
+ tor_mem_is_zero(sd->extra_info_digest256, DIGEST256_LEN);
+
+ /* The identity must match exactly to have been generated at the same time
+ * by the same router. */
+ if (tor_memneq(sd->identity_digest,
+ ei->cache_info.identity_digest,
+ DIGEST_LEN)) {
+ if (msg) *msg = "Extrainfo nickname or identity did not match routerinfo";
+ goto err; /* different servers */
+ }
+
+ if (! tor_cert_opt_eq(sd->signing_key_cert,
+ ei->cache_info.signing_key_cert)) {
+ if (msg) *msg = "Extrainfo signing key cert didn't match routerinfo";
+ goto err; /* different servers */
+ }
+
+ if (ei->pending_sig) {
+ char signed_digest[128];
+ if (crypto_pk_public_checksig(identity_pkey,
+ signed_digest, sizeof(signed_digest),
+ ei->pending_sig, ei->pending_sig_len) != DIGEST_LEN ||
+ tor_memneq(signed_digest, ei->cache_info.signed_descriptor_digest,
+ DIGEST_LEN)) {
+ ei->bad_sig = 1;
+ tor_free(ei->pending_sig);
+ if (msg) *msg = "Extrainfo signature bad, or signed with wrong key";
+ goto err; /* Bad signature, or no match. */
+ }
+
+ ei->cache_info.send_unencrypted = sd->send_unencrypted;
+ tor_free(ei->pending_sig);
+ }
+
+ if (ei->cache_info.published_on < sd->published_on) {
+ if (msg) *msg = "Extrainfo published time did not match routerdesc";
+ goto err;
+ } else if (ei->cache_info.published_on > sd->published_on) {
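+ /* The extrainfo is newer than the descriptor we hold; presumably there is
+ * a newer matching descriptor that we have not seen yet, so reject this
+ * one silently rather than complaining to the uploader. */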
+ if (msg) *msg = "Extrainfo published time did not match routerdesc";
+ r = -1;
+ goto err;
+ }
+
+ if (!digest256_matches && !digest_matches) {
+ if (msg) *msg = "Neither digest256 or digest matched "
+ "digest from routerdesc";
+ goto err;
+ }
+
+ if (!digest256_matches) {
+ if (msg) *msg = "Extrainfo digest did not match digest256 from routerdesc";
+ goto err; /* Digest doesn't match declared value. */
+ }
+
+ if (!digest_matches) {
+ if (msg) *msg = "Extrainfo digest did not match value from routerdesc";
+ goto err; /* Digest doesn't match declared value. */
+ }
+
+ return 0;
+ err:
+ if (digest_matches) {
+ /* This signature was okay, and the digest was right: This is indeed the
+ * corresponding extrainfo. But insanely, it doesn't match the routerinfo
+ * that lists it. Don't try to fetch this one again. */
+ sd->extrainfo_is_bogus = 1;
+ }
+
+ return r;
+}
+
+/* Does ri have a valid ntor onion key?
+ * Valid ntor onion keys exist and have at least one non-zero byte. */
+int
+routerinfo_has_curve25519_onion_key(const routerinfo_t *ri)
+{
+ if (!ri) {
+ return 0;
+ }
+
+ if (!ri->onion_curve25519_pkey) {
+ return 0;
+ }
+
+ if (tor_mem_is_zero((const char*)ri->onion_curve25519_pkey->public_key,
+ CURVE25519_PUBKEY_LEN)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Is rs running a tor version known to support EXTEND2 cells?
+ * If allow_unknown_versions is true, return true if we can't tell
+ * (from a versions line or a protocols line) whether it supports extend2
+ * cells.
+ * Otherwise, return false if the version is unknown. */
+int
+routerstatus_version_supports_extend2_cells(const routerstatus_t *rs,
+ int allow_unknown_versions)
+{
+ if (!rs) {
+ return allow_unknown_versions;
+ }
+
+ if (!rs->pv.protocols_known) {
+ return allow_unknown_versions;
+ }
+
+ return rs->pv.supports_extend2_cells;
+}
+
+/** Assert that the internal representation of <b>rl</b> is
+ * self-consistent. */
+void
+routerlist_assert_ok(const routerlist_t *rl)
+{
+ routerinfo_t *r2;
+ signed_descriptor_t *sd2;
+ if (!rl)
+ return;
+ SMARTLIST_FOREACH_BEGIN(rl->routers, routerinfo_t *, r) {
+ r2 = rimap_get(rl->identity_map, r->cache_info.identity_digest);
+ tor_assert(r == r2);
+ sd2 = sdmap_get(rl->desc_digest_map,
+ r->cache_info.signed_descriptor_digest);
+ tor_assert(&(r->cache_info) == sd2);
+ tor_assert(r->cache_info.routerlist_index == r_sl_idx);
+ /* XXXX
+ *
+ * Hoo boy. We need to fix this one, and the fix is a bit tricky, so
+ * commenting this out is just a band-aid.
+ *
+ * The problem is that, although well-behaved router descriptors
+ * should never have the same value for their extra_info_digest, it's
+ * possible for ill-behaved routers to claim whatever they like there.
+ *
+ * The real answer is to trash desc_by_eid_map and instead have
+ * something that indicates for a given extra-info digest we want,
+ * what its download status is. We'll do that as a part of routerlist
+ * refactoring once consensus directories are in. For now,
+ * this rep violation is probably harmless: an adversary can make us
+ * reset our retry count for an extrainfo, but that's not the end
+ * of the world. Changing the representation in 0.2.0.x would just
+ * destabilize the codebase.
+ if (!tor_digest_is_zero(r->cache_info.extra_info_digest)) {
+ signed_descriptor_t *sd3 =
+ sdmap_get(rl->desc_by_eid_map, r->cache_info.extra_info_digest);
+ tor_assert(sd3 == &(r->cache_info));
+ }
+ */
+ } SMARTLIST_FOREACH_END(r);
+ SMARTLIST_FOREACH_BEGIN(rl->old_routers, signed_descriptor_t *, sd) {
+ r2 = rimap_get(rl->identity_map, sd->identity_digest);
+ tor_assert(!r2 || sd != &(r2->cache_info));
+ sd2 = sdmap_get(rl->desc_digest_map, sd->signed_descriptor_digest);
+ tor_assert(sd == sd2);
+ tor_assert(sd->routerlist_index == sd_sl_idx);
+ /* XXXX see above.
+ if (!tor_digest_is_zero(sd->extra_info_digest)) {
+ signed_descriptor_t *sd3 =
+ sdmap_get(rl->desc_by_eid_map, sd->extra_info_digest);
+ tor_assert(sd3 == sd);
+ }
+ */
+ } SMARTLIST_FOREACH_END(sd);
+
+ RIMAP_FOREACH(rl->identity_map, d, r) {
+ tor_assert(tor_memeq(r->cache_info.identity_digest, d, DIGEST_LEN));
+ } DIGESTMAP_FOREACH_END;
+ SDMAP_FOREACH(rl->desc_digest_map, d, sd) {
+ tor_assert(tor_memeq(sd->signed_descriptor_digest, d, DIGEST_LEN));
+ } DIGESTMAP_FOREACH_END;
+ SDMAP_FOREACH(rl->desc_by_eid_map, d, sd) {
+ tor_assert(!tor_digest_is_zero(d));
+ tor_assert(sd);
+ tor_assert(tor_memeq(sd->extra_info_digest, d, DIGEST_LEN));
+ } DIGESTMAP_FOREACH_END;
+ EIMAP_FOREACH(rl->extra_info_map, d, ei) {
+ signed_descriptor_t *sd;
+ tor_assert(tor_memeq(ei->cache_info.signed_descriptor_digest,
+ d, DIGEST_LEN));
+ sd = sdmap_get(rl->desc_by_eid_map,
+ ei->cache_info.signed_descriptor_digest);
+ // tor_assert(sd); // XXXX see above
+ if (sd) {
+ tor_assert(tor_memeq(ei->cache_info.signed_descriptor_digest,
+ sd->extra_info_digest, DIGEST_LEN));
+ }
+ } DIGESTMAP_FOREACH_END;
+}
+
+/** Allocate and return a new string representing the contact info
+ * and platform string for <b>router</b>,
+ * surrounded by quotes and using standard C escapes.
+ *
+ * THIS FUNCTION IS NOT REENTRANT. Don't call it from outside the main
+ * thread. Also, each call invalidates the last-returned value, so don't
+ * try log_warn(LD_GENERAL, "%s %s", esc_router_info(a), esc_router_info(b));
+ *
+ * If <b>router</b> is NULL, it just frees its internal memory and returns.
+ */
+const char *
+esc_router_info(const routerinfo_t *router)
+{
+ static char *info=NULL;
+ char *esc_contact, *esc_platform;
+ tor_free(info);
+
+ if (!router)
+ return NULL; /* we're exiting; just free the memory we use */
+
+ esc_contact = esc_for_log(router->contact_info);
+ esc_platform = esc_for_log(router->platform);
+
+ tor_asprintf(&info, "Contact %s, Platform %s", esc_contact, esc_platform);
+ tor_free(esc_contact);
+ tor_free(esc_platform);
+
+ return info;
+}
+
+/** Helper for sorting: compare two routerinfos by their identity
+ * digest. */
+static int
+compare_routerinfo_by_id_digest_(const void **a, const void **b)
+{
+ routerinfo_t *first = *(routerinfo_t **)a, *second = *(routerinfo_t **)b;
+ return fast_memcmp(first->cache_info.identity_digest,
+ second->cache_info.identity_digest,
+ DIGEST_LEN);
+}
+
+/** Sort a list of routerinfo_t in ascending order of identity digest. */
+void
+routers_sort_by_identity(smartlist_t *routers)
+{
+ smartlist_sort(routers, compare_routerinfo_by_id_digest_);
+}
+
+/** Called when we change a node set, or when we reload the geoip IPv4 list:
+ * recompute all country info in all configuration node sets and in the
+ * routerlist. */
+void
+refresh_all_country_info(void)
+{
+ const or_options_t *options = get_options();
+
+ if (options->EntryNodes)
+ routerset_refresh_countries(options->EntryNodes);
+ if (options->ExitNodes)
+ routerset_refresh_countries(options->ExitNodes);
+ if (options->ExcludeNodes)
+ routerset_refresh_countries(options->ExcludeNodes);
+ if (options->ExcludeExitNodes)
+ routerset_refresh_countries(options->ExcludeExitNodes);
+ if (options->ExcludeExitNodesUnion_)
+ routerset_refresh_countries(options->ExcludeExitNodesUnion_);
+
+ nodelist_refresh_countries();
+}
diff --git a/src/feature/nodelist/routerlist.h b/src/feature/nodelist/routerlist.h
new file mode 100644
index 0000000000..4b7406364f
--- /dev/null
+++ b/src/feature/nodelist/routerlist.h
@@ -0,0 +1,347 @@
+/* Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file routerlist.h
+ * \brief Header file for routerlist.c.
+ **/
+
+#ifndef TOR_ROUTERLIST_H
+#define TOR_ROUTERLIST_H
+
+#include "lib/testsupport/testsupport.h"
+
+/** Return value for router_add_to_routerlist() and dirserv_add_descriptor() */
+typedef enum was_router_added_t {
+ /* Router was added successfully. */
+ ROUTER_ADDED_SUCCESSFULLY = 1,
+ /* Extrainfo document was rejected because no corresponding router
+ * descriptor was found OR router descriptor was rejected because
+ * it was incompatible with its extrainfo document. */
+ ROUTER_BAD_EI = -1,
+ /* Router descriptor was rejected because it is already known. */
+ ROUTER_IS_ALREADY_KNOWN = -2,
+  /* General-purpose router was rejected because it was not listed
+   * in the consensus. */
+  ROUTER_NOT_IN_CONSENSUS = -3,
+  /* Router was neither in the directory consensus nor in any
+   * networkstatus document. Cache it so we can access it later.
+   * (Applies to fetched descriptors only.) */
+  ROUTER_NOT_IN_CONSENSUS_OR_NETWORKSTATUS = -4,
+  /* Router was rejected by a directory authority. */
+  ROUTER_AUTHDIR_REJECTS = -5,
+  /* Bridge descriptor was rejected because it was not one of the
+   * bridges listed in our configuration. */
+  ROUTER_WAS_NOT_WANTED = -6,
+ /* Router descriptor was rejected because it was older than
+ * OLD_ROUTER_DESC_MAX_AGE. */
+ ROUTER_WAS_TOO_OLD = -7, /* note contrast with 'NOT_NEW' */
+  /* Router descriptor was rejected because its certificates were
+   * expired. */
+ ROUTER_CERTS_EXPIRED = -8
+} was_router_added_t;
+
+/** Flags to be passed to router_choose_random_node() to indicate what
+ * kind of nodes to pick, and according to what algorithm. */
+typedef enum router_crn_flags_t {
+ CRN_NEED_UPTIME = 1<<0,
+ CRN_NEED_CAPACITY = 1<<1,
+ CRN_NEED_GUARD = 1<<2,
+ /* XXXX not used, apparently. */
+ CRN_WEIGHT_AS_EXIT = 1<<5,
+ CRN_NEED_DESC = 1<<6,
+  /* On clients, only provide nodes that satisfy ClientPreferIPv6ORPort */
+ CRN_PREF_ADDR = 1<<7,
+ /* On clients, only provide nodes that we can connect to directly, based on
+ * our firewall rules */
+ CRN_DIRECT_CONN = 1<<8,
+  /* On clients, only provide nodes that support HSRend protocol version 2
+   * or later, which is required for hidden service version 3 and later. */
+ CRN_RENDEZVOUS_V3 = 1<<9,
+} router_crn_flags_t;
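+
+/* Illustrative note (not part of the original header): these flags are
+ * combined with bitwise OR when calling router_choose_random_node(),
+ * declared below.  For example, a hypothetical caller wanting a fast,
+ * directly reachable guard with a known descriptor might ask for:
+ *
+ *   router_choose_random_node(NULL, NULL,
+ *       CRN_NEED_GUARD | CRN_NEED_CAPACITY | CRN_NEED_DESC |
+ *       CRN_DIRECT_CONN);
+ */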
+
+/** Possible ways to weight routers when choosing one randomly. See
+ * routerlist_sl_choose_by_bandwidth() for more information.*/
+typedef enum bandwidth_weight_rule_t {
+ NO_WEIGHTING, WEIGHT_FOR_EXIT, WEIGHT_FOR_MID, WEIGHT_FOR_GUARD,
+ WEIGHT_FOR_DIR
+} bandwidth_weight_rule_t;
+
+/* Flags for pick_directory_server() and pick_trusteddirserver(). */
+/** Flag to indicate that we should not automatically be willing to use
+ * ourself to answer a directory request.
+ * Passed to router_pick_directory_server (et al).*/
+#define PDS_ALLOW_SELF (1<<0)
+/** Flag to indicate that if no servers seem to be up, we should mark all
+ * directory servers as up and try again.
+ * Passed to router_pick_directory_server (et al).*/
+#define PDS_RETRY_IF_NO_SERVERS (1<<1)
+/** Flag to indicate that we should not exclude directory servers that
+ * our ReachableAddresses settings would exclude.  This usually means that
+ * we're going to connect to the server over Tor, and so we don't need to
+ * worry about our firewall telling us we can't.
+ * Passed to router_pick_directory_server (et al).*/
+#define PDS_IGNORE_FASCISTFIREWALL (1<<2)
+/** Flag to indicate that we should not use any directory authority to which
+ * we have an existing directory connection for downloading server descriptors
+ * or extrainfo documents.
+ *
+ * Passed to router_pick_directory_server (et al)
+ */
+#define PDS_NO_EXISTING_SERVERDESC_FETCH (1<<3)
+/** Flag to indicate that we should not use any directory authority to which
+ * we have an existing directory connection for downloading microdescs.
+ *
+ * Passed to router_pick_directory_server (et al)
+ */
+#define PDS_NO_EXISTING_MICRODESC_FETCH (1<<4)
+
+int get_n_authorities(dirinfo_type_t type);
+int trusted_dirs_reload_certs(void);
+
+/*
+ * Pass one of these as source to trusted_dirs_load_certs_from_string()
+ * to indicate where the string originates; this controls error handling
+ * behavior such as marking downloads as failed.
+ */
+
+#define TRUSTED_DIRS_CERTS_SRC_SELF 0
+#define TRUSTED_DIRS_CERTS_SRC_FROM_STORE 1
+#define TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST 2
+#define TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_SK_DIGEST 3
+#define TRUSTED_DIRS_CERTS_SRC_FROM_VOTE 4
+
+int trusted_dirs_load_certs_from_string(const char *contents, int source,
+ int flush, const char *source_dir);
+void trusted_dirs_flush_certs_to_disk(void);
+authority_cert_t *authority_cert_get_newest_by_id(const char *id_digest);
+authority_cert_t *authority_cert_get_by_sk_digest(const char *sk_digest);
+authority_cert_t *authority_cert_get_by_digests(const char *id_digest,
+ const char *sk_digest);
+void authority_cert_get_all(smartlist_t *certs_out);
+void authority_cert_dl_failed(const char *id_digest,
+ const char *signing_key_digest, int status);
+void authority_certs_fetch_missing(networkstatus_t *status, time_t now,
+ const char *dir_hint);
+int router_reload_router_list(void);
+int authority_cert_dl_looks_uncertain(const char *id_digest);
+const smartlist_t *router_get_trusted_dir_servers(void);
+const smartlist_t *router_get_fallback_dir_servers(void);
+int authority_cert_is_blacklisted(const authority_cert_t *cert);
+
+const routerstatus_t *router_pick_directory_server(dirinfo_type_t type,
+ int flags);
+dir_server_t *router_get_trusteddirserver_by_digest(const char *d);
+dir_server_t *router_get_fallback_dirserver_by_digest(
+ const char *digest);
+int router_digest_is_fallback_dir(const char *digest);
+MOCK_DECL(dir_server_t *, trusteddirserver_get_by_v3_auth_digest,
+ (const char *d));
+const routerstatus_t *router_pick_trusteddirserver(dirinfo_type_t type,
+ int flags);
+const routerstatus_t *router_pick_fallback_dirserver(dirinfo_type_t type,
+ int flags);
+int router_skip_or_reachability(const or_options_t *options, int try_ip_pref);
+int router_get_my_share_of_directory_requests(double *v3_share_out);
+void router_reset_status_download_failures(void);
+int routers_have_same_or_addrs(const routerinfo_t *r1, const routerinfo_t *r2);
+void router_add_running_nodes_to_smartlist(smartlist_t *sl, int need_uptime,
+ int need_capacity, int need_guard,
+ int need_desc, int pref_addr,
+ int direct_conn);
+
+const routerinfo_t *routerlist_find_my_routerinfo(void);
+uint32_t router_get_advertised_bandwidth(const routerinfo_t *router);
+uint32_t router_get_advertised_bandwidth_capped(const routerinfo_t *router);
+
+const node_t *node_sl_choose_by_bandwidth(const smartlist_t *sl,
+ bandwidth_weight_rule_t rule);
+double frac_nodes_with_descriptors(const smartlist_t *sl,
+ bandwidth_weight_rule_t rule,
+ int for_direct_conn);
+
+const node_t *router_choose_random_node(smartlist_t *excludedsmartlist,
+ struct routerset_t *excludedset,
+ router_crn_flags_t flags);
+
+int router_digest_is_trusted_dir_type(const char *digest,
+ dirinfo_type_t type);
+#define router_digest_is_trusted_dir(d) \
+ router_digest_is_trusted_dir_type((d), NO_DIRINFO)
+
+int hexdigest_to_digest(const char *hexdigest, char *digest);
+const routerinfo_t *router_get_by_id_digest(const char *digest);
+routerinfo_t *router_get_mutable_by_digest(const char *digest);
+signed_descriptor_t *router_get_by_descriptor_digest(const char *digest);
+MOCK_DECL(signed_descriptor_t *,router_get_by_extrainfo_digest,
+ (const char *digest));
+MOCK_DECL(signed_descriptor_t *,extrainfo_get_by_descriptor_digest,
+ (const char *digest));
+const char *signed_descriptor_get_body(const signed_descriptor_t *desc);
+const char *signed_descriptor_get_annotations(const signed_descriptor_t *desc);
+routerlist_t *router_get_routerlist(void);
+void routerinfo_free_(routerinfo_t *router);
+#define routerinfo_free(router) \
+ FREE_AND_NULL(routerinfo_t, routerinfo_free_, (router))
+void extrainfo_free_(extrainfo_t *extrainfo);
+#define extrainfo_free(ei) FREE_AND_NULL(extrainfo_t, extrainfo_free_, (ei))
+void routerlist_free_(routerlist_t *rl);
+#define routerlist_free(rl) FREE_AND_NULL(routerlist_t, routerlist_free_, (rl))
+void dump_routerlist_mem_usage(int severity);
+void routerlist_remove(routerlist_t *rl, routerinfo_t *ri, int make_old,
+ time_t now);
+void routerlist_free_all(void);
+void routerlist_reset_warnings(void);
+
+MOCK_DECL(smartlist_t *, list_authority_ids_with_downloads, (void));
+MOCK_DECL(download_status_t *, id_only_download_status_for_authority_id,
+ (const char *digest));
+MOCK_DECL(smartlist_t *, list_sk_digests_for_authority_id,
+ (const char *digest));
+MOCK_DECL(download_status_t *, download_status_for_authority_id_and_sk,
+ (const char *id_digest, const char *sk_digest));
+
+static int WRA_WAS_ADDED(was_router_added_t s);
+static int WRA_WAS_OUTDATED(was_router_added_t s);
+static int WRA_WAS_REJECTED(was_router_added_t s);
+static int WRA_NEVER_DOWNLOADABLE(was_router_added_t s);
+/** Return true iff the outcome code in <b>s</b> indicates that the descriptor
+ * was added. It might still be necessary to check whether the descriptor
+ * generator should be notified.
+ */
+static inline int
+WRA_WAS_ADDED(was_router_added_t s) {
+ return s == ROUTER_ADDED_SUCCESSFULLY;
+}
+/** Return true iff the outcome code in <b>s</b> indicates that the descriptor
+ * was not added because it was either:
+ *  - already known,
+ *  - not in the consensus,
+ *  - neither in the consensus nor in any networkstatus document,
+ *  - too old, or
+ *  - signed with expired certificates.
+ */
+static inline int WRA_WAS_OUTDATED(was_router_added_t s)
+{
+ return (s == ROUTER_WAS_TOO_OLD ||
+ s == ROUTER_IS_ALREADY_KNOWN ||
+ s == ROUTER_NOT_IN_CONSENSUS ||
+ s == ROUTER_NOT_IN_CONSENSUS_OR_NETWORKSTATUS ||
+ s == ROUTER_CERTS_EXPIRED);
+}
+/** Return true iff the outcome code in <b>s</b> indicates that the descriptor
+ * was flat-out rejected. */
+static inline int WRA_WAS_REJECTED(was_router_added_t s)
+{
+ return (s == ROUTER_AUTHDIR_REJECTS);
+}
+/** Return true iff the outcome code in <b>s</b> indicates that we should
+ * never try to download this descriptor again: it was rejected by an
+ * authority, had a mismatched extra-info document, was too old, or had
+ * expired certificates. */
+static inline int WRA_NEVER_DOWNLOADABLE(was_router_added_t s)
+{
+ return (s == ROUTER_AUTHDIR_REJECTS ||
+ s == ROUTER_BAD_EI ||
+ s == ROUTER_WAS_TOO_OLD ||
+ s == ROUTER_CERTS_EXPIRED);
+}
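+
+/* Hedged usage sketch (not part of the original header): a hypothetical
+ * caller of router_add_to_routerlist() might branch on the outcome code
+ * with the predicates above.  The helper name and the logging policy here
+ * are illustrative only. */
+static inline void
+example_note_add_outcome(was_router_added_t outcome)
+{
+  if (WRA_WAS_ADDED(outcome)) {
+    /* The descriptor is now part of the routerlist. */
+  } else if (WRA_WAS_OUTDATED(outcome)) {
+    /* Not an error: duplicate, stale, or not referenced by the consensus. */
+  } else if (WRA_WAS_REJECTED(outcome)) {
+    /* An authority explicitly rejected it. */
+  }
+  if (WRA_NEVER_DOWNLOADABLE(outcome)) {
+    /* Don't schedule another download attempt for this digest. */
+  }
+}
+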
+was_router_added_t router_add_to_routerlist(routerinfo_t *router,
+ const char **msg,
+ int from_cache,
+ int from_fetch);
+was_router_added_t router_add_extrainfo_to_routerlist(
+ extrainfo_t *ei, const char **msg,
+ int from_cache, int from_fetch);
+void routerlist_descriptors_added(smartlist_t *sl, int from_cache);
+void routerlist_remove_old_routers(void);
+int router_load_single_router(const char *s, uint8_t purpose, int cache,
+ const char **msg);
+int router_load_routers_from_string(const char *s, const char *eos,
+ saved_location_t saved_location,
+ smartlist_t *requested_fingerprints,
+ int descriptor_digests,
+ const char *prepend_annotations);
+void router_load_extrainfo_from_string(const char *s, const char *eos,
+ saved_location_t saved_location,
+ smartlist_t *requested_fingerprints,
+ int descriptor_digests);
+
+void routerlist_retry_directory_downloads(time_t now);
+
+int router_exit_policy_rejects_all(const routerinfo_t *router);
+
+dir_server_t *trusted_dir_server_new(const char *nickname, const char *address,
+ uint16_t dir_port, uint16_t or_port,
+ const tor_addr_port_t *addrport_ipv6,
+ const char *digest, const char *v3_auth_digest,
+ dirinfo_type_t type, double weight);
+dir_server_t *fallback_dir_server_new(const tor_addr_t *addr,
+ uint16_t dir_port, uint16_t or_port,
+ const tor_addr_port_t *addrport_ipv6,
+ const char *id_digest, double weight);
+void dir_server_add(dir_server_t *ent);
+
+void authority_cert_free_(authority_cert_t *cert);
+#define authority_cert_free(cert) \
+ FREE_AND_NULL(authority_cert_t, authority_cert_free_, (cert))
+void clear_dir_servers(void);
+void update_consensus_router_descriptor_downloads(time_t now, int is_vote,
+ networkstatus_t *consensus);
+void update_router_descriptor_downloads(time_t now);
+void update_all_descriptor_downloads(time_t now);
+void update_extrainfo_downloads(time_t now);
+void router_reset_descriptor_download_failures(void);
+int router_differences_are_cosmetic(const routerinfo_t *r1,
+ const routerinfo_t *r2);
+int routerinfo_incompatible_with_extrainfo(const crypto_pk_t *ri,
+ extrainfo_t *ei,
+ signed_descriptor_t *sd,
+ const char **msg);
+int routerinfo_has_curve25519_onion_key(const routerinfo_t *ri);
+int routerstatus_version_supports_extend2_cells(const routerstatus_t *rs,
+ int allow_unknown_versions);
+
+void routerlist_assert_ok(const routerlist_t *rl);
+const char *esc_router_info(const routerinfo_t *router);
+void routers_sort_by_identity(smartlist_t *routers);
+
+void refresh_all_country_info(void);
+
+void list_pending_microdesc_downloads(digest256map_t *result);
+void launch_descriptor_downloads(int purpose,
+ smartlist_t *downloadable,
+ const routerstatus_t *source,
+ time_t now);
+
+int hex_digest_nickname_decode(const char *hexdigest,
+ char *digest_out,
+ char *nickname_qualifier_out,
+ char *nickname_out);
+int hex_digest_nickname_matches(const char *hexdigest,
+ const char *identity_digest,
+ const char *nickname);
+
+#ifdef ROUTERLIST_PRIVATE
+STATIC int choose_array_element_by_weight(const uint64_t *entries,
+ int n_entries);
+STATIC void scale_array_elements_to_u64(uint64_t *entries_out,
+ const double *entries_in,
+ int n_entries,
+ uint64_t *total_out);
+STATIC const routerstatus_t *router_pick_directory_server_impl(
+ dirinfo_type_t auth, int flags,
+ int *n_busy_out);
+
+MOCK_DECL(int, router_descriptor_is_older_than, (const routerinfo_t *router,
+ int seconds));
+MOCK_DECL(STATIC was_router_added_t, extrainfo_insert,
+ (routerlist_t *rl, extrainfo_t *ei, int warn_if_incompatible));
+
+MOCK_DECL(STATIC void, initiate_descriptor_downloads,
+ (const routerstatus_t *source, int purpose, smartlist_t *digests,
+ int lo, int hi, int pds_flags));
+STATIC int router_is_already_dir_fetching(const tor_addr_port_t *ap,
+ int serverdesc, int microdesc);
+
+#endif /* defined(ROUTERLIST_PRIVATE) */
+
+#endif /* !defined(TOR_ROUTERLIST_H) */
diff --git a/src/feature/nodelist/routerlist_st.h b/src/feature/nodelist/routerlist_st.h
new file mode 100644
index 0000000000..0b94a4dfcd
--- /dev/null
+++ b/src/feature/nodelist/routerlist_st.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef ROUTERLIST_ST_H
+#define ROUTERLIST_ST_H
+
+#include "or/desc_store_st.h"
+
+/** Contents of a directory of onion routers. */
+struct routerlist_t {
+ /** Map from server identity digest to a member of routers. */
+ struct digest_ri_map_t *identity_map;
+ /** Map from server descriptor digest to a signed_descriptor_t from
+ * routers or old_routers. */
+ struct digest_sd_map_t *desc_digest_map;
+ /** Map from extra-info digest to an extrainfo_t. Only exists for
+ * routers in routers or old_routers. */
+ struct digest_ei_map_t *extra_info_map;
+ /** Map from extra-info digests to a signed_descriptor_t for a router
+ * descriptor having that extra-info digest. Only exists for
+ * routers in routers or old_routers. */
+ struct digest_sd_map_t *desc_by_eid_map;
+ /** List of routerinfo_t for all currently live routers we know. */
+ smartlist_t *routers;
+ /** List of signed_descriptor_t for older router descriptors we're
+ * caching. */
+ smartlist_t *old_routers;
+ /** Store holding server descriptors. If present, any router whose
+ * cache_info.saved_location == SAVED_IN_CACHE is stored in this file
+ * starting at cache_info.saved_offset */
+ desc_store_t desc_store;
+ /** Store holding extra-info documents. */
+ desc_store_t extrainfo_store;
+};
+
+#endif
+
diff --git a/src/feature/nodelist/routerparse.c b/src/feature/nodelist/routerparse.c
new file mode 100644
index 0000000000..273666046b
--- /dev/null
+++ b/src/feature/nodelist/routerparse.c
@@ -0,0 +1,5693 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file routerparse.c
+ * \brief Code to parse and validate router descriptors, consensus directories,
+ * and similar objects.
+ *
+ * The objects parsed by this module use a common text-based metaformat,
+ * documented in dir-spec.txt in torspec.git. This module is itself divided
+ * into two major kinds of function: code to handle the metaformat, and code
+ * to convert from particular instances of the metaformat into the
+ * objects that Tor uses.
+ *
+ * The generic parsing code works by calling a table-based tokenizer on the
+ * input string. Each token corresponds to a single line with a token, plus
+ * optional arguments on that line, plus an optional base-64 encoded object
+ * after that line. Each token has a definition in a table of token_rule_t
+ * entries that describes how many arguments it can take, whether it takes an
+ * object, how many times it may appear, whether it must appear first, and so
+ * on.
+ *
+ * The tokenizer function tokenize_string() converts its string input into a
+ * smartlist full of instances of directory_token_t, according to a provided
+ * table of token_rule_t.
+ *
+ * The generic parts of this module additionally include functions for
+ * finding the start and end of signed information inside a signed object, and
+ * computing the digest that will be signed.
+ *
+ * There are also functions for saving objects to disk that have caused
+ * parsing to fail.
+ *
+ * The specific parts of this module describe conversions between
+ * particular lists of directory_token_t and particular objects. The
+ * kinds of objects that can be parsed here are:
+ * <ul>
+ * <li>router descriptors (managed from routerlist.c)
+ * <li>extra-info documents (managed from routerlist.c)
+ * <li>microdescriptors (managed from microdesc.c)
+ * <li>vote and consensus networkstatus documents, and the routerstatus_t
+ * objects that they comprise (managed from networkstatus.c)
+ * <li>detached-signature objects used by authorities for gathering
+ * signatures on the networkstatus consensus (managed from dirvote.c)
+ * <li>authority key certificates (managed from routerlist.c)
+ * <li>hidden service descriptors (managed from rendcommon.c and rendcache.c)
+ * </ul>
+ *
+ * For no terribly good reason, the functions to <i>generate</i> signatures on
+ * the above directory objects are also in this module.
+ **/
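+
+/* Illustrative sketch of the metaformat described above (the keywords and
+ * values here are made up; the real grammars are given by the token tables
+ * below and by dir-spec.txt):
+ *
+ *   example-keyword arg1 arg2
+ *   keyword-with-object
+ *   -----BEGIN THING-----
+ *   <base64 data>
+ *   -----END THING-----
+ *
+ * tokenize_string() turns each such line (plus its optional object) into a
+ * directory_token_t, checked against the matching token_rule_t entry. */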
+
+#define ROUTERPARSE_PRIVATE
+
+#include "or/or.h"
+#include "or/circuitstats.h"
+#include "or/config.h"
+#include "lib/crypt_ops/crypto_format.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/dirauth/shared_random.h"
+#include "or/dirserv.h"
+#include "or/entrynodes.h"
+#include "lib/memarea/memarea.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/parsecommon.h"
+#include "or/policies.h"
+#include "or/protover.h"
+#include "or/rendcommon.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerkeys.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "lib/sandbox/sandbox.h"
+#include "or/shared_random_client.h"
+#include "or/torcert.h"
+#include "or/voting_schedule.h"
+
+#include "or/dirauth/dirvote.h"
+
+#include "or/addr_policy_st.h"
+#include "or/authority_cert_st.h"
+#include "or/document_signature_st.h"
+#include "or/extend_info_st.h"
+#include "or/extrainfo_st.h"
+#include "or/microdesc_st.h"
+#include "or/networkstatus_st.h"
+#include "or/networkstatus_voter_info_st.h"
+#include "or/ns_detached_signatures_st.h"
+#include "or/rend_authorized_client_st.h"
+#include "or/rend_intro_point_st.h"
+#include "or/rend_service_descriptor_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerlist_st.h"
+#include "or/tor_version_st.h"
+#include "or/vote_microdesc_hash_st.h"
+#include "or/vote_routerstatus_st.h"
+
+#include "lib/container/bloomfilt.h"
+
+#undef log
+#include <math.h>
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+/****************************************************************************/
+
+/** List of tokens recognized in router descriptors */
+static token_rule_t routerdesc_token_table[] = {
+ T0N("reject", K_REJECT, ARGS, NO_OBJ ),
+ T0N("accept", K_ACCEPT, ARGS, NO_OBJ ),
+ T0N("reject6", K_REJECT6, ARGS, NO_OBJ ),
+ T0N("accept6", K_ACCEPT6, ARGS, NO_OBJ ),
+ T1_START( "router", K_ROUTER, GE(5), NO_OBJ ),
+ T01("ipv6-policy", K_IPV6_POLICY, CONCAT_ARGS, NO_OBJ),
+ T1( "signing-key", K_SIGNING_KEY, NO_ARGS, NEED_KEY_1024 ),
+ T1( "onion-key", K_ONION_KEY, NO_ARGS, NEED_KEY_1024 ),
+ T01("ntor-onion-key", K_ONION_KEY_NTOR, GE(1), NO_OBJ ),
+ T1_END( "router-signature", K_ROUTER_SIGNATURE, NO_ARGS, NEED_OBJ ),
+ T1( "published", K_PUBLISHED, CONCAT_ARGS, NO_OBJ ),
+ T01("uptime", K_UPTIME, GE(1), NO_OBJ ),
+ T01("fingerprint", K_FINGERPRINT, CONCAT_ARGS, NO_OBJ ),
+ T01("hibernating", K_HIBERNATING, GE(1), NO_OBJ ),
+ T01("platform", K_PLATFORM, CONCAT_ARGS, NO_OBJ ),
+ T01("proto", K_PROTO, CONCAT_ARGS, NO_OBJ ),
+ T01("contact", K_CONTACT, CONCAT_ARGS, NO_OBJ ),
+ T01("read-history", K_READ_HISTORY, ARGS, NO_OBJ ),
+ T01("write-history", K_WRITE_HISTORY, ARGS, NO_OBJ ),
+ T01("extra-info-digest", K_EXTRA_INFO_DIGEST, GE(1), NO_OBJ ),
+ T01("hidden-service-dir", K_HIDDEN_SERVICE_DIR, NO_ARGS, NO_OBJ ),
+ T01("identity-ed25519", K_IDENTITY_ED25519, NO_ARGS, NEED_OBJ ),
+ T01("master-key-ed25519", K_MASTER_KEY_ED25519, GE(1), NO_OBJ ),
+ T01("router-sig-ed25519", K_ROUTER_SIG_ED25519, GE(1), NO_OBJ ),
+ T01("onion-key-crosscert", K_ONION_KEY_CROSSCERT, NO_ARGS, NEED_OBJ ),
+ T01("ntor-onion-key-crosscert", K_NTOR_ONION_KEY_CROSSCERT,
+ EQ(1), NEED_OBJ ),
+
+ T01("allow-single-hop-exits",K_ALLOW_SINGLE_HOP_EXITS, NO_ARGS, NO_OBJ ),
+
+ T01("family", K_FAMILY, ARGS, NO_OBJ ),
+ T01("caches-extra-info", K_CACHES_EXTRA_INFO, NO_ARGS, NO_OBJ ),
+ T0N("or-address", K_OR_ADDRESS, GE(1), NO_OBJ ),
+
+ T0N("opt", K_OPT, CONCAT_ARGS, OBJ_OK ),
+ T1( "bandwidth", K_BANDWIDTH, GE(3), NO_OBJ ),
+ A01("@purpose", A_PURPOSE, GE(1), NO_OBJ ),
+ T01("tunnelled-dir-server",K_DIR_TUNNELLED, NO_ARGS, NO_OBJ ),
+
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in extra-info documents. */
+static token_rule_t extrainfo_token_table[] = {
+ T1_END( "router-signature", K_ROUTER_SIGNATURE, NO_ARGS, NEED_OBJ ),
+ T1( "published", K_PUBLISHED, CONCAT_ARGS, NO_OBJ ),
+ T01("identity-ed25519", K_IDENTITY_ED25519, NO_ARGS, NEED_OBJ ),
+ T01("router-sig-ed25519", K_ROUTER_SIG_ED25519, GE(1), NO_OBJ ),
+ T0N("opt", K_OPT, CONCAT_ARGS, OBJ_OK ),
+ T01("read-history", K_READ_HISTORY, ARGS, NO_OBJ ),
+ T01("write-history", K_WRITE_HISTORY, ARGS, NO_OBJ ),
+ T01("dirreq-stats-end", K_DIRREQ_END, ARGS, NO_OBJ ),
+ T01("dirreq-v2-ips", K_DIRREQ_V2_IPS, ARGS, NO_OBJ ),
+ T01("dirreq-v3-ips", K_DIRREQ_V3_IPS, ARGS, NO_OBJ ),
+ T01("dirreq-v2-reqs", K_DIRREQ_V2_REQS, ARGS, NO_OBJ ),
+ T01("dirreq-v3-reqs", K_DIRREQ_V3_REQS, ARGS, NO_OBJ ),
+ T01("dirreq-v2-share", K_DIRREQ_V2_SHARE, ARGS, NO_OBJ ),
+ T01("dirreq-v3-share", K_DIRREQ_V3_SHARE, ARGS, NO_OBJ ),
+ T01("dirreq-v2-resp", K_DIRREQ_V2_RESP, ARGS, NO_OBJ ),
+ T01("dirreq-v3-resp", K_DIRREQ_V3_RESP, ARGS, NO_OBJ ),
+ T01("dirreq-v2-direct-dl", K_DIRREQ_V2_DIR, ARGS, NO_OBJ ),
+ T01("dirreq-v3-direct-dl", K_DIRREQ_V3_DIR, ARGS, NO_OBJ ),
+ T01("dirreq-v2-tunneled-dl", K_DIRREQ_V2_TUN, ARGS, NO_OBJ ),
+ T01("dirreq-v3-tunneled-dl", K_DIRREQ_V3_TUN, ARGS, NO_OBJ ),
+ T01("entry-stats-end", K_ENTRY_END, ARGS, NO_OBJ ),
+ T01("entry-ips", K_ENTRY_IPS, ARGS, NO_OBJ ),
+ T01("cell-stats-end", K_CELL_END, ARGS, NO_OBJ ),
+ T01("cell-processed-cells", K_CELL_PROCESSED, ARGS, NO_OBJ ),
+ T01("cell-queued-cells", K_CELL_QUEUED, ARGS, NO_OBJ ),
+ T01("cell-time-in-queue", K_CELL_TIME, ARGS, NO_OBJ ),
+ T01("cell-circuits-per-decile", K_CELL_CIRCS, ARGS, NO_OBJ ),
+ T01("exit-stats-end", K_EXIT_END, ARGS, NO_OBJ ),
+ T01("exit-kibibytes-written", K_EXIT_WRITTEN, ARGS, NO_OBJ ),
+ T01("exit-kibibytes-read", K_EXIT_READ, ARGS, NO_OBJ ),
+ T01("exit-streams-opened", K_EXIT_OPENED, ARGS, NO_OBJ ),
+
+ T1_START( "extra-info", K_EXTRA_INFO, GE(2), NO_OBJ ),
+
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in the body part of v3 networkstatus
+ * documents. */
+static token_rule_t rtrstatus_token_table[] = {
+ T01("p", K_P, CONCAT_ARGS, NO_OBJ ),
+ T1( "r", K_R, GE(7), NO_OBJ ),
+ T0N("a", K_A, GE(1), NO_OBJ ),
+ T1( "s", K_S, ARGS, NO_OBJ ),
+ T01("v", K_V, CONCAT_ARGS, NO_OBJ ),
+ T01("w", K_W, ARGS, NO_OBJ ),
+ T0N("m", K_M, CONCAT_ARGS, NO_OBJ ),
+ T0N("id", K_ID, GE(2), NO_OBJ ),
+ T01("pr", K_PROTO, CONCAT_ARGS, NO_OBJ ),
+ T0N("opt", K_OPT, CONCAT_ARGS, OBJ_OK ),
+ END_OF_TABLE
+};
+
+/** List of tokens common to V3 authority certificates and V3 consensuses. */
+#define CERTIFICATE_MEMBERS \
+ T1("dir-key-certificate-version", K_DIR_KEY_CERTIFICATE_VERSION, \
+ GE(1), NO_OBJ ), \
+ T1("dir-identity-key", K_DIR_IDENTITY_KEY, NO_ARGS, NEED_KEY ),\
+ T1("dir-key-published",K_DIR_KEY_PUBLISHED, CONCAT_ARGS, NO_OBJ), \
+ T1("dir-key-expires", K_DIR_KEY_EXPIRES, CONCAT_ARGS, NO_OBJ), \
+ T1("dir-signing-key", K_DIR_SIGNING_KEY, NO_ARGS, NEED_KEY ),\
+ T1("dir-key-crosscert", K_DIR_KEY_CROSSCERT, NO_ARGS, NEED_OBJ ),\
+ T1("dir-key-certification", K_DIR_KEY_CERTIFICATION, \
+ NO_ARGS, NEED_OBJ), \
+ T01("dir-address", K_DIR_ADDRESS, GE(1), NO_OBJ),
+
+/** List of tokens recognized in V3 authority certificates. */
+static token_rule_t dir_key_certificate_table[] = {
+ CERTIFICATE_MEMBERS
+ T1("fingerprint", K_FINGERPRINT, CONCAT_ARGS, NO_OBJ ),
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in rendezvous service descriptors */
+static token_rule_t desc_token_table[] = {
+ T1_START("rendezvous-service-descriptor", R_RENDEZVOUS_SERVICE_DESCRIPTOR,
+ EQ(1), NO_OBJ),
+ T1("version", R_VERSION, EQ(1), NO_OBJ),
+ T1("permanent-key", R_PERMANENT_KEY, NO_ARGS, NEED_KEY_1024),
+ T1("secret-id-part", R_SECRET_ID_PART, EQ(1), NO_OBJ),
+ T1("publication-time", R_PUBLICATION_TIME, CONCAT_ARGS, NO_OBJ),
+ T1("protocol-versions", R_PROTOCOL_VERSIONS, EQ(1), NO_OBJ),
+ T01("introduction-points", R_INTRODUCTION_POINTS, NO_ARGS, NEED_OBJ),
+ T1_END("signature", R_SIGNATURE, NO_ARGS, NEED_OBJ),
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in the (encrypted) list of introduction points of
+ * rendezvous service descriptors */
+static token_rule_t ipo_token_table[] = {
+ T1_START("introduction-point", R_IPO_IDENTIFIER, EQ(1), NO_OBJ),
+ T1("ip-address", R_IPO_IP_ADDRESS, EQ(1), NO_OBJ),
+ T1("onion-port", R_IPO_ONION_PORT, EQ(1), NO_OBJ),
+ T1("onion-key", R_IPO_ONION_KEY, NO_ARGS, NEED_KEY_1024),
+ T1("service-key", R_IPO_SERVICE_KEY, NO_ARGS, NEED_KEY_1024),
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in the client_keys file used for hidden-service
+ * client authorization. */
+static token_rule_t client_keys_token_table[] = {
+ T1_START("client-name", C_CLIENT_NAME, CONCAT_ARGS, NO_OBJ),
+ T1("descriptor-cookie", C_DESCRIPTOR_COOKIE, EQ(1), NO_OBJ),
+ T01("client-key", C_CLIENT_KEY, NO_ARGS, NEED_SKEY_1024),
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in V3 networkstatus votes. */
+static token_rule_t networkstatus_token_table[] = {
+ T1_START("network-status-version", K_NETWORK_STATUS_VERSION,
+ GE(1), NO_OBJ ),
+ T1("vote-status", K_VOTE_STATUS, GE(1), NO_OBJ ),
+ T1("published", K_PUBLISHED, CONCAT_ARGS, NO_OBJ ),
+ T1("valid-after", K_VALID_AFTER, CONCAT_ARGS, NO_OBJ ),
+ T1("fresh-until", K_FRESH_UNTIL, CONCAT_ARGS, NO_OBJ ),
+ T1("valid-until", K_VALID_UNTIL, CONCAT_ARGS, NO_OBJ ),
+ T1("voting-delay", K_VOTING_DELAY, GE(2), NO_OBJ ),
+ T1("known-flags", K_KNOWN_FLAGS, ARGS, NO_OBJ ),
+ T01("params", K_PARAMS, ARGS, NO_OBJ ),
+ T( "fingerprint", K_FINGERPRINT, CONCAT_ARGS, NO_OBJ ),
+ T01("signing-ed25519", K_SIGNING_CERT_ED, NO_ARGS , NEED_OBJ ),
+ T01("shared-rand-participate",K_SR_FLAG, NO_ARGS, NO_OBJ ),
+ T0N("shared-rand-commit", K_COMMIT, GE(3), NO_OBJ ),
+ T01("shared-rand-previous-value", K_PREVIOUS_SRV,EQ(2), NO_OBJ ),
+ T01("shared-rand-current-value", K_CURRENT_SRV, EQ(2), NO_OBJ ),
+ T0N("package", K_PACKAGE, CONCAT_ARGS, NO_OBJ ),
+ T01("recommended-client-protocols", K_RECOMMENDED_CLIENT_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+ T01("recommended-relay-protocols", K_RECOMMENDED_RELAY_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+ T01("required-client-protocols", K_REQUIRED_CLIENT_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+ T01("required-relay-protocols", K_REQUIRED_RELAY_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+
+ CERTIFICATE_MEMBERS
+
+ T0N("opt", K_OPT, CONCAT_ARGS, OBJ_OK ),
+ T1( "contact", K_CONTACT, CONCAT_ARGS, NO_OBJ ),
+ T1( "dir-source", K_DIR_SOURCE, GE(6), NO_OBJ ),
+ T01("legacy-dir-key", K_LEGACY_DIR_KEY, GE(1), NO_OBJ ),
+ T1( "known-flags", K_KNOWN_FLAGS, CONCAT_ARGS, NO_OBJ ),
+ T01("client-versions", K_CLIENT_VERSIONS, CONCAT_ARGS, NO_OBJ ),
+ T01("server-versions", K_SERVER_VERSIONS, CONCAT_ARGS, NO_OBJ ),
+ T1( "consensus-methods", K_CONSENSUS_METHODS, GE(1), NO_OBJ ),
+
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in V3 networkstatus consensuses. */
+static token_rule_t networkstatus_consensus_token_table[] = {
+ T1_START("network-status-version", K_NETWORK_STATUS_VERSION,
+ GE(1), NO_OBJ ),
+ T1("vote-status", K_VOTE_STATUS, GE(1), NO_OBJ ),
+ T1("valid-after", K_VALID_AFTER, CONCAT_ARGS, NO_OBJ ),
+ T1("fresh-until", K_FRESH_UNTIL, CONCAT_ARGS, NO_OBJ ),
+ T1("valid-until", K_VALID_UNTIL, CONCAT_ARGS, NO_OBJ ),
+ T1("voting-delay", K_VOTING_DELAY, GE(2), NO_OBJ ),
+
+ T0N("opt", K_OPT, CONCAT_ARGS, OBJ_OK ),
+
+ T1N("dir-source", K_DIR_SOURCE, GE(6), NO_OBJ ),
+ T1N("contact", K_CONTACT, CONCAT_ARGS, NO_OBJ ),
+ T1N("vote-digest", K_VOTE_DIGEST, GE(1), NO_OBJ ),
+
+ T1( "known-flags", K_KNOWN_FLAGS, CONCAT_ARGS, NO_OBJ ),
+
+ T01("client-versions", K_CLIENT_VERSIONS, CONCAT_ARGS, NO_OBJ ),
+ T01("server-versions", K_SERVER_VERSIONS, CONCAT_ARGS, NO_OBJ ),
+ T01("consensus-method", K_CONSENSUS_METHOD, EQ(1), NO_OBJ),
+ T01("params", K_PARAMS, ARGS, NO_OBJ ),
+
+ T01("shared-rand-previous-value", K_PREVIOUS_SRV, EQ(2), NO_OBJ ),
+ T01("shared-rand-current-value", K_CURRENT_SRV, EQ(2), NO_OBJ ),
+
+ T01("recommended-client-protocols", K_RECOMMENDED_CLIENT_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+ T01("recommended-relay-protocols", K_RECOMMENDED_RELAY_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+ T01("required-client-protocols", K_REQUIRED_CLIENT_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+ T01("required-relay-protocols", K_REQUIRED_RELAY_PROTOCOLS,
+ CONCAT_ARGS, NO_OBJ ),
+
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in the footer of v3 networkstatus votes and
+ * consensuses. */
+static token_rule_t networkstatus_vote_footer_token_table[] = {
+ T01("directory-footer", K_DIRECTORY_FOOTER, NO_ARGS, NO_OBJ ),
+ T01("bandwidth-weights", K_BW_WEIGHTS, ARGS, NO_OBJ ),
+ T( "directory-signature", K_DIRECTORY_SIGNATURE, GE(2), NEED_OBJ ),
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in detached networkstatus signature documents. */
+static token_rule_t networkstatus_detached_signature_token_table[] = {
+ T1_START("consensus-digest", K_CONSENSUS_DIGEST, GE(1), NO_OBJ ),
+ T("additional-digest", K_ADDITIONAL_DIGEST,GE(3), NO_OBJ ),
+ T1("valid-after", K_VALID_AFTER, CONCAT_ARGS, NO_OBJ ),
+ T1("fresh-until", K_FRESH_UNTIL, CONCAT_ARGS, NO_OBJ ),
+ T1("valid-until", K_VALID_UNTIL, CONCAT_ARGS, NO_OBJ ),
+ T("additional-signature", K_ADDITIONAL_SIGNATURE, GE(4), NEED_OBJ ),
+ T1N("directory-signature", K_DIRECTORY_SIGNATURE, GE(2), NEED_OBJ ),
+ END_OF_TABLE
+};
+
+/** List of tokens recognized in microdescriptors */
+static token_rule_t microdesc_token_table[] = {
+ T1_START("onion-key", K_ONION_KEY, NO_ARGS, NEED_KEY_1024),
+ T01("ntor-onion-key", K_ONION_KEY_NTOR, GE(1), NO_OBJ ),
+ T0N("id", K_ID, GE(2), NO_OBJ ),
+ T0N("a", K_A, GE(1), NO_OBJ ),
+ T01("family", K_FAMILY, ARGS, NO_OBJ ),
+ T01("p", K_P, CONCAT_ARGS, NO_OBJ ),
+ T01("p6", K_P6, CONCAT_ARGS, NO_OBJ ),
+ A01("@last-listed", A_LAST_LISTED, CONCAT_ARGS, NO_OBJ ),
+ END_OF_TABLE
+};
+
+#undef T
+
+/* static function prototypes */
+static int router_add_exit_policy(routerinfo_t *router,directory_token_t *tok);
+static addr_policy_t *router_parse_addr_policy(directory_token_t *tok,
+ unsigned fmt_flags);
+static addr_policy_t *router_parse_addr_policy_private(directory_token_t *tok);
+
+static int router_get_hash_impl_helper(const char *s, size_t s_len,
+ const char *start_str,
+ const char *end_str, char end_c,
+ int log_severity,
+ const char **start_out, const char **end_out);
+static int router_get_hash_impl(const char *s, size_t s_len, char *digest,
+ const char *start_str, const char *end_str,
+ char end_char,
+ digest_algorithm_t alg);
+static int router_get_hashes_impl(const char *s, size_t s_len,
+ common_digests_t *digests,
+ const char *start_str, const char *end_str,
+ char end_char);
+static smartlist_t *find_all_exitpolicy(smartlist_t *s);
+
+#define CST_NO_CHECK_OBJTYPE (1<<0)
+static int check_signature_token(const char *digest,
+ ssize_t digest_len,
+ directory_token_t *tok,
+ crypto_pk_t *pkey,
+ int flags,
+ const char *doctype);
+
+#undef DEBUG_AREA_ALLOC
+
+#ifdef DEBUG_AREA_ALLOC
+#define DUMP_AREA(a,name) STMT_BEGIN \
+ size_t alloc=0, used=0; \
+ memarea_get_stats((a),&alloc,&used); \
+ log_debug(LD_MM, "Area for %s has %lu allocated; using %lu.", \
+ name, (unsigned long)alloc, (unsigned long)used); \
+ STMT_END
+#else /* !(defined(DEBUG_AREA_ALLOC)) */
+#define DUMP_AREA(a,name) STMT_NIL
+#endif /* defined(DEBUG_AREA_ALLOC) */
+
+/* Dump mechanism for unparseable descriptors */
+
+/** List of dumped descriptors for FIFO cleanup purposes */
+STATIC smartlist_t *descs_dumped = NULL;
+/** Total size of dumped descriptors for FIFO cleanup */
+STATIC uint64_t len_descs_dumped = 0;
+/** True iff the directory to stash dumps in already exists */
+static int have_dump_desc_dir = 0;
+/** True iff we hit a problem finding or creating the dump directory */
+static int problem_with_dump_desc_dir = 0;
+
+#define DESC_DUMP_DATADIR_SUBDIR "unparseable-descs"
+#define DESC_DUMP_BASE_FILENAME "unparseable-desc"
+
+/** Find the dump directory and check if we'll be able to create it */
+static void
+dump_desc_init(void)
+{
+ char *dump_desc_dir;
+
+ dump_desc_dir = get_datadir_fname(DESC_DUMP_DATADIR_SUBDIR);
+
+ /*
+ * We just check for it, don't create it at this point; we'll
+ * create it when we need it if it isn't already there.
+ */
+ if (check_private_dir(dump_desc_dir, CPD_CHECK, get_options()->User) < 0) {
+ /* Error, log and flag it as having a problem */
+ log_notice(LD_DIR,
+ "Doesn't look like we'll be able to create descriptor dump "
+ "directory %s; dumps will be disabled.",
+ dump_desc_dir);
+ problem_with_dump_desc_dir = 1;
+ tor_free(dump_desc_dir);
+ return;
+ }
+
+ /* Check if it exists */
+ switch (file_status(dump_desc_dir)) {
+ case FN_DIR:
+ /* We already have a directory */
+ have_dump_desc_dir = 1;
+ break;
+ case FN_NOENT:
+ /* Nothing, we'll need to create it later */
+ have_dump_desc_dir = 0;
+ break;
+ case FN_ERROR:
+ /* Log and flag having a problem */
+ log_notice(LD_DIR,
+ "Couldn't check whether descriptor dump directory %s already"
+ " exists: %s",
+ dump_desc_dir, strerror(errno));
+ problem_with_dump_desc_dir = 1;
+ break;
+ case FN_FILE:
+ case FN_EMPTY:
+ default:
+ /* Something else was here! */
+ log_notice(LD_DIR,
+ "Descriptor dump directory %s already exists and isn't a "
+ "directory",
+ dump_desc_dir);
+ problem_with_dump_desc_dir = 1;
+ }
+
+ if (have_dump_desc_dir && !problem_with_dump_desc_dir) {
+ dump_desc_populate_fifo_from_directory(dump_desc_dir);
+ }
+
+ tor_free(dump_desc_dir);
+}
+
+/** Create the dump directory if needed and possible */
+static void
+dump_desc_create_dir(void)
+{
+ char *dump_desc_dir;
+
+ /* If the problem flag is set, skip it */
+ if (problem_with_dump_desc_dir) return;
+
+ /* Do we need it? */
+ if (!have_dump_desc_dir) {
+ dump_desc_dir = get_datadir_fname(DESC_DUMP_DATADIR_SUBDIR);
+
+ if (check_private_dir(dump_desc_dir, CPD_CREATE,
+ get_options()->User) < 0) {
+ log_notice(LD_DIR,
+ "Failed to create descriptor dump directory %s",
+ dump_desc_dir);
+ problem_with_dump_desc_dir = 1;
+ }
+
+ /* Okay, we created it */
+ have_dump_desc_dir = 1;
+
+ tor_free(dump_desc_dir);
+ }
+}
+
+/** Dump desc FIFO/cleanup; take ownership of the given filename, add it to
+ * the FIFO, and clean up the oldest entries to the extent they exceed the
+ * configured cap. If any old entries with a matching hash existed, they
+ * just got overwritten right before this was called and we should adjust
+ * the total size counter without deleting them.
+ */
+static void
+dump_desc_fifo_add_and_clean(char *filename, const uint8_t *digest_sha256,
+ size_t len)
+{
+ dumped_desc_t *ent = NULL, *tmp;
+ uint64_t max_len;
+
+ tor_assert(filename != NULL);
+ tor_assert(digest_sha256 != NULL);
+
+ if (descs_dumped == NULL) {
+ /* We better have no length, then */
+ tor_assert(len_descs_dumped == 0);
+ /* Make a smartlist */
+ descs_dumped = smartlist_new();
+ }
+
+ /* Make a new entry to put this one in */
+ ent = tor_malloc_zero(sizeof(*ent));
+ ent->filename = filename;
+ ent->len = len;
+ ent->when = time(NULL);
+ memcpy(ent->digest_sha256, digest_sha256, DIGEST256_LEN);
+
+ /* Do we need to do some cleanup? */
+ max_len = get_options()->MaxUnparseableDescSizeToLog;
+ /* Iterate over the list until we've freed enough space */
+ while (len > max_len - len_descs_dumped &&
+ smartlist_len(descs_dumped) > 0) {
+ /* Get the oldest thing on the list */
+ tmp = (dumped_desc_t *)(smartlist_get(descs_dumped, 0));
+
+ /*
+ * Check if it matches the filename we just added, so we don't delete
+ * something we just emitted if we get repeated identical descriptors.
+ */
+ if (strcmp(tmp->filename, filename) != 0) {
+ /* Delete it and adjust the length counter */
+ tor_unlink(tmp->filename);
+ tor_assert(len_descs_dumped >= tmp->len);
+ len_descs_dumped -= tmp->len;
+ log_info(LD_DIR,
+ "Deleting old unparseable descriptor dump %s due to "
+ "space limits",
+ tmp->filename);
+ } else {
+ /*
+ * Don't delete, but do adjust the counter since we will bump it
+ * later
+ */
+ tor_assert(len_descs_dumped >= tmp->len);
+ len_descs_dumped -= tmp->len;
+ log_info(LD_DIR,
+ "Replacing old descriptor dump %s with new identical one",
+ tmp->filename);
+ }
+
+ /* Free it and remove it from the list */
+ smartlist_del_keeporder(descs_dumped, 0);
+ tor_free(tmp->filename);
+ tor_free(tmp);
+ }
+
+ /* Append our entry to the end of the list and bump the counter */
+ smartlist_add(descs_dumped, ent);
+ len_descs_dumped += len;
+}
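+
+/* Worked example for the eviction loop above (illustrative numbers, not
+ * from the source): with MaxUnparseableDescSizeToLog at 10 KB, 8 KB of
+ * dumps already recorded, and a new 4 KB dump arriving, the loop removes
+ * the oldest entries until 4 KB <= 10 KB - len_descs_dumped holds (that
+ * is, until at least 2 KB has been dropped), then appends the new entry
+ * and adds its 4 KB to the running total. */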
+
+/** Check if we already have a descriptor for this hash and move it to the
+ * head of the queue if so. Return 1 if one existed and 0 otherwise.
+ */
+static int
+dump_desc_fifo_bump_hash(const uint8_t *digest_sha256)
+{
+ dumped_desc_t *match = NULL;
+
+ tor_assert(digest_sha256);
+
+ if (descs_dumped) {
+ /* Find a match if one exists */
+ SMARTLIST_FOREACH_BEGIN(descs_dumped, dumped_desc_t *, ent) {
+ if (ent &&
+ tor_memeq(ent->digest_sha256, digest_sha256, DIGEST256_LEN)) {
+ /*
+ * Save a pointer to the match and remove it from its current
+ * position.
+ */
+ match = ent;
+ SMARTLIST_DEL_CURRENT_KEEPORDER(descs_dumped, ent);
+ break;
+ }
+ } SMARTLIST_FOREACH_END(ent);
+
+ if (match) {
+ /* Update the timestamp */
+ match->when = time(NULL);
+ /* Add it back at the end of the list */
+ smartlist_add(descs_dumped, match);
+
+ /* Indicate we found one */
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/** Clean up on exit; just memory, leave the dumps behind
+ */
+STATIC void
+dump_desc_fifo_cleanup(void)
+{
+ if (descs_dumped) {
+ /* Free each descriptor */
+ SMARTLIST_FOREACH_BEGIN(descs_dumped, dumped_desc_t *, ent) {
+ tor_assert(ent);
+ tor_free(ent->filename);
+ tor_free(ent);
+ } SMARTLIST_FOREACH_END(ent);
+ /* Free the list */
+ smartlist_free(descs_dumped);
+ descs_dumped = NULL;
+ len_descs_dumped = 0;
+ }
+}
+
+/** Handle one file for dump_desc_populate_fifo_from_directory(); make sure
+ * the filename is sensibly formed and matches the file content, and either
+ * return a dumped_desc_t for it or remove the file and return NULL.
+ */
+MOCK_IMPL(STATIC dumped_desc_t *,
+dump_desc_populate_one_file, (const char *dirname, const char *f))
+{
+ dumped_desc_t *ent = NULL;
+ char *path = NULL, *desc = NULL;
+ const char *digest_str;
+ char digest[DIGEST256_LEN], content_digest[DIGEST256_LEN];
+ /* Expected prefix before digest in filenames */
+ const char *f_pfx = DESC_DUMP_BASE_FILENAME ".";
+ /*
+ * Stat while reading; this is important in case the file
+ * contains a NUL character.
+ */
+ struct stat st;
+
+ /* Sanity-check args */
+ tor_assert(dirname != NULL);
+ tor_assert(f != NULL);
+
+ /* Form the full path */
+ tor_asprintf(&path, "%s" PATH_SEPARATOR "%s", dirname, f);
+
+ /* Check that f has the form DESC_DUMP_BASE_FILENAME.<digest256> */
+
+ if (!strcmpstart(f, f_pfx)) {
+ /* It matches the form, but is the digest parseable as such? */
+ digest_str = f + strlen(f_pfx);
+ if (base16_decode(digest, DIGEST256_LEN,
+ digest_str, strlen(digest_str)) != DIGEST256_LEN) {
+ /* We failed to decode it */
+ digest_str = NULL;
+ }
+ } else {
+ /* No match */
+ digest_str = NULL;
+ }
+
+ if (!digest_str) {
+ /* We couldn't get a sensible digest */
+ log_notice(LD_DIR,
+ "Removing unrecognized filename %s from unparseable "
+ "descriptors directory", f);
+ tor_unlink(path);
+ /* We're done */
+ goto done;
+ }
+
+ /*
+ * The filename has the form DESC_DUMP_BASE_FILENAME "." <digest256> and
+ * we've decoded the digest. Next, check that we can read it and the
+ * content matches this digest. We are relying on the fact that if the
+ * file contains a '\0', read_file_to_str() will allocate space for and
+ * read the entire file and return the correct size in st.
+ */
+ desc = read_file_to_str(path, RFTS_IGNORE_MISSING|RFTS_BIN, &st);
+ if (!desc) {
+ /* We couldn't read it */
+ log_notice(LD_DIR,
+ "Failed to read %s from unparseable descriptors directory; "
+ "attempting to remove it.", f);
+ tor_unlink(path);
+ /* We're done */
+ goto done;
+ }
+
+#if SIZE_MAX > UINT64_MAX
+ if (BUG((uint64_t)st.st_size > (uint64_t)SIZE_MAX)) {
+ /* LCOV_EXCL_START
+ * Should be impossible since RFTS above should have failed to read the
+ * huge file into RAM. */
+ goto done;
+ /* LCOV_EXCL_STOP */
+ }
+#endif /* SIZE_MAX > UINT64_MAX */
+ if (BUG(st.st_size < 0)) {
+ /* LCOV_EXCL_START
+ * Should be impossible, since the OS isn't supposed to be b0rken. */
+ goto done;
+ /* LCOV_EXCL_STOP */
+ }
+ /* (Now we can be sure that st.st_size is safe to cast to a size_t.) */
+
+ /*
+ * We got one; now compute its digest and check that it matches the
+ * filename.
+ */
+ if (crypto_digest256((char *)content_digest, desc, (size_t) st.st_size,
+ DIGEST_SHA256) < 0) {
+ /* Weird, but okay */
+ log_info(LD_DIR,
+ "Unable to hash content of %s from unparseable descriptors "
+ "directory", f);
+ tor_unlink(path);
+ /* We're done */
+ goto done;
+ }
+
+ /* Compare the digests */
+ if (tor_memneq(digest, content_digest, DIGEST256_LEN)) {
+ /* No match */
+ log_info(LD_DIR,
+ "Hash of %s from unparseable descriptors directory didn't "
+ "match its filename; removing it", f);
+ tor_unlink(path);
+ /* We're done */
+ goto done;
+ }
+
+ /* Okay, it's a match, we should prepare ent */
+ ent = tor_malloc_zero(sizeof(dumped_desc_t));
+ ent->filename = path;
+ memcpy(ent->digest_sha256, digest, DIGEST256_LEN);
+ ent->len = (size_t) st.st_size;
+ ent->when = st.st_mtime;
+ /* Null out path so we don't free it out from under ent */
+ path = NULL;
+
+ done:
+ /* Free allocations if we had them */
+ tor_free(desc);
+ tor_free(path);
+
+ return ent;
+}
+
+/** Sort helper for dump_desc_populate_fifo_from_directory(); compares
+ * the when field of dumped_desc_ts in a smartlist to put the FIFO in
+ * the correct order after reconstructing it from the directory.
+ */
+static int
+dump_desc_compare_fifo_entries(const void **a_v, const void **b_v)
+{
+ const dumped_desc_t **a = (const dumped_desc_t **)a_v;
+ const dumped_desc_t **b = (const dumped_desc_t **)b_v;
+
+ if ((a != NULL) && (*a != NULL)) {
+ if ((b != NULL) && (*b != NULL)) {
+ /* We have sensible dumped_desc_ts to compare */
+ if ((*a)->when < (*b)->when) {
+ return -1;
+ } else if ((*a)->when == (*b)->when) {
+ return 0;
+ } else {
+ return 1;
+ }
+ } else {
+ /*
+       * We shouldn't see this, but what the hell, NULLs precede everything
+ * else
+ */
+ return 1;
+ }
+ } else {
+ return -1;
+ }
+}
+
+/** Scan the contents of the directory and update the FIFO/counters; this
+ * consistency-checks descriptor dump filenames against hashes of the
+ * descriptor dump file content, removes any inconsistent or unreadable
+ * dumps, and then reconstructs the dump FIFO as closely as possible to its
+ * state when the tor process last shut down. If a previous dump was
+ * repeated more than once and moved ahead in the FIFO, the mtime will not
+ * have been updated and the reconstructed order will be wrong, but it will
+ * always be a permutation of the original.
+ */
+STATIC void
+dump_desc_populate_fifo_from_directory(const char *dirname)
+{
+ smartlist_t *files = NULL;
+ dumped_desc_t *ent = NULL;
+
+ tor_assert(dirname != NULL);
+
+ /* Get a list of files */
+ files = tor_listdir(dirname);
+ if (!files) {
+ log_notice(LD_DIR,
+ "Unable to get contents of unparseable descriptor dump "
+ "directory %s",
+ dirname);
+ return;
+ }
+
+ /*
+ * Iterate through the list and decide which files should go in the
+ * FIFO and which should be purged.
+ */
+
+ SMARTLIST_FOREACH_BEGIN(files, char *, f) {
+ /* Try to get a FIFO entry */
+ ent = dump_desc_populate_one_file(dirname, f);
+ if (ent) {
+ /*
+ * We got one; add it to the FIFO. No need for duplicate checking
+ * here since we just verified the name and digest match.
+ */
+
+ /* Make sure we have a list to add it to */
+ if (!descs_dumped) {
+ descs_dumped = smartlist_new();
+ len_descs_dumped = 0;
+ }
+
+ /* Add it and adjust the counter */
+ smartlist_add(descs_dumped, ent);
+ len_descs_dumped += ent->len;
+ }
+ /*
+ * If we didn't, we will have unlinked the file if necessary and
+ * possible, and emitted a log message about it, so just go on to
+ * the next.
+ */
+ } SMARTLIST_FOREACH_END(f);
+
+ /* Did we get anything? */
+ if (descs_dumped != NULL) {
+ /* Sort the FIFO in order of increasing timestamp */
+ smartlist_sort(descs_dumped, dump_desc_compare_fifo_entries);
+
+ /* Log some stats */
+ log_info(LD_DIR,
+ "Reloaded unparseable descriptor dump FIFO with %d dump(s) "
+ "totaling %"PRIu64 " bytes",
+ smartlist_len(descs_dumped), (len_descs_dumped));
+ }
+
+ /* Free the original list */
+ SMARTLIST_FOREACH(files, char *, f, tor_free(f));
+ smartlist_free(files);
+}
+
+/** For debugging purposes, dump the unparseable descriptor <b>desc</b> of
+ * type <b>type</b> to a file named after its SHA-256 digest in the
+ * $DATADIR/unparseable-descs subdirectory. Skip the dump if a descriptor
+ * with the same hash has already been dumped, if the total size of dumps
+ * would exceed MaxUnparseableDescSizeToLog, or if the sandbox is
+ * configured. */
+MOCK_IMPL(STATIC void,
+dump_desc,(const char *desc, const char *type))
+{
+ tor_assert(desc);
+ tor_assert(type);
+ size_t len;
+ /* The SHA256 of the string */
+ uint8_t digest_sha256[DIGEST256_LEN];
+ char digest_sha256_hex[HEX_DIGEST256_LEN+1];
+ /* Filename to log it to */
+ char *debugfile, *debugfile_base;
+
+ /* Get the hash for logging purposes anyway */
+ len = strlen(desc);
+ if (crypto_digest256((char *)digest_sha256, desc, len,
+ DIGEST_SHA256) < 0) {
+ log_info(LD_DIR,
+ "Unable to parse descriptor of type %s, and unable to even hash"
+ " it!", type);
+ goto err;
+ }
+
+ base16_encode(digest_sha256_hex, sizeof(digest_sha256_hex),
+ (const char *)digest_sha256, sizeof(digest_sha256));
+
+ /*
+ * We mention type and hash in the main log; don't clutter up the files
+ * with anything but the exact dump.
+ */
+ tor_asprintf(&debugfile_base,
+ DESC_DUMP_BASE_FILENAME ".%s", digest_sha256_hex);
+ debugfile = get_datadir_fname2(DESC_DUMP_DATADIR_SUBDIR, debugfile_base);
+
+ /*
+ * Check if the sandbox is active or will become active; see comment
+ * below at the log message for why.
+ */
+ if (!(sandbox_is_active() || get_options()->Sandbox)) {
+ if (len <= get_options()->MaxUnparseableDescSizeToLog) {
+ if (!dump_desc_fifo_bump_hash(digest_sha256)) {
+ /* Create the directory if needed */
+ dump_desc_create_dir();
+ /* Make sure we've got it */
+ if (have_dump_desc_dir && !problem_with_dump_desc_dir) {
+ /* Write it, and tell the main log about it */
+ write_str_to_file(debugfile, desc, 1);
+ log_info(LD_DIR,
+ "Unable to parse descriptor of type %s with hash %s and "
+ "length %lu. See file %s in data directory for details.",
+ type, digest_sha256_hex, (unsigned long)len,
+ debugfile_base);
+ dump_desc_fifo_add_and_clean(debugfile, digest_sha256, len);
+ /* Since we handed ownership over, don't free debugfile later */
+ debugfile = NULL;
+ } else {
+ /* Problem with the subdirectory */
+ log_info(LD_DIR,
+ "Unable to parse descriptor of type %s with hash %s and "
+ "length %lu. Descriptor not dumped because we had a "
+ "problem creating the " DESC_DUMP_DATADIR_SUBDIR
+ " subdirectory",
+ type, digest_sha256_hex, (unsigned long)len);
+ /* We do have to free debugfile in this case */
+ }
+ } else {
+ /* We already had one with this hash dumped */
+ log_info(LD_DIR,
+ "Unable to parse descriptor of type %s with hash %s and "
+ "length %lu. Descriptor not dumped because one with that "
+ "hash has already been dumped.",
+ type, digest_sha256_hex, (unsigned long)len);
+ /* We do have to free debugfile in this case */
+ }
+ } else {
+ /* Just log that it happened without dumping */
+ log_info(LD_DIR,
+ "Unable to parse descriptor of type %s with hash %s and "
+ "length %lu. Descriptor not dumped because it exceeds maximum"
+ " log size all by itself.",
+ type, digest_sha256_hex, (unsigned long)len);
+ /* We do have to free debugfile in this case */
+ }
+ } else {
+ /*
+ * Not logging because the sandbox is active and seccomp2 apparently
+ * doesn't have a sensible way to allow filenames according to a pattern
+ * match. (If we ever figure out how to say "allow writes to /regex/",
+     * remove this check).
+ */
+ log_info(LD_DIR,
+ "Unable to parse descriptor of type %s with hash %s and "
+ "length %lu. Descriptor not dumped because the sandbox is "
+ "configured",
+ type, digest_sha256_hex, (unsigned long)len);
+ }
+
+ tor_free(debugfile_base);
+ tor_free(debugfile);
+
+ err:
+ return;
+}
+
+/** Set <b>digest</b> to the SHA-1 digest of the directory in
+ * <b>s</b>. Return 0 on success, -1 on failure.
+ */
+int
+router_get_dir_hash(const char *s, char *digest)
+{
+ return router_get_hash_impl(s, strlen(s), digest,
+ "signed-directory","\ndirectory-signature",'\n',
+ DIGEST_SHA1);
+}
+
+/** Set <b>digest</b> to the SHA-1 digest of the first router in
+ * <b>s</b>. Return 0 on success, -1 on failure.
+ */
+int
+router_get_router_hash(const char *s, size_t s_len, char *digest)
+{
+ return router_get_hash_impl(s, s_len, digest,
+ "router ","\nrouter-signature", '\n',
+ DIGEST_SHA1);
+}
+
+/** Try to find the start and end of the signed portion of a networkstatus
+ * document in <b>s</b>. On success, set <b>start_out</b> to the first
+ * character of the document, and <b>end_out</b> to a position one after the
+ * final character of the signed document, and return 0. On failure, return
+ * -1. */
+int
+router_get_networkstatus_v3_signed_boundaries(const char *s,
+ const char **start_out,
+ const char **end_out)
+{
+ return router_get_hash_impl_helper(s, strlen(s),
+ "network-status-version",
+ "\ndirectory-signature",
+ ' ', LOG_INFO,
+ start_out, end_out);
+}
+
+/** Set <b>digest_out</b> to the SHA3-256 digest of the signed portion of the
+ * networkstatus vote in <b>s</b> -- or of the entirety of <b>s</b> if no
+ * signed portion can be identified. Return 0 on success, -1 on failure. */
+int
+router_get_networkstatus_v3_sha3_as_signed(uint8_t *digest_out,
+ const char *s)
+{
+ const char *start, *end;
+ if (router_get_networkstatus_v3_signed_boundaries(s, &start, &end) < 0) {
+ start = s;
+ end = s + strlen(s);
+ }
+ tor_assert(start);
+ tor_assert(end);
+ return crypto_digest256((char*)digest_out, start, end-start,
+ DIGEST_SHA3_256);
+}
+
+/** Set <b>digests</b> to all the digests of the consensus document in
+ * <b>s</b>. Return 0 on success, -1 on failure. */
+int
+router_get_networkstatus_v3_hashes(const char *s, common_digests_t *digests)
+{
+ return router_get_hashes_impl(s,strlen(s),digests,
+ "network-status-version",
+ "\ndirectory-signature",
+ ' ');
+}
+
+/** Set <b>digest</b> to the SHA-1 digest of the hash of the <b>s_len</b>-byte
+ * extrainfo string at <b>s</b>. Return 0 on success, -1 on failure. */
+int
+router_get_extrainfo_hash(const char *s, size_t s_len, char *digest)
+{
+ return router_get_hash_impl(s, s_len, digest, "extra-info",
+ "\nrouter-signature",'\n', DIGEST_SHA1);
+}
+
+/** Helper: used to generate signatures for routers, directories and
+ * network-status objects. Given a <b>digest_len</b>-byte digest in
+ * <b>digest</b> and a secret <b>private_key</b>, generate a PKCS1-padded
+ * signature, BASE64-encode it, surround it with -----BEGIN/END----- pairs,
+ * and return the new signature on success or NULL on failure.
+ */
+char *
+router_get_dirobj_signature(const char *digest,
+ size_t digest_len,
+ const crypto_pk_t *private_key)
+{
+ char *signature;
+ size_t i, keysize;
+ int siglen;
+ char *buf = NULL;
+ size_t buf_len;
+ /* overestimate of BEGIN/END lines total len. */
+#define BEGIN_END_OVERHEAD_LEN 64
+
+ keysize = crypto_pk_keysize(private_key);
+ signature = tor_malloc(keysize);
+ siglen = crypto_pk_private_sign(private_key, signature, keysize,
+ digest, digest_len);
+ if (siglen < 0) {
+ log_warn(LD_BUG,"Couldn't sign digest.");
+ goto err;
+ }
+
+ /* The *2 here is a ridiculous overestimate of base-64 overhead. */
+ buf_len = (siglen * 2) + BEGIN_END_OVERHEAD_LEN;
+ buf = tor_malloc(buf_len);
+
+ if (strlcpy(buf, "-----BEGIN SIGNATURE-----\n", buf_len) >= buf_len)
+ goto truncated;
+
+ i = strlen(buf);
+ if (base64_encode(buf+i, buf_len-i, signature, siglen,
+ BASE64_ENCODE_MULTILINE) < 0) {
+ log_warn(LD_BUG,"couldn't base64-encode signature");
+ goto err;
+ }
+
+ if (strlcat(buf, "-----END SIGNATURE-----\n", buf_len) >= buf_len)
+ goto truncated;
+
+ tor_free(signature);
+ return buf;
+
+ truncated:
+ log_warn(LD_BUG,"tried to exceed string length.");
+ err:
+ tor_free(signature);
+ tor_free(buf);
+ return NULL;
+}
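+
+/* Illustrative caller-side sketch for the helper above; the variable names
+ * "body" and "key" are hypothetical, not taken from this file:
+ *
+ *   char digest[DIGEST_LEN];
+ *   char *sig;
+ *   if (router_get_dir_hash(body, digest) < 0)
+ *     return -1;
+ *   sig = router_get_dirobj_signature(digest, DIGEST_LEN, key);
+ *   if (!sig)
+ *     return -1;
+ *   // 'sig' now holds "-----BEGIN SIGNATURE-----\n<base64>\n
+ *   //  -----END SIGNATURE-----\n"; the caller must tor_free() it.
+ *   tor_free(sig);
+ */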
+
+/** Helper: used to generate signatures for routers, directories and
+ * network-status objects. Given a digest in <b>digest</b> and a secret
+ * <b>private_key</b>, generate a PKCS1-padded signature, BASE64-encode it,
+ * surround it with -----BEGIN/END----- pairs, and write it to the
+ * <b>buf_len</b>-byte buffer at <b>buf</b>. Return 0 on success, -1 on
+ * failure.
+ */
+int
+router_append_dirobj_signature(char *buf, size_t buf_len, const char *digest,
+ size_t digest_len, crypto_pk_t *private_key)
+{
+ size_t sig_len, s_len;
+ char *sig = router_get_dirobj_signature(digest, digest_len, private_key);
+ if (!sig) {
+ log_warn(LD_BUG, "No signature generated");
+ return -1;
+ }
+ sig_len = strlen(sig);
+ s_len = strlen(buf);
+ if (sig_len + s_len + 1 > buf_len) {
+ log_warn(LD_BUG, "Not enough room for signature");
+ tor_free(sig);
+ return -1;
+ }
+ memcpy(buf+s_len, sig, sig_len+1);
+ tor_free(sig);
+ return 0;
+}
+
+/** Return VS_RECOMMENDED if <b>myversion</b> is contained in
+ * <b>versionlist</b>. Else, return VS_EMPTY if versionlist has no
+ * entries. Else, return VS_OLD if every member of
+ * <b>versionlist</b> is newer than <b>myversion</b>. Else, return
+ * VS_NEW_IN_SERIES if there is at least one member of <b>versionlist</b> in
+ * the same series (major.minor.micro) as <b>myversion</b>, but no such member
+ * is newer than <b>myversion</b>. Else, return VS_NEW if every member of
+ * <b>versionlist</b> is older than <b>myversion</b>. Else, return
+ * VS_UNRECOMMENDED.
+ *
+ * (versionlist is a comma-separated list of version strings,
+ * optionally prefixed with "Tor". Versions that can't be parsed are
+ * ignored.)
+ */
+version_status_t
+tor_version_is_obsolete(const char *myversion, const char *versionlist)
+{
+ tor_version_t mine, other;
+ int found_newer = 0, found_older = 0, found_newer_in_series = 0,
+ found_any_in_series = 0, r, same;
+ version_status_t ret = VS_UNRECOMMENDED;
+ smartlist_t *version_sl;
+
+ log_debug(LD_CONFIG,"Checking whether version '%s' is in '%s'",
+ myversion, versionlist);
+
+ if (tor_version_parse(myversion, &mine)) {
+ log_err(LD_BUG,"I couldn't parse my own version (%s)", myversion);
+ tor_assert(0);
+ }
+ version_sl = smartlist_new();
+ smartlist_split_string(version_sl, versionlist, ",", SPLIT_SKIP_SPACE, 0);
+
+ if (!strlen(versionlist)) { /* no authorities cared or agreed */
+ ret = VS_EMPTY;
+ goto done;
+ }
+
+ SMARTLIST_FOREACH_BEGIN(version_sl, const char *, cp) {
+ if (!strcmpstart(cp, "Tor "))
+ cp += 4;
+
+ if (tor_version_parse(cp, &other)) {
+ /* Couldn't parse other; it can't be a match. */
+ } else {
+ same = tor_version_same_series(&mine, &other);
+ if (same)
+ found_any_in_series = 1;
+ r = tor_version_compare(&mine, &other);
+ if (r==0) {
+ ret = VS_RECOMMENDED;
+ goto done;
+ } else if (r<0) {
+ found_newer = 1;
+ if (same)
+ found_newer_in_series = 1;
+ } else if (r>0) {
+ found_older = 1;
+ }
+ }
+ } SMARTLIST_FOREACH_END(cp);
+
+ /* We didn't find the listed version. Is it new or old? */
+ if (found_any_in_series && !found_newer_in_series && found_newer) {
+ ret = VS_NEW_IN_SERIES;
+ } else if (found_newer && !found_older) {
+ ret = VS_OLD;
+ } else if (found_older && !found_newer) {
+ ret = VS_NEW;
+ } else {
+ ret = VS_UNRECOMMENDED;
+ }
+
+ done:
+ SMARTLIST_FOREACH(version_sl, char *, version, tor_free(version));
+ smartlist_free(version_sl);
+ return ret;
+}
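+
+/* Illustrative outcomes for the function above (all version strings are
+ * invented):
+ *
+ *   myversion   versionlist            result
+ *   ----------  ---------------------  ----------------
+ *   "0.3.4.1"   "0.3.4.1,0.3.5.2"      VS_RECOMMENDED   (exact match)
+ *   "0.3.3.9"   "0.3.4.1,0.3.5.2"      VS_OLD           (everything newer)
+ *   "0.3.5.5"   "0.3.4.1,0.3.5.2"      VS_NEW           (everything older)
+ *   "0.3.4.2"   "0.3.4.1,0.3.5.2"      VS_NEW_IN_SERIES (newest in series)
+ *   "0.3.4.0"   "0.3.3.1,0.3.4.1"      VS_UNRECOMMENDED (newer and older)
+ *   any         ""                     VS_EMPTY
+ */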
+
+MOCK_IMPL(STATIC int,
+signed_digest_equals, (const uint8_t *d1, const uint8_t *d2, size_t len))
+{
+ return tor_memeq(d1, d2, len);
+}
+
+/** Check whether the object body of the token in <b>tok</b> has a good
+ * signature for <b>digest</b> using key <b>pkey</b>.
+ * If the CST_NO_CHECK_OBJTYPE bit is set in <b>flags</b>, do not check
+ * the object type of the signature object. Use <b>doctype</b> as the type of
+ * the document when generating log messages. Return 0 on success, negative
+ * on failure.
+ */
+static int
+check_signature_token(const char *digest,
+ ssize_t digest_len,
+ directory_token_t *tok,
+ crypto_pk_t *pkey,
+ int flags,
+ const char *doctype)
+{
+ char *signed_digest;
+ size_t keysize;
+ const int check_objtype = ! (flags & CST_NO_CHECK_OBJTYPE);
+
+ tor_assert(pkey);
+ tor_assert(tok);
+ tor_assert(digest);
+ tor_assert(doctype);
+
+ if (check_objtype) {
+ if (strcmp(tok->object_type, "SIGNATURE")) {
+ log_warn(LD_DIR, "Bad object type on %s signature", doctype);
+ return -1;
+ }
+ }
+
+ keysize = crypto_pk_keysize(pkey);
+ signed_digest = tor_malloc(keysize);
+ if (crypto_pk_public_checksig(pkey, signed_digest, keysize,
+ tok->object_body, tok->object_size)
+ < digest_len) {
+ log_warn(LD_DIR, "Error reading %s: invalid signature.", doctype);
+ tor_free(signed_digest);
+ return -1;
+ }
+ // log_debug(LD_DIR,"Signed %s hash starts %s", doctype,
+ // hex_str(signed_digest,4));
+ if (! signed_digest_equals((const uint8_t *)digest,
+ (const uint8_t *)signed_digest, digest_len)) {
+ log_warn(LD_DIR, "Error reading %s: signature does not match.", doctype);
+ tor_free(signed_digest);
+ return -1;
+ }
+ tor_free(signed_digest);
+ return 0;
+}
+
+/** Helper: move *<b>s_ptr</b> ahead to the next router, the next extra-info,
+ * or to the first of the annotations preceding the next router or
+ * extra-info---whichever comes first. Set <b>is_extrainfo_out</b> to true if
+ * we found an extrainfo, or false if we found a router. Do not scan beyond
+ * <b>eos</b>. Return -1 if we found nothing; 0 if we found something. */
+static int
+find_start_of_next_router_or_extrainfo(const char **s_ptr,
+ const char *eos,
+ int *is_extrainfo_out)
+{
+ const char *annotations = NULL;
+ const char *s = *s_ptr;
+
+ s = eat_whitespace_eos(s, eos);
+
+  while (s < eos-32) { /* 32 gives enough room for the first keyword. */
+ /* We're at the start of a line. */
+ tor_assert(*s != '\n');
+
+ if (*s == '@' && !annotations) {
+ annotations = s;
+ } else if (*s == 'r' && !strcmpstart(s, "router ")) {
+ *s_ptr = annotations ? annotations : s;
+ *is_extrainfo_out = 0;
+ return 0;
+ } else if (*s == 'e' && !strcmpstart(s, "extra-info ")) {
+ *s_ptr = annotations ? annotations : s;
+ *is_extrainfo_out = 1;
+ return 0;
+ }
+
+ if (!(s = memchr(s+1, '\n', eos-(s+1))))
+ break;
+ s = eat_whitespace_eos(s, eos);
+ }
+ return -1;
+}
+
+/** Given a string *<b>s</b> containing a concatenated sequence of router
+ * descriptors (or extra-info documents if <b>want_extrainfo</b> is set), parses
+ * them and stores the result in <b>dest</b>. All routers are marked running
+ * and valid. Advances *s to a point immediately following the last router
+ * entry. Ignore any trailing router entries that are not complete.
+ *
+ * If <b>saved_location</b> isn't SAVED_IN_CACHE, make a local copy of each
+ * descriptor in the signed_descriptor_body field of each routerinfo_t. If it
+ * isn't SAVED_NOWHERE, remember the offset of each descriptor.
+ *
+ * Returns 0 on success and -1 on failure. Adds a digest to
+ * <b>invalid_digests_out</b> for every entry that was unparseable or
+ * invalid. (This may cause duplicate entries.)
+ */
+int
+router_parse_list_from_string(const char **s, const char *eos,
+ smartlist_t *dest,
+ saved_location_t saved_location,
+ int want_extrainfo,
+ int allow_annotations,
+ const char *prepend_annotations,
+ smartlist_t *invalid_digests_out)
+{
+ routerinfo_t *router;
+ extrainfo_t *extrainfo;
+ signed_descriptor_t *signed_desc = NULL;
+ void *elt;
+ const char *end, *start;
+ int have_extrainfo;
+
+ tor_assert(s);
+ tor_assert(*s);
+ tor_assert(dest);
+
+ start = *s;
+ if (!eos)
+ eos = *s + strlen(*s);
+
+ tor_assert(eos >= *s);
+
+ while (1) {
+ char raw_digest[DIGEST_LEN];
+ int have_raw_digest = 0;
+ int dl_again = 0;
+ if (find_start_of_next_router_or_extrainfo(s, eos, &have_extrainfo) < 0)
+ break;
+
+ end = tor_memstr(*s, eos-*s, "\nrouter-signature");
+ if (end)
+ end = tor_memstr(end, eos-end, "\n-----END SIGNATURE-----\n");
+ if (end)
+ end += strlen("\n-----END SIGNATURE-----\n");
+
+ if (!end)
+ break;
+
+ elt = NULL;
+
+ if (have_extrainfo && want_extrainfo) {
+ routerlist_t *rl = router_get_routerlist();
+ have_raw_digest = router_get_extrainfo_hash(*s, end-*s, raw_digest) == 0;
+ extrainfo = extrainfo_parse_entry_from_string(*s, end,
+ saved_location != SAVED_IN_CACHE,
+ rl->identity_map, &dl_again);
+ if (extrainfo) {
+ signed_desc = &extrainfo->cache_info;
+ elt = extrainfo;
+ }
+ } else if (!have_extrainfo && !want_extrainfo) {
+ have_raw_digest = router_get_router_hash(*s, end-*s, raw_digest) == 0;
+ router = router_parse_entry_from_string(*s, end,
+ saved_location != SAVED_IN_CACHE,
+ allow_annotations,
+ prepend_annotations, &dl_again);
+ if (router) {
+ log_debug(LD_DIR, "Read router '%s', purpose '%s'",
+ router_describe(router),
+ router_purpose_to_string(router->purpose));
+ signed_desc = &router->cache_info;
+ elt = router;
+ }
+ }
+ if (! elt && ! dl_again && have_raw_digest && invalid_digests_out) {
+ smartlist_add(invalid_digests_out, tor_memdup(raw_digest, DIGEST_LEN));
+ }
+ if (!elt) {
+ *s = end;
+ continue;
+ }
+ if (saved_location != SAVED_NOWHERE) {
+ tor_assert(signed_desc);
+ signed_desc->saved_location = saved_location;
+ signed_desc->saved_offset = *s - start;
+ }
+ *s = end;
+ smartlist_add(dest, elt);
+ }
+
+ return 0;
+}
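+
+/* Illustrative usage sketch (the "body" buffer and list names are
+ * hypothetical):
+ *
+ *   smartlist_t *routers = smartlist_new();
+ *   smartlist_t *bad_digests = smartlist_new();
+ *   const char *cp = body;   // concatenated router descriptors
+ *   router_parse_list_from_string(&cp, NULL, routers, SAVED_NOWHERE,
+ *                                 0, 0, NULL, bad_digests);
+ *   // 'routers' now holds routerinfo_t pointers; 'bad_digests' holds
+ *   // DIGEST_LEN-byte digests of entries that could not be parsed.
+ */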
+
+/* For debugging: define to count every descriptor digest we've seen so we
+ * know if we need to try harder to avoid duplicate verifies. */
+#undef COUNT_DISTINCT_DIGESTS
+
+#ifdef COUNT_DISTINCT_DIGESTS
+static digestmap_t *verified_digests = NULL;
+#endif
+
+/** Log the total number of distinct router digests we've ever
+ * verified. When compared to the number of times we've verified routerdesc
+ * signatures <i>in toto</i>, this will tell us if we're doing too much
+ * multiple-verification. */
+void
+dump_distinct_digest_count(int severity)
+{
+#ifdef COUNT_DISTINCT_DIGESTS
+ if (!verified_digests)
+ verified_digests = digestmap_new();
+ tor_log(severity, LD_GENERAL, "%d *distinct* router digests verified",
+ digestmap_size(verified_digests));
+#else /* !(defined(COUNT_DISTINCT_DIGESTS)) */
+ (void)severity; /* suppress "unused parameter" warning */
+#endif /* defined(COUNT_DISTINCT_DIGESTS) */
+}
+
+/** Try to find an IPv6 OR port in the <b>list</b> of directory_token_t's,
+ * each of which must have at least one argument (use GE(1) in setup). If
+ * one is found, store its address and port number in <b>addr_out</b> and
+ * <b>port_out</b>, and return 1; otherwise return 0. */
+static int
+find_single_ipv6_orport(const smartlist_t *list,
+ tor_addr_t *addr_out,
+ uint16_t *port_out)
+{
+ int ret = 0;
+ tor_assert(list != NULL);
+ tor_assert(addr_out != NULL);
+ tor_assert(port_out != NULL);
+
+ SMARTLIST_FOREACH_BEGIN(list, directory_token_t *, t) {
+ tor_addr_t a;
+ maskbits_t bits;
+ uint16_t port_min, port_max;
+ tor_assert(t->n_args >= 1);
+ /* XXXX Prop186 the full spec allows much more than this. */
+ if (tor_addr_parse_mask_ports(t->args[0], 0,
+ &a, &bits, &port_min,
+ &port_max) == AF_INET6 &&
+ bits == 128 &&
+ port_min == port_max) {
+      /* Okay, this is one we can understand. Use it and ignore
+         any further addresses in the list. */
+ tor_addr_copy(addr_out, &a);
+ *port_out = port_min;
+ ret = 1;
+ break;
+ }
+ } SMARTLIST_FOREACH_END(t);
+
+ return ret;
+}
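+
+/* For example, the loop above accepts a token parsed from a line such as
+ *     or-address [2001:db8::42]:9001
+ * (a single IPv6 address with a single port); IPv4 or-address lines and
+ * port ranges are skipped. (The address shown is illustrative.) */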
+
+/** Helper function: reads a single router entry from *<b>s</b> ...
+ * *<b>end</b>. Mallocs a new router and returns it if all goes well, else
+ * returns NULL. If <b>cache_copy</b> is true, duplicate the contents of
+ * s through end into the signed_descriptor_body of the resulting
+ * routerinfo_t.
+ *
+ * If <b>end</b> is NULL, <b>s</b> must be properly NUL-terminated.
+ *
+ * If <b>allow_annotations</b>, it's okay to encounter annotations in <b>s</b>
+ * before the router; if it's false, reject the router if it's annotated. If
+ * <b>prepend_annotations</b> is set, it should contain some annotations:
+ * append them to the front of the router before parsing it, and keep them
+ * around when caching the router.
+ *
+ * Only one of allow_annotations and prepend_annotations may be set.
+ *
+ * If <b>can_dl_again_out</b> is provided, set *<b>can_dl_again_out</b> to 1
+ * if it's okay to try to download a descriptor with this same digest again,
+ * and 0 if it isn't. (It might not be okay to download it again if part of
+ * the portion covered by the digest is invalid.)
+ */
+routerinfo_t *
+router_parse_entry_from_string(const char *s, const char *end,
+ int cache_copy, int allow_annotations,
+ const char *prepend_annotations,
+ int *can_dl_again_out)
+{
+ routerinfo_t *router = NULL;
+ char digest[128];
+ smartlist_t *tokens = NULL, *exit_policy_tokens = NULL;
+ directory_token_t *tok;
+ struct in_addr in;
+ const char *start_of_annotations, *cp, *s_dup = s;
+ size_t prepend_len = prepend_annotations ? strlen(prepend_annotations) : 0;
+ int ok = 1;
+ memarea_t *area = NULL;
+ tor_cert_t *ntor_cc_cert = NULL;
+ /* Do not set this to '1' until we have parsed everything that we intend to
+ * parse that's covered by the hash. */
+ int can_dl_again = 0;
+
+ tor_assert(!allow_annotations || !prepend_annotations);
+
+ if (!end) {
+ end = s + strlen(s);
+ }
+
+ /* point 'end' to a point immediately after the final newline. */
+ while (end > s+2 && *(end-1) == '\n' && *(end-2) == '\n')
+ --end;
+
+ area = memarea_new();
+ tokens = smartlist_new();
+ if (prepend_annotations) {
+ if (tokenize_string(area,prepend_annotations,NULL,tokens,
+ routerdesc_token_table,TS_NOCHECK)) {
+ log_warn(LD_DIR, "Error tokenizing router descriptor (annotations).");
+ goto err;
+ }
+ }
+
+ start_of_annotations = s;
+ cp = tor_memstr(s, end-s, "\nrouter ");
+ if (!cp) {
+ if (end-s < 7 || strcmpstart(s, "router ")) {
+ log_warn(LD_DIR, "No router keyword found.");
+ goto err;
+ }
+ } else {
+ s = cp+1;
+ }
+
+ if (start_of_annotations != s) { /* We have annotations */
+ if (allow_annotations) {
+ if (tokenize_string(area,start_of_annotations,s,tokens,
+ routerdesc_token_table,TS_NOCHECK)) {
+ log_warn(LD_DIR, "Error tokenizing router descriptor (annotations).");
+ goto err;
+ }
+ } else {
+ log_warn(LD_DIR, "Found unexpected annotations on router descriptor not "
+ "loaded from disk. Dropping it.");
+ goto err;
+ }
+ }
+
+ if (router_get_router_hash(s, end - s, digest) < 0) {
+ log_warn(LD_DIR, "Couldn't compute router hash.");
+ goto err;
+ }
+ {
+ int flags = 0;
+ if (allow_annotations)
+ flags |= TS_ANNOTATIONS_OK;
+ if (prepend_annotations)
+ flags |= TS_ANNOTATIONS_OK|TS_NO_NEW_ANNOTATIONS;
+
+ if (tokenize_string(area,s,end,tokens,routerdesc_token_table, flags)) {
+ log_warn(LD_DIR, "Error tokenizing router descriptor.");
+ goto err;
+ }
+ }
+
+ if (smartlist_len(tokens) < 2) {
+ log_warn(LD_DIR, "Impossibly short router descriptor.");
+ goto err;
+ }
+
+ tok = find_by_keyword(tokens, K_ROUTER);
+ const int router_token_pos = smartlist_pos(tokens, tok);
+ tor_assert(tok->n_args >= 5);
+
+ router = tor_malloc_zero(sizeof(routerinfo_t));
+ router->cert_expiration_time = TIME_MAX;
+ router->cache_info.routerlist_index = -1;
+ router->cache_info.annotations_len = s-start_of_annotations + prepend_len;
+ router->cache_info.signed_descriptor_len = end-s;
+ if (cache_copy) {
+ size_t len = router->cache_info.signed_descriptor_len +
+ router->cache_info.annotations_len;
+ char *signed_body =
+ router->cache_info.signed_descriptor_body = tor_malloc(len+1);
+ if (prepend_annotations) {
+ memcpy(signed_body, prepend_annotations, prepend_len);
+ signed_body += prepend_len;
+ }
+ /* This assertion will always succeed.
+ * len == signed_desc_len + annotations_len
+ * == end-s + s-start_of_annotations + prepend_len
+ * == end-start_of_annotations + prepend_len
+ * We already wrote prepend_len bytes into the buffer; now we're
+ * writing end-start_of_annotations -NM. */
+ tor_assert(signed_body+(end-start_of_annotations) ==
+ router->cache_info.signed_descriptor_body+len);
+ memcpy(signed_body, start_of_annotations, end-start_of_annotations);
+ router->cache_info.signed_descriptor_body[len] = '\0';
+ tor_assert(strlen(router->cache_info.signed_descriptor_body) == len);
+ }
+ memcpy(router->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
+
+ router->nickname = tor_strdup(tok->args[0]);
+ if (!is_legal_nickname(router->nickname)) {
+ log_warn(LD_DIR,"Router nickname is invalid");
+ goto err;
+ }
+ if (!tor_inet_aton(tok->args[1], &in)) {
+ log_warn(LD_DIR,"Router address is not an IP address.");
+ goto err;
+ }
+ router->addr = ntohl(in.s_addr);
+
+ router->or_port =
+ (uint16_t) tor_parse_long(tok->args[2],10,0,65535,&ok,NULL);
+ if (!ok) {
+ log_warn(LD_DIR,"Invalid OR port %s", escaped(tok->args[2]));
+ goto err;
+ }
+ router->dir_port =
+ (uint16_t) tor_parse_long(tok->args[4],10,0,65535,&ok,NULL);
+ if (!ok) {
+ log_warn(LD_DIR,"Invalid dir port %s", escaped(tok->args[4]));
+ goto err;
+ }
+
+ tok = find_by_keyword(tokens, K_BANDWIDTH);
+ tor_assert(tok->n_args >= 3);
+ router->bandwidthrate = (int)
+ tor_parse_long(tok->args[0],10,1,INT_MAX,&ok,NULL);
+
+ if (!ok) {
+ log_warn(LD_DIR, "bandwidthrate %s unreadable or 0. Failing.",
+ escaped(tok->args[0]));
+ goto err;
+ }
+ router->bandwidthburst =
+ (int) tor_parse_long(tok->args[1],10,0,INT_MAX,&ok,NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Invalid bandwidthburst %s", escaped(tok->args[1]));
+ goto err;
+ }
+ router->bandwidthcapacity = (int)
+ tor_parse_long(tok->args[2],10,0,INT_MAX,&ok,NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Invalid bandwidthcapacity %s", escaped(tok->args[1]));
+ goto err;
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, A_PURPOSE))) {
+ tor_assert(tok->n_args);
+ router->purpose = router_purpose_from_string(tok->args[0]);
+ } else {
+ router->purpose = ROUTER_PURPOSE_GENERAL;
+ }
+ router->cache_info.send_unencrypted =
+ (router->purpose == ROUTER_PURPOSE_GENERAL) ? 1 : 0;
+
+ if ((tok = find_opt_by_keyword(tokens, K_UPTIME))) {
+ tor_assert(tok->n_args >= 1);
+ router->uptime = tor_parse_long(tok->args[0],10,0,LONG_MAX,&ok,NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Invalid uptime %s", escaped(tok->args[0]));
+ goto err;
+ }
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_HIBERNATING))) {
+ tor_assert(tok->n_args >= 1);
+ router->is_hibernating
+ = (tor_parse_long(tok->args[0],10,0,LONG_MAX,NULL,NULL) != 0);
+ }
+
+ tok = find_by_keyword(tokens, K_PUBLISHED);
+ tor_assert(tok->n_args == 1);
+ if (parse_iso_time(tok->args[0], &router->cache_info.published_on) < 0)
+ goto err;
+
+ tok = find_by_keyword(tokens, K_ONION_KEY);
+ if (!crypto_pk_public_exponent_ok(tok->key)) {
+ log_warn(LD_DIR,
+ "Relay's onion key had invalid exponent.");
+ goto err;
+ }
+ router->onion_pkey = tok->key;
+ tok->key = NULL; /* Prevent free */
+
+ if ((tok = find_opt_by_keyword(tokens, K_ONION_KEY_NTOR))) {
+ curve25519_public_key_t k;
+ tor_assert(tok->n_args >= 1);
+ if (curve25519_public_from_base64(&k, tok->args[0]) < 0) {
+ log_warn(LD_DIR, "Bogus ntor-onion-key in routerinfo");
+ goto err;
+ }
+ router->onion_curve25519_pkey =
+ tor_memdup(&k, sizeof(curve25519_public_key_t));
+ }
+
+ tok = find_by_keyword(tokens, K_SIGNING_KEY);
+ router->identity_pkey = tok->key;
+ tok->key = NULL; /* Prevent free */
+ if (crypto_pk_get_digest(router->identity_pkey,
+ router->cache_info.identity_digest)) {
+ log_warn(LD_DIR, "Couldn't calculate key digest"); goto err;
+ }
+
+ {
+ directory_token_t *ed_sig_tok, *ed_cert_tok, *cc_tap_tok, *cc_ntor_tok,
+ *master_key_tok;
+ ed_sig_tok = find_opt_by_keyword(tokens, K_ROUTER_SIG_ED25519);
+ ed_cert_tok = find_opt_by_keyword(tokens, K_IDENTITY_ED25519);
+ master_key_tok = find_opt_by_keyword(tokens, K_MASTER_KEY_ED25519);
+ cc_tap_tok = find_opt_by_keyword(tokens, K_ONION_KEY_CROSSCERT);
+ cc_ntor_tok = find_opt_by_keyword(tokens, K_NTOR_ONION_KEY_CROSSCERT);
+ int n_ed_toks = !!ed_sig_tok + !!ed_cert_tok +
+ !!cc_tap_tok + !!cc_ntor_tok;
+ if ((n_ed_toks != 0 && n_ed_toks != 4) ||
+ (n_ed_toks == 4 && !router->onion_curve25519_pkey)) {
+ log_warn(LD_DIR, "Router descriptor with only partial ed25519/"
+ "cross-certification support");
+ goto err;
+ }
+ if (master_key_tok && !ed_sig_tok) {
+ log_warn(LD_DIR, "Router descriptor has ed25519 master key but no "
+ "certificate");
+ goto err;
+ }
+ if (ed_sig_tok) {
+ tor_assert(ed_cert_tok && cc_tap_tok && cc_ntor_tok);
+ const int ed_cert_token_pos = smartlist_pos(tokens, ed_cert_tok);
+ if (ed_cert_token_pos == -1 || router_token_pos == -1 ||
+ (ed_cert_token_pos != router_token_pos + 1 &&
+ ed_cert_token_pos != router_token_pos - 1)) {
+ log_warn(LD_DIR, "Ed25519 certificate in wrong position");
+ goto err;
+ }
+ if (ed_sig_tok != smartlist_get(tokens, smartlist_len(tokens)-2)) {
+ log_warn(LD_DIR, "Ed25519 signature in wrong position");
+ goto err;
+ }
+ if (strcmp(ed_cert_tok->object_type, "ED25519 CERT")) {
+ log_warn(LD_DIR, "Wrong object type on identity-ed25519 in decriptor");
+ goto err;
+ }
+ if (strcmp(cc_ntor_tok->object_type, "ED25519 CERT")) {
+ log_warn(LD_DIR, "Wrong object type on ntor-onion-key-crosscert "
+ "in decriptor");
+ goto err;
+ }
+ if (strcmp(cc_tap_tok->object_type, "CROSSCERT")) {
+ log_warn(LD_DIR, "Wrong object type on onion-key-crosscert "
+ "in decriptor");
+ goto err;
+ }
+ if (strcmp(cc_ntor_tok->args[0], "0") &&
+ strcmp(cc_ntor_tok->args[0], "1")) {
+ log_warn(LD_DIR, "Bad sign bit on ntor-onion-key-crosscert");
+ goto err;
+ }
+ int ntor_cc_sign_bit = !strcmp(cc_ntor_tok->args[0], "1");
+
+ uint8_t d256[DIGEST256_LEN];
+ const char *signed_start, *signed_end;
+ tor_cert_t *cert = tor_cert_parse(
+ (const uint8_t*)ed_cert_tok->object_body,
+ ed_cert_tok->object_size);
+ if (! cert) {
+ log_warn(LD_DIR, "Couldn't parse ed25519 cert");
+ goto err;
+ }
+ /* makes sure it gets freed. */
+ router->cache_info.signing_key_cert = cert;
+
+ if (cert->cert_type != CERT_TYPE_ID_SIGNING ||
+ ! cert->signing_key_included) {
+ log_warn(LD_DIR, "Invalid form for ed25519 cert");
+ goto err;
+ }
+
+ if (master_key_tok) {
+        /* This token is optional, but if it's present, it must match
+         * the signing key embedded in the certificate. */
+ tor_assert(master_key_tok->n_args >= 1);
+ ed25519_public_key_t pkey;
+ if (ed25519_public_from_base64(&pkey, master_key_tok->args[0])<0) {
+ log_warn(LD_DIR, "Can't parse ed25519 master key");
+ goto err;
+ }
+
+ if (fast_memneq(&cert->signing_key.pubkey,
+ pkey.pubkey, ED25519_PUBKEY_LEN)) {
+ log_warn(LD_DIR, "Ed25519 master key does not match "
+ "key in certificate");
+ goto err;
+ }
+ }
+ ntor_cc_cert = tor_cert_parse((const uint8_t*)cc_ntor_tok->object_body,
+ cc_ntor_tok->object_size);
+ if (!ntor_cc_cert) {
+ log_warn(LD_DIR, "Couldn't parse ntor-onion-key-crosscert cert");
+ goto err;
+ }
+ if (ntor_cc_cert->cert_type != CERT_TYPE_ONION_ID ||
+ ! ed25519_pubkey_eq(&ntor_cc_cert->signed_key, &cert->signing_key)) {
+ log_warn(LD_DIR, "Invalid contents for ntor-onion-key-crosscert cert");
+ goto err;
+ }
+
+ ed25519_public_key_t ntor_cc_pk;
+ if (ed25519_public_key_from_curve25519_public_key(&ntor_cc_pk,
+ router->onion_curve25519_pkey,
+ ntor_cc_sign_bit)<0) {
+ log_warn(LD_DIR, "Error converting onion key to ed25519");
+ goto err;
+ }
+
+ if (router_get_hash_impl_helper(s, end-s, "router ",
+ "\nrouter-sig-ed25519",
+ ' ', LOG_WARN,
+ &signed_start, &signed_end) < 0) {
+ log_warn(LD_DIR, "Can't find ed25519-signed portion of descriptor");
+ goto err;
+ }
+ crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
+ crypto_digest_add_bytes(d, ED_DESC_SIGNATURE_PREFIX,
+ strlen(ED_DESC_SIGNATURE_PREFIX));
+ crypto_digest_add_bytes(d, signed_start, signed_end-signed_start);
+ crypto_digest_get_digest(d, (char*)d256, sizeof(d256));
+ crypto_digest_free(d);
+
+ ed25519_checkable_t check[3];
+ int check_ok[3];
+ time_t expires = TIME_MAX;
+ if (tor_cert_get_checkable_sig(&check[0], cert, NULL, &expires) < 0) {
+ log_err(LD_BUG, "Couldn't create 'checkable' for cert.");
+ goto err;
+ }
+ if (tor_cert_get_checkable_sig(&check[1],
+ ntor_cc_cert, &ntor_cc_pk, &expires) < 0) {
+ log_err(LD_BUG, "Couldn't create 'checkable' for ntor_cc_cert.");
+ goto err;
+ }
+
+ if (ed25519_signature_from_base64(&check[2].signature,
+ ed_sig_tok->args[0])<0) {
+ log_warn(LD_DIR, "Couldn't decode ed25519 signature");
+ goto err;
+ }
+ check[2].pubkey = &cert->signed_key;
+ check[2].msg = d256;
+ check[2].len = DIGEST256_LEN;
+
+ if (ed25519_checksig_batch(check_ok, check, 3) < 0) {
+ log_warn(LD_DIR, "Incorrect ed25519 signature(s)");
+ goto err;
+ }
+
+ if (check_tap_onion_key_crosscert(
+ (const uint8_t*)cc_tap_tok->object_body,
+ (int)cc_tap_tok->object_size,
+ router->onion_pkey,
+ &cert->signing_key,
+ (const uint8_t*)router->cache_info.identity_digest)<0) {
+ log_warn(LD_DIR, "Incorrect TAP cross-verification");
+ goto err;
+ }
+
+ /* We check this before adding it to the routerlist. */
+ router->cert_expiration_time = expires;
+ }
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_FINGERPRINT))) {
+ /* If there's a fingerprint line, it must match the identity digest. */
+ char d[DIGEST_LEN];
+ tor_assert(tok->n_args == 1);
+ tor_strstrip(tok->args[0], " ");
+ if (base16_decode(d, DIGEST_LEN,
+ tok->args[0], strlen(tok->args[0])) != DIGEST_LEN) {
+ log_warn(LD_DIR, "Couldn't decode router fingerprint %s",
+ escaped(tok->args[0]));
+ goto err;
+ }
+ if (tor_memneq(d,router->cache_info.identity_digest, DIGEST_LEN)) {
+ log_warn(LD_DIR, "Fingerprint '%s' does not match identity digest.",
+ tok->args[0]);
+ goto err;
+ }
+ }
+
+ {
+ const char *version = NULL, *protocols = NULL;
+ if ((tok = find_opt_by_keyword(tokens, K_PLATFORM))) {
+ router->platform = tor_strdup(tok->args[0]);
+ version = tok->args[0];
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_PROTO))) {
+ router->protocol_list = tor_strdup(tok->args[0]);
+ protocols = tok->args[0];
+ }
+
+ summarize_protover_flags(&router->pv, protocols, version);
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_CONTACT))) {
+ router->contact_info = tor_strdup(tok->args[0]);
+ }
+
+ if (find_opt_by_keyword(tokens, K_REJECT6) ||
+ find_opt_by_keyword(tokens, K_ACCEPT6)) {
+ log_warn(LD_DIR, "Rejecting router with reject6/accept6 line: they crash "
+ "older Tors.");
+ goto err;
+ }
+ {
+ smartlist_t *or_addresses = find_all_by_keyword(tokens, K_OR_ADDRESS);
+ if (or_addresses) {
+ find_single_ipv6_orport(or_addresses, &router->ipv6_addr,
+ &router->ipv6_orport);
+ smartlist_free(or_addresses);
+ }
+ }
+ exit_policy_tokens = find_all_exitpolicy(tokens);
+ if (!smartlist_len(exit_policy_tokens)) {
+ log_warn(LD_DIR, "No exit policy tokens in descriptor.");
+ goto err;
+ }
+ SMARTLIST_FOREACH(exit_policy_tokens, directory_token_t *, t,
+ if (router_add_exit_policy(router,t)<0) {
+ log_warn(LD_DIR,"Error in exit policy");
+ goto err;
+ });
+ policy_expand_private(&router->exit_policy);
+
+ if ((tok = find_opt_by_keyword(tokens, K_IPV6_POLICY)) && tok->n_args) {
+ router->ipv6_exit_policy = parse_short_policy(tok->args[0]);
+ if (! router->ipv6_exit_policy) {
+ log_warn(LD_DIR , "Error in ipv6-policy %s", escaped(tok->args[0]));
+ goto err;
+ }
+ }
+
+ if (policy_is_reject_star(router->exit_policy, AF_INET, 1) &&
+ (!router->ipv6_exit_policy ||
+ short_policy_is_reject_star(router->ipv6_exit_policy)))
+ router->policy_is_reject_star = 1;
+
+ if ((tok = find_opt_by_keyword(tokens, K_FAMILY)) && tok->n_args) {
+ int i;
+ router->declared_family = smartlist_new();
+ for (i=0;i<tok->n_args;++i) {
+ if (!is_legal_nickname_or_hexdigest(tok->args[i])) {
+ log_warn(LD_DIR, "Illegal nickname %s in family line",
+ escaped(tok->args[i]));
+ goto err;
+ }
+ smartlist_add_strdup(router->declared_family, tok->args[i]);
+ }
+ }
+
+ if (find_opt_by_keyword(tokens, K_CACHES_EXTRA_INFO))
+ router->caches_extra_info = 1;
+
+ if (find_opt_by_keyword(tokens, K_ALLOW_SINGLE_HOP_EXITS))
+ router->allow_single_hop_exits = 1;
+
+ if ((tok = find_opt_by_keyword(tokens, K_EXTRA_INFO_DIGEST))) {
+ tor_assert(tok->n_args >= 1);
+ if (strlen(tok->args[0]) == HEX_DIGEST_LEN) {
+ if (base16_decode(router->cache_info.extra_info_digest, DIGEST_LEN,
+ tok->args[0], HEX_DIGEST_LEN) != DIGEST_LEN) {
+ log_warn(LD_DIR,"Invalid extra info digest");
+ }
+ } else {
+ log_warn(LD_DIR, "Invalid extra info digest %s", escaped(tok->args[0]));
+ }
+
+ if (tok->n_args >= 2) {
+ if (digest256_from_base64(router->cache_info.extra_info_digest256,
+ tok->args[1]) < 0) {
+ log_warn(LD_DIR, "Invalid extra info digest256 %s",
+ escaped(tok->args[1]));
+ }
+ }
+ }
+
+ if (find_opt_by_keyword(tokens, K_HIDDEN_SERVICE_DIR)) {
+ router->wants_to_be_hs_dir = 1;
+ }
+
+ /* This router accepts tunnelled directory requests via begindir if it has
+ * an open dirport or it included "tunnelled-dir-server". */
+ if (find_opt_by_keyword(tokens, K_DIR_TUNNELLED) || router->dir_port > 0) {
+ router->supports_tunnelled_dir_requests = 1;
+ }
+
+ tok = find_by_keyword(tokens, K_ROUTER_SIGNATURE);
+#ifdef COUNT_DISTINCT_DIGESTS
+ if (!verified_digests)
+ verified_digests = digestmap_new();
+ digestmap_set(verified_digests, signed_digest, (void*)(uintptr_t)1);
+#endif
+
+ if (!router->or_port) {
+ log_warn(LD_DIR,"or_port unreadable or 0. Failing.");
+ goto err;
+ }
+
+ /* We've checked everything that's covered by the hash. */
+ can_dl_again = 1;
+ if (check_signature_token(digest, DIGEST_LEN, tok, router->identity_pkey, 0,
+ "router descriptor") < 0)
+ goto err;
+
+ if (!router->platform) {
+ router->platform = tor_strdup("<unknown>");
+ }
+ goto done;
+
+ err:
+ dump_desc(s_dup, "router descriptor");
+ routerinfo_free(router);
+ router = NULL;
+ done:
+ tor_cert_free(ntor_cc_cert);
+ if (tokens) {
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ }
+ smartlist_free(exit_policy_tokens);
+ if (area) {
+ DUMP_AREA(area, "routerinfo");
+ memarea_drop_all(area);
+ }
+ if (can_dl_again_out)
+ *can_dl_again_out = can_dl_again;
+ return router;
+}
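+
+/* Illustrative usage sketch (the "desc" buffer is hypothetical):
+ *
+ *   int can_dl_again = 0;
+ *   routerinfo_t *ri =
+ *     router_parse_entry_from_string(desc, NULL, 1, 0, NULL, &can_dl_again);
+ *   if (!ri) {
+ *     // Parsing or signature checking failed; 'can_dl_again' tells the
+ *     // caller whether re-downloading the same digest could ever succeed.
+ *   }
+ */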
+
+/** Parse a single extrainfo entry from the string <b>s</b>, ending at
+ * <b>end</b>. (If <b>end</b> is NULL, parse up to the end of <b>s</b>.) If
+ * <b>cache_copy</b> is true, make a copy of the extra-info document in the
+ * cache_info fields of the result. If <b>routermap</b> is provided, use it
+ * as a map from router identity to routerinfo_t when looking up signing keys.
+ *
+ * If <b>can_dl_again_out</b> is provided, set *<b>can_dl_again_out</b> to 1
+ * if it's okay to try to download an extrainfo with this same digest again,
+ * and 0 if it isn't. (It might not be okay to download it again if part of
+ * the portion covered by the digest is invalid.)
+ */
+extrainfo_t *
+extrainfo_parse_entry_from_string(const char *s, const char *end,
+ int cache_copy, struct digest_ri_map_t *routermap,
+ int *can_dl_again_out)
+{
+ extrainfo_t *extrainfo = NULL;
+ char digest[128];
+ smartlist_t *tokens = NULL;
+ directory_token_t *tok;
+ crypto_pk_t *key = NULL;
+ routerinfo_t *router = NULL;
+ memarea_t *area = NULL;
+ const char *s_dup = s;
+ /* Do not set this to '1' until we have parsed everything that we intend to
+ * parse that's covered by the hash. */
+ int can_dl_again = 0;
+
+ if (BUG(s == NULL))
+ return NULL;
+
+ if (!end) {
+ end = s + strlen(s);
+ }
+
+ /* point 'end' to a point immediately after the final newline. */
+ while (end > s+2 && *(end-1) == '\n' && *(end-2) == '\n')
+ --end;
+
+ if (router_get_extrainfo_hash(s, end-s, digest) < 0) {
+ log_warn(LD_DIR, "Couldn't compute router hash.");
+ goto err;
+ }
+ tokens = smartlist_new();
+ area = memarea_new();
+ if (tokenize_string(area,s,end,tokens,extrainfo_token_table,0)) {
+ log_warn(LD_DIR, "Error tokenizing extra-info document.");
+ goto err;
+ }
+
+ if (smartlist_len(tokens) < 2) {
+ log_warn(LD_DIR, "Impossibly short extra-info document.");
+ goto err;
+ }
+
+ /* XXXX Accept this in position 1 too, and ed identity in position 0. */
+ tok = smartlist_get(tokens,0);
+ if (tok->tp != K_EXTRA_INFO) {
+ log_warn(LD_DIR,"Entry does not start with \"extra-info\"");
+ goto err;
+ }
+
+ extrainfo = tor_malloc_zero(sizeof(extrainfo_t));
+ extrainfo->cache_info.is_extrainfo = 1;
+ if (cache_copy)
+ extrainfo->cache_info.signed_descriptor_body = tor_memdup_nulterm(s,end-s);
+ extrainfo->cache_info.signed_descriptor_len = end-s;
+ memcpy(extrainfo->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
+ crypto_digest256((char*)extrainfo->digest256, s, end-s, DIGEST_SHA256);
+
+ tor_assert(tok->n_args >= 2);
+ if (!is_legal_nickname(tok->args[0])) {
+ log_warn(LD_DIR,"Bad nickname %s on \"extra-info\"",escaped(tok->args[0]));
+ goto err;
+ }
+ strlcpy(extrainfo->nickname, tok->args[0], sizeof(extrainfo->nickname));
+ if (strlen(tok->args[1]) != HEX_DIGEST_LEN ||
+ base16_decode(extrainfo->cache_info.identity_digest, DIGEST_LEN,
+ tok->args[1], HEX_DIGEST_LEN) != DIGEST_LEN) {
+ log_warn(LD_DIR,"Invalid fingerprint %s on \"extra-info\"",
+ escaped(tok->args[1]));
+ goto err;
+ }
+
+ tok = find_by_keyword(tokens, K_PUBLISHED);
+ if (parse_iso_time(tok->args[0], &extrainfo->cache_info.published_on)) {
+ log_warn(LD_DIR,"Invalid published time %s on \"extra-info\"",
+ escaped(tok->args[0]));
+ goto err;
+ }
+
+ {
+ directory_token_t *ed_sig_tok, *ed_cert_tok;
+ ed_sig_tok = find_opt_by_keyword(tokens, K_ROUTER_SIG_ED25519);
+ ed_cert_tok = find_opt_by_keyword(tokens, K_IDENTITY_ED25519);
+ int n_ed_toks = !!ed_sig_tok + !!ed_cert_tok;
+ if (n_ed_toks != 0 && n_ed_toks != 2) {
+ log_warn(LD_DIR, "Router descriptor with only partial ed25519/"
+ "cross-certification support");
+ goto err;
+ }
+ if (ed_sig_tok) {
+ tor_assert(ed_cert_tok);
+ const int ed_cert_token_pos = smartlist_pos(tokens, ed_cert_tok);
+ if (ed_cert_token_pos != 1) {
+ /* Accept this in position 0 XXXX */
+ log_warn(LD_DIR, "Ed25519 certificate in wrong position");
+ goto err;
+ }
+ if (ed_sig_tok != smartlist_get(tokens, smartlist_len(tokens)-2)) {
+ log_warn(LD_DIR, "Ed25519 signature in wrong position");
+ goto err;
+ }
+ if (strcmp(ed_cert_tok->object_type, "ED25519 CERT")) {
+ log_warn(LD_DIR, "Wrong object type on identity-ed25519 in decriptor");
+ goto err;
+ }
+
+ uint8_t d256[DIGEST256_LEN];
+ const char *signed_start, *signed_end;
+ tor_cert_t *cert = tor_cert_parse(
+ (const uint8_t*)ed_cert_tok->object_body,
+ ed_cert_tok->object_size);
+ if (! cert) {
+ log_warn(LD_DIR, "Couldn't parse ed25519 cert");
+ goto err;
+ }
+ /* makes sure it gets freed. */
+ extrainfo->cache_info.signing_key_cert = cert;
+
+ if (cert->cert_type != CERT_TYPE_ID_SIGNING ||
+ ! cert->signing_key_included) {
+ log_warn(LD_DIR, "Invalid form for ed25519 cert");
+ goto err;
+ }
+
+ if (router_get_hash_impl_helper(s, end-s, "extra-info ",
+ "\nrouter-sig-ed25519",
+ ' ', LOG_WARN,
+ &signed_start, &signed_end) < 0) {
+ log_warn(LD_DIR, "Can't find ed25519-signed portion of extrainfo");
+ goto err;
+ }
+ crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
+ crypto_digest_add_bytes(d, ED_DESC_SIGNATURE_PREFIX,
+ strlen(ED_DESC_SIGNATURE_PREFIX));
+ crypto_digest_add_bytes(d, signed_start, signed_end-signed_start);
+ crypto_digest_get_digest(d, (char*)d256, sizeof(d256));
+ crypto_digest_free(d);
+
+ ed25519_checkable_t check[2];
+ int check_ok[2];
+ if (tor_cert_get_checkable_sig(&check[0], cert, NULL, NULL) < 0) {
+ log_err(LD_BUG, "Couldn't create 'checkable' for cert.");
+ goto err;
+ }
+
+ if (ed25519_signature_from_base64(&check[1].signature,
+ ed_sig_tok->args[0])<0) {
+ log_warn(LD_DIR, "Couldn't decode ed25519 signature");
+ goto err;
+ }
+ check[1].pubkey = &cert->signed_key;
+ check[1].msg = d256;
+ check[1].len = DIGEST256_LEN;
+
+ if (ed25519_checksig_batch(check_ok, check, 2) < 0) {
+ log_warn(LD_DIR, "Incorrect ed25519 signature(s)");
+ goto err;
+ }
+ /* We don't check the certificate expiration time: checking that it
+ * matches the cert in the router descriptor is adequate. */
+ }
+ }
+
+ /* We've checked everything that's covered by the hash. */
+ can_dl_again = 1;
+
+ if (routermap &&
+ (router = digestmap_get((digestmap_t*)routermap,
+ extrainfo->cache_info.identity_digest))) {
+ key = router->identity_pkey;
+ }
+
+ tok = find_by_keyword(tokens, K_ROUTER_SIGNATURE);
+ if (strcmp(tok->object_type, "SIGNATURE") ||
+ tok->object_size < 128 || tok->object_size > 512) {
+ log_warn(LD_DIR, "Bad object type or length on extra-info signature");
+ goto err;
+ }
+
+ if (key) {
+ if (check_signature_token(digest, DIGEST_LEN, tok, key, 0,
+ "extra-info") < 0)
+ goto err;
+
+ if (router)
+ extrainfo->cache_info.send_unencrypted =
+ router->cache_info.send_unencrypted;
+ } else {
+ extrainfo->pending_sig = tor_memdup(tok->object_body,
+ tok->object_size);
+ extrainfo->pending_sig_len = tok->object_size;
+ }
+
+ goto done;
+ err:
+ dump_desc(s_dup, "extra-info descriptor");
+ extrainfo_free(extrainfo);
+ extrainfo = NULL;
+ done:
+ if (tokens) {
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ }
+ if (area) {
+ DUMP_AREA(area, "extrainfo");
+ memarea_drop_all(area);
+ }
+ if (can_dl_again_out)
+ *can_dl_again_out = can_dl_again;
+ return extrainfo;
+}
+
+/** Parse a key certificate from <b>s</b>; point *<b>end_of_string</b> to
+ * the first character after the certificate. */
+authority_cert_t *
+authority_cert_parse_from_string(const char *s, const char **end_of_string)
+{
+ /** Reject any certificate at least this big; it is probably an overflow, an
+ * attack, a bug, or some other nonsense. */
+#define MAX_CERT_SIZE (128*1024)
+
+ authority_cert_t *cert = NULL, *old_cert;
+ smartlist_t *tokens = NULL;
+ char digest[DIGEST_LEN];
+ directory_token_t *tok;
+ char fp_declared[DIGEST_LEN];
+ char *eos;
+ size_t len;
+ int found;
+ memarea_t *area = NULL;
+ const char *s_dup = s;
+
+ s = eat_whitespace(s);
+ eos = strstr(s, "\ndir-key-certification");
+ if (! eos) {
+ log_warn(LD_DIR, "No signature found on key certificate");
+ return NULL;
+ }
+ eos = strstr(eos, "\n-----END SIGNATURE-----\n");
+ if (! eos) {
+ log_warn(LD_DIR, "No end-of-signature found on key certificate");
+ return NULL;
+ }
+ eos = strchr(eos+2, '\n');
+ tor_assert(eos);
+ ++eos;
+ len = eos - s;
+
+ if (len > MAX_CERT_SIZE) {
+ log_warn(LD_DIR, "Certificate is far too big (at %lu bytes long); "
+ "rejecting", (unsigned long)len);
+ return NULL;
+ }
+
+ tokens = smartlist_new();
+ area = memarea_new();
+ if (tokenize_string(area,s, eos, tokens, dir_key_certificate_table, 0) < 0) {
+ log_warn(LD_DIR, "Error tokenizing key certificate");
+ goto err;
+ }
+ if (router_get_hash_impl(s, strlen(s), digest, "dir-key-certificate-version",
+ "\ndir-key-certification", '\n', DIGEST_SHA1) < 0)
+ goto err;
+ tok = smartlist_get(tokens, 0);
+ if (tok->tp != K_DIR_KEY_CERTIFICATE_VERSION || strcmp(tok->args[0], "3")) {
+ log_warn(LD_DIR,
+ "Key certificate does not begin with a recognized version (3).");
+ goto err;
+ }
+
+ cert = tor_malloc_zero(sizeof(authority_cert_t));
+ memcpy(cert->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
+
+ tok = find_by_keyword(tokens, K_DIR_SIGNING_KEY);
+ tor_assert(tok->key);
+ cert->signing_key = tok->key;
+ tok->key = NULL;
+ if (crypto_pk_get_digest(cert->signing_key, cert->signing_key_digest))
+ goto err;
+
+ tok = find_by_keyword(tokens, K_DIR_IDENTITY_KEY);
+ tor_assert(tok->key);
+ cert->identity_key = tok->key;
+ tok->key = NULL;
+
+ tok = find_by_keyword(tokens, K_FINGERPRINT);
+ tor_assert(tok->n_args);
+ if (base16_decode(fp_declared, DIGEST_LEN, tok->args[0],
+ strlen(tok->args[0])) != DIGEST_LEN) {
+ log_warn(LD_DIR, "Couldn't decode key certificate fingerprint %s",
+ escaped(tok->args[0]));
+ goto err;
+ }
+
+ if (crypto_pk_get_digest(cert->identity_key,
+ cert->cache_info.identity_digest))
+ goto err;
+
+ if (tor_memneq(cert->cache_info.identity_digest, fp_declared, DIGEST_LEN)) {
+ log_warn(LD_DIR, "Digest of certificate key didn't match declared "
+ "fingerprint");
+ goto err;
+ }
+
+ tok = find_opt_by_keyword(tokens, K_DIR_ADDRESS);
+ if (tok) {
+ struct in_addr in;
+ char *address = NULL;
+ tor_assert(tok->n_args);
+ /* XXX++ use some tor_addr parse function below instead. -RD */
+ if (tor_addr_port_split(LOG_WARN, tok->args[0], &address,
+ &cert->dir_port) < 0 ||
+ tor_inet_aton(address, &in) == 0) {
+ log_warn(LD_DIR, "Couldn't parse dir-address in certificate");
+ tor_free(address);
+ goto err;
+ }
+ cert->addr = ntohl(in.s_addr);
+ tor_free(address);
+ }
+
+ tok = find_by_keyword(tokens, K_DIR_KEY_PUBLISHED);
+ if (parse_iso_time(tok->args[0], &cert->cache_info.published_on) < 0) {
+ goto err;
+ }
+ tok = find_by_keyword(tokens, K_DIR_KEY_EXPIRES);
+ if (parse_iso_time(tok->args[0], &cert->expires) < 0) {
+ goto err;
+ }
+
+ tok = smartlist_get(tokens, smartlist_len(tokens)-1);
+ if (tok->tp != K_DIR_KEY_CERTIFICATION) {
+ log_warn(LD_DIR, "Certificate didn't end with dir-key-certification.");
+ goto err;
+ }
+
+ /* If we already have this cert, don't bother checking the signature. */
+ old_cert = authority_cert_get_by_digests(
+ cert->cache_info.identity_digest,
+ cert->signing_key_digest);
+ found = 0;
+ if (old_cert) {
+ /* XXXX We could just compare signed_descriptor_digest, but that wouldn't
+ * buy us much. */
+ if (old_cert->cache_info.signed_descriptor_len == len &&
+ old_cert->cache_info.signed_descriptor_body &&
+ tor_memeq(s, old_cert->cache_info.signed_descriptor_body, len)) {
+ log_debug(LD_DIR, "We already checked the signature on this "
+ "certificate; no need to do so again.");
+ found = 1;
+ }
+ }
+ if (!found) {
+ if (check_signature_token(digest, DIGEST_LEN, tok, cert->identity_key, 0,
+ "key certificate")) {
+ goto err;
+ }
+
+ tok = find_by_keyword(tokens, K_DIR_KEY_CROSSCERT);
+ if (check_signature_token(cert->cache_info.identity_digest,
+ DIGEST_LEN,
+ tok,
+ cert->signing_key,
+ CST_NO_CHECK_OBJTYPE,
+ "key cross-certification")) {
+ goto err;
+ }
+ }
+
+ cert->cache_info.signed_descriptor_len = len;
+ cert->cache_info.signed_descriptor_body = tor_malloc(len+1);
+ memcpy(cert->cache_info.signed_descriptor_body, s, len);
+ cert->cache_info.signed_descriptor_body[len] = 0;
+ cert->cache_info.saved_location = SAVED_NOWHERE;
+
+ if (end_of_string) {
+ *end_of_string = eat_whitespace(eos);
+ }
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ if (area) {
+ DUMP_AREA(area, "authority cert");
+ memarea_drop_all(area);
+ }
+ return cert;
+ err:
+ dump_desc(s_dup, "authority cert");
+ authority_cert_free(cert);
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ if (area) {
+ DUMP_AREA(area, "authority cert");
+ memarea_drop_all(area);
+ }
+ return NULL;
+}
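+
+/* Illustrative usage sketch: walk a buffer that may hold several
+ * concatenated certificates (variable names are hypothetical):
+ *
+ *   const char *s = contents;
+ *   while (s && *s) {
+ *     const char *next = NULL;
+ *     authority_cert_t *cert = authority_cert_parse_from_string(s, &next);
+ *     if (!cert)
+ *       break;
+ *     // ... keep or store 'cert' ...
+ *     s = next;
+ *   }
+ */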
+
+/** Helper: given a string <b>s</b>, return the start of the next routerstatus
+ * object (beginning with "r " at the start of a line). If a directory footer
+ * or directory signature line appears before the next routerstatus, return
+ * that instead; if none of these is found, return the end of the string. */
+static inline const char *
+find_start_of_next_routerstatus(const char *s)
+{
+ const char *eos, *footer, *sig;
+ if ((eos = strstr(s, "\nr ")))
+ ++eos;
+ else
+ eos = s + strlen(s);
+
+ footer = tor_memstr(s, eos-s, "\ndirectory-footer");
+ sig = tor_memstr(s, eos-s, "\ndirectory-signature");
+
+ if (footer && sig)
+ return MIN(footer, sig) + 1;
+ else if (footer)
+ return footer+1;
+ else if (sig)
+ return sig+1;
+ else
+ return eos;
+}
+
+/** Parse the GuardFraction string from a consensus or vote.
+ *
+ * If <b>vote</b> and <b>vote_rs</b> are set, the document being
+ * parsed is a vote routerstatus. Otherwise it's a consensus. These are
+ * the same semantics as in routerstatus_parse_entry_from_string(). */
+STATIC int
+routerstatus_parse_guardfraction(const char *guardfraction_str,
+ networkstatus_t *vote,
+ vote_routerstatus_t *vote_rs,
+ routerstatus_t *rs)
+{
+ int ok;
+ const char *end_of_header = NULL;
+ int is_consensus = !vote_rs;
+ uint32_t guardfraction;
+
+ tor_assert(bool_eq(vote, vote_rs));
+
+  /* If this info comes from a consensus, but we shouldn't apply
+ guardfraction, just exit. */
+ if (is_consensus && !should_apply_guardfraction(NULL)) {
+ return 0;
+ }
+
+ end_of_header = strchr(guardfraction_str, '=');
+ if (!end_of_header) {
+ return -1;
+ }
+
+ guardfraction = (uint32_t)tor_parse_ulong(end_of_header+1,
+ 10, 0, 100, &ok, NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Invalid GuardFraction %s", escaped(guardfraction_str));
+ return -1;
+ }
+
+ log_debug(LD_GENERAL, "[*] Parsed %s guardfraction '%s' for '%s'.",
+ is_consensus ? "consensus" : "vote",
+ guardfraction_str, rs->nickname);
+
+ if (!is_consensus) { /* We are parsing a vote */
+ vote_rs->status.guardfraction_percentage = guardfraction;
+ vote_rs->status.has_guardfraction = 1;
+ } else {
+ /* We are parsing a consensus. Only apply guardfraction to guards. */
+ if (rs->is_possible_guard) {
+ rs->guardfraction_percentage = guardfraction;
+ rs->has_guardfraction = 1;
+ } else {
+ log_warn(LD_BUG, "Got GuardFraction for non-guard %s. "
+ "This is not supposed to happen. Not applying. ", rs->nickname);
+ }
+ }
+
+ return 0;
+}
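+
+/* The string parsed above has the form "GuardFraction=66": everything after
+ * the '=' must be an integer percentage in [0, 100]. (The value shown is
+ * illustrative.) */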
+
+/** Summarize the protocols listed in <b>protocols</b> into <b>out</b>,
+ * falling back or correcting them based on <b>version</b> as appropriate.
+ */
+STATIC void
+summarize_protover_flags(protover_summary_flags_t *out,
+ const char *protocols,
+ const char *version)
+{
+ tor_assert(out);
+ memset(out, 0, sizeof(*out));
+ if (protocols) {
+ out->protocols_known = 1;
+ out->supports_extend2_cells =
+ protocol_list_supports_protocol(protocols, PRT_RELAY, 2);
+ out->supports_ed25519_link_handshake_compat =
+ protocol_list_supports_protocol(protocols, PRT_LINKAUTH, 3);
+ out->supports_ed25519_link_handshake_any =
+ protocol_list_supports_protocol_or_later(protocols, PRT_LINKAUTH, 3);
+ out->supports_ed25519_hs_intro =
+ protocol_list_supports_protocol(protocols, PRT_HSINTRO, 4);
+ out->supports_v3_hsdir =
+ protocol_list_supports_protocol(protocols, PRT_HSDIR,
+ PROTOVER_HSDIR_V3);
+ out->supports_v3_rendezvous_point =
+ protocol_list_supports_protocol(protocols, PRT_HSREND,
+ PROTOVER_HS_RENDEZVOUS_POINT_V3);
+ }
+ if (version && !strcmpstart(version, "Tor ")) {
+ if (!out->protocols_known) {
+ /* The version is a "Tor" version, and where there is no
+ * list of protocol versions that we should be looking at instead. */
+
+ out->supports_extend2_cells =
+ tor_version_as_new_as(version, "0.2.4.8-alpha");
+ out->protocols_known = 1;
+ } else {
+ /* Bug #22447 forces us to filter on this version. */
+ if (!tor_version_as_new_as(version, "0.3.0.8")) {
+ out->supports_v3_hsdir = 0;
+ }
+ }
+ }
+}
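+
+/* Illustrative input/output sketch for the helper above (the protocol list
+ * and version strings are invented):
+ *
+ *   protover_summary_flags_t pv;
+ *   summarize_protover_flags(&pv,
+ *       "Link=1-5 LinkAuth=1,3 Relay=1-2 HSDir=1-2 HSIntro=3-4 HSRend=1-2",
+ *       "Tor 0.3.0.7");
+ *   // pv.protocols_known == 1 and pv.supports_extend2_cells == 1 (Relay=2);
+ *   // pv.supports_v3_hsdir would be set if HSDir covers PROTOVER_HSDIR_V3,
+ *   // but it is cleared again because the version is older than 0.3.0.8
+ *   // (the bug #22447 filter above).
+ */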
+
+/** Given a string at *<b>s</b>, containing a routerstatus object, and an
+ * empty smartlist at <b>tokens</b>, parse and return the first router status
+ * object in the string, and advance *<b>s</b> to just after the end of the
+ * router status. Return NULL and advance *<b>s</b> on error.
+ *
+ * If <b>vote</b> and <b>vote_rs</b> are provided, don't allocate a fresh
+ * routerstatus but use <b>vote_rs</b> instead.
+ *
+ * If <b>consensus_method</b> is nonzero, this routerstatus is part of a
+ * consensus, and we should parse it according to the method used to
+ * make that consensus.
+ *
+ * Parse according to the syntax used by the consensus flavor <b>flav</b>.
+ **/
+STATIC routerstatus_t *
+routerstatus_parse_entry_from_string(memarea_t *area,
+ const char **s, smartlist_t *tokens,
+ networkstatus_t *vote,
+ vote_routerstatus_t *vote_rs,
+ int consensus_method,
+ consensus_flavor_t flav)
+{
+ const char *eos, *s_dup = *s;
+ routerstatus_t *rs = NULL;
+ directory_token_t *tok;
+ char timebuf[ISO_TIME_LEN+1];
+ struct in_addr in;
+ int offset = 0;
+ tor_assert(tokens);
+ tor_assert(bool_eq(vote, vote_rs));
+
+ if (!consensus_method)
+ flav = FLAV_NS;
+ tor_assert(flav == FLAV_NS || flav == FLAV_MICRODESC);
+
+ eos = find_start_of_next_routerstatus(*s);
+
+ if (tokenize_string(area,*s, eos, tokens, rtrstatus_token_table,0)) {
+ log_warn(LD_DIR, "Error tokenizing router status");
+ goto err;
+ }
+ if (smartlist_len(tokens) < 1) {
+ log_warn(LD_DIR, "Impossibly short router status");
+ goto err;
+ }
+ tok = find_by_keyword(tokens, K_R);
+ tor_assert(tok->n_args >= 7); /* guaranteed by GE(7) in K_R setup */
+ if (flav == FLAV_NS) {
+ if (tok->n_args < 8) {
+ log_warn(LD_DIR, "Too few arguments to r");
+ goto err;
+ }
+ } else if (flav == FLAV_MICRODESC) {
+ offset = -1; /* There is no descriptor digest in an md consensus r line */
+ }
+
+ if (vote_rs) {
+ rs = &vote_rs->status;
+ } else {
+ rs = tor_malloc_zero(sizeof(routerstatus_t));
+ }
+
+ if (!is_legal_nickname(tok->args[0])) {
+ log_warn(LD_DIR,
+ "Invalid nickname %s in router status; skipping.",
+ escaped(tok->args[0]));
+ goto err;
+ }
+ strlcpy(rs->nickname, tok->args[0], sizeof(rs->nickname));
+
+ if (digest_from_base64(rs->identity_digest, tok->args[1])) {
+ log_warn(LD_DIR, "Error decoding identity digest %s",
+ escaped(tok->args[1]));
+ goto err;
+ }
+
+ if (flav == FLAV_NS) {
+ if (digest_from_base64(rs->descriptor_digest, tok->args[2])) {
+ log_warn(LD_DIR, "Error decoding descriptor digest %s",
+ escaped(tok->args[2]));
+ goto err;
+ }
+ }
+
+ if (tor_snprintf(timebuf, sizeof(timebuf), "%s %s",
+ tok->args[3+offset], tok->args[4+offset]) < 0 ||
+ parse_iso_time(timebuf, &rs->published_on)<0) {
+ log_warn(LD_DIR, "Error parsing time '%s %s' [%d %d]",
+ tok->args[3+offset], tok->args[4+offset],
+ offset, (int)flav);
+ goto err;
+ }
+
+ if (tor_inet_aton(tok->args[5+offset], &in) == 0) {
+ log_warn(LD_DIR, "Error parsing router address in network-status %s",
+ escaped(tok->args[5+offset]));
+ goto err;
+ }
+ rs->addr = ntohl(in.s_addr);
+
+ rs->or_port = (uint16_t) tor_parse_long(tok->args[6+offset],
+ 10,0,65535,NULL,NULL);
+ rs->dir_port = (uint16_t) tor_parse_long(tok->args[7+offset],
+ 10,0,65535,NULL,NULL);
+
+ {
+ smartlist_t *a_lines = find_all_by_keyword(tokens, K_A);
+ if (a_lines) {
+ find_single_ipv6_orport(a_lines, &rs->ipv6_addr, &rs->ipv6_orport);
+ smartlist_free(a_lines);
+ }
+ }
+
+ tok = find_opt_by_keyword(tokens, K_S);
+ if (tok && vote) {
+ int i;
+ vote_rs->flags = 0;
+ for (i=0; i < tok->n_args; ++i) {
+ int p = smartlist_string_pos(vote->known_flags, tok->args[i]);
+ if (p >= 0) {
+ vote_rs->flags |= (UINT64_C(1)<<p);
+ } else {
+ log_warn(LD_DIR, "Flags line had a flag %s not listed in known_flags.",
+ escaped(tok->args[i]));
+ goto err;
+ }
+ }
+ } else if (tok) {
+ /* This is a consensus, not a vote. */
+ int i;
+ for (i=0; i < tok->n_args; ++i) {
+ if (!strcmp(tok->args[i], "Exit"))
+ rs->is_exit = 1;
+ else if (!strcmp(tok->args[i], "Stable"))
+ rs->is_stable = 1;
+ else if (!strcmp(tok->args[i], "Fast"))
+ rs->is_fast = 1;
+ else if (!strcmp(tok->args[i], "Running"))
+ rs->is_flagged_running = 1;
+ else if (!strcmp(tok->args[i], "Named"))
+ rs->is_named = 1;
+ else if (!strcmp(tok->args[i], "Valid"))
+ rs->is_valid = 1;
+ else if (!strcmp(tok->args[i], "Guard"))
+ rs->is_possible_guard = 1;
+ else if (!strcmp(tok->args[i], "BadExit"))
+ rs->is_bad_exit = 1;
+ else if (!strcmp(tok->args[i], "Authority"))
+ rs->is_authority = 1;
+ else if (!strcmp(tok->args[i], "Unnamed") &&
+ consensus_method >= 2) {
+ /* Unnamed is computed right by consensus method 2 and later. */
+ rs->is_unnamed = 1;
+ } else if (!strcmp(tok->args[i], "HSDir")) {
+ rs->is_hs_dir = 1;
+ } else if (!strcmp(tok->args[i], "V2Dir")) {
+ rs->is_v2_dir = 1;
+ }
+ }
+ /* These are implied true by having been included in a consensus made
+ * with a given method */
+ rs->is_flagged_running = 1; /* Starting with consensus method 4. */
+ rs->is_valid = 1; /* Starting with consensus method 24. */
+ }
+ {
+ const char *protocols = NULL, *version = NULL;
+ if ((tok = find_opt_by_keyword(tokens, K_PROTO))) {
+ tor_assert(tok->n_args == 1);
+ protocols = tok->args[0];
+ }
+ if ((tok = find_opt_by_keyword(tokens, K_V))) {
+ tor_assert(tok->n_args == 1);
+ version = tok->args[0];
+ if (vote_rs) {
+ vote_rs->version = tor_strdup(tok->args[0]);
+ }
+ }
+
+ summarize_protover_flags(&rs->pv, protocols, version);
+ }
+
+ /* handle weighting/bandwidth info */
+ if ((tok = find_opt_by_keyword(tokens, K_W))) {
+ int i;
+ for (i=0; i < tok->n_args; ++i) {
+ if (!strcmpstart(tok->args[i], "Bandwidth=")) {
+ int ok;
+ rs->bandwidth_kb =
+ (uint32_t)tor_parse_ulong(strchr(tok->args[i], '=')+1,
+ 10, 0, UINT32_MAX,
+ &ok, NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Invalid Bandwidth %s", escaped(tok->args[i]));
+ goto err;
+ }
+ rs->has_bandwidth = 1;
+ } else if (!strcmpstart(tok->args[i], "Measured=") && vote_rs) {
+ int ok;
+ vote_rs->measured_bw_kb =
+ (uint32_t)tor_parse_ulong(strchr(tok->args[i], '=')+1,
+ 10, 0, UINT32_MAX, &ok, NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Invalid Measured Bandwidth %s",
+ escaped(tok->args[i]));
+ goto err;
+ }
+ vote_rs->has_measured_bw = 1;
+ vote->has_measured_bws = 1;
+ } else if (!strcmpstart(tok->args[i], "Unmeasured=1")) {
+ rs->bw_is_unmeasured = 1;
+ } else if (!strcmpstart(tok->args[i], "GuardFraction=")) {
+ if (routerstatus_parse_guardfraction(tok->args[i],
+ vote, vote_rs, rs) < 0) {
+ goto err;
+ }
+ }
+ }
+ }
+
+ /* parse exit policy summaries */
+ if ((tok = find_opt_by_keyword(tokens, K_P))) {
+ tor_assert(tok->n_args == 1);
+ if (strcmpstart(tok->args[0], "accept ") &&
+ strcmpstart(tok->args[0], "reject ")) {
+ log_warn(LD_DIR, "Unknown exit policy summary type %s.",
+ escaped(tok->args[0]));
+ goto err;
+ }
+    /* XXX weasel: parse this into ports and represent them in some smarter
+     * way, maybe not here but somewhere else once we need it for the client.
+     * We should still parse it here to check that it's valid, though.
+ */
+ rs->exitsummary = tor_strdup(tok->args[0]);
+ rs->has_exitsummary = 1;
+ }
+
+ if (vote_rs) {
+ SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, t) {
+ if (t->tp == K_M && t->n_args) {
+ vote_microdesc_hash_t *line =
+ tor_malloc(sizeof(vote_microdesc_hash_t));
+ line->next = vote_rs->microdesc;
+ line->microdesc_hash_line = tor_strdup(t->args[0]);
+ vote_rs->microdesc = line;
+ }
+ if (t->tp == K_ID) {
+ tor_assert(t->n_args >= 2);
+ if (!strcmp(t->args[0], "ed25519")) {
+ vote_rs->has_ed25519_listing = 1;
+ if (strcmp(t->args[1], "none") &&
+ digest256_from_base64((char*)vote_rs->ed25519_id,
+ t->args[1])<0) {
+ log_warn(LD_DIR, "Bogus ed25519 key in networkstatus vote");
+ goto err;
+ }
+ }
+ }
+ if (t->tp == K_PROTO) {
+ tor_assert(t->n_args == 1);
+ vote_rs->protocols = tor_strdup(t->args[0]);
+ }
+ } SMARTLIST_FOREACH_END(t);
+ } else if (flav == FLAV_MICRODESC) {
+ tok = find_opt_by_keyword(tokens, K_M);
+ if (tok) {
+ tor_assert(tok->n_args);
+ if (digest256_from_base64(rs->descriptor_digest, tok->args[0])) {
+ log_warn(LD_DIR, "Error decoding microdescriptor digest %s",
+ escaped(tok->args[0]));
+ goto err;
+ }
+ } else {
+ log_info(LD_BUG, "Found an entry in networkstatus with no "
+ "microdescriptor digest. (Router %s ($%s) at %s:%d.)",
+ rs->nickname, hex_str(rs->identity_digest, DIGEST_LEN),
+ fmt_addr32(rs->addr), rs->or_port);
+ }
+ }
+
+ if (!strcasecmp(rs->nickname, UNNAMED_ROUTER_NICKNAME))
+ rs->is_named = 0;
+
+ goto done;
+ err:
+ dump_desc(s_dup, "routerstatus entry");
+ if (rs && !vote_rs)
+ routerstatus_free(rs);
+ rs = NULL;
+ done:
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_clear(tokens);
+ if (area) {
+ DUMP_AREA(area, "routerstatus entry");
+ memarea_clear(area);
+ }
+ *s = eos;
+
+ return rs;
+}
+
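+/** Compare the vote_routerstatus_t entries pointed to by <b>_a</b> and
+ * <b>_b</b> by their RSA identity digests; usable as a smartlist sort
+ * comparator for lists of vote_routerstatus_t. */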
+int
+compare_vote_routerstatus_entries(const void **_a, const void **_b)
+{
+ const vote_routerstatus_t *a = *_a, *b = *_b;
+ return fast_memcmp(a->status.identity_digest, b->status.identity_digest,
+ DIGEST_LEN);
+}
+
+/** Verify the bandwidth weights of a network status document. Return 1 if
+ * they are valid, 0 otherwise. */
+int
+networkstatus_verify_bw_weights(networkstatus_t *ns, int consensus_method)
+{
+ int64_t G=0, M=0, E=0, D=0, T=0;
+ double Wgg, Wgm, Wgd, Wmg, Wmm, Wme, Wmd, Weg, Wem, Wee, Wed;
+ double Gtotal=0, Mtotal=0, Etotal=0;
+ const char *casename = NULL;
+ int valid = 1;
+ (void) consensus_method;
+
+ const int64_t weight_scale = networkstatus_get_weight_scale_param(ns);
+ tor_assert(weight_scale >= 1);
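+  /* Fetch all eleven bandwidth weights from the consensus; a missing weight
+   * is reported as -1. */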
+ Wgg = networkstatus_get_bw_weight(ns, "Wgg", -1);
+ Wgm = networkstatus_get_bw_weight(ns, "Wgm", -1);
+ Wgd = networkstatus_get_bw_weight(ns, "Wgd", -1);
+ Wmg = networkstatus_get_bw_weight(ns, "Wmg", -1);
+ Wmm = networkstatus_get_bw_weight(ns, "Wmm", -1);
+ Wme = networkstatus_get_bw_weight(ns, "Wme", -1);
+ Wmd = networkstatus_get_bw_weight(ns, "Wmd", -1);
+ Weg = networkstatus_get_bw_weight(ns, "Weg", -1);
+ Wem = networkstatus_get_bw_weight(ns, "Wem", -1);
+ Wee = networkstatus_get_bw_weight(ns, "Wee", -1);
+ Wed = networkstatus_get_bw_weight(ns, "Wed", -1);
+
+ if (Wgg<0 || Wgm<0 || Wgd<0 || Wmg<0 || Wmm<0 || Wme<0 || Wmd<0 || Weg<0
+ || Wem<0 || Wee<0 || Wed<0) {
+ log_warn(LD_BUG, "No bandwidth weights produced in consensus!");
+ return 0;
+ }
+
+ // First, sanity check basic summing properties that hold for all cases
+ // We use > 1 as the check for these because they are computed as integers.
+ // Sometimes there are rounding errors.
+ if (fabs(Wmm - weight_scale) > 1) {
+ log_warn(LD_BUG, "Wmm=%f != %"PRId64,
+ Wmm, (weight_scale));
+ valid = 0;
+ }
+
+ if (fabs(Wem - Wee) > 1) {
+ log_warn(LD_BUG, "Wem=%f != Wee=%f", Wem, Wee);
+ valid = 0;
+ }
+
+ if (fabs(Wgm - Wgg) > 1) {
+ log_warn(LD_BUG, "Wgm=%f != Wgg=%f", Wgm, Wgg);
+ valid = 0;
+ }
+
+ if (fabs(Weg - Wed) > 1) {
+ log_warn(LD_BUG, "Wed=%f != Weg=%f", Wed, Weg);
+ valid = 0;
+ }
+
+ if (fabs(Wgg + Wmg - weight_scale) > 0.001*weight_scale) {
+ log_warn(LD_BUG, "Wgg=%f != %"PRId64" - Wmg=%f", Wgg,
+ (weight_scale), Wmg);
+ valid = 0;
+ }
+
+ if (fabs(Wee + Wme - weight_scale) > 0.001*weight_scale) {
+ log_warn(LD_BUG, "Wee=%f != %"PRId64" - Wme=%f", Wee,
+ (weight_scale), Wme);
+ valid = 0;
+ }
+
+ if (fabs(Wgd + Wmd + Wed - weight_scale) > 0.001*weight_scale) {
+ log_warn(LD_BUG, "Wgd=%f + Wmd=%f + Wed=%f != %"PRId64,
+ Wgd, Wmd, Wed, (weight_scale));
+ valid = 0;
+ }
+
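+  /* Convert the integer weights into fractions of weight_scale for the
+   * per-case checks below. */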
+ Wgg /= weight_scale;
+ Wgm /= weight_scale; (void) Wgm; // unused from here on.
+ Wgd /= weight_scale;
+
+ Wmg /= weight_scale;
+ Wmm /= weight_scale;
+ Wme /= weight_scale;
+ Wmd /= weight_scale;
+
+ Weg /= weight_scale; (void) Weg; // unused from here on.
+ Wem /= weight_scale; (void) Wem; // unused from here on.
+ Wee /= weight_scale;
+ Wed /= weight_scale;
+
+ // Then, gather G, M, E, D, T to determine case
+ SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, routerstatus_t *, rs) {
+ int is_exit = 0;
+ /* Bug #2203: Don't count bad exits as exits for balancing */
+ is_exit = rs->is_exit && !rs->is_bad_exit;
+ if (rs->has_bandwidth) {
+ T += rs->bandwidth_kb;
+ if (is_exit && rs->is_possible_guard) {
+ D += rs->bandwidth_kb;
+ Gtotal += Wgd*rs->bandwidth_kb;
+ Mtotal += Wmd*rs->bandwidth_kb;
+ Etotal += Wed*rs->bandwidth_kb;
+ } else if (is_exit) {
+ E += rs->bandwidth_kb;
+ Mtotal += Wme*rs->bandwidth_kb;
+ Etotal += Wee*rs->bandwidth_kb;
+ } else if (rs->is_possible_guard) {
+ G += rs->bandwidth_kb;
+ Gtotal += Wgg*rs->bandwidth_kb;
+ Mtotal += Wmg*rs->bandwidth_kb;
+ } else {
+ M += rs->bandwidth_kb;
+ Mtotal += Wmm*rs->bandwidth_kb;
+ }
+ } else {
+ log_warn(LD_BUG, "Missing consensus bandwidth for router %s",
+ routerstatus_describe(rs));
+ }
+ } SMARTLIST_FOREACH_END(rs);
+
+ // Finally, check equality conditions depending upon case 1, 2 or 3
+ // Full equality cases: 1, 3b
+ // Partial equality cases: 2b (E=G), 3a (M=E)
+ // Fully unknown: 2a
+ if (3*E >= T && 3*G >= T) {
+ // Case 1: Neither are scarce
+ casename = "Case 1";
+ if (fabs(Etotal-Mtotal) > 0.01*MAX(Etotal,Mtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Etotal %f != Mtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Etotal, Mtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Etotal, Gtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ if (fabs(Gtotal-Mtotal) > 0.01*MAX(Gtotal,Mtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Mtotal %f != Gtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Mtotal, Gtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ } else if (3*E < T && 3*G < T) {
+ int64_t R = MIN(E, G);
+ int64_t S = MAX(E, G);
+ /*
+ * Case 2: Both Guards and Exits are scarce
+ * Balance D between E and G, depending upon
+ * D capacity and scarcity. Devote no extra
+ * bandwidth to middle nodes.
+ */
+ if (R+D < S) { // Subcase a
+ double Rtotal, Stotal;
+ if (E < G) {
+ Rtotal = Etotal;
+ Stotal = Gtotal;
+ } else {
+ Rtotal = Gtotal;
+ Stotal = Etotal;
+ }
+ casename = "Case 2a";
+ // Rtotal < Stotal
+ if (Rtotal > Stotal) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Rtotal %f > Stotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Rtotal, Stotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ // Rtotal < T/3
+ if (3*Rtotal > T) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: 3*Rtotal %f > T "
+ "%"PRId64". G=%"PRId64" M=%"PRId64" E=%"PRId64
+ " D=%"PRId64" T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Rtotal*3, (T),
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ // Stotal < T/3
+ if (3*Stotal > T) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: 3*Stotal %f > T "
+ "%"PRId64". G=%"PRId64" M=%"PRId64" E=%"PRId64
+ " D=%"PRId64" T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Stotal*3, (T),
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ // Mtotal > T/3
+ if (3*Mtotal < T) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: 3*Mtotal %f < T "
+ "%"PRId64". "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Mtotal*3, (T),
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+    } else { // Subcase b: R+D >= S
+ casename = "Case 2b";
+
+ /* Check the rare-M redirect case. */
+ if (D != 0 && 3*M < T) {
+ casename = "Case 2b (balanced)";
+ if (fabs(Etotal-Mtotal) > 0.01*MAX(Etotal,Mtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Etotal %f != Mtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Etotal, Mtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Etotal, Gtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ if (fabs(Gtotal-Mtotal) > 0.01*MAX(Gtotal,Mtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Mtotal %f != Gtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Mtotal, Gtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ } else {
+ if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Etotal, Gtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ }
+ }
+ } else { // if (E < T/3 || G < T/3) {
+ int64_t S = MIN(E, G);
+ int64_t NS = MAX(E, G);
+ if (3*(S+D) < T) { // Subcase a:
+ double Stotal;
+ double NStotal;
+ if (G < E) {
+ casename = "Case 3a (G scarce)";
+ Stotal = Gtotal;
+ NStotal = Etotal;
+ } else { // if (G >= E) {
+ casename = "Case 3a (E scarce)";
+ NStotal = Gtotal;
+ Stotal = Etotal;
+ }
+ // Stotal < T/3
+ if (3*Stotal > T) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: 3*Stotal %f > T "
+ "%"PRId64". G=%"PRId64" M=%"PRId64" E=%"PRId64
+ " D=%"PRId64" T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Stotal*3, (T),
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ if (NS >= M) {
+ if (fabs(NStotal-Mtotal) > 0.01*MAX(NStotal,Mtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: NStotal %f != Mtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, NStotal, Mtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ } else {
+ // if NS < M, NStotal > T/3 because only one of G or E is scarce
+ if (3*NStotal < T) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: 3*NStotal %f < T "
+ "%"PRId64". G=%"PRId64" M=%"PRId64
+ " E=%"PRId64" D=%"PRId64" T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, NStotal*3, (T),
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ }
+ } else { // Subcase b: S+D >= T/3
+ casename = "Case 3b";
+ if (fabs(Etotal-Mtotal) > 0.01*MAX(Etotal,Mtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Etotal %f != Mtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Etotal, Mtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Etotal, Gtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ if (fabs(Gtotal-Mtotal) > 0.01*MAX(Gtotal,Mtotal)) {
+ log_warn(LD_DIR,
+ "Bw Weight Failure for %s: Mtotal %f != Gtotal %f. "
+ "G=%"PRId64" M=%"PRId64" E=%"PRId64" D=%"PRId64
+ " T=%"PRId64". "
+ "Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
+ casename, Mtotal, Gtotal,
+ (G), (M), (E),
+ (D), (T),
+ Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
+ valid = 0;
+ }
+ }
+ }
+
+ if (valid)
+ log_notice(LD_DIR, "Bandwidth-weight %s is verified and valid.",
+ casename);
+
+ return valid;
+}
+
+/** Check whether a shared random value of type <b>srv_type</b> is present in
+ * <b>tokens</b>. If there is one, parse it and store it in <b>srv_out</b>.
+ * Return -1 on failure, 0 on success. The resulting srv is allocated on the
+ * heap; it is the caller's responsibility to free it. */
+static int
+extract_one_srv(smartlist_t *tokens, directory_keyword srv_type,
+ sr_srv_t **srv_out)
+{
+ int ret = -1;
+ directory_token_t *tok;
+ sr_srv_t *srv = NULL;
+ smartlist_t *chunks;
+
+ tor_assert(tokens);
+
+ chunks = smartlist_new();
+ tok = find_opt_by_keyword(tokens, srv_type);
+ if (!tok) {
+ /* That's fine, no SRV is allowed. */
+ ret = 0;
+ goto end;
+ }
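+  /* Hand the line's arguments to sr_parse_srv() as a list of strings. */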
+ for (int i = 0; i < tok->n_args; i++) {
+ smartlist_add(chunks, tok->args[i]);
+ }
+ srv = sr_parse_srv(chunks);
+ if (srv == NULL) {
+ log_warn(LD_DIR, "SR: Unparseable SRV %s", escaped(tok->object_body));
+ goto end;
+ }
+ /* All is good. */
+ *srv_out = srv;
+ ret = 0;
+ end:
+ smartlist_free(chunks);
+ return ret;
+}
+
+/** Extract any shared random values found in <b>tokens</b> and place them in
+ * the networkstatus <b>ns</b>. */
+static void
+extract_shared_random_srvs(networkstatus_t *ns, smartlist_t *tokens)
+{
+ const char *voter_identity;
+ networkstatus_voter_info_t *voter;
+
+ tor_assert(ns);
+ tor_assert(tokens);
+  /* This must be either a vote or a consensus; any other type is a bug in
+   * the calling code. */
+ tor_assert(ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_CONSENSUS);
+
+ if (ns->type == NS_TYPE_VOTE) {
+ voter = smartlist_get(ns->voters, 0);
+ tor_assert(voter);
+ voter_identity = hex_str(voter->identity_digest,
+ sizeof(voter->identity_digest));
+ } else {
+ /* Consensus has multiple voters so no specific voter. */
+ voter_identity = "consensus";
+ }
+
+  /* Try to extract both SRVs. A parse failure means the document is
+   * malformed with respect to that shared random value; we log it, but still
+   * try to extract the other SRV. */
+ if (extract_one_srv(tokens, K_PREVIOUS_SRV, &ns->sr_info.previous_srv) < 0) {
+ log_warn(LD_DIR, "SR: Unable to parse previous SRV from %s",
+ voter_identity);
+ /* Maybe we have a chance with the current SRV so let's try it anyway. */
+ }
+ if (extract_one_srv(tokens, K_CURRENT_SRV, &ns->sr_info.current_srv) < 0) {
+ log_warn(LD_DIR, "SR: Unable to parse current SRV from %s",
+ voter_identity);
+ }
+}
+
+/** Parse a v3 networkstatus vote, opinion, or consensus (depending on
+ * ns_type), from <b>s</b>, and return the result. Return NULL on failure. */
+networkstatus_t *
+networkstatus_parse_vote_from_string(const char *s, const char **eos_out,
+ networkstatus_type_t ns_type)
+{
+ smartlist_t *tokens = smartlist_new();
+ smartlist_t *rs_tokens = NULL, *footer_tokens = NULL;
+ networkstatus_voter_info_t *voter = NULL;
+ networkstatus_t *ns = NULL;
+ common_digests_t ns_digests;
+ uint8_t sha3_as_signed[DIGEST256_LEN];
+ const char *cert, *end_of_header, *end_of_footer, *s_dup = s;
+ directory_token_t *tok;
+ struct in_addr in;
+ int i, inorder, n_signatures = 0;
+ memarea_t *area = NULL, *rs_area = NULL;
+ consensus_flavor_t flav = FLAV_NS;
+ char *last_kwd=NULL;
+
+ tor_assert(s);
+
+ if (eos_out)
+ *eos_out = NULL;
+
+ if (router_get_networkstatus_v3_hashes(s, &ns_digests) ||
+ router_get_networkstatus_v3_sha3_as_signed(sha3_as_signed, s)<0) {
+ log_warn(LD_DIR, "Unable to compute digest of network-status");
+ goto err;
+ }
+
+ area = memarea_new();
+ end_of_header = find_start_of_next_routerstatus(s);
+ if (tokenize_string(area, s, end_of_header, tokens,
+ (ns_type == NS_TYPE_CONSENSUS) ?
+ networkstatus_consensus_token_table :
+ networkstatus_token_table, 0)) {
+ log_warn(LD_DIR, "Error tokenizing network-status header");
+ goto err;
+ }
+
+ ns = tor_malloc_zero(sizeof(networkstatus_t));
+ memcpy(&ns->digests, &ns_digests, sizeof(ns_digests));
+ memcpy(&ns->digest_sha3_as_signed, sha3_as_signed, sizeof(sha3_as_signed));
+
+ tok = find_by_keyword(tokens, K_NETWORK_STATUS_VERSION);
+ tor_assert(tok);
+ if (tok->n_args > 1) {
+ int flavor = networkstatus_parse_flavor_name(tok->args[1]);
+ if (flavor < 0) {
+ log_warn(LD_DIR, "Can't parse document with unknown flavor %s",
+ escaped(tok->args[1]));
+ goto err;
+ }
+ ns->flavor = flav = flavor;
+ }
+ if (flav != FLAV_NS && ns_type != NS_TYPE_CONSENSUS) {
+ log_warn(LD_DIR, "Flavor found on non-consensus networkstatus.");
+ goto err;
+ }
+
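+  /* A vote or opinion embeds the authority's key certificate; locate and
+   * parse it. It must lie entirely within the header. */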
+ if (ns_type != NS_TYPE_CONSENSUS) {
+ const char *end_of_cert = NULL;
+ if (!(cert = strstr(s, "\ndir-key-certificate-version")))
+ goto err;
+ ++cert;
+ ns->cert = authority_cert_parse_from_string(cert, &end_of_cert);
+ if (!ns->cert || !end_of_cert || end_of_cert > end_of_header)
+ goto err;
+ }
+
+ tok = find_by_keyword(tokens, K_VOTE_STATUS);
+ tor_assert(tok->n_args);
+ if (!strcmp(tok->args[0], "vote")) {
+ ns->type = NS_TYPE_VOTE;
+ } else if (!strcmp(tok->args[0], "consensus")) {
+ ns->type = NS_TYPE_CONSENSUS;
+ } else if (!strcmp(tok->args[0], "opinion")) {
+ ns->type = NS_TYPE_OPINION;
+ } else {
+ log_warn(LD_DIR, "Unrecognized vote status %s in network-status",
+ escaped(tok->args[0]));
+ goto err;
+ }
+ if (ns_type != ns->type) {
+ log_warn(LD_DIR, "Got the wrong kind of v3 networkstatus.");
+ goto err;
+ }
+
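+  /* A vote or opinion records its publication time and the consensus methods
+   * it supports; a consensus records the single method used to build it. */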
+ if (ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_OPINION) {
+ tok = find_by_keyword(tokens, K_PUBLISHED);
+ if (parse_iso_time(tok->args[0], &ns->published))
+ goto err;
+
+ ns->supported_methods = smartlist_new();
+ tok = find_opt_by_keyword(tokens, K_CONSENSUS_METHODS);
+ if (tok) {
+ for (i=0; i < tok->n_args; ++i)
+ smartlist_add_strdup(ns->supported_methods, tok->args[i]);
+ } else {
+ smartlist_add_strdup(ns->supported_methods, "1");
+ }
+ } else {
+ tok = find_opt_by_keyword(tokens, K_CONSENSUS_METHOD);
+ if (tok) {
+ int num_ok;
+ ns->consensus_method = (int)tor_parse_long(tok->args[0], 10, 1, INT_MAX,
+ &num_ok, NULL);
+ if (!num_ok)
+ goto err;
+ } else {
+ ns->consensus_method = 1;
+ }
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_RECOMMENDED_CLIENT_PROTOCOLS)))
+ ns->recommended_client_protocols = tor_strdup(tok->args[0]);
+ if ((tok = find_opt_by_keyword(tokens, K_RECOMMENDED_RELAY_PROTOCOLS)))
+ ns->recommended_relay_protocols = tor_strdup(tok->args[0]);
+ if ((tok = find_opt_by_keyword(tokens, K_REQUIRED_CLIENT_PROTOCOLS)))
+ ns->required_client_protocols = tor_strdup(tok->args[0]);
+ if ((tok = find_opt_by_keyword(tokens, K_REQUIRED_RELAY_PROTOCOLS)))
+ ns->required_relay_protocols = tor_strdup(tok->args[0]);
+
+ tok = find_by_keyword(tokens, K_VALID_AFTER);
+ if (parse_iso_time(tok->args[0], &ns->valid_after))
+ goto err;
+
+ tok = find_by_keyword(tokens, K_FRESH_UNTIL);
+ if (parse_iso_time(tok->args[0], &ns->fresh_until))
+ goto err;
+
+ tok = find_by_keyword(tokens, K_VALID_UNTIL);
+ if (parse_iso_time(tok->args[0], &ns->valid_until))
+ goto err;
+
+ tok = find_by_keyword(tokens, K_VOTING_DELAY);
+ tor_assert(tok->n_args >= 2);
+ {
+ int ok;
+ ns->vote_seconds =
+ (int) tor_parse_long(tok->args[0], 10, 0, INT_MAX, &ok, NULL);
+ if (!ok)
+ goto err;
+ ns->dist_seconds =
+ (int) tor_parse_long(tok->args[1], 10, 0, INT_MAX, &ok, NULL);
+ if (!ok)
+ goto err;
+ }
+ if (ns->valid_after +
+ (get_options()->TestingTorNetwork ?
+ MIN_VOTE_INTERVAL_TESTING : MIN_VOTE_INTERVAL) > ns->fresh_until) {
+ log_warn(LD_DIR, "Vote/consensus freshness interval is too short");
+ goto err;
+ }
+ if (ns->valid_after +
+ (get_options()->TestingTorNetwork ?
+ MIN_VOTE_INTERVAL_TESTING : MIN_VOTE_INTERVAL)*2 > ns->valid_until) {
+ log_warn(LD_DIR, "Vote/consensus liveness interval is too short");
+ goto err;
+ }
+ if (ns->vote_seconds < MIN_VOTE_SECONDS) {
+ log_warn(LD_DIR, "Vote seconds is too short");
+ goto err;
+ }
+ if (ns->dist_seconds < MIN_DIST_SECONDS) {
+ log_warn(LD_DIR, "Dist seconds is too short");
+ goto err;
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_CLIENT_VERSIONS))) {
+ ns->client_versions = tor_strdup(tok->args[0]);
+ }
+ if ((tok = find_opt_by_keyword(tokens, K_SERVER_VERSIONS))) {
+ ns->server_versions = tor_strdup(tok->args[0]);
+ }
+
+ {
+ smartlist_t *package_lst = find_all_by_keyword(tokens, K_PACKAGE);
+ ns->package_lines = smartlist_new();
+ if (package_lst) {
+ SMARTLIST_FOREACH(package_lst, directory_token_t *, t,
+ smartlist_add_strdup(ns->package_lines, t->args[0]));
+ }
+ smartlist_free(package_lst);
+ }
+
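+  /* Parse the known-flags line. Flags must be listed in ascending order, and
+   * votes and opinions may not list more than MAX_KNOWN_FLAGS_IN_VOTE. */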
+ tok = find_by_keyword(tokens, K_KNOWN_FLAGS);
+ ns->known_flags = smartlist_new();
+ inorder = 1;
+ for (i = 0; i < tok->n_args; ++i) {
+ smartlist_add_strdup(ns->known_flags, tok->args[i]);
+ if (i>0 && strcmp(tok->args[i-1], tok->args[i])>= 0) {
+ log_warn(LD_DIR, "%s >= %s", tok->args[i-1], tok->args[i]);
+ inorder = 0;
+ }
+ }
+ if (!inorder) {
+ log_warn(LD_DIR, "known-flags not in order");
+ goto err;
+ }
+ if (ns->type != NS_TYPE_CONSENSUS &&
+ smartlist_len(ns->known_flags) > MAX_KNOWN_FLAGS_IN_VOTE) {
+ /* If we allowed more than 64 flags in votes, then parsing them would make
+ * us invoke undefined behavior whenever we used 1<<flagnum to do a
+ * bit-shift. This is only for votes and opinions: consensus users don't
+ * care about flags they don't recognize, and so don't build a bitfield
+ * for them. */
+ log_warn(LD_DIR, "Too many known-flags in consensus vote or opinion");
+ goto err;
+ }
+
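+  /* Parse the params line, if present. Each element must have the form
+   * keyword=int32; keywords must be sorted and must not repeat. */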
+ tok = find_opt_by_keyword(tokens, K_PARAMS);
+ if (tok) {
+ int any_dups = 0;
+ inorder = 1;
+ ns->net_params = smartlist_new();
+ for (i = 0; i < tok->n_args; ++i) {
+ int ok=0;
+ char *eq = strchr(tok->args[i], '=');
+ size_t eq_pos;
+ if (!eq) {
+ log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i]));
+ goto err;
+ }
+ eq_pos = eq-tok->args[i];
+ tor_parse_long(eq+1, 10, INT32_MIN, INT32_MAX, &ok, NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i]));
+ goto err;
+ }
+ if (i > 0 && strcmp(tok->args[i-1], tok->args[i]) >= 0) {
+ log_warn(LD_DIR, "%s >= %s", tok->args[i-1], tok->args[i]);
+ inorder = 0;
+ }
+ if (last_kwd && eq_pos == strlen(last_kwd) &&
+ fast_memeq(last_kwd, tok->args[i], eq_pos)) {
+ log_warn(LD_DIR, "Duplicate value for %s parameter",
+ escaped(tok->args[i]));
+ any_dups = 1;
+ }
+ tor_free(last_kwd);
+ last_kwd = tor_strndup(tok->args[i], eq_pos);
+ smartlist_add_strdup(ns->net_params, tok->args[i]);
+ }
+ if (!inorder) {
+ log_warn(LD_DIR, "params not in order");
+ goto err;
+ }
+ if (any_dups) {
+ log_warn(LD_DIR, "Duplicate in parameters");
+ goto err;
+ }
+ }
+
+ ns->voters = smartlist_new();
+
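+  /* Walk the header tokens, creating one voter entry per dir-source line and
+   * attaching any contact and vote-digest lines to the current voter. */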
+ SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
+ tok = _tok;
+ if (tok->tp == K_DIR_SOURCE) {
+ tor_assert(tok->n_args >= 6);
+
+ if (voter)
+ smartlist_add(ns->voters, voter);
+ voter = tor_malloc_zero(sizeof(networkstatus_voter_info_t));
+ voter->sigs = smartlist_new();
+ if (ns->type != NS_TYPE_CONSENSUS)
+ memcpy(voter->vote_digest, ns_digests.d[DIGEST_SHA1], DIGEST_LEN);
+
+ voter->nickname = tor_strdup(tok->args[0]);
+ if (strlen(tok->args[1]) != HEX_DIGEST_LEN ||
+ base16_decode(voter->identity_digest, sizeof(voter->identity_digest),
+ tok->args[1], HEX_DIGEST_LEN)
+ != sizeof(voter->identity_digest)) {
+ log_warn(LD_DIR, "Error decoding identity digest %s in "
+ "network-status document.", escaped(tok->args[1]));
+ goto err;
+ }
+ if (ns->type != NS_TYPE_CONSENSUS &&
+ tor_memneq(ns->cert->cache_info.identity_digest,
+ voter->identity_digest, DIGEST_LEN)) {
+ log_warn(LD_DIR,"Mismatch between identities in certificate and vote");
+ goto err;
+ }
+ if (ns->type != NS_TYPE_CONSENSUS) {
+ if (authority_cert_is_blacklisted(ns->cert)) {
+ log_warn(LD_DIR, "Rejecting vote signature made with blacklisted "
+ "signing key %s",
+ hex_str(ns->cert->signing_key_digest, DIGEST_LEN));
+ goto err;
+ }
+ }
+ voter->address = tor_strdup(tok->args[2]);
+ if (!tor_inet_aton(tok->args[3], &in)) {
+ log_warn(LD_DIR, "Error decoding IP address %s in network-status.",
+ escaped(tok->args[3]));
+ goto err;
+ }
+ voter->addr = ntohl(in.s_addr);
+ int ok;
+ voter->dir_port = (uint16_t)
+ tor_parse_long(tok->args[4], 10, 0, 65535, &ok, NULL);
+ if (!ok)
+ goto err;
+ voter->or_port = (uint16_t)
+ tor_parse_long(tok->args[5], 10, 0, 65535, &ok, NULL);
+ if (!ok)
+ goto err;
+ } else if (tok->tp == K_CONTACT) {
+ if (!voter || voter->contact) {
+ log_warn(LD_DIR, "contact element is out of place.");
+ goto err;
+ }
+ voter->contact = tor_strdup(tok->args[0]);
+ } else if (tok->tp == K_VOTE_DIGEST) {
+ tor_assert(ns->type == NS_TYPE_CONSENSUS);
+ tor_assert(tok->n_args >= 1);
+ if (!voter || ! tor_digest_is_zero(voter->vote_digest)) {
+ log_warn(LD_DIR, "vote-digest element is out of place.");
+ goto err;
+ }
+ if (strlen(tok->args[0]) != HEX_DIGEST_LEN ||
+ base16_decode(voter->vote_digest, sizeof(voter->vote_digest),
+ tok->args[0], HEX_DIGEST_LEN)
+ != sizeof(voter->vote_digest)) {
+ log_warn(LD_DIR, "Error decoding vote digest %s in "
+ "network-status consensus.", escaped(tok->args[0]));
+ goto err;
+ }
+ }
+ } SMARTLIST_FOREACH_END(_tok);
+ if (voter) {
+ smartlist_add(ns->voters, voter);
+ voter = NULL;
+ }
+ if (smartlist_len(ns->voters) == 0) {
+ log_warn(LD_DIR, "Missing dir-source elements in a networkstatus.");
+ goto err;
+ } else if (ns->type != NS_TYPE_CONSENSUS && smartlist_len(ns->voters) != 1) {
+ log_warn(LD_DIR, "Too many dir-source elements in a vote networkstatus.");
+ goto err;
+ }
+
+ if (ns->type != NS_TYPE_CONSENSUS &&
+ (tok = find_opt_by_keyword(tokens, K_LEGACY_DIR_KEY))) {
+ int bad = 1;
+ if (strlen(tok->args[0]) == HEX_DIGEST_LEN) {
+ networkstatus_voter_info_t *voter_0 = smartlist_get(ns->voters, 0);
+ if (base16_decode(voter_0->legacy_id_digest, DIGEST_LEN,
+ tok->args[0], HEX_DIGEST_LEN) != DIGEST_LEN)
+ bad = 1;
+ else
+ bad = 0;
+ }
+ if (bad) {
+ log_warn(LD_DIR, "Invalid legacy key digest %s on vote.",
+ escaped(tok->args[0]));
+ }
+ }
+
+ /* If this is a vote document, check if information about the shared
+ randomness protocol is included, and extract it. */
+ if (ns->type == NS_TYPE_VOTE) {
+ dirvote_parse_sr_commits(ns, tokens);
+ }
+ /* For both a vote and consensus, extract the shared random values. */
+ if (ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_CONSENSUS) {
+ extract_shared_random_srvs(ns, tokens);
+ }
+
+ /* Parse routerstatus lines. */
+ rs_tokens = smartlist_new();
+ rs_area = memarea_new();
+ s = end_of_header;
+ ns->routerstatus_list = smartlist_new();
+
+ while (!strcmpstart(s, "r ")) {
+ if (ns->type != NS_TYPE_CONSENSUS) {
+ vote_routerstatus_t *rs = tor_malloc_zero(sizeof(vote_routerstatus_t));
+ if (routerstatus_parse_entry_from_string(rs_area, &s, rs_tokens, ns,
+ rs, 0, 0)) {
+ smartlist_add(ns->routerstatus_list, rs);
+ } else {
+ vote_routerstatus_free(rs);
+ }
+ } else {
+ routerstatus_t *rs;
+ if ((rs = routerstatus_parse_entry_from_string(rs_area, &s, rs_tokens,
+ NULL, NULL,
+ ns->consensus_method,
+ flav))) {
+ /* Use exponential-backoff scheduling when downloading microdescs */
+ smartlist_add(ns->routerstatus_list, rs);
+ }
+ }
+ }
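+  /* Routerstatus entries must appear in strictly ascending order of identity
+   * digest. */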
+ for (i = 1; i < smartlist_len(ns->routerstatus_list); ++i) {
+ routerstatus_t *rs1, *rs2;
+ if (ns->type != NS_TYPE_CONSENSUS) {
+ vote_routerstatus_t *a = smartlist_get(ns->routerstatus_list, i-1);
+ vote_routerstatus_t *b = smartlist_get(ns->routerstatus_list, i);
+ rs1 = &a->status; rs2 = &b->status;
+ } else {
+ rs1 = smartlist_get(ns->routerstatus_list, i-1);
+ rs2 = smartlist_get(ns->routerstatus_list, i);
+ }
+ if (fast_memcmp(rs1->identity_digest, rs2->identity_digest, DIGEST_LEN)
+ >= 0) {
+ log_warn(LD_DIR, "Networkstatus entries not sorted by identity digest");
+ goto err;
+ }
+ }
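+  /* In a vote or opinion, every listed ed25519 identity must be unique; use
+   * a temporary map to detect duplicates. */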
+ if (ns_type != NS_TYPE_CONSENSUS) {
+ digest256map_t *ed_id_map = digest256map_new();
+ SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, vote_routerstatus_t *,
+ vrs) {
+ if (! vrs->has_ed25519_listing ||
+ tor_mem_is_zero((const char *)vrs->ed25519_id, DIGEST256_LEN))
+ continue;
+ if (digest256map_get(ed_id_map, vrs->ed25519_id) != NULL) {
+ log_warn(LD_DIR, "Vote networkstatus ed25519 identities were not "
+ "unique");
+ digest256map_free(ed_id_map, NULL);
+ goto err;
+ }
+ digest256map_set(ed_id_map, vrs->ed25519_id, (void*)1);
+ } SMARTLIST_FOREACH_END(vrs);
+ digest256map_free(ed_id_map, NULL);
+ }
+
+ /* Parse footer; check signature. */
+ footer_tokens = smartlist_new();
+ if ((end_of_footer = strstr(s, "\nnetwork-status-version ")))
+ ++end_of_footer;
+ else
+ end_of_footer = s + strlen(s);
+ if (tokenize_string(area,s, end_of_footer, footer_tokens,
+ networkstatus_vote_footer_token_table, 0)) {
+ log_warn(LD_DIR, "Error tokenizing network-status vote footer.");
+ goto err;
+ }
+
+ {
+ int found_sig = 0;
+ SMARTLIST_FOREACH_BEGIN(footer_tokens, directory_token_t *, _tok) {
+ tok = _tok;
+ if (tok->tp == K_DIRECTORY_SIGNATURE)
+ found_sig = 1;
+ else if (found_sig) {
+ log_warn(LD_DIR, "Extraneous token after first directory-signature");
+ goto err;
+ }
+ } SMARTLIST_FOREACH_END(_tok);
+ }
+
+ if ((tok = find_opt_by_keyword(footer_tokens, K_DIRECTORY_FOOTER))) {
+ if (tok != smartlist_get(footer_tokens, 0)) {
+ log_warn(LD_DIR, "Misplaced directory-footer token");
+ goto err;
+ }
+ }
+
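+  /* Parse the bandwidth-weights line from the footer, if present; each
+   * element must have the form keyword=int32. */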
+ tok = find_opt_by_keyword(footer_tokens, K_BW_WEIGHTS);
+ if (tok) {
+ ns->weight_params = smartlist_new();
+ for (i = 0; i < tok->n_args; ++i) {
+ int ok=0;
+ char *eq = strchr(tok->args[i], '=');
+ if (!eq) {
+ log_warn(LD_DIR, "Bad element '%s' in weight params",
+ escaped(tok->args[i]));
+ goto err;
+ }
+ tor_parse_long(eq+1, 10, INT32_MIN, INT32_MAX, &ok, NULL);
+ if (!ok) {
+ log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i]));
+ goto err;
+ }
+ smartlist_add_strdup(ns->weight_params, tok->args[i]);
+ }
+ }
+
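+  /* Check and record each directory-signature in the footer. A vote's
+   * signature is verified right away against its certificate's signing key;
+   * a consensus signature is stored raw for later verification. */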
+ SMARTLIST_FOREACH_BEGIN(footer_tokens, directory_token_t *, _tok) {
+ char declared_identity[DIGEST_LEN];
+ networkstatus_voter_info_t *v;
+ document_signature_t *sig;
+ const char *id_hexdigest = NULL;
+ const char *sk_hexdigest = NULL;
+ digest_algorithm_t alg = DIGEST_SHA1;
+ tok = _tok;
+ if (tok->tp != K_DIRECTORY_SIGNATURE)
+ continue;
+ tor_assert(tok->n_args >= 2);
+ if (tok->n_args == 2) {
+ id_hexdigest = tok->args[0];
+ sk_hexdigest = tok->args[1];
+ } else {
+ const char *algname = tok->args[0];
+ int a;
+ id_hexdigest = tok->args[1];
+ sk_hexdigest = tok->args[2];
+ a = crypto_digest_algorithm_parse_name(algname);
+ if (a<0) {
+ log_warn(LD_DIR, "Unknown digest algorithm %s; skipping",
+ escaped(algname));
+ continue;
+ }
+ alg = a;
+ }
+
+ if (!tok->object_type ||
+ strcmp(tok->object_type, "SIGNATURE") ||
+ tok->object_size < 128 || tok->object_size > 512) {
+ log_warn(LD_DIR, "Bad object type or length on directory-signature");
+ goto err;
+ }
+
+ if (strlen(id_hexdigest) != HEX_DIGEST_LEN ||
+ base16_decode(declared_identity, sizeof(declared_identity),
+ id_hexdigest, HEX_DIGEST_LEN)
+ != sizeof(declared_identity)) {
+ log_warn(LD_DIR, "Error decoding declared identity %s in "
+ "network-status document.", escaped(id_hexdigest));
+ goto err;
+ }
+ if (!(v = networkstatus_get_voter_by_id(ns, declared_identity))) {
+ log_warn(LD_DIR, "ID on signature on network-status document does "
+ "not match any declared directory source.");
+ goto err;
+ }
+ sig = tor_malloc_zero(sizeof(document_signature_t));
+ memcpy(sig->identity_digest, v->identity_digest, DIGEST_LEN);
+ sig->alg = alg;
+ if (strlen(sk_hexdigest) != HEX_DIGEST_LEN ||
+ base16_decode(sig->signing_key_digest, sizeof(sig->signing_key_digest),
+ sk_hexdigest, HEX_DIGEST_LEN)
+ != sizeof(sig->signing_key_digest)) {
+ log_warn(LD_DIR, "Error decoding declared signing key digest %s in "
+ "network-status document.", escaped(sk_hexdigest));
+ tor_free(sig);
+ goto err;
+ }
+
+ if (ns->type != NS_TYPE_CONSENSUS) {
+ if (tor_memneq(declared_identity, ns->cert->cache_info.identity_digest,
+ DIGEST_LEN)) {
+ log_warn(LD_DIR, "Digest mismatch between declared and actual on "
+ "network-status vote.");
+ tor_free(sig);
+ goto err;
+ }
+ }
+
+ if (networkstatus_get_voter_sig_by_alg(v, sig->alg)) {
+ /* We already parsed a vote with this algorithm from this voter. Use the
+ first one. */
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR, "We received a networkstatus "
+ "that contains two signatures from the same voter with the same "
+ "algorithm. Ignoring the second signature.");
+ tor_free(sig);
+ continue;
+ }
+
+ if (ns->type != NS_TYPE_CONSENSUS) {
+ if (check_signature_token(ns_digests.d[DIGEST_SHA1], DIGEST_LEN,
+ tok, ns->cert->signing_key, 0,
+ "network-status document")) {
+ tor_free(sig);
+ goto err;
+ }
+ sig->good_signature = 1;
+ } else {
+ if (tok->object_size >= INT_MAX || tok->object_size >= SIZE_T_CEILING) {
+ tor_free(sig);
+ goto err;
+ }
+ sig->signature = tor_memdup(tok->object_body, tok->object_size);
+ sig->signature_len = (int) tok->object_size;
+ }
+ smartlist_add(v->sigs, sig);
+
+ ++n_signatures;
+ } SMARTLIST_FOREACH_END(_tok);
+
+ if (! n_signatures) {
+ log_warn(LD_DIR, "No signatures on networkstatus document.");
+ goto err;
+ } else if (ns->type == NS_TYPE_VOTE && n_signatures != 1) {
+ log_warn(LD_DIR, "Received more than one signature on a "
+ "network-status vote.");
+ goto err;
+ }
+
+ if (eos_out)
+ *eos_out = end_of_footer;
+
+ goto done;
+ err:
+ dump_desc(s_dup, "v3 networkstatus");
+ networkstatus_vote_free(ns);
+ ns = NULL;
+ done:
+ if (tokens) {
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ }
+ if (voter) {
+ if (voter->sigs) {
+ SMARTLIST_FOREACH(voter->sigs, document_signature_t *, sig,
+ document_signature_free(sig));
+ smartlist_free(voter->sigs);
+ }
+ tor_free(voter->nickname);
+ tor_free(voter->address);
+ tor_free(voter->contact);
+ tor_free(voter);
+ }
+ if (rs_tokens) {
+ SMARTLIST_FOREACH(rs_tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(rs_tokens);
+ }
+ if (footer_tokens) {
+ SMARTLIST_FOREACH(footer_tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(footer_tokens);
+ }
+ if (area) {
+ DUMP_AREA(area, "v3 networkstatus");
+ memarea_drop_all(area);
+ }
+ if (rs_area)
+ memarea_drop_all(rs_area);
+ tor_free(last_kwd);
+
+ return ns;
+}
+
+/** Return the common_digests_t that holds the digests of the
+ * <b>flavor_name</b>-flavored networkstatus according to the detached
+ * signatures document <b>sigs</b>, allocating a new common_digests_t as
+ * needed. */
+static common_digests_t *
+detached_get_digests(ns_detached_signatures_t *sigs, const char *flavor_name)
+{
+ common_digests_t *d = strmap_get(sigs->digests, flavor_name);
+ if (!d) {
+ d = tor_malloc_zero(sizeof(common_digests_t));
+ strmap_set(sigs->digests, flavor_name, d);
+ }
+ return d;
+}
+
+/** Return the list of signatures of the <b>flavor_name</b>-flavored
+ * networkstatus according to the detached signatures document <b>sigs</b>,
+ * allocating a new signature list as needed. */
+static smartlist_t *
+detached_get_signatures(ns_detached_signatures_t *sigs,
+ const char *flavor_name)
+{
+ smartlist_t *sl = strmap_get(sigs->signatures, flavor_name);
+ if (!sl) {
+ sl = smartlist_new();
+ strmap_set(sigs->signatures, flavor_name, sl);
+ }
+ return sl;
+}
+
+/** Parse a detached v3 networkstatus signature document between <b>s</b> and
+ * <b>eos</b> and return the result. Return NULL on failure. */
+ns_detached_signatures_t *
+networkstatus_parse_detached_signatures(const char *s, const char *eos)
+{
+  /* XXXX there is too much duplicated code shared between this function and
+   * networkstatus_parse_vote_from_string(). */
+ directory_token_t *tok;
+ memarea_t *area = NULL;
+ common_digests_t *digests;
+
+ smartlist_t *tokens = smartlist_new();
+ ns_detached_signatures_t *sigs =
+ tor_malloc_zero(sizeof(ns_detached_signatures_t));
+ sigs->digests = strmap_new();
+ sigs->signatures = strmap_new();
+
+ if (!eos)
+ eos = s + strlen(s);
+
+ area = memarea_new();
+ if (tokenize_string(area,s, eos, tokens,
+ networkstatus_detached_signature_token_table, 0)) {
+ log_warn(LD_DIR, "Error tokenizing detached networkstatus signatures");
+ goto err;
+ }
+
+ /* Grab all the digest-like tokens. */
+ SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
+ const char *algname;
+ digest_algorithm_t alg;
+ const char *flavor;
+ const char *hexdigest;
+ size_t expected_length, digest_length;
+
+ tok = _tok;
+
+ if (tok->tp == K_CONSENSUS_DIGEST) {
+ algname = "sha1";
+ alg = DIGEST_SHA1;
+ flavor = "ns";
+ hexdigest = tok->args[0];
+ } else if (tok->tp == K_ADDITIONAL_DIGEST) {
+ int a = crypto_digest_algorithm_parse_name(tok->args[1]);
+ if (a<0) {
+ log_warn(LD_DIR, "Unrecognized algorithm name %s", tok->args[0]);
+ continue;
+ }
+ alg = (digest_algorithm_t) a;
+ flavor = tok->args[0];
+ algname = tok->args[1];
+ hexdigest = tok->args[2];
+ } else {
+ continue;
+ }
+
+ digest_length = crypto_digest_algorithm_get_length(alg);
+ expected_length = digest_length * 2; /* hex encoding */
+
+ if (strlen(hexdigest) != expected_length) {
+ log_warn(LD_DIR, "Wrong length on consensus-digest in detached "
+ "networkstatus signatures");
+ goto err;
+ }
+ digests = detached_get_digests(sigs, flavor);
+ tor_assert(digests);
+ if (!tor_mem_is_zero(digests->d[alg], digest_length)) {
+ log_warn(LD_DIR, "Multiple digests for %s with %s on detached "
+ "signatures document", flavor, algname);
+ continue;
+ }
+ if (base16_decode(digests->d[alg], digest_length,
+ hexdigest, strlen(hexdigest)) != (int) digest_length) {
+ log_warn(LD_DIR, "Bad encoding on consensus-digest in detached "
+ "networkstatus signatures");
+ goto err;
+ }
+ } SMARTLIST_FOREACH_END(_tok);
+
+ tok = find_by_keyword(tokens, K_VALID_AFTER);
+ if (parse_iso_time(tok->args[0], &sigs->valid_after)) {
+ log_warn(LD_DIR, "Bad valid-after in detached networkstatus signatures");
+ goto err;
+ }
+
+ tok = find_by_keyword(tokens, K_FRESH_UNTIL);
+ if (parse_iso_time(tok->args[0], &sigs->fresh_until)) {
+ log_warn(LD_DIR, "Bad fresh-until in detached networkstatus signatures");
+ goto err;
+ }
+
+ tok = find_by_keyword(tokens, K_VALID_UNTIL);
+ if (parse_iso_time(tok->args[0], &sigs->valid_until)) {
+ log_warn(LD_DIR, "Bad valid-until in detached networkstatus signatures");
+ goto err;
+ }
+
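+  /* Now collect the signature tokens (directory-signature and
+   * additional-signature), skipping duplicates and rejecting malformed
+   * entries. */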
+ SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
+ const char *id_hexdigest;
+ const char *sk_hexdigest;
+ const char *algname;
+ const char *flavor;
+ digest_algorithm_t alg;
+
+ char id_digest[DIGEST_LEN];
+ char sk_digest[DIGEST_LEN];
+ smartlist_t *siglist;
+ document_signature_t *sig;
+ int is_duplicate;
+
+ tok = _tok;
+ if (tok->tp == K_DIRECTORY_SIGNATURE) {
+ tor_assert(tok->n_args >= 2);
+ flavor = "ns";
+ algname = "sha1";
+ id_hexdigest = tok->args[0];
+ sk_hexdigest = tok->args[1];
+ } else if (tok->tp == K_ADDITIONAL_SIGNATURE) {
+ tor_assert(tok->n_args >= 4);
+ flavor = tok->args[0];
+ algname = tok->args[1];
+ id_hexdigest = tok->args[2];
+ sk_hexdigest = tok->args[3];
+ } else {
+ continue;
+ }
+
+ {
+ int a = crypto_digest_algorithm_parse_name(algname);
+ if (a<0) {
+ log_warn(LD_DIR, "Unrecognized algorithm name %s", algname);
+ continue;
+ }
+ alg = (digest_algorithm_t) a;
+ }
+
+ if (!tok->object_type ||
+ strcmp(tok->object_type, "SIGNATURE") ||
+ tok->object_size < 128 || tok->object_size > 512) {
+ log_warn(LD_DIR, "Bad object type or length on directory-signature");
+ goto err;
+ }
+
+ if (strlen(id_hexdigest) != HEX_DIGEST_LEN ||
+ base16_decode(id_digest, sizeof(id_digest),
+ id_hexdigest, HEX_DIGEST_LEN) != sizeof(id_digest)) {
+ log_warn(LD_DIR, "Error decoding declared identity %s in "
+ "network-status vote.", escaped(id_hexdigest));
+ goto err;
+ }
+ if (strlen(sk_hexdigest) != HEX_DIGEST_LEN ||
+ base16_decode(sk_digest, sizeof(sk_digest),
+ sk_hexdigest, HEX_DIGEST_LEN) != sizeof(sk_digest)) {
+ log_warn(LD_DIR, "Error decoding declared signing key digest %s in "
+ "network-status vote.", escaped(sk_hexdigest));
+ goto err;
+ }
+
+ siglist = detached_get_signatures(sigs, flavor);
+ is_duplicate = 0;
+ SMARTLIST_FOREACH(siglist, document_signature_t *, dsig, {
+ if (dsig->alg == alg &&
+ tor_memeq(id_digest, dsig->identity_digest, DIGEST_LEN) &&
+ tor_memeq(sk_digest, dsig->signing_key_digest, DIGEST_LEN)) {
+ is_duplicate = 1;
+ }
+ });
+ if (is_duplicate) {
+ log_warn(LD_DIR, "Two signatures with identical keys and algorithm "
+ "found.");
+ continue;
+ }
+
+ sig = tor_malloc_zero(sizeof(document_signature_t));
+ sig->alg = alg;
+ memcpy(sig->identity_digest, id_digest, DIGEST_LEN);
+ memcpy(sig->signing_key_digest, sk_digest, DIGEST_LEN);
+ if (tok->object_size >= INT_MAX || tok->object_size >= SIZE_T_CEILING) {
+ tor_free(sig);
+ goto err;
+ }
+ sig->signature = tor_memdup(tok->object_body, tok->object_size);
+ sig->signature_len = (int) tok->object_size;
+
+ smartlist_add(siglist, sig);
+ } SMARTLIST_FOREACH_END(_tok);
+
+ goto done;
+ err:
+ ns_detached_signatures_free(sigs);
+ sigs = NULL;
+ done:
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ if (area) {
+ DUMP_AREA(area, "detached signatures");
+ memarea_drop_all(area);
+ }
+ return sigs;
+}
+
+/** Parse the addr policy in the string <b>s</b> and return it. If
+ * assume_action is nonnegative, then insert its action (ADDR_POLICY_ACCEPT or
+ * ADDR_POLICY_REJECT) for items that specify no action.
+ *
+ * Returns NULL on policy errors.
+ *
+ * Set *<b>malformed_list</b> to true if the entire policy list should be
+ * discarded. Otherwise, set it to false, and only this item should be ignored
+ * on error - the rest of the policy list can continue to be processed and
+ * used.
+ *
+ * The addr_policy_t returned by this function can have its address set to
+ * AF_UNSPEC for '*'. Use policy_expand_unspec() to turn this into a pair
+ * of AF_INET and AF_INET6 items.
+ */
+MOCK_IMPL(addr_policy_t *,
+router_parse_addr_policy_item_from_string,(const char *s, int assume_action,
+ int *malformed_list))
+{
+ directory_token_t *tok = NULL;
+ const char *cp, *eos;
+ /* Longest possible policy is
+ * "accept6 [ffff:ffff:..255]/128:10000-65535",
+ * which contains a max-length IPv6 address, plus 26 characters.
+ * But note that there can be an arbitrary amount of space between the
+ * accept and the address:mask/port element.
+ * We don't need to multiply TOR_ADDR_BUF_LEN by 2, as there is only one
+ * IPv6 address. But making the buffer shorter might cause valid long lines,
+ * which parsed in previous versions, to fail to parse in new versions.
+ * (These lines would have to have excessive amounts of whitespace.) */
+ char line[TOR_ADDR_BUF_LEN*2 + 32];
+ addr_policy_t *r;
+ memarea_t *area = NULL;
+
+ tor_assert(malformed_list);
+ *malformed_list = 0;
+
+ s = eat_whitespace(s);
+ /* We can only do assume_action on []-quoted IPv6, as "a" (accept)
+ * and ":" (port separator) are ambiguous */
+ if ((*s == '*' || *s == '[' || TOR_ISDIGIT(*s)) && assume_action >= 0) {
+ if (tor_snprintf(line, sizeof(line), "%s %s",
+ assume_action == ADDR_POLICY_ACCEPT?"accept":"reject", s)<0) {
+ log_warn(LD_DIR, "Policy %s is too long.", escaped(s));
+ return NULL;
+ }
+ cp = line;
+ tor_strlower(line);
+ } else { /* assume an already well-formed address policy line */
+ cp = s;
+ }
+
+ eos = cp + strlen(cp);
+ area = memarea_new();
+ tok = get_next_token(area, &cp, eos, routerdesc_token_table);
+ if (tok->tp == ERR_) {
+ log_warn(LD_DIR, "Error reading address policy: %s", tok->error);
+ goto err;
+ }
+ if (tok->tp != K_ACCEPT && tok->tp != K_ACCEPT6 &&
+ tok->tp != K_REJECT && tok->tp != K_REJECT6) {
+ log_warn(LD_DIR, "Expected 'accept' or 'reject'.");
+ goto err;
+ }
+
+ /* Use the extended interpretation of accept/reject *,
+ * expanding it into an IPv4 wildcard and an IPv6 wildcard.
+ * Also permit *4 and *6 for IPv4 and IPv6 only wildcards. */
+ r = router_parse_addr_policy(tok, TAPMP_EXTENDED_STAR);
+ if (!r) {
+ goto err;
+ }
+
+ /* Ensure that accept6/reject6 fields are followed by IPv6 addresses.
+ * AF_UNSPEC addresses are only permitted on the accept/reject field type.
+ * Unlike descriptors, torrcs exit policy accept/reject can be followed by
+ * either an IPv4 or IPv6 address. */
+ if ((tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) &&
+ tor_addr_family(&r->addr) != AF_INET6) {
+ /* This is a non-fatal error, just ignore this one entry. */
+ *malformed_list = 0;
+ log_warn(LD_DIR, "IPv4 address '%s' with accept6/reject6 field type in "
+ "exit policy. Ignoring, but continuing to parse rules. (Use "
+ "accept/reject with IPv4 addresses.)",
+ tok->n_args == 1 ? tok->args[0] : "");
+ addr_policy_free(r);
+ r = NULL;
+ goto done;
+ }
+
+ goto done;
+ err:
+ *malformed_list = 1;
+ r = NULL;
+ done:
+ token_clear(tok);
+ if (area) {
+ DUMP_AREA(area, "policy item");
+ memarea_drop_all(area);
+ }
+ return r;
+}
+
+/** Add an exit policy stored in the token <b>tok</b> to the router info in
+ * <b>router</b>. Return 0 on success, -1 on failure. */
+static int
+router_add_exit_policy(routerinfo_t *router, directory_token_t *tok)
+{
+ addr_policy_t *newe;
+ /* Use the standard interpretation of accept/reject *, an IPv4 wildcard. */
+ newe = router_parse_addr_policy(tok, 0);
+ if (!newe)
+ return -1;
+ if (! router->exit_policy)
+ router->exit_policy = smartlist_new();
+
+ /* Ensure that in descriptors, accept/reject fields are followed by
+ * IPv4 addresses, and accept6/reject6 fields are followed by
+ * IPv6 addresses. Unlike torrcs, descriptor exit policies do not permit
+ * accept/reject followed by IPv6. */
+ if (((tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) &&
+ tor_addr_family(&newe->addr) == AF_INET)
+ ||
+ ((tok->tp == K_ACCEPT || tok->tp == K_REJECT) &&
+ tor_addr_family(&newe->addr) == AF_INET6)) {
+ /* There's nothing the user can do about other relays' descriptors,
+ * so we don't provide usage advice here. */
+ log_warn(LD_DIR, "Mismatch between field type and address type in exit "
+ "policy '%s'. Discarding entire router descriptor.",
+ tok->n_args == 1 ? tok->args[0] : "");
+ addr_policy_free(newe);
+ return -1;
+ }
+
+ smartlist_add(router->exit_policy, newe);
+
+ return 0;
+}
+
+/** Given a K_ACCEPT[6] or K_REJECT[6] token, create and return a new
+ * addr_policy_t corresponding to the token. If TAPMP_EXTENDED_STAR
+ * is set in fmt_flags, K_ACCEPT6 and K_REJECT6 tokens followed by *
+ * expand to IPv6-only policies; otherwise they expand to IPv4 and IPv6
+ * policies. */
+static addr_policy_t *
+router_parse_addr_policy(directory_token_t *tok, unsigned fmt_flags)
+{
+ addr_policy_t newe;
+ char *arg;
+
+ tor_assert(tok->tp == K_REJECT || tok->tp == K_REJECT6 ||
+ tok->tp == K_ACCEPT || tok->tp == K_ACCEPT6);
+
+ if (tok->n_args != 1)
+ return NULL;
+ arg = tok->args[0];
+
+ if (!strcmpstart(arg,"private"))
+ return router_parse_addr_policy_private(tok);
+
+ memset(&newe, 0, sizeof(newe));
+
+ if (tok->tp == K_REJECT || tok->tp == K_REJECT6)
+ newe.policy_type = ADDR_POLICY_REJECT;
+ else
+ newe.policy_type = ADDR_POLICY_ACCEPT;
+
+ /* accept6/reject6 * produces an IPv6 wildcard address only.
+ * (accept/reject * produces rules for IPv4 and IPv6 wildcard addresses.) */
+ if ((fmt_flags & TAPMP_EXTENDED_STAR)
+ && (tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6)) {
+ fmt_flags |= TAPMP_STAR_IPV6_ONLY;
+ }
+
+ if (tor_addr_parse_mask_ports(arg, fmt_flags, &newe.addr, &newe.maskbits,
+ &newe.prt_min, &newe.prt_max) < 0) {
+ log_warn(LD_DIR,"Couldn't parse line %s. Dropping", escaped(arg));
+ return NULL;
+ }
+
+ return addr_policy_get_canonical_entry(&newe);
+}
+
+/** Parse an exit policy line of the format "accept[6]/reject[6] private:...".
+ * This didn't exist until Tor 0.1.1.15, so nobody should generate it in
+ * router descriptors until earlier versions are obsolete.
+ *
+ * accept/reject and accept6/reject6 private all produce rules for both
+ * IPv4 and IPv6 addresses.
+ */
+static addr_policy_t *
+router_parse_addr_policy_private(directory_token_t *tok)
+{
+ const char *arg;
+ uint16_t port_min, port_max;
+ addr_policy_t result;
+
+ arg = tok->args[0];
+ if (strcmpstart(arg, "private"))
+ return NULL;
+
+ arg += strlen("private");
+ arg = (char*) eat_whitespace(arg);
+ if (!arg || *arg != ':')
+ return NULL;
+
+ if (parse_port_range(arg+1, &port_min, &port_max)<0)
+ return NULL;
+
+ memset(&result, 0, sizeof(result));
+ if (tok->tp == K_REJECT || tok->tp == K_REJECT6)
+ result.policy_type = ADDR_POLICY_REJECT;
+ else
+ result.policy_type = ADDR_POLICY_ACCEPT;
+ result.is_private = 1;
+ result.prt_min = port_min;
+ result.prt_max = port_max;
+
+ if (tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) {
+ log_warn(LD_GENERAL,
+ "'%s' expands into rules which apply to all private IPv4 and "
+ "IPv6 addresses. (Use accept/reject private:* for IPv4 and "
+ "IPv6.)", tok->n_args == 1 ? tok->args[0] : "");
+ }
+
+ return addr_policy_get_canonical_entry(&result);
+}
+
+/** Log and exit if any element of <b>lst</b> is a malformed address
+ * policy. */
+void
+assert_addr_policy_ok(smartlist_t *lst)
+{
+ if (!lst) return;
+ SMARTLIST_FOREACH(lst, addr_policy_t *, t, {
+ tor_assert(t->policy_type == ADDR_POLICY_REJECT ||
+ t->policy_type == ADDR_POLICY_ACCEPT);
+ tor_assert(t->prt_min <= t->prt_max);
+ });
+}
+
+/** Return a newly allocated smartlist of all accept or reject tokens in
+ * <b>s</b>.
+ */
+static smartlist_t *
+find_all_exitpolicy(smartlist_t *s)
+{
+ smartlist_t *out = smartlist_new();
+ SMARTLIST_FOREACH(s, directory_token_t *, t,
+ if (t->tp == K_ACCEPT || t->tp == K_ACCEPT6 ||
+ t->tp == K_REJECT || t->tp == K_REJECT6)
+ smartlist_add(out,t));
+ return out;
+}
+
+/** Helper function for <b>router_get_hash_impl</b>: given <b>s</b>,
+ * <b>s_len</b>, <b>start_str</b>, <b>end_str</b>, and <b>end_c</b> with the
+ * same semantics as in that function, set *<b>start_out</b> (inclusive) and
+ * *<b>end_out</b> (exclusive) to the boundaries of the string to be hashed.
+ *
+ * Return 0 on success and -1 on failure.
+ */
+static int
+router_get_hash_impl_helper(const char *s, size_t s_len,
+ const char *start_str,
+ const char *end_str, char end_c,
+ int log_severity,
+ const char **start_out, const char **end_out)
+{
+ const char *start, *end;
+ start = tor_memstr(s, s_len, start_str);
+ if (!start) {
+ log_fn(log_severity,LD_DIR,
+ "couldn't find start of hashed material \"%s\"",start_str);
+ return -1;
+ }
+ if (start != s && *(start-1) != '\n') {
+ log_fn(log_severity,LD_DIR,
+ "first occurrence of \"%s\" is not at the start of a line",
+ start_str);
+ return -1;
+ }
+ end = tor_memstr(start+strlen(start_str),
+ s_len - (start-s) - strlen(start_str), end_str);
+ if (!end) {
+ log_fn(log_severity,LD_DIR,
+ "couldn't find end of hashed material \"%s\"",end_str);
+ return -1;
+ }
+ end = memchr(end+strlen(end_str), end_c, s_len - (end-s) - strlen(end_str));
+ if (!end) {
+ log_fn(log_severity,LD_DIR,
+ "couldn't find EOL");
+ return -1;
+ }
+ ++end;
+
+ *start_out = start;
+ *end_out = end;
+ return 0;
+}
+
+/** Compute the digest of the substring of <b>s</b> taken from the first
+ * occurrence of <b>start_str</b> through the first instance of <b>end_c</b>
+ * after the first subsequent occurrence of <b>end_str</b>; store the 20-byte
+ * or 32-byte result in <b>digest</b>; return 0 on success.
+ *
+ * If no such substring exists, return -1.
+ */
+static int
+router_get_hash_impl(const char *s, size_t s_len, char *digest,
+ const char *start_str,
+ const char *end_str, char end_c,
+ digest_algorithm_t alg)
+{
+ const char *start=NULL, *end=NULL;
+ if (router_get_hash_impl_helper(s,s_len,start_str,end_str,end_c,LOG_WARN,
+ &start,&end)<0)
+ return -1;
+
+ return router_compute_hash_final(digest, start, end-start, alg);
+}
+
+/** Compute the digest of the <b>len</b>-byte directory object at
+ * <b>start</b>, using <b>alg</b>. Store the result in <b>digest</b>, which
+ * must be long enough to hold it. */
+MOCK_IMPL(STATIC int,
+router_compute_hash_final,(char *digest,
+ const char *start, size_t len,
+ digest_algorithm_t alg))
+{
+ if (alg == DIGEST_SHA1) {
+ if (crypto_digest(digest, start, len) < 0) {
+ log_warn(LD_BUG,"couldn't compute digest");
+ return -1;
+ }
+ } else {
+ if (crypto_digest256(digest, start, len, alg) < 0) {
+ log_warn(LD_BUG,"couldn't compute digest");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/** As router_get_hash_impl, but compute all hashes. */
+static int
+router_get_hashes_impl(const char *s, size_t s_len, common_digests_t *digests,
+ const char *start_str,
+ const char *end_str, char end_c)
+{
+ const char *start=NULL, *end=NULL;
+ if (router_get_hash_impl_helper(s,s_len,start_str,end_str,end_c,LOG_WARN,
+ &start,&end)<0)
+ return -1;
+
+ if (crypto_common_digests(digests, start, end-start)) {
+ log_warn(LD_BUG,"couldn't compute digests");
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Assuming that s starts with a microdesc, return the start of the
+ * *NEXT* one. Return NULL on "not found." */
+static const char *
+find_start_of_next_microdesc(const char *s, const char *eos)
+{
+ int started_with_annotations;
+ s = eat_whitespace_eos(s, eos);
+ if (!s)
+ return NULL;
+
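+/* CHECK_LENGTH: give up unless at least 32 bytes remain.
+ * NEXT_LINE: advance s just past the next newline, or give up if no complete
+ * line remains. */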
+#define CHECK_LENGTH() STMT_BEGIN \
+ if (s+32 > eos) \
+ return NULL; \
+ STMT_END
+
+#define NEXT_LINE() STMT_BEGIN \
+ s = memchr(s, '\n', eos-s); \
+ if (!s || s+1 >= eos) \
+ return NULL; \
+ s++; \
+ STMT_END
+
+ CHECK_LENGTH();
+
+ started_with_annotations = (*s == '@');
+
+ if (started_with_annotations) {
+ /* Start by advancing to the first non-annotation line. */
+ while (*s == '@')
+ NEXT_LINE();
+ }
+ CHECK_LENGTH();
+
+ /* Now we should be pointed at an onion-key line. If we are, then skip
+ * it. */
+ if (!strcmpstart(s, "onion-key"))
+ NEXT_LINE();
+
+ /* Okay, now we're pointed at the first line of the microdescriptor which is
+ not an annotation or onion-key. The next line that _is_ an annotation or
+ onion-key is the start of the next microdescriptor. */
+ while (s+32 < eos) {
+ if (*s == '@' || !strcmpstart(s, "onion-key"))
+ return s;
+ NEXT_LINE();
+ }
+ return NULL;
+
+#undef CHECK_LENGTH
+#undef NEXT_LINE
+}
+
+/** Parse as many microdescriptors as are found from the string starting at
+ * <b>s</b> and ending at <b>eos</b>. If allow_annotations is set, read any
+ * annotations we recognize and ignore ones we don't.
+ *
+ * If <b>saved_location</b> isn't SAVED_IN_CACHE, make a local copy of each
+ * descriptor in the body field of each microdesc_t.
+ *
+ * Return all newly parsed microdescriptors in a newly allocated
+ * smartlist_t. If <b>invalid_digests_out</b> is provided, add a SHA256
+ * microdesc digest to it for every microdesc that we found to be badly
+ * formed. (This list may contain duplicates.) */
+smartlist_t *
+microdescs_parse_from_string(const char *s, const char *eos,
+ int allow_annotations,
+ saved_location_t where,
+ smartlist_t *invalid_digests_out)
+{
+ smartlist_t *tokens;
+ smartlist_t *result;
+ microdesc_t *md = NULL;
+ memarea_t *area;
+ const char *start = s;
+ const char *start_of_next_microdesc;
+ int flags = allow_annotations ? TS_ANNOTATIONS_OK : 0;
+ const int copy_body = (where != SAVED_IN_CACHE);
+
+ directory_token_t *tok;
+
+ if (!eos)
+ eos = s + strlen(s);
+
+ s = eat_whitespace_eos(s, eos);
+ area = memarea_new();
+ result = smartlist_new();
+ tokens = smartlist_new();
+
+ while (s < eos) {
+ int okay = 0;
+
+ start_of_next_microdesc = find_start_of_next_microdesc(s, eos);
+ if (!start_of_next_microdesc)
+ start_of_next_microdesc = eos;
+
+ md = tor_malloc_zero(sizeof(microdesc_t));
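+    /* Find the body (it starts at "onion-key") and compute its digest before
+     * detailed parsing, so that even a malformed microdesc can be reported
+     * by digest. */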
+ {
+ const char *cp = tor_memstr(s, start_of_next_microdesc-s,
+ "onion-key");
+ const int no_onion_key = (cp == NULL);
+ if (no_onion_key) {
+ cp = s; /* So that we have *some* junk to put in the body */
+ }
+
+ md->bodylen = start_of_next_microdesc - cp;
+ md->saved_location = where;
+ if (copy_body)
+ md->body = tor_memdup_nulterm(cp, md->bodylen);
+ else
+ md->body = (char*)cp;
+ md->off = cp - start;
+ crypto_digest256(md->digest, md->body, md->bodylen, DIGEST_SHA256);
+ if (no_onion_key) {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR, "Malformed or truncated descriptor");
+ goto next;
+ }
+ }
+
+ if (tokenize_string(area, s, start_of_next_microdesc, tokens,
+ microdesc_token_table, flags)) {
+ log_warn(LD_DIR, "Unparseable microdescriptor");
+ goto next;
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, A_LAST_LISTED))) {
+ if (parse_iso_time(tok->args[0], &md->last_listed)) {
+ log_warn(LD_DIR, "Bad last-listed time in microdescriptor");
+ goto next;
+ }
+ }
+
+ tok = find_by_keyword(tokens, K_ONION_KEY);
+ if (!crypto_pk_public_exponent_ok(tok->key)) {
+ log_warn(LD_DIR,
+ "Relay's onion key had invalid exponent.");
+ goto next;
+ }
+ md->onion_pkey = tok->key;
+ tok->key = NULL;
+
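+    /* Parse the optional ntor-onion-key (curve25519) line. */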
+ if ((tok = find_opt_by_keyword(tokens, K_ONION_KEY_NTOR))) {
+ curve25519_public_key_t k;
+ tor_assert(tok->n_args >= 1);
+ if (curve25519_public_from_base64(&k, tok->args[0]) < 0) {
+ log_warn(LD_DIR, "Bogus ntor-onion-key in microdesc");
+ goto next;
+ }
+ md->onion_curve25519_pkey =
+ tor_memdup(&k, sizeof(curve25519_public_key_t));
+ }
+
+ smartlist_t *id_lines = find_all_by_keyword(tokens, K_ID);
+ if (id_lines) {
+ SMARTLIST_FOREACH_BEGIN(id_lines, directory_token_t *, t) {
+ tor_assert(t->n_args >= 2);
+ if (!strcmp(t->args[0], "ed25519")) {
+ if (md->ed25519_identity_pkey) {
+ log_warn(LD_DIR, "Extra ed25519 key in microdesc");
+ smartlist_free(id_lines);
+ goto next;
+ }
+ ed25519_public_key_t k;
+ if (ed25519_public_from_base64(&k, t->args[1])<0) {
+ log_warn(LD_DIR, "Bogus ed25519 key in microdesc");
+ smartlist_free(id_lines);
+ goto next;
+ }
+ md->ed25519_identity_pkey = tor_memdup(&k, sizeof(k));
+ }
+ } SMARTLIST_FOREACH_END(t);
+ smartlist_free(id_lines);
+ }
+
+ {
+ smartlist_t *a_lines = find_all_by_keyword(tokens, K_A);
+ if (a_lines) {
+ find_single_ipv6_orport(a_lines, &md->ipv6_addr, &md->ipv6_orport);
+ smartlist_free(a_lines);
+ }
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_FAMILY))) {
+ int i;
+ md->family = smartlist_new();
+ for (i=0;i<tok->n_args;++i) {
+ if (!is_legal_nickname_or_hexdigest(tok->args[i])) {
+ log_warn(LD_DIR, "Illegal nickname %s in family line",
+ escaped(tok->args[i]));
+ goto next;
+ }
+ smartlist_add_strdup(md->family, tok->args[i]);
+ }
+ }
+
+ if ((tok = find_opt_by_keyword(tokens, K_P))) {
+ md->exit_policy = parse_short_policy(tok->args[0]);
+ }
+ if ((tok = find_opt_by_keyword(tokens, K_P6))) {
+ md->ipv6_exit_policy = parse_short_policy(tok->args[0]);
+ }
+
+ smartlist_add(result, md);
+ okay = 1;
+
+ md = NULL;
+ next:
+ if (! okay && invalid_digests_out) {
+ smartlist_add(invalid_digests_out,
+ tor_memdup(md->digest, DIGEST256_LEN));
+ }
+ microdesc_free(md);
+ md = NULL;
+
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ memarea_clear(area);
+ smartlist_clear(tokens);
+ s = start_of_next_microdesc;
+ }
+
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ memarea_drop_all(area);
+ smartlist_free(tokens);
+
+ return result;
+}
+
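+/* Illustrative sketch (not part of the original code): a caller that has a
+ * downloaded blob of microdescriptors might use the parser above roughly as
+ * follows; "body", "body_len" and handle_new_microdesc() are hypothetical.
+ *
+ *   smartlist_t *invalid = smartlist_new();
+ *   smartlist_t *mds = microdescs_parse_from_string(body, body + body_len,
+ *                                                   0, SAVED_NOWHERE,
+ *                                                   invalid);
+ *   SMARTLIST_FOREACH(mds, microdesc_t *, md, handle_new_microdesc(md));
+ *   SMARTLIST_FOREACH(invalid, char *, d, tor_free(d));
+ *   smartlist_free(invalid);
+ *   smartlist_free(mds);
+ */
+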
+/** Extract a Tor version from a <b>platform</b> line from a router
+ * descriptor, and place the result in <b>router_version</b>.
+ *
+ * Return 1 on success, -1 on parsing failure, and 0 if the
+ * platform line does not indicate some version of Tor.
+ *
+ * If <b>strict</b> is non-zero, finding any weird version components
+ * (like negative numbers) counts as a parsing failure.
+ */
+int
+tor_version_parse_platform(const char *platform,
+ tor_version_t *router_version,
+ int strict)
+{
+ char tmp[128];
+ char *s, *s2, *start;
+
+ if (strcmpstart(platform,"Tor ")) /* nonstandard Tor; say 0. */
+ return 0;
+
+ start = (char *)eat_whitespace(platform+3);
+ if (!*start) return -1;
+ s = (char *)find_whitespace(start); /* also finds '\0', which is fine */
+ s2 = (char*)eat_whitespace(s);
+ if (!strcmpstart(s2, "(r") || !strcmpstart(s2, "(git-"))
+ s = (char*)find_whitespace(s2);
+
+ if ((size_t)(s-start+1) >= sizeof(tmp)) /* too big, no */
+ return -1;
+ strlcpy(tmp, start, s-start+1);
+
+ if (tor_version_parse(tmp, router_version)<0) {
+ log_info(LD_DIR,"Router version '%s' unparseable.",tmp);
+ return -1;
+ }
+
+ if (strict) {
+ if (router_version->major < 0 ||
+ router_version->minor < 0 ||
+ router_version->micro < 0 ||
+ router_version->patchlevel < 0 ||
+ router_version->svn_revision < 0) {
+ return -1;
+ }
+ }
+
+ return 1;
+}
+
+/** Parse the Tor version of the platform string <b>platform</b>,
+ * and compare it to the version in <b>cutoff</b>. Return 1 if
+ * the router is at least as new as the cutoff, else return 0.
+ */
+int
+tor_version_as_new_as(const char *platform, const char *cutoff)
+{
+ tor_version_t cutoff_version, router_version;
+ int r;
+ tor_assert(platform);
+
+ if (tor_version_parse(cutoff, &cutoff_version)<0) {
+ log_warn(LD_BUG,"cutoff version '%s' unparseable.",cutoff);
+ return 0;
+ }
+
+ r = tor_version_parse_platform(platform, &router_version, 0);
+ if (r == 0) {
+ /* nonstandard Tor; be safe and say yes */
+ return 1;
+ } else if (r < 0) {
+ /* unparseable version; be safe and say yes. */
+ return 1;
+ }
+
+ /* Here's why we don't need to do any special handling for svn revisions:
+ * - If neither has an svn revision, we're fine.
+ * - If the router doesn't have an svn revision, we can't assume that it
+ * is "at least" any svn revision, so we need to return 0.
+ * - If the target version doesn't have an svn revision, any svn revision
+ * (or none at all) is good enough, so return 1.
+ * - If both target and router have an svn revision, we compare them.
+ */
+
+ return tor_version_compare(&router_version, &cutoff_version) >= 0;
+}
+
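+/* Illustrative sketch (not part of the original code): a typical caller
+ * checks a relay's advertised platform against a cutoff version; the cutoff
+ * string below is just an example value.
+ *
+ *   if (tor_version_as_new_as(ri->platform, "0.3.0.1-alpha")) {
+ *     // The relay claims to run at least 0.3.0.1-alpha, or its platform
+ *     // line could not be parsed (in which case we err on the side of
+ *     // "new enough").
+ *   }
+ */
+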
+/** Parse a tor version from <b>s</b>, and store the result in <b>out</b>.
+ * Return 0 on success, -1 on failure. */
+int
+tor_version_parse(const char *s, tor_version_t *out)
+{
+ char *eos=NULL;
+ const char *cp=NULL;
+ int ok = 1;
+ /* Format is:
+ * "Tor " ? NUM dot NUM [ dot NUM [ ( pre | rc | dot ) NUM ] ] [ - tag ]
+ */
+ tor_assert(s);
+ tor_assert(out);
+
+ memset(out, 0, sizeof(tor_version_t));
+ out->status = VER_RELEASE;
+ if (!strcasecmpstart(s, "Tor "))
+ s += 4;
+
+ cp = s;
+
+#define NUMBER(m) \
+ do { \
+ if (!cp || *cp < '0' || *cp > '9') \
+ return -1; \
+ out->m = (int)tor_parse_uint64(cp, 10, 0, INT32_MAX, &ok, &eos); \
+ if (!ok) \
+ return -1; \
+ if (!eos || eos == cp) \
+ return -1; \
+ cp = eos; \
+ } while (0)
+
+#define DOT() \
+ do { \
+ if (*cp != '.') \
+ return -1; \
+ ++cp; \
+ } while (0)
+
+ NUMBER(major);
+ DOT();
+ NUMBER(minor);
+ if (*cp == 0)
+ return 0;
+ else if (*cp == '-')
+ goto status_tag;
+ DOT();
+ NUMBER(micro);
+
+ /* Get status */
+ if (*cp == 0) {
+ return 0;
+ } else if (*cp == '.') {
+ ++cp;
+ } else if (*cp == '-') {
+ goto status_tag;
+ } else if (0==strncmp(cp, "pre", 3)) {
+ out->status = VER_PRE;
+ cp += 3;
+ } else if (0==strncmp(cp, "rc", 2)) {
+ out->status = VER_RC;
+ cp += 2;
+ } else {
+ return -1;
+ }
+
+ NUMBER(patchlevel);
+
+ status_tag:
+ /* Get status tag. */
+ if (*cp == '-' || *cp == '.')
+ ++cp;
+ eos = (char*) find_whitespace(cp);
+ if (eos-cp >= (int)sizeof(out->status_tag))
+ strlcpy(out->status_tag, cp, sizeof(out->status_tag));
+ else {
+ memcpy(out->status_tag, cp, eos-cp);
+ out->status_tag[eos-cp] = 0;
+ }
+ cp = eat_whitespace(eos);
+
+ if (!strcmpstart(cp, "(r")) {
+ cp += 2;
+ out->svn_revision = (int) strtol(cp,&eos,10);
+ } else if (!strcmpstart(cp, "(git-")) {
+ char *close_paren = strchr(cp, ')');
+ int hexlen;
+ char digest[DIGEST_LEN];
+ if (! close_paren)
+ return -1;
+ cp += 5;
+ if (close_paren-cp > HEX_DIGEST_LEN)
+ return -1;
+ hexlen = (int)(close_paren-cp);
+ memwipe(digest, 0, sizeof(digest));
+ if ( hexlen == 0 || (hexlen % 2) == 1)
+ return -1;
+ if (base16_decode(digest, hexlen/2, cp, hexlen) != hexlen/2)
+ return -1;
+ memcpy(out->git_tag, digest, hexlen/2);
+ out->git_tag_len = hexlen/2;
+ }
+
+ return 0;
+#undef NUMBER
+#undef DOT
+}
+
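+/* Illustrative sketch (not part of the original code): the grammar above
+ * accepts strings such as "0.2.9.8" or "Tor 0.4.0.4-rc"; the version used
+ * here is only an example.
+ *
+ *   tor_version_t v;
+ *   if (tor_version_parse("0.4.0.4-rc", &v) == 0) {
+ *     // v.major == 0, v.minor == 4, v.micro == 0, v.patchlevel == 4,
+ *     // and v.status_tag is "rc".
+ *   }
+ */
+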
+/** Compare two tor versions; return <0 if a < b, 0 if a == b, and >0 if
+ * a > b. */
+int
+tor_version_compare(tor_version_t *a, tor_version_t *b)
+{
+ int i;
+ tor_assert(a);
+ tor_assert(b);
+
+ /* We take this approach to comparison to ensure the same (bogus!) behavior
+ * on all inputs as we would have seen before bug #21278 was fixed. The
+ * only important difference here is that this method doesn't cause
+ * a signed integer underflow.
+ */
+#define CMP(field) do { \
+ unsigned aval = (unsigned) a->field; \
+ unsigned bval = (unsigned) b->field; \
+ int result = (int) (aval - bval); \
+ if (result < 0) \
+ return -1; \
+ else if (result > 0) \
+ return 1; \
+ } while (0)
+
+ CMP(major);
+ CMP(minor);
+ CMP(micro);
+ CMP(status);
+ CMP(patchlevel);
+ if ((i = strcmp(a->status_tag, b->status_tag)))
+ return i;
+ CMP(svn_revision);
+ CMP(git_tag_len);
+ if (a->git_tag_len)
+ return fast_memcmp(a->git_tag, b->git_tag, a->git_tag_len);
+ else
+ return 0;
+
+#undef CMP
+}
+
+/** Return true iff versions <b>a</b> and <b>b</b> belong to the same series.
+ */
+int
+tor_version_same_series(tor_version_t *a, tor_version_t *b)
+{
+ tor_assert(a);
+ tor_assert(b);
+ return ((a->major == b->major) &&
+ (a->minor == b->minor) &&
+ (a->micro == b->micro));
+}
+
+/** Helper: Given pointers to two strings describing tor versions, return -1
+ * if _a precedes _b, 1 if _b precedes _a, and 0 if they are equivalent.
+ * Used to sort a list of versions. */
+static int
+compare_tor_version_str_ptr_(const void **_a, const void **_b)
+{
+ const char *a = *_a, *b = *_b;
+ int ca, cb;
+ tor_version_t va, vb;
+ ca = tor_version_parse(a, &va);
+ cb = tor_version_parse(b, &vb);
+ /* If they both parse, compare them. */
+ if (!ca && !cb)
+ return tor_version_compare(&va,&vb);
+ /* If one parses, it comes first. */
+ if (!ca && cb)
+ return -1;
+ if (ca && !cb)
+ return 1;
+ /* If neither parses, compare strings. Also, the directory server admin
+ ** needs to be smacked upside the head. But Tor is tolerant and gentle. */
+ return strcmp(a,b);
+}
+
+/** Sort a list of string-representations of versions in ascending order. */
+void
+sort_version_list(smartlist_t *versions, int remove_duplicates)
+{
+ smartlist_sort(versions, compare_tor_version_str_ptr_);
+
+ if (remove_duplicates)
+ smartlist_uniq(versions, compare_tor_version_str_ptr_, tor_free_);
+}
+
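+/* Illustrative sketch (not part of the original code): sorting a list of
+ * version strings (here split out of an example comma-separated value) and
+ * removing duplicates.
+ *
+ *   smartlist_t *versions = smartlist_new();
+ *   smartlist_split_string(versions, "0.3.3.9,0.2.9.17,0.3.3.9", ",",
+ *                          SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
+ *   sort_version_list(versions, 1); // ascending, duplicates freed
+ *   SMARTLIST_FOREACH(versions, char *, cp, tor_free(cp));
+ *   smartlist_free(versions);
+ */
+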
+/** Parse and validate the ASCII-encoded v2 descriptor in <b>desc</b>,
+ * write the parsed descriptor to the newly allocated *<b>parsed_out</b>, the
+ * binary descriptor ID of length DIGEST_LEN to <b>desc_id_out</b>, the
+ * encrypted introduction points to the newly allocated
+ * *<b>intro_points_encrypted_out</b>, their encrypted size to
+ * *<b>intro_points_encrypted_size_out</b>, the size of the encoded descriptor
+ * to *<b>encoded_size_out</b>, and a pointer to the next descriptor, if any,
+ * to *<b>next_out</b>; return 0 for success (including validation)
+ * and -1 for failure.
+ *
+ * If <b>as_hsdir</b> is 1, we're parsing this as an HSDir, and we should
+ * be strict about time formats.
+ */
+int
+rend_parse_v2_service_descriptor(rend_service_descriptor_t **parsed_out,
+ char *desc_id_out,
+ char **intro_points_encrypted_out,
+ size_t *intro_points_encrypted_size_out,
+ size_t *encoded_size_out,
+ const char **next_out, const char *desc,
+ int as_hsdir)
+{
+ rend_service_descriptor_t *result =
+ tor_malloc_zero(sizeof(rend_service_descriptor_t));
+ char desc_hash[DIGEST_LEN];
+ const char *eos;
+ smartlist_t *tokens = smartlist_new();
+ directory_token_t *tok;
+ char secret_id_part[DIGEST_LEN];
+ int i, version, num_ok=1;
+ smartlist_t *versions;
+ char public_key_hash[DIGEST_LEN];
+ char test_desc_id[DIGEST_LEN];
+ memarea_t *area = NULL;
+ const int strict_time_fmt = as_hsdir;
+
+ tor_assert(desc);
+ /* Check if desc starts correctly. */
+ if (strncmp(desc, "rendezvous-service-descriptor ",
+ strlen("rendezvous-service-descriptor "))) {
+ log_info(LD_REND, "Descriptor does not start correctly.");
+ goto err;
+ }
+ /* Compute descriptor hash for later validation. */
+ if (router_get_hash_impl(desc, strlen(desc), desc_hash,
+ "rendezvous-service-descriptor ",
+ "\nsignature", '\n', DIGEST_SHA1) < 0) {
+ log_warn(LD_REND, "Couldn't compute descriptor hash.");
+ goto err;
+ }
+ /* Determine end of string. */
+ eos = strstr(desc, "\nrendezvous-service-descriptor ");
+ if (!eos)
+ eos = desc + strlen(desc);
+ else
+ eos = eos + 1;
+ /* Check length. */
+ if (eos-desc > REND_DESC_MAX_SIZE) {
+ /* XXXX+ If we are parsing this descriptor as a server, this
+ * should be a protocol warning. */
+ log_warn(LD_REND, "Descriptor length is %d which exceeds "
+ "maximum rendezvous descriptor size of %d bytes.",
+ (int)(eos-desc), REND_DESC_MAX_SIZE);
+ goto err;
+ }
+ /* Tokenize descriptor. */
+ area = memarea_new();
+ if (tokenize_string(area, desc, eos, tokens, desc_token_table, 0)) {
+ log_warn(LD_REND, "Error tokenizing descriptor.");
+ goto err;
+ }
+ /* Set next to next descriptor, if available. */
+ *next_out = eos;
+ /* Set length of encoded descriptor. */
+ *encoded_size_out = eos - desc;
+ /* Check min allowed length of token list. */
+ if (smartlist_len(tokens) < 7) {
+ log_warn(LD_REND, "Impossibly short descriptor.");
+ goto err;
+ }
+ /* Parse base32-encoded descriptor ID. */
+ tok = find_by_keyword(tokens, R_RENDEZVOUS_SERVICE_DESCRIPTOR);
+ tor_assert(tok == smartlist_get(tokens, 0));
+ tor_assert(tok->n_args == 1);
+ if (!rend_valid_descriptor_id(tok->args[0])) {
+ log_warn(LD_REND, "Invalid descriptor ID: '%s'", tok->args[0]);
+ goto err;
+ }
+ if (base32_decode(desc_id_out, DIGEST_LEN,
+ tok->args[0], REND_DESC_ID_V2_LEN_BASE32) < 0) {
+ log_warn(LD_REND, "Descriptor ID contains illegal characters: %s",
+ tok->args[0]);
+ goto err;
+ }
+ /* Parse descriptor version. */
+ tok = find_by_keyword(tokens, R_VERSION);
+ tor_assert(tok->n_args == 1);
+ result->version =
+ (int) tor_parse_long(tok->args[0], 10, 0, INT_MAX, &num_ok, NULL);
+ if (result->version != 2 || !num_ok) {
+ /* If it's <2, it shouldn't be under this format. If the number
+ * is greater than 2, we bumped it because we broke backward
+ * compatibility. See how version numbers in our other formats
+ * work. */
+ log_warn(LD_REND, "Unrecognized descriptor version: %s",
+ escaped(tok->args[0]));
+ goto err;
+ }
+ /* Parse public key. */
+ tok = find_by_keyword(tokens, R_PERMANENT_KEY);
+ result->pk = tok->key;
+ tok->key = NULL; /* Prevent free */
+ /* Parse secret ID part. */
+ tok = find_by_keyword(tokens, R_SECRET_ID_PART);
+ tor_assert(tok->n_args == 1);
+ if (strlen(tok->args[0]) != REND_SECRET_ID_PART_LEN_BASE32 ||
+ strspn(tok->args[0], BASE32_CHARS) != REND_SECRET_ID_PART_LEN_BASE32) {
+ log_warn(LD_REND, "Invalid secret ID part: '%s'", tok->args[0]);
+ goto err;
+ }
+ if (base32_decode(secret_id_part, DIGEST_LEN, tok->args[0], 32) < 0) {
+ log_warn(LD_REND, "Secret ID part contains illegal characters: %s",
+ tok->args[0]);
+ goto err;
+ }
+ /* Parse publication time -- up-to-date check is done when storing the
+ * descriptor. */
+ tok = find_by_keyword(tokens, R_PUBLICATION_TIME);
+ tor_assert(tok->n_args == 1);
+ if (parse_iso_time_(tok->args[0], &result->timestamp,
+ strict_time_fmt, 0) < 0) {
+ log_warn(LD_REND, "Invalid publication time: '%s'", tok->args[0]);
+ goto err;
+ }
+ /* Parse protocol versions. */
+ tok = find_by_keyword(tokens, R_PROTOCOL_VERSIONS);
+ tor_assert(tok->n_args == 1);
+ versions = smartlist_new();
+ smartlist_split_string(versions, tok->args[0], ",",
+ SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
+ for (i = 0; i < smartlist_len(versions); i++) {
+ version = (int) tor_parse_long(smartlist_get(versions, i),
+ 10, 0, INT_MAX, &num_ok, NULL);
+ if (!num_ok) /* It's a string; let's ignore it. */
+ continue;
+ if (version >= REND_PROTOCOL_VERSION_BITMASK_WIDTH)
+ /* Avoid undefined left-shift behaviour. */
+ continue;
+ result->protocols |= 1 << version;
+ }
+ SMARTLIST_FOREACH(versions, char *, cp, tor_free(cp));
+ smartlist_free(versions);
+ /* Parse encrypted introduction points. Don't verify. */
+ tok = find_opt_by_keyword(tokens, R_INTRODUCTION_POINTS);
+ if (tok) {
+ if (strcmp(tok->object_type, "MESSAGE")) {
+ log_warn(LD_DIR, "Bad object type: introduction points should be of "
+ "type MESSAGE");
+ goto err;
+ }
+ *intro_points_encrypted_out = tor_memdup(tok->object_body,
+ tok->object_size);
+ *intro_points_encrypted_size_out = tok->object_size;
+ } else {
+ *intro_points_encrypted_out = NULL;
+ *intro_points_encrypted_size_out = 0;
+ }
+ /* Parse and verify signature. */
+ tok = find_by_keyword(tokens, R_SIGNATURE);
+ if (check_signature_token(desc_hash, DIGEST_LEN, tok, result->pk, 0,
+ "v2 rendezvous service descriptor") < 0)
+ goto err;
+ /* Verify that descriptor ID belongs to public key and secret ID part. */
+ if (crypto_pk_get_digest(result->pk, public_key_hash) < 0) {
+ log_warn(LD_REND, "Unable to compute rend descriptor public key digest");
+ goto err;
+ }
+ rend_get_descriptor_id_bytes(test_desc_id, public_key_hash,
+ secret_id_part);
+ if (tor_memneq(desc_id_out, test_desc_id, DIGEST_LEN)) {
+ log_warn(LD_REND, "Parsed descriptor ID does not match "
+ "computed descriptor ID.");
+ goto err;
+ }
+ goto done;
+ err:
+ rend_service_descriptor_free(result);
+ result = NULL;
+ done:
+ if (tokens) {
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ }
+ if (area)
+ memarea_drop_all(area);
+ *parsed_out = result;
+ if (result)
+ return 0;
+ return -1;
+}
+
+/** Decrypt the encrypted introduction points in <b>ipos_encrypted</b> of
+ * length <b>ipos_encrypted_size</b> using <b>descriptor_cookie</b> and
+ * write the result to a newly allocated string that is pointed to by
+ * <b>ipos_decrypted</b> and its length to <b>ipos_decrypted_size</b>.
+ * Return 0 if decryption was successful and -1 otherwise. */
+int
+rend_decrypt_introduction_points(char **ipos_decrypted,
+ size_t *ipos_decrypted_size,
+ const char *descriptor_cookie,
+ const char *ipos_encrypted,
+ size_t ipos_encrypted_size)
+{
+ tor_assert(ipos_encrypted);
+ tor_assert(descriptor_cookie);
+ if (ipos_encrypted_size < 2) {
+ log_warn(LD_REND, "Size of encrypted introduction points is too "
+ "small.");
+ return -1;
+ }
+ if (ipos_encrypted[0] == (int)REND_BASIC_AUTH) {
+ char iv[CIPHER_IV_LEN], client_id[REND_BASIC_AUTH_CLIENT_ID_LEN],
+ session_key[CIPHER_KEY_LEN], *dec;
+ int declen, client_blocks;
+ size_t pos = 0, len, client_entries_len;
+ crypto_digest_t *digest;
+ crypto_cipher_t *cipher;
+ client_blocks = (int) ipos_encrypted[1];
+ client_entries_len = client_blocks * REND_BASIC_AUTH_CLIENT_MULTIPLE *
+ REND_BASIC_AUTH_CLIENT_ENTRY_LEN;
+ if (ipos_encrypted_size < 2 + client_entries_len + CIPHER_IV_LEN + 1) {
+ log_warn(LD_REND, "Size of encrypted introduction points is too "
+ "small.");
+ return -1;
+ }
+ memcpy(iv, ipos_encrypted + 2 + client_entries_len, CIPHER_IV_LEN);
+ digest = crypto_digest_new();
+ crypto_digest_add_bytes(digest, descriptor_cookie, REND_DESC_COOKIE_LEN);
+ crypto_digest_add_bytes(digest, iv, CIPHER_IV_LEN);
+ crypto_digest_get_digest(digest, client_id,
+ REND_BASIC_AUTH_CLIENT_ID_LEN);
+ crypto_digest_free(digest);
+ for (pos = 2; pos < 2 + client_entries_len;
+ pos += REND_BASIC_AUTH_CLIENT_ENTRY_LEN) {
+ if (tor_memeq(ipos_encrypted + pos, client_id,
+ REND_BASIC_AUTH_CLIENT_ID_LEN)) {
+ /* Attempt to decrypt introduction points. */
+ cipher = crypto_cipher_new(descriptor_cookie);
+ if (crypto_cipher_decrypt(cipher, session_key, ipos_encrypted
+ + pos + REND_BASIC_AUTH_CLIENT_ID_LEN,
+ CIPHER_KEY_LEN) < 0) {
+ log_warn(LD_REND, "Could not decrypt session key for client.");
+ crypto_cipher_free(cipher);
+ return -1;
+ }
+ crypto_cipher_free(cipher);
+
+ len = ipos_encrypted_size - 2 - client_entries_len - CIPHER_IV_LEN;
+ dec = tor_malloc_zero(len + 1);
+ declen = crypto_cipher_decrypt_with_iv(session_key, dec, len,
+ ipos_encrypted + 2 + client_entries_len,
+ ipos_encrypted_size - 2 - client_entries_len);
+
+ if (declen < 0) {
+ log_warn(LD_REND, "Could not decrypt introduction point string.");
+ tor_free(dec);
+ return -1;
+ }
+ if (fast_memcmpstart(dec, declen, "introduction-point ")) {
+ log_warn(LD_REND, "Decrypted introduction points don't "
+ "look like we could parse them.");
+ tor_free(dec);
+ continue;
+ }
+ *ipos_decrypted = dec;
+ *ipos_decrypted_size = declen;
+ return 0;
+ }
+ }
+ log_warn(LD_REND, "Could not decrypt introduction points. Please "
+ "check your authorization for this service!");
+ return -1;
+ } else if (ipos_encrypted[0] == (int)REND_STEALTH_AUTH) {
+ char *dec;
+ int declen;
+ if (ipos_encrypted_size < CIPHER_IV_LEN + 2) {
+ log_warn(LD_REND, "Size of encrypted introduction points is too "
+ "small.");
+ return -1;
+ }
+ dec = tor_malloc_zero(ipos_encrypted_size - CIPHER_IV_LEN - 1 + 1);
+
+ declen = crypto_cipher_decrypt_with_iv(descriptor_cookie, dec,
+ ipos_encrypted_size -
+ CIPHER_IV_LEN - 1,
+ ipos_encrypted + 1,
+ ipos_encrypted_size - 1);
+
+ if (declen < 0) {
+ log_warn(LD_REND, "Decrypting introduction points failed!");
+ tor_free(dec);
+ return -1;
+ }
+ *ipos_decrypted = dec;
+ *ipos_decrypted_size = declen;
+ return 0;
+ } else {
+ log_warn(LD_REND, "Unknown authorization type number: %d",
+ ipos_encrypted[0]);
+ return -1;
+ }
+}
+
+/** Parse the encoded introduction points in <b>intro_points_encoded</b> of
+ * length <b>intro_points_encoded_size</b> and write the result to the
+ * descriptor in <b>parsed</b>; return the number of successfully parsed
+ * introduction points or -1 in case of a failure. */
+int
+rend_parse_introduction_points(rend_service_descriptor_t *parsed,
+ const char *intro_points_encoded,
+ size_t intro_points_encoded_size)
+{
+ const char *current_ipo, *end_of_intro_points;
+ smartlist_t *tokens = NULL;
+ directory_token_t *tok;
+ rend_intro_point_t *intro;
+ extend_info_t *info;
+ int result, num_ok=1;
+ memarea_t *area = NULL;
+ tor_assert(parsed);
+ /** Function may only be invoked once. */
+ tor_assert(!parsed->intro_nodes);
+ if (!intro_points_encoded || intro_points_encoded_size == 0) {
+ log_warn(LD_REND, "Empty or zero size introduction point list");
+ goto err;
+ }
+ /* Consider one intro point after the other. */
+ current_ipo = intro_points_encoded;
+ end_of_intro_points = intro_points_encoded + intro_points_encoded_size;
+ tokens = smartlist_new();
+ parsed->intro_nodes = smartlist_new();
+ area = memarea_new();
+
+ while (!fast_memcmpstart(current_ipo, end_of_intro_points-current_ipo,
+ "introduction-point ")) {
+ /* Determine end of string. */
+ const char *eos = tor_memstr(current_ipo, end_of_intro_points-current_ipo,
+ "\nintroduction-point ");
+ if (!eos)
+ eos = end_of_intro_points;
+ else
+ eos = eos+1;
+ tor_assert(eos <= intro_points_encoded+intro_points_encoded_size);
+ /* Free tokens and clear token list. */
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_clear(tokens);
+ memarea_clear(area);
+ /* Tokenize string. */
+ if (tokenize_string(area, current_ipo, eos, tokens, ipo_token_table, 0)) {
+ log_warn(LD_REND, "Error tokenizing introduction point");
+ goto err;
+ }
+ /* Advance to next introduction point, if available. */
+ current_ipo = eos;
+ /* Check minimum allowed length of introduction point. */
+ if (smartlist_len(tokens) < 5) {
+ log_warn(LD_REND, "Impossibly short introduction point.");
+ goto err;
+ }
+ /* Allocate new intro point and extend info. */
+ intro = tor_malloc_zero(sizeof(rend_intro_point_t));
+ info = intro->extend_info = tor_malloc_zero(sizeof(extend_info_t));
+ /* Parse identifier. */
+ tok = find_by_keyword(tokens, R_IPO_IDENTIFIER);
+ if (base32_decode(info->identity_digest, DIGEST_LEN,
+ tok->args[0], REND_INTRO_POINT_ID_LEN_BASE32) < 0) {
+ log_warn(LD_REND, "Identity digest contains illegal characters: %s",
+ tok->args[0]);
+ rend_intro_point_free(intro);
+ goto err;
+ }
+ /* Write identifier to nickname. */
+ info->nickname[0] = '$';
+ base16_encode(info->nickname + 1, sizeof(info->nickname) - 1,
+ info->identity_digest, DIGEST_LEN);
+ /* Parse IP address. */
+ tok = find_by_keyword(tokens, R_IPO_IP_ADDRESS);
+ if (tor_addr_parse(&info->addr, tok->args[0])<0) {
+ log_warn(LD_REND, "Could not parse introduction point address.");
+ rend_intro_point_free(intro);
+ goto err;
+ }
+ if (tor_addr_family(&info->addr) != AF_INET) {
+ log_warn(LD_REND, "Introduction point address was not ipv4.");
+ rend_intro_point_free(intro);
+ goto err;
+ }
+
+ /* Parse onion port. */
+ tok = find_by_keyword(tokens, R_IPO_ONION_PORT);
+ info->port = (uint16_t) tor_parse_long(tok->args[0],10,1,65535,
+ &num_ok,NULL);
+ if (!info->port || !num_ok) {
+ log_warn(LD_REND, "Introduction point onion port %s is invalid",
+ escaped(tok->args[0]));
+ rend_intro_point_free(intro);
+ goto err;
+ }
+ /* Parse onion key. */
+ tok = find_by_keyword(tokens, R_IPO_ONION_KEY);
+ if (!crypto_pk_public_exponent_ok(tok->key)) {
+ log_warn(LD_REND,
+ "Introduction point's onion key had invalid exponent.");
+ rend_intro_point_free(intro);
+ goto err;
+ }
+ info->onion_key = tok->key;
+ tok->key = NULL; /* Prevent free */
+ /* Parse service key. */
+ tok = find_by_keyword(tokens, R_IPO_SERVICE_KEY);
+ if (!crypto_pk_public_exponent_ok(tok->key)) {
+ log_warn(LD_REND,
+ "Introduction point key had invalid exponent.");
+ rend_intro_point_free(intro);
+ goto err;
+ }
+ intro->intro_key = tok->key;
+ tok->key = NULL; /* Prevent free */
+ /* Add extend info to list of introduction points. */
+ smartlist_add(parsed->intro_nodes, intro);
+ }
+ result = smartlist_len(parsed->intro_nodes);
+ goto done;
+
+ err:
+ result = -1;
+
+ done:
+ /* Free tokens and clear token list. */
+ if (tokens) {
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ }
+ if (area)
+ memarea_drop_all(area);
+
+ return result;
+}
+
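+/* Illustrative sketch (not part of the original code): a client holding the
+ * descriptor cookie for an authorized service would typically chain the two
+ * functions above; "descriptor_cookie", "ipos_encrypted",
+ * "ipos_encrypted_size" and "parsed" are assumed to come from earlier steps.
+ *
+ *   char *ipos_plain = NULL;
+ *   size_t ipos_plain_len = 0;
+ *   if (rend_decrypt_introduction_points(&ipos_plain, &ipos_plain_len,
+ *                                        descriptor_cookie, ipos_encrypted,
+ *                                        ipos_encrypted_size) == 0) {
+ *     int n_intro = rend_parse_introduction_points(parsed, ipos_plain,
+ *                                                  ipos_plain_len);
+ *     tor_free(ipos_plain);
+ *     // n_intro is the number of parsed introduction points, or -1.
+ *   }
+ */
+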
+/** Parse the content of a client_key file in <b>ckstr</b> and add
+ * rend_authorized_client_t's for each parsed client to
+ * <b>parsed_clients</b>. Return the number of parsed clients as result
+ * or -1 for failure. */
+int
+rend_parse_client_keys(strmap_t *parsed_clients, const char *ckstr)
+{
+ int result = -1;
+ smartlist_t *tokens;
+ directory_token_t *tok;
+ const char *current_entry = NULL;
+ memarea_t *area = NULL;
+ char *err_msg = NULL;
+ if (!ckstr || strlen(ckstr) == 0)
+ return -1;
+ tokens = smartlist_new();
+ /* Begin parsing with first entry, skipping comments or whitespace at the
+ * beginning. */
+ area = memarea_new();
+ current_entry = eat_whitespace(ckstr);
+ while (!strcmpstart(current_entry, "client-name ")) {
+ rend_authorized_client_t *parsed_entry;
+ /* Determine end of string. */
+ const char *eos = strstr(current_entry, "\nclient-name ");
+ if (!eos)
+ eos = current_entry + strlen(current_entry);
+ else
+ eos = eos + 1;
+ /* Free tokens and clear token list. */
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_clear(tokens);
+ memarea_clear(area);
+ /* Tokenize string. */
+ if (tokenize_string(area, current_entry, eos, tokens,
+ client_keys_token_table, 0)) {
+ log_warn(LD_REND, "Error tokenizing client keys file.");
+ goto err;
+ }
+ /* Advance to next entry, if available. */
+ current_entry = eos;
+ /* Check minimum allowed length of token list. */
+ if (smartlist_len(tokens) < 2) {
+ log_warn(LD_REND, "Impossibly short client key entry.");
+ goto err;
+ }
+ /* Parse client name. */
+ tok = find_by_keyword(tokens, C_CLIENT_NAME);
+ tor_assert(tok == smartlist_get(tokens, 0));
+ tor_assert(tok->n_args == 1);
+
+ if (!rend_valid_client_name(tok->args[0])) {
+ log_warn(LD_CONFIG, "Illegal client name: %s. (Length must be "
+ "between 1 and %d, and valid characters are "
+ "[A-Za-z0-9+-_].)", tok->args[0], REND_CLIENTNAME_MAX_LEN);
+ goto err;
+ }
+ /* Check if client name is duplicate. */
+ if (strmap_get(parsed_clients, tok->args[0])) {
+ log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains a "
+ "duplicate client name: '%s'. Ignoring.", tok->args[0]);
+ goto err;
+ }
+ parsed_entry = tor_malloc_zero(sizeof(rend_authorized_client_t));
+ parsed_entry->client_name = tor_strdup(tok->args[0]);
+ strmap_set(parsed_clients, parsed_entry->client_name, parsed_entry);
+ /* Parse client key. */
+ tok = find_opt_by_keyword(tokens, C_CLIENT_KEY);
+ if (tok) {
+ parsed_entry->client_key = tok->key;
+ tok->key = NULL; /* Prevent free */
+ }
+
+ /* Parse descriptor cookie. */
+ tok = find_by_keyword(tokens, C_DESCRIPTOR_COOKIE);
+ tor_assert(tok->n_args == 1);
+ if (rend_auth_decode_cookie(tok->args[0], parsed_entry->descriptor_cookie,
+ NULL, &err_msg) < 0) {
+ tor_assert(err_msg);
+ log_warn(LD_REND, "%s", err_msg);
+ tor_free(err_msg);
+ goto err;
+ }
+ }
+ result = strmap_size(parsed_clients);
+ goto done;
+ err:
+ result = -1;
+ done:
+ /* Free tokens and clear token list. */
+ SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
+ smartlist_free(tokens);
+ if (area)
+ memarea_drop_all(area);
+ return result;
+}
+
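+/* Illustrative sketch (not part of the original code): the parser above
+ * expects one or more entries of roughly the following shape; the name and
+ * key material are placeholders, and the client-key object is optional.
+ *
+ *   client-name alice
+ *   descriptor-cookie <base64-encoded cookie>
+ *   client-key
+ *   -----BEGIN RSA PRIVATE KEY-----
+ *   ...
+ *   -----END RSA PRIVATE KEY-----
+ */
+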
+/** Called on startup; right now we just handle scanning the unparseable
+ * descriptor dumps, but hang anything else we might need to do in the
+ * future here as well.
+ */
+void
+routerparse_init(void)
+{
+ /*
+ * Check both if the sandbox is active and whether it's configured; no
+ * point in loading all that if we won't be able to use it after the
+ * sandbox becomes active.
+ */
+ if (!(sandbox_is_active() || get_options()->Sandbox)) {
+ dump_desc_init();
+ }
+}
+
+/** Clean up all data structures used by routerparse.c at exit */
+void
+routerparse_free_all(void)
+{
+ dump_desc_fifo_cleanup();
+}
diff --git a/src/feature/nodelist/routerparse.h b/src/feature/nodelist/routerparse.h
new file mode 100644
index 0000000000..87c2a75aa5
--- /dev/null
+++ b/src/feature/nodelist/routerparse.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file routerparse.h
+ * \brief Header file for routerparse.c.
+ **/
+
+#ifndef TOR_ROUTERPARSE_H
+#define TOR_ROUTERPARSE_H
+
+/** Possible statuses of a version of Tor, given opinions from the directory
+ * servers. */
+typedef enum version_status_t {
+ VS_RECOMMENDED=0, /**< This version is listed as recommended. */
+ VS_OLD=1, /**< This version is older than any recommended version. */
+ VS_NEW=2, /**< This version is newer than any recommended version. */
+ VS_NEW_IN_SERIES=3, /**< This version is newer than any recommended version
+ * in its series, but later recommended versions exist.
+ */
+ VS_UNRECOMMENDED=4, /**< This version is not recommended (general case). */
+ VS_EMPTY=5, /**< The version list was empty; no agreed-on versions. */
+ VS_UNKNOWN, /**< We have no idea. */
+} version_status_t;
+
+enum networkstatus_type_t;
+
+int router_get_router_hash(const char *s, size_t s_len, char *digest);
+int router_get_dir_hash(const char *s, char *digest);
+int router_get_networkstatus_v3_hashes(const char *s,
+ common_digests_t *digests);
+int router_get_networkstatus_v3_signed_boundaries(const char *s,
+ const char **start_out,
+ const char **end_out);
+int router_get_networkstatus_v3_sha3_as_signed(uint8_t *digest_out,
+ const char *s);
+int router_get_extrainfo_hash(const char *s, size_t s_len, char *digest);
+#define DIROBJ_MAX_SIG_LEN 256
+char *router_get_dirobj_signature(const char *digest,
+ size_t digest_len,
+ const crypto_pk_t *private_key);
+int router_append_dirobj_signature(char *buf, size_t buf_len,
+ const char *digest,
+ size_t digest_len,
+ crypto_pk_t *private_key);
+int router_parse_list_from_string(const char **s, const char *eos,
+ smartlist_t *dest,
+ saved_location_t saved_location,
+ int is_extrainfo,
+ int allow_annotations,
+ const char *prepend_annotations,
+ smartlist_t *invalid_digests_out);
+
+routerinfo_t *router_parse_entry_from_string(const char *s, const char *end,
+ int cache_copy,
+ int allow_annotations,
+ const char *prepend_annotations,
+ int *can_dl_again_out);
+struct digest_ri_map_t;
+extrainfo_t *extrainfo_parse_entry_from_string(const char *s, const char *end,
+ int cache_copy, struct digest_ri_map_t *routermap,
+ int *can_dl_again_out);
+MOCK_DECL(addr_policy_t *, router_parse_addr_policy_item_from_string,
+ (const char *s, int assume_action, int *malformed_list));
+version_status_t tor_version_is_obsolete(const char *myversion,
+ const char *versionlist);
+int tor_version_parse_platform(const char *platform,
+ tor_version_t *version_out,
+ int strict);
+int tor_version_as_new_as(const char *platform, const char *cutoff);
+int tor_version_parse(const char *s, tor_version_t *out);
+int tor_version_compare(tor_version_t *a, tor_version_t *b);
+int tor_version_same_series(tor_version_t *a, tor_version_t *b);
+void sort_version_list(smartlist_t *lst, int remove_duplicates);
+void assert_addr_policy_ok(smartlist_t *t);
+void dump_distinct_digest_count(int severity);
+
+int compare_vote_routerstatus_entries(const void **_a, const void **_b);
+int networkstatus_verify_bw_weights(networkstatus_t *ns, int);
+networkstatus_t *networkstatus_parse_vote_from_string(const char *s,
+ const char **eos_out,
+ enum networkstatus_type_t ns_type);
+ns_detached_signatures_t *networkstatus_parse_detached_signatures(
+ const char *s, const char *eos);
+
+smartlist_t *microdescs_parse_from_string(const char *s, const char *eos,
+ int allow_annotations,
+ saved_location_t where,
+ smartlist_t *invalid_digests_out);
+
+authority_cert_t *authority_cert_parse_from_string(const char *s,
+ const char **end_of_string);
+int rend_parse_v2_service_descriptor(rend_service_descriptor_t **parsed_out,
+ char *desc_id_out,
+ char **intro_points_encrypted_out,
+ size_t *intro_points_encrypted_size_out,
+ size_t *encoded_size_out,
+ const char **next_out, const char *desc,
+ int as_hsdir);
+int rend_decrypt_introduction_points(char **ipos_decrypted,
+ size_t *ipos_decrypted_size,
+ const char *descriptor_cookie,
+ const char *ipos_encrypted,
+ size_t ipos_encrypted_size);
+int rend_parse_introduction_points(rend_service_descriptor_t *parsed,
+ const char *intro_points_encoded,
+ size_t intro_points_encoded_size);
+int rend_parse_client_keys(strmap_t *parsed_clients, const char *str);
+
+void routerparse_init(void);
+void routerparse_free_all(void);
+
+#ifdef ROUTERPARSE_PRIVATE
+/*
+ * One entry in the list of dumped descriptors; filename dumped to, length,
+ * SHA-256 and timestamp.
+ */
+
+typedef struct {
+ char *filename;
+ size_t len;
+ uint8_t digest_sha256[DIGEST256_LEN];
+ time_t when;
+} dumped_desc_t;
+
+EXTERN(uint64_t, len_descs_dumped)
+EXTERN(smartlist_t *, descs_dumped)
+STATIC int routerstatus_parse_guardfraction(const char *guardfraction_str,
+ networkstatus_t *vote,
+ vote_routerstatus_t *vote_rs,
+ routerstatus_t *rs);
+MOCK_DECL(STATIC dumped_desc_t *, dump_desc_populate_one_file,
+ (const char *dirname, const char *f));
+STATIC void dump_desc_populate_fifo_from_directory(const char *dirname);
+STATIC void dump_desc_fifo_cleanup(void);
+struct memarea_t;
+STATIC routerstatus_t *routerstatus_parse_entry_from_string(
+ struct memarea_t *area,
+ const char **s, smartlist_t *tokens,
+ networkstatus_t *vote,
+ vote_routerstatus_t *vote_rs,
+ int consensus_method,
+ consensus_flavor_t flav);
+MOCK_DECL(STATIC void,dump_desc,(const char *desc, const char *type));
+MOCK_DECL(STATIC int, router_compute_hash_final,(char *digest,
+ const char *start, size_t len,
+ digest_algorithm_t alg));
+MOCK_DECL(STATIC int, signed_digest_equals,
+ (const uint8_t *d1, const uint8_t *d2, size_t len));
+
+STATIC void summarize_protover_flags(protover_summary_flags_t *out,
+ const char *protocols,
+ const char *version);
+#endif /* defined(ROUTERPARSE_PRIVATE) */
+
+#define ED_DESC_SIGNATURE_PREFIX "Tor router descriptor signature v1"
+
+#endif /* !defined(TOR_ROUTERPARSE_H) */
diff --git a/src/feature/nodelist/routerset.c b/src/feature/nodelist/routerset.c
new file mode 100644
index 0000000000..285ef9d821
--- /dev/null
+++ b/src/feature/nodelist/routerset.c
@@ -0,0 +1,463 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file routerset.c
+ *
+ * \brief Functions and structures to handle set-type selection of routers
+ * by name, ID, address, etc.
+ *
+ * This module implements the routerset_t data structure, whose purpose
+ * is to specify a set of relays based on a list of their identities or
+ * properties. Routersets can restrict relays by IP address mask,
+ * identity fingerprint, country codes, and nicknames (deprecated).
+ *
+ * Routersets are typically used for user-specified restrictions, and
+ * are created by invoking routerset_new and routerset_parse from
+ * config.c and confparse.c. To use a routerset, invoke one of the
+ * routerset_contains_...() functions, or use
+ * routerset_get_all_nodes() / routerset_subtract_nodes() to
+ * manipulate a smartlist of node_t pointers.
+ *
+ * Country-code restrictions are implemented in geoip.c.
+ */
+
+#define ROUTERSET_PRIVATE
+
+#include "or/or.h"
+#include "or/bridges.h"
+#include "or/geoip.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/router.h"
+#include "or/routerparse.h"
+#include "or/routerset.h"
+
+#include "or/addr_policy_st.h"
+#include "or/extend_info_st.h"
+#include "or/node_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerstatus_st.h"
+
+/** Return a new empty routerset. */
+routerset_t *
+routerset_new(void)
+{
+ routerset_t *result = tor_malloc_zero(sizeof(routerset_t));
+ result->list = smartlist_new();
+ result->names = strmap_new();
+ result->digests = digestmap_new();
+ result->policies = smartlist_new();
+ result->country_names = smartlist_new();
+ return result;
+}
+
+/** If <b>c</b> is a country code in the form {cc}, return a newly allocated
+ * string holding the "cc" part. Else, return NULL. */
+STATIC char *
+routerset_get_countryname(const char *c)
+{
+ char *country;
+
+ if (strlen(c) < 4 || c[0] !='{' || c[3] !='}')
+ return NULL;
+
+ country = tor_strndup(c+1, 2);
+ tor_strlower(country);
+ return country;
+}
+
+/** Update the routerset's <b>countries</b> bitarray_t. Called whenever
+ * the GeoIP IPv4 database is reloaded.
+ */
+void
+routerset_refresh_countries(routerset_t *target)
+{
+ int cc;
+ bitarray_free(target->countries);
+
+ if (!geoip_is_loaded(AF_INET)) {
+ target->countries = NULL;
+ target->n_countries = 0;
+ return;
+ }
+ target->n_countries = geoip_get_n_countries();
+ target->countries = bitarray_init_zero(target->n_countries);
+ SMARTLIST_FOREACH_BEGIN(target->country_names, const char *, country) {
+ cc = geoip_get_country(country);
+ if (cc >= 0) {
+ tor_assert(cc < target->n_countries);
+ bitarray_set(target->countries, cc);
+ } else {
+ log_warn(LD_CONFIG, "Country code '%s' is not recognized.",
+ country);
+ }
+ } SMARTLIST_FOREACH_END(country);
+}
+
+/** Parse the string <b>s</b> to create a set of routerset entries, and add
+ * them to <b>target</b>. In log messages, refer to the string as
+ * <b>description</b>. Return 0 on success, -1 on failure.
+ *
+ * Four kinds of elements are allowed in routersets: nicknames, IP address
+ * patterns, fingerprints, and country codes in {cc} form. They may be
+ * surrounded by optional space, and must be separated by commas.
+ */
+int
+routerset_parse(routerset_t *target, const char *s, const char *description)
+{
+ int r = 0;
+ int added_countries = 0;
+ char *countryname;
+ smartlist_t *list = smartlist_new();
+ int malformed_list;
+ smartlist_split_string(list, s, ",",
+ SPLIT_SKIP_SPACE | SPLIT_IGNORE_BLANK, 0);
+ SMARTLIST_FOREACH_BEGIN(list, char *, nick) {
+ addr_policy_t *p;
+ /* if it doesn't pass our validation, assume it's malformed */
+ malformed_list = 1;
+ if (is_legal_hexdigest(nick)) {
+ char d[DIGEST_LEN];
+ if (*nick == '$')
+ ++nick;
+ log_debug(LD_CONFIG, "Adding identity %s to %s", nick, description);
+ base16_decode(d, sizeof(d), nick, HEX_DIGEST_LEN);
+ digestmap_set(target->digests, d, (void*)1);
+ } else if (is_legal_nickname(nick)) {
+ log_debug(LD_CONFIG, "Adding nickname %s to %s", nick, description);
+ strmap_set_lc(target->names, nick, (void*)1);
+ } else if ((countryname = routerset_get_countryname(nick)) != NULL) {
+ log_debug(LD_CONFIG, "Adding country %s to %s", nick,
+ description);
+ smartlist_add(target->country_names, countryname);
+ added_countries = 1;
+ } else if ((strchr(nick,'.') || strchr(nick, ':') || strchr(nick, '*'))
+ && (p = router_parse_addr_policy_item_from_string(
+ nick, ADDR_POLICY_REJECT,
+ &malformed_list))) {
+ /* IPv4 addresses contain '.', IPv6 addresses contain ':',
+ * and wildcard addresses contain '*'. */
+ log_debug(LD_CONFIG, "Adding address %s to %s", nick, description);
+ smartlist_add(target->policies, p);
+ } else if (malformed_list) {
+ log_warn(LD_CONFIG, "Entry '%s' in %s is malformed. Discarding entire"
+ " list.", nick, description);
+ r = -1;
+ tor_free(nick);
+ SMARTLIST_DEL_CURRENT(list, nick);
+ } else {
+ log_notice(LD_CONFIG, "Entry '%s' in %s is ignored. Using the"
+ " remainder of the list.", nick, description);
+ tor_free(nick);
+ SMARTLIST_DEL_CURRENT(list, nick);
+ }
+ } SMARTLIST_FOREACH_END(nick);
+ policy_expand_unspec(&target->policies);
+ smartlist_add_all(target->list, list);
+ smartlist_free(list);
+ if (added_countries)
+ routerset_refresh_countries(target);
+ return r;
+}
+
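+/* Illustrative sketch (not part of the original code): building a routerset
+ * from a config-style string; the nickname, country, address pattern and
+ * fingerprint below are example values only.
+ *
+ *   routerset_t *rs = routerset_new();
+ *   if (routerset_parse(rs,
+ *                       "somenickname, {de}, 192.0.2.0/24, "
+ *                       "$FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ *                       "ExcludeNodes") < 0) {
+ *     // At least one entry was malformed; callers usually treat this as a
+ *     // configuration error.
+ *   }
+ *   routerset_free(rs);
+ */
+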
+/** Add all members of the set <b>source</b> to <b>target</b>. */
+void
+routerset_union(routerset_t *target, const routerset_t *source)
+{
+ char *s;
+ tor_assert(target);
+ if (!source || !source->list)
+ return;
+ s = routerset_to_string(source);
+ routerset_parse(target, s, "other routerset");
+ tor_free(s);
+}
+
+/** Return true iff <b>set</b> lists only nicknames and digests, and includes
+ * no IP ranges or countries. */
+int
+routerset_is_list(const routerset_t *set)
+{
+ return smartlist_len(set->country_names) == 0 &&
+ smartlist_len(set->policies) == 0;
+}
+
+/** Return true iff we need a GeoIP IP-to-country database to make sense of
+ * <b>set</b>. */
+int
+routerset_needs_geoip(const routerset_t *set)
+{
+ return set && smartlist_len(set->country_names);
+}
+
+/** Return true iff there are no entries in <b>set</b>. */
+int
+routerset_is_empty(const routerset_t *set)
+{
+ return !set || smartlist_len(set->list) == 0;
+}
+
+/** Return the number of entries in <b>set</b>. This does NOT return a
+ * negative value. */
+int
+routerset_len(const routerset_t *set)
+{
+ if (!set) {
+ return 0;
+ }
+ return smartlist_len(set->list);
+}
+
+/** Helper. Return true iff <b>set</b> contains a router based on the other
+ * provided fields. Return higher values for more specific subentries: a
+ * single router is more specific than an address range of routers, which is
+ * more specific in turn than a country code.
+ *
+ * (If country is -1, then we take the country
+ * from addr.) */
+STATIC int
+routerset_contains(const routerset_t *set, const tor_addr_t *addr,
+ uint16_t orport,
+ const char *nickname, const char *id_digest,
+ country_t country)
+{
+ if (!set || !set->list)
+ return 0;
+ if (nickname && strmap_get_lc(set->names, nickname))
+ return 4;
+ if (id_digest && digestmap_get(set->digests, id_digest))
+ return 4;
+ if (addr && compare_tor_addr_to_addr_policy(addr, orport, set->policies)
+ == ADDR_POLICY_REJECTED)
+ return 3;
+ if (set->countries) {
+ if (country < 0 && addr)
+ country = geoip_get_country_by_addr(addr);
+
+ if (country >= 0 && country < set->n_countries &&
+ bitarray_is_set(set->countries, country))
+ return 2;
+ }
+ return 0;
+}
+
+/** If *<b>setp</b> includes at least one country code, or if
+ * <b>only_if_some_cc_set</b> is 0, add the ?? and A1 country codes to
+ * *<b>setp</b>, creating it as needed. Return true iff *<b>setp</b> changed.
+ */
+int
+routerset_add_unknown_ccs(routerset_t **setp, int only_if_some_cc_set)
+{
+ routerset_t *set;
+ int add_unknown, add_a1;
+ if (only_if_some_cc_set) {
+ if (!*setp || smartlist_len((*setp)->country_names) == 0)
+ return 0;
+ }
+ if (!*setp)
+ *setp = routerset_new();
+
+ set = *setp;
+
+ add_unknown = ! smartlist_contains_string_case(set->country_names, "??") &&
+ geoip_get_country("??") >= 0;
+ add_a1 = ! smartlist_contains_string_case(set->country_names, "a1") &&
+ geoip_get_country("A1") >= 0;
+
+ if (add_unknown) {
+ smartlist_add_strdup(set->country_names, "??");
+ smartlist_add_strdup(set->list, "{??}");
+ }
+ if (add_a1) {
+ smartlist_add_strdup(set->country_names, "a1");
+ smartlist_add_strdup(set->list, "{a1}");
+ }
+
+ if (add_unknown || add_a1) {
+ routerset_refresh_countries(set);
+ return 1;
+ }
+ return 0;
+}
+
+/** Return true iff we can tell that <b>ei</b> is a member of <b>set</b>. */
+int
+routerset_contains_extendinfo(const routerset_t *set, const extend_info_t *ei)
+{
+ return routerset_contains(set,
+ &ei->addr,
+ ei->port,
+ ei->nickname,
+ ei->identity_digest,
+ -1 /*country*/);
+}
+
+/** Return true iff <b>ri</b> is in <b>set</b>. If country is <b>-1</b>, we
+ * look up the country. */
+int
+routerset_contains_router(const routerset_t *set, const routerinfo_t *ri,
+ country_t country)
+{
+ tor_addr_t addr;
+ tor_addr_from_ipv4h(&addr, ri->addr);
+ return routerset_contains(set,
+ &addr,
+ ri->or_port,
+ ri->nickname,
+ ri->cache_info.identity_digest,
+ country);
+}
+
+/** Return true iff <b>rs</b> is in <b>set</b>. If country is <b>-1</b>, we
+ * look up the country. */
+int
+routerset_contains_routerstatus(const routerset_t *set,
+ const routerstatus_t *rs,
+ country_t country)
+{
+ tor_addr_t addr;
+ tor_addr_from_ipv4h(&addr, rs->addr);
+ return routerset_contains(set,
+ &addr,
+ rs->or_port,
+ rs->nickname,
+ rs->identity_digest,
+ country);
+}
+
+/** Return true iff <b>node</b> is in <b>set</b>. */
+int
+routerset_contains_node(const routerset_t *set, const node_t *node)
+{
+ if (node->rs)
+ return routerset_contains_routerstatus(set, node->rs, node->country);
+ else if (node->ri)
+ return routerset_contains_router(set, node->ri, node->country);
+ else
+ return 0;
+}
+
+/** Return true iff <b>routerset</b> contains the bridge <b>bridge</b>. */
+int
+routerset_contains_bridge(const routerset_t *set, const bridge_info_t *bridge)
+{
+ const char *id = (const char*)bridge_get_rsa_id_digest(bridge);
+ const tor_addr_port_t *addrport = bridge_get_addr_port(bridge);
+
+ tor_assert(addrport);
+ return routerset_contains(set, &addrport->addr, addrport->port,
+ NULL, id, -1);
+}
+
+/** Add every known node_t that is a member of <b>routerset</b> to
+ * <b>out</b>, but never add any that are part of <b>excludeset</b>.
+ * If <b>running_only</b>, only add the running ones. */
+void
+routerset_get_all_nodes(smartlist_t *out, const routerset_t *routerset,
+ const routerset_t *excludeset, int running_only)
+{
+ tor_assert(out);
+ if (!routerset || !routerset->list)
+ return;
+
+ if (routerset_is_list(routerset)) {
+ /* No routers are specified by type; all are given by name or digest.
+ * we can do a lookup in O(len(routerset)). */
+ SMARTLIST_FOREACH(routerset->list, const char *, name, {
+ const node_t *node = node_get_by_nickname(name, 0);
+ if (node) {
+ if (!running_only || node->is_running)
+ if (!routerset_contains_node(excludeset, node))
+ smartlist_add(out, (void*)node);
+ }
+ });
+ } else {
+ /* We need to iterate over the routerlist to get all the ones of the
+ * right kind. */
+ smartlist_t *nodes = nodelist_get_list();
+ SMARTLIST_FOREACH(nodes, const node_t *, node, {
+ if (running_only && !node->is_running)
+ continue;
+ if (routerset_contains_node(routerset, node) &&
+ !routerset_contains_node(excludeset, node))
+ smartlist_add(out, (void*)node);
+ });
+ }
+}
+
+/** Remove every node_t from <b>lst</b> that is in <b>routerset</b>. */
+void
+routerset_subtract_nodes(smartlist_t *lst, const routerset_t *routerset)
+{
+ tor_assert(lst);
+ if (!routerset)
+ return;
+ SMARTLIST_FOREACH(lst, const node_t *, node, {
+ if (routerset_contains_node(routerset, node)) {
+ //log_debug(LD_DIR, "Subtracting %s",r->nickname);
+ SMARTLIST_DEL_CURRENT(lst, node);
+ }
+ });
+}
+
+/** Return a new string that when parsed by routerset_parse() will
+ * yield <b>set</b>. */
+char *
+routerset_to_string(const routerset_t *set)
+{
+ if (!set || !set->list)
+ return tor_strdup("");
+ return smartlist_join_strings(set->list, ",", 0, NULL);
+}
+
+/** Helper: return true iff old and new are both NULL, or both non-NULL
+ * equal routersets. */
+int
+routerset_equal(const routerset_t *old, const routerset_t *new)
+{
+ if (routerset_is_empty(old) && routerset_is_empty(new)) {
+ /* Two empty sets are equal */
+ return 1;
+ } else if (routerset_is_empty(old) || routerset_is_empty(new)) {
+ /* An empty set is equal to nothing else. */
+ return 0;
+ }
+ tor_assert(old != NULL);
+ tor_assert(new != NULL);
+
+ if (smartlist_len(old->list) != smartlist_len(new->list))
+ return 0;
+
+ SMARTLIST_FOREACH(old->list, const char *, cp1, {
+ const char *cp2 = smartlist_get(new->list, cp1_sl_idx);
+ if (strcmp(cp1, cp2))
+ return 0;
+ });
+
+ return 1;
+}
+
+/** Free all storage held in <b>routerset</b>. */
+void
+routerset_free_(routerset_t *routerset)
+{
+ if (!routerset)
+ return;
+
+ SMARTLIST_FOREACH(routerset->list, char *, cp, tor_free(cp));
+ smartlist_free(routerset->list);
+ SMARTLIST_FOREACH(routerset->policies, addr_policy_t *, p,
+ addr_policy_free(p));
+ smartlist_free(routerset->policies);
+ SMARTLIST_FOREACH(routerset->country_names, char *, cp, tor_free(cp));
+ smartlist_free(routerset->country_names);
+
+ strmap_free(routerset->names, NULL);
+ digestmap_free(routerset->digests, NULL);
+ bitarray_free(routerset->countries);
+ tor_free(routerset);
+}
diff --git a/src/feature/nodelist/routerset.h b/src/feature/nodelist/routerset.h
new file mode 100644
index 0000000000..8a13ca042a
--- /dev/null
+++ b/src/feature/nodelist/routerset.h
@@ -0,0 +1,89 @@
+/* Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file routerset.h
+ * \brief Header file for routerset.c
+ **/
+
+#ifndef TOR_ROUTERSET_H
+#define TOR_ROUTERSET_H
+
+routerset_t *routerset_new(void);
+void routerset_refresh_countries(routerset_t *rs);
+int routerset_parse(routerset_t *target, const char *s,
+ const char *description);
+void routerset_union(routerset_t *target, const routerset_t *source);
+int routerset_is_list(const routerset_t *set);
+int routerset_needs_geoip(const routerset_t *set);
+int routerset_is_empty(const routerset_t *set);
+int routerset_contains_router(const routerset_t *set, const routerinfo_t *ri,
+ country_t country);
+int routerset_contains_routerstatus(const routerset_t *set,
+ const routerstatus_t *rs,
+ country_t country);
+int routerset_contains_extendinfo(const routerset_t *set,
+ const extend_info_t *ei);
+struct bridge_info_t;
+int routerset_contains_bridge(const routerset_t *set,
+ const struct bridge_info_t *bridge);
+int routerset_contains_node(const routerset_t *set, const node_t *node);
+
+void routerset_get_all_nodes(smartlist_t *out, const routerset_t *routerset,
+ const routerset_t *excludeset,
+ int running_only);
+int routerset_add_unknown_ccs(routerset_t **setp, int only_if_some_cc_set);
+void routerset_subtract_nodes(smartlist_t *out,
+ const routerset_t *routerset);
+
+char *routerset_to_string(const routerset_t *routerset);
+int routerset_equal(const routerset_t *old, const routerset_t *new);
+void routerset_free_(routerset_t *routerset);
+#define routerset_free(rs) FREE_AND_NULL(routerset_t, routerset_free_, (rs))
+int routerset_len(const routerset_t *set);
+
+#ifdef ROUTERSET_PRIVATE
+#include "lib/container/bitarray.h"
+
+STATIC char * routerset_get_countryname(const char *c);
+STATIC int routerset_contains(const routerset_t *set, const tor_addr_t *addr,
+ uint16_t orport,
+ const char *nickname, const char *id_digest,
+ country_t country);
+
+/** A routerset specifies constraints on a set of possible routerinfos, based
+ * on their names, identities, or addresses. It is optimized for determining
+ * whether a router is a member or not, in O(1+P) time, where P is the number
+ * of address policy constraints. */
+struct routerset_t {
+ /** A list of strings for the elements of the policy. Each string is either
+ * a nickname, a hexadecimal identity fingerprint, or an address policy. A
+ * router belongs to the set if its nickname OR its identity OR its address
+ * matches an entry here. */
+ smartlist_t *list;
+ /** A map from lowercase nicknames of routers in the set to (void*)1 */
+ strmap_t *names;
+ /** A map from identity digests of routers in the set to (void*)1 */
+ digestmap_t *digests;
+ /** An address policy for routers in the set. For implementation reasons,
+ * a router belongs to the set if it is _rejected_ by this policy. */
+ smartlist_t *policies;
+
+ /** A human-readable description of what this routerset is for. Used in
+ * log messages. */
+ char *description;
+
+ /** A list of the country codes in this set. */
+ smartlist_t *country_names;
+ /** Total number of countries we knew about when we built <b>countries</b>.*/
+ int n_countries;
+ /** Bit array mapping the return value of geoip_get_country() to 1 iff the
+ * country is a member of this routerset. Note that we MUST call
+ * routerset_refresh_countries() whenever the geoip country list is
+ * reloaded. */
+ bitarray_t *countries;
+};
+#endif /* defined(ROUTERSET_PRIVATE) */
+#endif /* !defined(TOR_ROUTERSET_H) */
diff --git a/src/feature/nodelist/routerstatus_st.h b/src/feature/nodelist/routerstatus_st.h
new file mode 100644
index 0000000000..3de4a40ae4
--- /dev/null
+++ b/src/feature/nodelist/routerstatus_st.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef ROUTERSTATUS_ST_H
+#define ROUTERSTATUS_ST_H
+
+#include "or/download_status_st.h"
+
+/** Contents of a single router entry in a network status object.
+ */
+struct routerstatus_t {
+ time_t published_on; /**< When was this router published? */
+ char nickname[MAX_NICKNAME_LEN+1]; /**< The nickname this router says it
+ * has. */
+ char identity_digest[DIGEST_LEN]; /**< Digest of the router's identity
+ * key. */
+ /** Digest of the router's most recent descriptor or microdescriptor.
+ * If it's a descriptor, we only use the first DIGEST_LEN bytes. */
+ char descriptor_digest[DIGEST256_LEN];
+ uint32_t addr; /**< IPv4 address for this router, in host order. */
+ uint16_t or_port; /**< IPv4 OR port for this router. */
+ uint16_t dir_port; /**< Directory port for this router. */
+ tor_addr_t ipv6_addr; /**< IPv6 address for this router. */
+ uint16_t ipv6_orport; /**< IPv6 OR port for this router. */
+ unsigned int is_authority:1; /**< True iff this router is an authority. */
+ unsigned int is_exit:1; /**< True iff this router is a good exit. */
+ unsigned int is_stable:1; /**< True iff this router stays up a long time. */
+ unsigned int is_fast:1; /**< True iff this router has good bandwidth. */
+ /** True iff this router is called 'running' in the consensus. We give it
+ * this funny name so that we don't accidentally use this bit as a view of
+ * whether we think the router is *currently* running. If that's what you
+ * want to know, look at is_running in node_t. */
+ unsigned int is_flagged_running:1;
+ unsigned int is_named:1; /**< True iff "nickname" belongs to this router. */
+ unsigned int is_unnamed:1; /**< True iff "nickname" belongs to another
+ * router. */
+ unsigned int is_valid:1; /**< True iff this router isn't invalid. */
+ unsigned int is_possible_guard:1; /**< True iff this router would be a good
+ * choice as an entry guard. */
+ unsigned int is_bad_exit:1; /**< True iff this node is a bad choice for
+ * an exit node. */
+ unsigned int is_hs_dir:1; /**< True iff this router is a v2-or-later hidden
+ * service directory. */
+ unsigned int is_v2_dir:1; /** True iff this router publishes an open DirPort
+ * or it claims to accept tunnelled dir requests.
+ */
+
+ unsigned int has_bandwidth:1; /**< The vote/consensus had bw info */
+ unsigned int has_exitsummary:1; /**< The vote/consensus had exit summaries */
+ unsigned int bw_is_unmeasured:1; /**< This is a consensus entry, with
+ * the Unmeasured flag set. */
+
+ /** Flags to summarize the protocol versions for this routerstatus_t. */
+ protover_summary_flags_t pv;
+
+ uint32_t bandwidth_kb; /**< Bandwidth (capacity) of the router as reported in
+ * the vote/consensus, in kilobytes/sec. */
+
+ /** The consensus has guardfraction information for this router. */
+ unsigned int has_guardfraction:1;
+ /** The guardfraction value of this router. */
+ uint32_t guardfraction_percentage;
+
+ char *exitsummary; /**< exit policy summary -
+ * XXX weasel: this probably should not stay a string. */
+
+ /* ---- The fields below aren't derived from the networkstatus; they
+ * hold local information only. */
+
+ time_t last_dir_503_at; /**< When did this router last tell us that it
+ * was too busy to serve directory info? */
+ download_status_t dl_status;
+
+};
+
+#endif
+
diff --git a/src/feature/nodelist/signed_descriptor_st.h b/src/feature/nodelist/signed_descriptor_st.h
new file mode 100644
index 0000000000..90cd4a2703
--- /dev/null
+++ b/src/feature/nodelist/signed_descriptor_st.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef SIGNED_DESCRIPTOR_ST_H
+#define SIGNED_DESCRIPTOR_ST_H
+
+#include "or/download_status_st.h"
+
+/** Information needed to cache an onion router's descriptor. */
+struct signed_descriptor_t {
+ /** Pointer to the raw server descriptor, preceded by annotations. Not
+ * necessarily NUL-terminated. If saved_location is SAVED_IN_CACHE, this
+ * pointer is null. */
+ char *signed_descriptor_body;
+ /** Length of the annotations preceding the server descriptor. */
+ size_t annotations_len;
+ /** Length of the server descriptor. */
+ size_t signed_descriptor_len;
+ /** Digest of the server descriptor, computed as specified in
+ * dir-spec.txt. */
+ char signed_descriptor_digest[DIGEST_LEN];
+ /** Identity digest of the router. */
+ char identity_digest[DIGEST_LEN];
+ /** Declared publication time of the descriptor. */
+ time_t published_on;
+ /** For routerdescs only: digest of the corresponding extrainfo. */
+ char extra_info_digest[DIGEST_LEN];
+ /** For routerdescs only: A SHA256-digest of the extrainfo (if any) */
+ char extra_info_digest256[DIGEST256_LEN];
+ /** Certificate for ed25519 signing key. */
+ struct tor_cert_st *signing_key_cert;
+ /** For routerdescs only: Status of downloading the corresponding
+ * extrainfo. */
+ download_status_t ei_dl_status;
+ /** Where is the descriptor saved? */
+ saved_location_t saved_location;
+ /** If saved_location is SAVED_IN_CACHE or SAVED_IN_JOURNAL, the offset of
+ * this descriptor in the corresponding file. */
+ off_t saved_offset;
+ /** What position is this descriptor within routerlist->routers or
+ * routerlist->old_routers? -1 for none. */
+ int routerlist_index;
+ /** The valid-until time of the most recent consensus that listed this
+ * descriptor. 0 for "never listed in a consensus, so far as we know." */
+ time_t last_listed_as_valid_until;
+ /** If true, we never try to save this object in the cache. */
+ unsigned int do_not_cache : 1;
+ /** If true, this item is meant to represent an extrainfo. */
+ unsigned int is_extrainfo : 1;
+ /** If true, we got an extrainfo for this item, and the digest was right,
+ * but it was incompatible. */
+ unsigned int extrainfo_is_bogus : 1;
+ /** If true, we are willing to transmit this item unencrypted. */
+ unsigned int send_unencrypted : 1;
+};
+
+#endif
+
diff --git a/src/feature/nodelist/torcert.c b/src/feature/nodelist/torcert.c
new file mode 100644
index 0000000000..39c6605c65
--- /dev/null
+++ b/src/feature/nodelist/torcert.c
@@ -0,0 +1,725 @@
+/* Copyright (c) 2014-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file torcert.c
+ *
+ * \brief Implementation for ed25519-signed certificates as used in the Tor
+ * protocol.
+ *
+ * This certificate format is designed to be simple and compact; it's
+ * documented in tor-spec.txt in the torspec.git repository. All of the
+ * certificates in this format are signed with an Ed25519 key; the
+ * contents themselves may be another Ed25519 key, a digest of an
+ * RSA key, or some other material.
+ *
+ * This module also supports cross-certification of Ed25519
+ * identities using (older) RSA1024 identities.
+ *
+ * Tor uses other types of certificate too, beyond those described in this
+ * module. Notably, our use of TLS requires us to touch X.509 certificates,
+ * even though sensible people would stay away from those. Our X.509
+ * certificates are represented with tor_x509_cert_t, and implemented in
+ * tortls.c. We also have a separate certificate type that authorities
+ * use to authenticate their RSA signing keys with their RSA identity keys:
+ * that one is authority_cert_t, and it's mostly handled in routerlist.c.
+ */
+
+#include "or/or.h"
+#include "or/config.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/torcert.h"
+#include "trunnel/ed25519_cert.h"
+#include "lib/log/torlog.h"
+#include "trunnel/link_handshake.h"
+#include "lib/tls/tortls.h"
+
+#include "or/or_handshake_certs_st.h"
+
+/** Helper for tor_cert_create(): signs any 32 bytes, not just an ed25519
+ * key.
+ */
+static tor_cert_t *
+tor_cert_sign_impl(const ed25519_keypair_t *signing_key,
+ uint8_t cert_type,
+ uint8_t signed_key_type,
+ const uint8_t signed_key_info[32],
+ time_t now, time_t lifetime,
+ uint32_t flags)
+{
+ tor_cert_t *torcert = NULL;
+
+ ed25519_cert_t *cert = ed25519_cert_new();
+ cert->cert_type = cert_type;
+ cert->exp_field = (uint32_t) CEIL_DIV(now + lifetime, 3600);
+ cert->cert_key_type = signed_key_type;
+ memcpy(cert->certified_key, signed_key_info, 32);
+
+ if (flags & CERT_FLAG_INCLUDE_SIGNING_KEY) {
+ ed25519_cert_extension_t *ext = ed25519_cert_extension_new();
+ ext->ext_type = CERTEXT_SIGNED_WITH_KEY;
+ memcpy(ext->un_signing_key, signing_key->pubkey.pubkey, 32);
+ ed25519_cert_add_ext(cert, ext);
+ ++cert->n_extensions;
+ }
+
+ const ssize_t alloc_len = ed25519_cert_encoded_len(cert);
+ tor_assert(alloc_len > 0);
+ uint8_t *encoded = tor_malloc(alloc_len);
+ const ssize_t real_len = ed25519_cert_encode(encoded, alloc_len, cert);
+ if (real_len < 0)
+ goto err;
+ tor_assert(real_len == alloc_len);
+ tor_assert(real_len > ED25519_SIG_LEN);
+ uint8_t *sig = encoded + (real_len - ED25519_SIG_LEN);
+ tor_assert(tor_mem_is_zero((char*)sig, ED25519_SIG_LEN));
+
+ ed25519_signature_t signature;
+ if (ed25519_sign(&signature, encoded,
+ real_len-ED25519_SIG_LEN, signing_key)<0) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Can't sign certificate");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+ memcpy(sig, signature.sig, ED25519_SIG_LEN);
+
+ torcert = tor_cert_parse(encoded, real_len);
+ if (! torcert) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Generated a certificate we cannot parse");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+
+ if (tor_cert_checksig(torcert, &signing_key->pubkey, now) < 0) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Generated a certificate whose signature we can't "
+ "check: %s", tor_cert_describe_signature_status(torcert));
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+
+ tor_free(encoded);
+
+ goto done;
+
+ /* LCOV_EXCL_START */
+ err:
+ tor_cert_free(torcert);
+ torcert = NULL;
+ /* LCOV_EXCL_STOP */
+
+ done:
+ ed25519_cert_free(cert);
+ tor_free(encoded);
+ return torcert;
+}
+
+/**
+ * Create and return a new certificate of type <b>cert_type</b> to
+ * authenticate <b>signed_key</b> using the key <b>signing_key</b>. The
+ * certificate should remain valid for at least <b>lifetime</b> seconds after
+ * <b>now</b>.
+ *
+ * If CERT_FLAG_INCLUDE_SIGNING_KEY is set in <b>flags</b>, embed
+ * the public part of <b>signing_key</b> in the certificate.
+ */
+tor_cert_t *
+tor_cert_create(const ed25519_keypair_t *signing_key,
+ uint8_t cert_type,
+ const ed25519_public_key_t *signed_key,
+ time_t now, time_t lifetime,
+ uint32_t flags)
+{
+ return tor_cert_sign_impl(signing_key, cert_type,
+ SIGNED_KEY_TYPE_ED25519, signed_key->pubkey,
+ now, lifetime, flags);
+}
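+
+/* Illustrative sketch of a typical tor_cert_create() call: certify a freshly
+ * generated link key with a freshly generated signing key. The key names and
+ * the one-day lifetime here are hypothetical.
+ *
+ *   ed25519_keypair_t signing_kp, link_kp;
+ *   if (ed25519_keypair_generate(&signing_kp, 0) < 0 ||
+ *       ed25519_keypair_generate(&link_kp, 0) < 0)
+ *     return;
+ *   tor_cert_t *link_cert =
+ *     tor_cert_create(&signing_kp, CERT_TYPE_SIGNING_LINK, &link_kp.pubkey,
+ *                     time(NULL), 86400, CERT_FLAG_INCLUDE_SIGNING_KEY);
+ *   tor_cert_free(link_cert);
+ */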
+
+/** Release all storage held for <b>cert</b>. */
+void
+tor_cert_free_(tor_cert_t *cert)
+{
+ if (! cert)
+ return;
+
+ if (cert->encoded)
+ memwipe(cert->encoded, 0, cert->encoded_len);
+ tor_free(cert->encoded);
+
+ memwipe(cert, 0, sizeof(tor_cert_t));
+ tor_free(cert);
+}
+
+/** Parse a certificate from the <b>len</b>-byte buffer in <b>encoded</b>.
+ * Return the new certificate, or NULL on failure. */
+tor_cert_t *
+tor_cert_parse(const uint8_t *encoded, const size_t len)
+{
+ tor_cert_t *cert = NULL;
+ ed25519_cert_t *parsed = NULL;
+ ssize_t got_len = ed25519_cert_parse(&parsed, encoded, len);
+ if (got_len < 0 || (size_t) got_len != len)
+ goto err;
+
+ cert = tor_malloc_zero(sizeof(tor_cert_t));
+ cert->encoded = tor_memdup(encoded, len);
+ cert->encoded_len = len;
+
+ memcpy(cert->signed_key.pubkey, parsed->certified_key, 32);
+ int64_t valid_until_64 = ((int64_t)parsed->exp_field) * 3600;
+#if SIZEOF_TIME_T < 8
+ if (valid_until_64 > TIME_MAX)
+ valid_until_64 = TIME_MAX - 1;
+#endif
+ cert->valid_until = (time_t) valid_until_64;
+ cert->cert_type = parsed->cert_type;
+
+ for (unsigned i = 0; i < ed25519_cert_getlen_ext(parsed); ++i) {
+ ed25519_cert_extension_t *ext = ed25519_cert_get_ext(parsed, i);
+ if (ext->ext_type == CERTEXT_SIGNED_WITH_KEY) {
+ if (cert->signing_key_included)
+ goto err;
+
+ cert->signing_key_included = 1;
+ memcpy(cert->signing_key.pubkey, ext->un_signing_key, 32);
+ } else if (ext->ext_flags & CERTEXT_FLAG_AFFECTS_VALIDATION) {
+ /* Unrecognized extension with affects_validation set */
+ goto err;
+ }
+ }
+
+ goto done;
+ err:
+ tor_cert_free(cert);
+ cert = NULL;
+ done:
+ ed25519_cert_free(parsed);
+ return cert;
+}
+
+/** Fill in <b>checkable_out</b> with the information needed to check
+ * the signature on <b>cert</b> with <b>pubkey</b>.
+ *
+ * On success, if <b>expiration_out</b> is provided, and it is some time
+ * _after_ the expiration time of this certificate, set it to the
+ * expiration time of this certificate.
+ *
+ * Return 0 on success, or -1 if no key is available to check the
+ * signature with.
+ */
+int
+tor_cert_get_checkable_sig(ed25519_checkable_t *checkable_out,
+ const tor_cert_t *cert,
+ const ed25519_public_key_t *pubkey,
+ time_t *expiration_out)
+{
+ if (! pubkey) {
+ if (cert->signing_key_included)
+ pubkey = &cert->signing_key;
+ else
+ return -1;
+ }
+
+ checkable_out->msg = cert->encoded;
+ checkable_out->pubkey = pubkey;
+ tor_assert(cert->encoded_len > ED25519_SIG_LEN);
+ const size_t signed_len = cert->encoded_len - ED25519_SIG_LEN;
+ checkable_out->len = signed_len;
+ memcpy(checkable_out->signature.sig,
+ cert->encoded + signed_len, ED25519_SIG_LEN);
+
+ if (expiration_out) {
+ *expiration_out = MIN(*expiration_out, cert->valid_until);
+ }
+
+ return 0;
+}
+
+/** Validates the signature on <b>cert</b> with <b>pubkey</b> relative to the
+ * current time <b>now</b>. (If <b>now</b> is 0, do not check the expiration
+ * time.) Return 0 on success, -1 on failure. Sets flags in <b>cert</b> as
+ * appropriate.
+ */
+int
+tor_cert_checksig(tor_cert_t *cert,
+ const ed25519_public_key_t *pubkey, time_t now)
+{
+ ed25519_checkable_t checkable;
+ int okay;
+ time_t expires = TIME_MAX;
+
+ if (tor_cert_get_checkable_sig(&checkable, cert, pubkey, &expires) < 0)
+ return -1;
+
+ if (now && now > expires) {
+ cert->cert_expired = 1;
+ return -1;
+ }
+
+ if (ed25519_checksig_batch(&okay, &checkable, 1) < 0) {
+ cert->sig_bad = 1;
+ return -1;
+ } else {
+ cert->sig_ok = 1;
+ /* Only copy the checkable public key when it is different from the signing
+ * key of the certificate to avoid undefined behavior. */
+ if (cert->signing_key.pubkey != checkable.pubkey->pubkey) {
+ memcpy(cert->signing_key.pubkey, checkable.pubkey->pubkey, 32);
+ }
+ cert->cert_valid = 1;
+ return 0;
+ }
+}
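+
+/* Illustrative sketch: verifying a certificate received off the wire against
+ * a signer key we already trust. <encoded>, <len>, and <signer_pk> are
+ * hypothetical inputs.
+ *
+ *   tor_cert_t *cert = tor_cert_parse(encoded, len);
+ *   if (!cert)
+ *     return -1;
+ *   if (tor_cert_checksig(cert, &signer_pk, time(NULL)) < 0) {
+ *     log_warn(LD_GENERAL, "Rejecting cert: %s",
+ *              tor_cert_describe_signature_status(cert));
+ *     tor_cert_free(cert);
+ *     return -1;
+ *   }
+ *   (cert->signed_key now holds the certified key.)
+ *   tor_cert_free(cert);
+ *   return 0;
+ */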
+
+/** Return a string describing the status of the signature on <b>cert</b>.
+ *
+ * Will always be "unchecked" unless tor_cert_checksig has been called.
+ */
+const char *
+tor_cert_describe_signature_status(const tor_cert_t *cert)
+{
+ if (cert->cert_expired) {
+ return "expired";
+ } else if (cert->sig_bad) {
+ return "mis-signed";
+ } else if (cert->sig_ok) {
+ return "okay";
+ } else {
+ return "unchecked";
+ }
+}
+
+/** Return a newly allocated copy of <b>cert</b>. */
+tor_cert_t *
+tor_cert_dup(const tor_cert_t *cert)
+{
+ tor_cert_t *newcert = tor_memdup(cert, sizeof(tor_cert_t));
+ if (cert->encoded)
+ newcert->encoded = tor_memdup(cert->encoded, cert->encoded_len);
+ return newcert;
+}
+
+/** Return true iff cert1 and cert2 are the same cert. */
+int
+tor_cert_eq(const tor_cert_t *cert1, const tor_cert_t *cert2)
+{
+ tor_assert(cert1);
+ tor_assert(cert2);
+ return cert1->encoded_len == cert2->encoded_len &&
+ tor_memeq(cert1->encoded, cert2->encoded, cert1->encoded_len);
+}
+
+/** Return true iff cert1 and cert2 are the same cert, or if they are both
+ * NULL. */
+int
+tor_cert_opt_eq(const tor_cert_t *cert1, const tor_cert_t *cert2)
+{
+ if (cert1 == NULL && cert2 == NULL)
+ return 1;
+ if (!cert1 || !cert2)
+ return 0;
+ return tor_cert_eq(cert1, cert2);
+}
+
+#define RSA_ED_CROSSCERT_PREFIX "Tor TLS RSA/Ed25519 cross-certificate"
+
+/** Create a new cross-certification object to certify <b>ed_key</b> as the
+ * master ed25519 identity key for the RSA identity key <b>rsa_key</b>.
+ * Allocates and stores the encoded certificate in *<b>cert</b>, and returns
+ * the number of bytes stored. Returns a negative value on error. */
+ssize_t
+tor_make_rsa_ed25519_crosscert(const ed25519_public_key_t *ed_key,
+ const crypto_pk_t *rsa_key,
+ time_t expires,
+ uint8_t **cert)
+{
+ // It is later than 1985, since otherwise there would be no C89
+ // compilers. (Try to diagnose #22466.)
+ tor_assert_nonfatal(expires >= 15 * 365 * 86400);
+
+ uint8_t *res;
+
+ rsa_ed_crosscert_t *cc = rsa_ed_crosscert_new();
+ memcpy(cc->ed_key, ed_key->pubkey, ED25519_PUBKEY_LEN);
+ cc->expiration = (uint32_t) CEIL_DIV(expires, 3600);
+ cc->sig_len = crypto_pk_keysize(rsa_key);
+ rsa_ed_crosscert_setlen_sig(cc, crypto_pk_keysize(rsa_key));
+
+ ssize_t alloc_sz = rsa_ed_crosscert_encoded_len(cc);
+ tor_assert(alloc_sz > 0);
+ res = tor_malloc_zero(alloc_sz);
+ ssize_t sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
+ tor_assert(sz > 0 && sz <= alloc_sz);
+
+ crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
+ crypto_digest_add_bytes(d, RSA_ED_CROSSCERT_PREFIX,
+ strlen(RSA_ED_CROSSCERT_PREFIX));
+
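+ /* The signed portion is the 32-byte ed25519 key followed by the 4-byte
+ * expiration field. */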
+ const int signed_part_len = 32 + 4;
+ crypto_digest_add_bytes(d, (char*)res, signed_part_len);
+
+ uint8_t digest[DIGEST256_LEN];
+ crypto_digest_get_digest(d, (char*)digest, sizeof(digest));
+ crypto_digest_free(d);
+
+ int siglen = crypto_pk_private_sign(rsa_key,
+ (char*)rsa_ed_crosscert_getarray_sig(cc),
+ rsa_ed_crosscert_getlen_sig(cc),
+ (char*)digest, sizeof(digest));
+ tor_assert(siglen > 0 && siglen <= (int)crypto_pk_keysize(rsa_key));
+ tor_assert(siglen <= UINT8_MAX);
+ cc->sig_len = siglen;
+ rsa_ed_crosscert_setlen_sig(cc, siglen);
+
+ sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
+ rsa_ed_crosscert_free(cc);
+ *cert = res;
+ return sz;
+}
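+
+/* Illustrative sketch: making a crosscert and checking it right away. Here
+ * <ed_id_kp> and <rsa_id_key> are hypothetical keys the caller already
+ * holds, and the one-day expiry is arbitrary.
+ *
+ *   uint8_t *cc = NULL;
+ *   ssize_t cc_len = tor_make_rsa_ed25519_crosscert(&ed_id_kp.pubkey,
+ *                                                   rsa_id_key,
+ *                                                   time(NULL) + 86400, &cc);
+ *   if (cc_len > 0 &&
+ *       rsa_ed25519_crosscert_check(cc, cc_len, rsa_id_key,
+ *                                   &ed_id_kp.pubkey, time(NULL)) == 0) {
+ *     (the crosscert verifies)
+ *   }
+ *   tor_free(cc);
+ */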
+
+/**
+ * Check whether the <b>crosscert_len</b> byte certificate in <b>crosscert</b>
+ * is in fact a correct cross-certification of <b>master_key</b> using
+ * the RSA key <b>rsa_id_key</b>.
+ *
+ * Also reject the certificate if it expired before
+ * <b>reject_if_expired_before</b>.
+ *
+ * Return 0 on success, negative on failure.
+ */
+MOCK_IMPL(int,
+rsa_ed25519_crosscert_check, (const uint8_t *crosscert,
+ const size_t crosscert_len,
+ const crypto_pk_t *rsa_id_key,
+ const ed25519_public_key_t *master_key,
+ const time_t reject_if_expired_before))
+{
+ rsa_ed_crosscert_t *cc = NULL;
+ int rv;
+
+#define ERR(code, s) \
+ do { \
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, \
+ "Received a bad RSA->Ed25519 crosscert: %s", \
+ (s)); \
+ rv = (code); \
+ goto err; \
+ } while (0)
+
+ if (BUG(crypto_pk_keysize(rsa_id_key) > PK_BYTES))
+ return -1;
+
+ if (BUG(!crosscert))
+ return -1;
+
+ ssize_t parsed_len = rsa_ed_crosscert_parse(&cc, crosscert, crosscert_len);
+ if (parsed_len < 0 || crosscert_len != (size_t)parsed_len) {
+ ERR(-2, "Unparseable or overlong crosscert");
+ }
+
+ if (tor_memneq(rsa_ed_crosscert_getarray_ed_key(cc),
+ master_key->pubkey,
+ ED25519_PUBKEY_LEN)) {
+ ERR(-3, "Crosscert did not match Ed25519 key");
+ }
+
+ const uint32_t expiration_date = rsa_ed_crosscert_get_expiration(cc);
+ const uint64_t expiration_time = ((uint64_t)expiration_date) * 3600;
+
+ if (reject_if_expired_before < 0 ||
+ expiration_time < (uint64_t)reject_if_expired_before) {
+ ERR(-4, "Crosscert is expired");
+ }
+
+ const uint8_t *eos = rsa_ed_crosscert_get_end_of_signed(cc);
+ const uint8_t *sig = rsa_ed_crosscert_getarray_sig(cc);
+ const uint8_t siglen = rsa_ed_crosscert_get_sig_len(cc);
+ tor_assert(eos >= crosscert);
+ tor_assert((size_t)(eos - crosscert) <= crosscert_len);
+ tor_assert(siglen == rsa_ed_crosscert_getlen_sig(cc));
+
+ /* Compute the digest */
+ uint8_t digest[DIGEST256_LEN];
+ crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
+ crypto_digest_add_bytes(d, RSA_ED_CROSSCERT_PREFIX,
+ strlen(RSA_ED_CROSSCERT_PREFIX));
+ crypto_digest_add_bytes(d, (char*)crosscert, eos-crosscert);
+ crypto_digest_get_digest(d, (char*)digest, sizeof(digest));
+ crypto_digest_free(d);
+
+ /* Now check the signature */
+ uint8_t signed_[PK_BYTES];
+ int signed_len = crypto_pk_public_checksig(rsa_id_key,
+ (char*)signed_, sizeof(signed_),
+ (char*)sig, siglen);
+ if (signed_len < DIGEST256_LEN) {
+ ERR(-5, "Bad signature, or length of signed data not as expected");
+ }
+
+ if (tor_memneq(digest, signed_, DIGEST256_LEN)) {
+ ERR(-6, "The signature was good, but it didn't match the data");
+ }
+
+ rv = 0;
+ err:
+ rsa_ed_crosscert_free(cc);
+ return rv;
+}
+
+/** Construct and return a new, empty or_handshake_certs object. */
+or_handshake_certs_t *
+or_handshake_certs_new(void)
+{
+ return tor_malloc_zero(sizeof(or_handshake_certs_t));
+}
+
+/** Release all storage held in <b>certs</b> */
+void
+or_handshake_certs_free_(or_handshake_certs_t *certs)
+{
+ if (!certs)
+ return;
+
+ tor_x509_cert_free(certs->auth_cert);
+ tor_x509_cert_free(certs->link_cert);
+ tor_x509_cert_free(certs->id_cert);
+
+ tor_cert_free(certs->ed_id_sign);
+ tor_cert_free(certs->ed_sign_link);
+ tor_cert_free(certs->ed_sign_auth);
+ tor_free(certs->ed_rsa_crosscert);
+
+ memwipe(certs, 0xBD, sizeof(*certs));
+ tor_free(certs);
+}
+
+#undef ERR
+#define ERR(s) \
+ do { \
+ log_fn(severity, LD_PROTOCOL, \
+ "Received a bad CERTS cell: %s", \
+ (s)); \
+ return 0; \
+ } while (0)
+
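+/** Check the RSA certificates in <b>certs</b> against each other and, if we
+ * initiated the connection, against the TLS public key in <b>tls</b>, as of
+ * time <b>now</b>. Return 1 if they all check out; otherwise log at
+ * <b>severity</b> and return 0. */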
+int
+or_handshake_certs_rsa_ok(int severity,
+ or_handshake_certs_t *certs,
+ tor_tls_t *tls,
+ time_t now)
+{
+ tor_x509_cert_t *link_cert = certs->link_cert;
+ tor_x509_cert_t *auth_cert = certs->auth_cert;
+ tor_x509_cert_t *id_cert = certs->id_cert;
+
+ if (certs->started_here) {
+ if (! (id_cert && link_cert))
+ ERR("The certs we wanted (ID, Link) were missing");
+ if (! tor_tls_cert_matches_key(tls, link_cert))
+ ERR("The link certificate didn't match the TLS public key");
+ if (! tor_tls_cert_is_valid(severity, link_cert, id_cert, now, 0))
+ ERR("The link certificate was not valid");
+ if (! tor_tls_cert_is_valid(severity, id_cert, id_cert, now, 1))
+ ERR("The ID certificate was not valid");
+ } else {
+ if (! (id_cert && auth_cert))
+ ERR("The certs we wanted (ID, Auth) were missing");
+ if (! tor_tls_cert_is_valid(LOG_PROTOCOL_WARN, auth_cert, id_cert, now, 1))
+ ERR("The authentication certificate was not valid");
+ if (! tor_tls_cert_is_valid(LOG_PROTOCOL_WARN, id_cert, id_cert, now, 1))
+ ERR("The ID certificate was not valid");
+ }
+
+ return 1;
+}
+
+/** Check all the ed25519 certificates in <b>certs</b> against each other, and
+ * against the peer certificate in <b>tls</b> if appropriate. Return 1 if
+ * they all check out; otherwise, warn at level <b>severity</b> and
+ * return 0. */
+int
+or_handshake_certs_ed25519_ok(int severity,
+ or_handshake_certs_t *certs,
+ tor_tls_t *tls,
+ time_t now)
+{
+ ed25519_checkable_t check[10];
+ unsigned n_checkable = 0;
+ time_t expiration = TIME_MAX;
+
+#define ADDCERT(cert, pk) \
+ do { \
+ tor_assert(n_checkable < ARRAY_LENGTH(check)); \
+ if (tor_cert_get_checkable_sig(&check[n_checkable++], cert, pk, \
+ &expiration) < 0) \
+ ERR("Could not get checkable cert."); \
+ } while (0)
+
+ if (! certs->ed_id_sign || !certs->ed_id_sign->signing_key_included) {
+ ERR("No Ed25519 signing key");
+ }
+ ADDCERT(certs->ed_id_sign, NULL);
+
+ if (certs->started_here) {
+ if (! certs->ed_sign_link)
+ ERR("No Ed25519 link key");
+ {
+ /* check for a match with the TLS cert. */
+ tor_x509_cert_t *peer_cert = tor_tls_get_peer_cert(tls);
+ if (BUG(!peer_cert)) {
+ /* This is a bug, because if we got to this point, we are a connection
+ * that was initiated here, and we completed a TLS handshake. The
+ * other side *must* have given us a certificate! */
+ ERR("No x509 peer cert"); // LCOV_EXCL_LINE
+ }
+ const common_digests_t *peer_cert_digests =
+ tor_x509_cert_get_cert_digests(peer_cert);
+ int okay = tor_memeq(peer_cert_digests->d[DIGEST_SHA256],
+ certs->ed_sign_link->signed_key.pubkey,
+ DIGEST256_LEN);
+ tor_x509_cert_free(peer_cert);
+ if (!okay)
+ ERR("Link certificate does not match TLS certificate");
+ }
+
+ ADDCERT(certs->ed_sign_link, &certs->ed_id_sign->signed_key);
+
+ } else {
+ if (! certs->ed_sign_auth)
+ ERR("No Ed25519 link authentication key");
+ ADDCERT(certs->ed_sign_auth, &certs->ed_id_sign->signed_key);
+ }
+
+ if (expiration < now) {
+ ERR("At least one certificate expired.");
+ }
+
+ /* Okay, we've gotten ready to check all the Ed25519 certificates.
+ * Now, we are going to check the RSA certificate's cross-certification
+ * with the ED certificates.
+ *
+ * FFFF In the future, we might want to make this optional.
+ */
+
+ tor_x509_cert_t *rsa_id_cert = certs->id_cert;
+ if (!rsa_id_cert) {
+ ERR("Missing legacy RSA ID certificate");
+ }
+ if (! tor_tls_cert_is_valid(severity, rsa_id_cert, rsa_id_cert, now, 1)) {
+ ERR("The legacy RSA ID certificate was not valid");
+ }
+ if (! certs->ed_rsa_crosscert) {
+ ERR("Missing RSA->Ed25519 crosscert");
+ }
+ crypto_pk_t *rsa_id_key = tor_tls_cert_get_key(rsa_id_cert);
+ if (!rsa_id_key) {
+ ERR("RSA ID cert had no RSA key");
+ }
+
+ if (rsa_ed25519_crosscert_check(certs->ed_rsa_crosscert,
+ certs->ed_rsa_crosscert_len,
+ rsa_id_key,
+ &certs->ed_id_sign->signing_key,
+ now) < 0) {
+ crypto_pk_free(rsa_id_key);
+ ERR("Invalid RSA->Ed25519 crosscert");
+ }
+ crypto_pk_free(rsa_id_key);
+ rsa_id_key = NULL;
+
+ /* FFFF We could save a little time in the client case by queueing
+ * this batch to check it later, along with the signature from the
+ * AUTHENTICATE cell. That will change our data flow a bit, though,
+ * so I say "postpone". */
+
+ if (ed25519_checksig_batch(NULL, check, n_checkable) < 0) {
+ ERR("At least one Ed25519 certificate was badly signed");
+ }
+
+ return 1;
+}
+
+/**
+ * Check the Ed25519 certificates and/or the RSA certificates in <b>certs</b>,
+ * as appropriate. If we obtained an Ed25519 identity, set *<b>ed_id_out</b>.
+ * If we obtained an RSA identity, set *<b>rsa_id_out</b>. Otherwise, set them
+ * both to NULL.
+ */
+void
+or_handshake_certs_check_both(int severity,
+ or_handshake_certs_t *certs,
+ tor_tls_t *tls,
+ time_t now,
+ const ed25519_public_key_t **ed_id_out,
+ const common_digests_t **rsa_id_out)
+{
+ tor_assert(ed_id_out);
+ tor_assert(rsa_id_out);
+
+ *ed_id_out = NULL;
+ *rsa_id_out = NULL;
+
+ if (certs->ed_id_sign) {
+ if (or_handshake_certs_ed25519_ok(severity, certs, tls, now)) {
+ tor_assert(certs->ed_id_sign);
+ tor_assert(certs->id_cert);
+
+ *ed_id_out = &certs->ed_id_sign->signing_key;
+ *rsa_id_out = tor_x509_cert_get_id_digests(certs->id_cert);
+
+ /* If we reached this point, we did not look at any of the
+ * subsidiary RSA certificates, so we'd better just remove them.
+ */
+ tor_x509_cert_free(certs->link_cert);
+ tor_x509_cert_free(certs->auth_cert);
+ certs->link_cert = certs->auth_cert = NULL;
+ }
+ /* We do _not_ fall through here. If you provided us Ed25519
+ * certificates, we expect to verify them! */
+ } else {
+ /* No ed25519 keys given in the CERTS cell */
+ if (or_handshake_certs_rsa_ok(severity, certs, tls, now)) {
+ *rsa_id_out = tor_x509_cert_get_id_digests(certs->id_cert);
+ }
+ }
+}
+
+/* === ENCODING === */
+
+/** Encode the ed25519 certificate <b>cert</b> and put the newly allocated
+ * string in <b>cert_str_out</b>. Return 0 on success, or a negative value
+ * on error. */
+int
+tor_cert_encode_ed22519(const tor_cert_t *cert, char **cert_str_out)
+{
+ int ret = -1;
+ char *ed_cert_b64 = NULL;
+ size_t ed_cert_b64_len;
+
+ tor_assert(cert);
+ tor_assert(cert_str_out);
+
+ /* Get the encoded size and add the NUL byte. */
+ ed_cert_b64_len = base64_encode_size(cert->encoded_len,
+ BASE64_ENCODE_MULTILINE) + 1;
+ ed_cert_b64 = tor_malloc_zero(ed_cert_b64_len);
+
+ /* Base64 encode the encoded certificate. */
+ if (base64_encode(ed_cert_b64, ed_cert_b64_len,
+ (const char *) cert->encoded, cert->encoded_len,
+ BASE64_ENCODE_MULTILINE) < 0) {
+ /* LCOV_EXCL_START */
+ log_err(LD_BUG, "Couldn't base64-encode ed25519 cert!");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+
+ /* Put everything together in a NUL terminated string. */
+ tor_asprintf(cert_str_out,
+ "-----BEGIN ED25519 CERT-----\n"
+ "%s"
+ "-----END ED25519 CERT-----",
+ ed_cert_b64);
+ /* Success! */
+ ret = 0;
+
+ err:
+ tor_free(ed_cert_b64);
+ return ret;
+}
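+
+/* Illustrative sketch: turning an already-built certificate <cert>
+ * (hypothetical) into its PEM-like textual form for inclusion in a
+ * descriptor.
+ *
+ *   char *pem = NULL;
+ *   if (tor_cert_encode_ed22519(cert, &pem) == 0) {
+ *     (append <pem> to the descriptor being built)
+ *     tor_free(pem);
+ *   }
+ */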
diff --git a/src/feature/nodelist/torcert.h b/src/feature/nodelist/torcert.h
new file mode 100644
index 0000000000..5fa97679df
--- /dev/null
+++ b/src/feature/nodelist/torcert.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2014-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TORCERT_H_INCLUDED
+#define TORCERT_H_INCLUDED
+
+#include "lib/crypt_ops/crypto_ed25519.h"
+
+#define SIGNED_KEY_TYPE_ED25519 0x01
+
+#define CERT_TYPE_ID_SIGNING 0x04
+#define CERT_TYPE_SIGNING_LINK 0x05
+#define CERT_TYPE_SIGNING_AUTH 0x06
+#define CERT_TYPE_SIGNING_HS_DESC 0x08
+#define CERT_TYPE_AUTH_HS_IP_KEY 0x09
+#define CERT_TYPE_ONION_ID 0x0A
+#define CERT_TYPE_CROSS_HS_IP_KEYS 0x0B
+
+#define CERT_FLAG_INCLUDE_SIGNING_KEY 0x1
+
+/** An ed25519-signed certificate as used throughout the Tor protocol. */
+typedef struct tor_cert_st {
+ /** The key authenticated by this certificate */
+ ed25519_public_key_t signed_key;
+ /** The key that signed this certificate. This value may be unset if the
+ * certificate has never been checked, and didn't include its own key. */
+ ed25519_public_key_t signing_key;
+ /** A time after which this certificate will no longer be valid. */
+ time_t valid_until;
+
+ /** The encoded representation of this certificate */
+ uint8_t *encoded;
+ /** The length of <b>encoded</b> */
+ size_t encoded_len;
+
+ /** One of CERT_TYPE_... */
+ uint8_t cert_type;
+ /** True iff we received a signing key embedded in this certificate */
+ unsigned signing_key_included : 1;
+ /** True iff we checked the signature and found it bad */
+ unsigned sig_bad : 1;
+ /** True iff we checked the signature and found it correct */
+ unsigned sig_ok : 1;
+ /** True iff we checked the signature and first found that the cert
+ * had expired */
+ unsigned cert_expired : 1;
+ /** True iff we checked the signature and found the whole cert valid */
+ unsigned cert_valid : 1;
+} tor_cert_t;
+
+struct tor_tls_t;
+
+tor_cert_t *tor_cert_create(const ed25519_keypair_t *signing_key,
+ uint8_t cert_type,
+ const ed25519_public_key_t *signed_key,
+ time_t now, time_t lifetime,
+ uint32_t flags);
+
+tor_cert_t *tor_cert_parse(const uint8_t *cert, size_t certlen);
+
+void tor_cert_free_(tor_cert_t *cert);
+#define tor_cert_free(cert) FREE_AND_NULL(tor_cert_t, tor_cert_free_, (cert))
+
+int tor_cert_get_checkable_sig(ed25519_checkable_t *checkable_out,
+ const tor_cert_t *cert,
+ const ed25519_public_key_t *pubkey,
+ time_t *expiration_out);
+
+int tor_cert_checksig(tor_cert_t *cert,
+ const ed25519_public_key_t *pubkey, time_t now);
+const char *tor_cert_describe_signature_status(const tor_cert_t *cert);
+
+tor_cert_t *tor_cert_dup(const tor_cert_t *cert);
+int tor_cert_eq(const tor_cert_t *cert1, const tor_cert_t *cert2);
+int tor_cert_opt_eq(const tor_cert_t *cert1, const tor_cert_t *cert2);
+
+ssize_t tor_make_rsa_ed25519_crosscert(const ed25519_public_key_t *ed_key,
+ const crypto_pk_t *rsa_key,
+ time_t expires,
+ uint8_t **cert);
+MOCK_DECL(int,
+rsa_ed25519_crosscert_check, (const uint8_t *crosscert,
+ const size_t crosscert_len,
+ const crypto_pk_t *rsa_id_key,
+ const ed25519_public_key_t *master_key,
+ const time_t reject_if_expired_before));
+
+or_handshake_certs_t *or_handshake_certs_new(void);
+void or_handshake_certs_free_(or_handshake_certs_t *certs);
+#define or_handshake_certs_free(certs) \
+ FREE_AND_NULL(or_handshake_certs_t, or_handshake_certs_free_, (certs))
+int or_handshake_certs_rsa_ok(int severity,
+ or_handshake_certs_t *certs,
+ struct tor_tls_t *tls,
+ time_t now);
+int or_handshake_certs_ed25519_ok(int severity,
+ or_handshake_certs_t *certs,
+ struct tor_tls_t *tls,
+ time_t now);
+void or_handshake_certs_check_both(int severity,
+ or_handshake_certs_t *certs,
+ struct tor_tls_t *tls,
+ time_t now,
+ const ed25519_public_key_t **ed_id_out,
+ const common_digests_t **rsa_id_out);
+
+int tor_cert_encode_ed22519(const tor_cert_t *cert, char **cert_str_out);
+
+#endif /* !defined(TORCERT_H_INCLUDED) */
diff --git a/src/feature/nodelist/vote_routerstatus_st.h b/src/feature/nodelist/vote_routerstatus_st.h
new file mode 100644
index 0000000000..1b85737df8
--- /dev/null
+++ b/src/feature/nodelist/vote_routerstatus_st.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef VOTE_ROUTERSTATUS_ST_H
+#define VOTE_ROUTERSTATUS_ST_H
+
+#include "or/routerstatus_st.h"
+#include "lib/defs/x25519_sizes.h"
+
+/** The claim about a single router, made in a vote. */
+struct vote_routerstatus_t {
+ routerstatus_t status; /**< Underlying 'status' object for this router.
+ * Flags are redundant. */
+ /** How many known-flags are allowed in a vote? This is the width of
+ * the flags field of vote_routerstatus_t */
+#define MAX_KNOWN_FLAGS_IN_VOTE 64
+ uint64_t flags; /**< Bit-field for all recognized flags; index into
+ * networkstatus_t.known_flags. */
+ char *version; /**< The version that the authority says this router is
+ * running. */
+ char *protocols; /**< The protocols that this authority says this router
+ * provides. */
+ unsigned int has_measured_bw:1; /**< The vote had a measured bw */
+ /** True iff the vote included an entry for ed25519 ID, or included
+ * "id ed25519 none" to indicate that there was no ed25519 ID. */
+ unsigned int has_ed25519_listing:1;
+ /** True if the Ed25519 listing here is the consensus-opinion for the
+ * Ed25519 listing; false if there was no consensus on Ed25519 key status,
+ * or if this VRS doesn't reflect it. */
+ unsigned int ed25519_reflects_consensus:1;
+ uint32_t measured_bw_kb; /**< Measured bandwidth (capacity) of the router */
+ /** The hash or hashes that the authority claims this microdesc has. */
+ vote_microdesc_hash_t *microdesc;
+ /** Ed25519 identity for this router, or zero if it has none. */
+ uint8_t ed25519_id[ED25519_PUBKEY_LEN];
+};
+
+#endif