Diffstat (limited to 'src/feature/client')
-rw-r--r--  src/feature/client/addressmap.c    1156
-rw-r--r--  src/feature/client/addressmap.h      65
-rw-r--r--  src/feature/client/bridges.c        937
-rw-r--r--  src/feature/client/bridges.h         80
-rw-r--r--  src/feature/client/circpathbias.c  1578
-rw-r--r--  src/feature/client/circpathbias.h    28
-rw-r--r--  src/feature/client/dnsserv.c        415
-rw-r--r--  src/feature/client/dnsserv.h         27
-rw-r--r--  src/feature/client/entrynodes.c    3694
-rw-r--r--  src/feature/client/entrynodes.h     639
-rw-r--r--  src/feature/client/transports.c    1738
-rw-r--r--  src/feature/client/transports.h     147
12 files changed, 10504 insertions, 0 deletions
diff --git a/src/feature/client/addressmap.c b/src/feature/client/addressmap.c
new file mode 100644
index 0000000000..ba78a5f908
--- /dev/null
+++ b/src/feature/client/addressmap.c
@@ -0,0 +1,1156 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file addressmap.c
+ *
+ * \brief The addressmap module manages the processes by which we rewrite
+ * addresses in client requests. It handles the MapAddress controller and
+ * torrc commands, the TrackHostExits feature, and the (deprecated)
+ * client-side DNS cache.
+ */
+
+#define ADDRESSMAP_PRIVATE
+
+#include "lib/crypt_ops/crypto_rand.h"
+
+#include "or/or.h"
+#include "or/addressmap.h"
+#include "or/circuituse.h"
+#include "or/config.h"
+#include "or/connection_edge.h"
+#include "or/control.h"
+#include "or/dns.h"
+#include "or/nodelist.h"
+#include "or/routerset.h"
+
+#include "or/entry_connection_st.h"
+
+/** A client-side struct to remember requests to rewrite addresses
+ * to new addresses. These structs are stored in the hash table
+ * "addressmap" below.
+ *
+ * There are 5 ways to set an address mapping:
+ * - A MapAddress command from the controller [permanent]
+ * - An AddressMap directive in the torrc [permanent]
+ * - When a TrackHostExits torrc directive is triggered [temporary]
+ * - When a DNS resolve succeeds [temporary]
+ * - When a DNS resolve fails [temporary]
+ *
+ * When an addressmap request is made for an address that is already
+ * registered, the existing mapping is replaced only if it has no
+ * "new_address" (that is, it is still waiting on a DNS resolve), or if
+ * the new mapping is permanent (expires==0 or 1).
+ *
+ * (We overload the 'expires' field, using "0" for mappings set via
+ * the configuration file, "1" for mappings set from the control
+ * interface, and other values for DNS and TrackHostExit mappings that can
+ * expire.)
+ *
+ * A mapping may be 'wildcarded'. If "src_wildcard" is true, then
+ * any address that ends with a . followed by the key for this entry will
+ * get remapped by it. If "dst_wildcard" is also true, then only the
+ * matching suffix of such addresses will get replaced by new_address.
+ */
+typedef struct {
+ char *new_address;
+ time_t expires;
+ addressmap_entry_source_bitfield_t source:3;
+ unsigned src_wildcard:1;
+ unsigned dst_wildcard:1;
+ short num_resolve_failures;
+} addressmap_entry_t;
+
+/** Entry recording which virtual address of each type, if any, we have
+ * mapped to point at a given target address. */
+typedef struct {
+ char *ipv4_address;
+ char *ipv6_address;
+ char *hostname_address;
+} virtaddress_entry_t;
+
+/** A hash table to store client-side address rewrite instructions. */
+static strmap_t *addressmap=NULL;
+
+/**
+ * Table mapping addresses to which virtual address, if any, we
+ * assigned them to.
+ *
+ * We maintain the following invariant: if [A,B] is in
+ * virtaddress_reversemap, then B must be a virtual address, and [A,B]
+ * must be in addressmap. We do not require that the converse hold:
+ * if it fails, then we could end up mapping two virtual addresses to
+ * the same address, which is no disaster.
+ **/
+static strmap_t *virtaddress_reversemap=NULL;
+
+/** Initialize addressmap. */
+void
+addressmap_init(void)
+{
+ addressmap = strmap_new();
+ virtaddress_reversemap = strmap_new();
+}
+
+#define addressmap_ent_free(ent) \
+ FREE_AND_NULL(addressmap_entry_t, addressmap_ent_free_, (ent))
+
+/** Free the memory associated with the addressmap entry <b>ent</b>. */
+static void
+addressmap_ent_free_(addressmap_entry_t *ent)
+{
+ if (!ent)
+ return;
+
+ tor_free(ent->new_address);
+ tor_free(ent);
+}
+
+static void
+addressmap_ent_free_void(void *ent)
+{
+ addressmap_ent_free_(ent);
+}
+
+#define addressmap_virtaddress_ent_free(ent) \
+ FREE_AND_NULL(virtaddress_entry_t, addressmap_virtaddress_ent_free_, (ent))
+
+/** Free storage held by the virtaddress_entry_t <b>ent</b>. */
+static void
+addressmap_virtaddress_ent_free_(virtaddress_entry_t *ent)
+{
+ if (!ent)
+ return;
+ tor_free(ent->ipv4_address);
+ tor_free(ent->ipv6_address);
+ tor_free(ent->hostname_address);
+ tor_free(ent);
+}
+
+static void
+addressmap_virtaddress_ent_free_void(void *ent)
+{
+ addressmap_virtaddress_ent_free_(ent);
+}
+
+/** Remove <b>address</b> (which must map to <b>ent</b>) from the
+ * virtual address map. */
+static void
+addressmap_virtaddress_remove(const char *address, addressmap_entry_t *ent)
+{
+ if (ent && ent->new_address &&
+ address_is_in_virtual_range(ent->new_address)) {
+ virtaddress_entry_t *ve =
+ strmap_get(virtaddress_reversemap, ent->new_address);
+ /*log_fn(LOG_NOTICE,"remove reverse mapping for %s",ent->new_address);*/
+ if (ve) {
+ if (!strcmp(address, ve->ipv4_address))
+ tor_free(ve->ipv4_address);
+ if (!strcmp(address, ve->ipv6_address))
+ tor_free(ve->ipv6_address);
+ if (!strcmp(address, ve->hostname_address))
+ tor_free(ve->hostname_address);
+ if (!ve->ipv4_address && !ve->ipv6_address && !ve->hostname_address) {
+ tor_free(ve);
+ strmap_remove(virtaddress_reversemap, ent->new_address);
+ }
+ }
+ }
+}
+
+/** Remove <b>ent</b> (which must be mapped to by <b>address</b>) from the
+ * client address maps, and then free it. */
+static void
+addressmap_ent_remove(const char *address, addressmap_entry_t *ent)
+{
+ addressmap_virtaddress_remove(address, ent);
+ addressmap_ent_free(ent);
+}
+
+/** Unregister all TrackHostExits mappings from any address to
+ * *.exitname.exit. */
+void
+clear_trackexithost_mappings(const char *exitname)
+{
+ char *suffix = NULL;
+ if (!addressmap || !exitname)
+ return;
+ tor_asprintf(&suffix, ".%s.exit", exitname);
+ tor_strlower(suffix);
+
+ STRMAP_FOREACH_MODIFY(addressmap, address, addressmap_entry_t *, ent) {
+ if (ent->source == ADDRMAPSRC_TRACKEXIT &&
+ !strcmpend(ent->new_address, suffix)) {
+ addressmap_ent_remove(address, ent);
+ MAP_DEL_CURRENT(address);
+ }
+ } STRMAP_FOREACH_END;
+
+ tor_free(suffix);
+}
+
+/** Remove all TRACKEXIT mappings from the addressmap for which the target
+ * host is unknown or no longer allowed, or for which the source address
+ * is no longer in trackexithosts. */
+void
+addressmap_clear_excluded_trackexithosts(const or_options_t *options)
+{
+ const routerset_t *allow_nodes = options->ExitNodes;
+ const routerset_t *exclude_nodes = options->ExcludeExitNodesUnion_;
+
+ if (!addressmap)
+ return;
+ if (routerset_is_empty(allow_nodes))
+ allow_nodes = NULL;
+ if (allow_nodes == NULL && routerset_is_empty(exclude_nodes))
+ return;
+
+ STRMAP_FOREACH_MODIFY(addressmap, address, addressmap_entry_t *, ent) {
+ size_t len;
+ const char *target = ent->new_address, *dot;
+ char *nodename;
+ const node_t *node;
+
+ if (!target) {
+ /* DNS resolving in progress */
+ continue;
+ } else if (strcmpend(target, ".exit")) {
+ /* Not a .exit mapping */
+ continue;
+ } else if (ent->source != ADDRMAPSRC_TRACKEXIT) {
+ /* Not a trackexit mapping. */
+ continue;
+ }
+ len = strlen(target);
+ if (len < 6)
+ continue; /* malformed. */
+ dot = target + len - 6; /* dot now points to just before .exit */
+ while (dot > target && *dot != '.')
+ dot--;
+ if (*dot == '.') dot++;
+ nodename = tor_strndup(dot, len-5-(dot-target));
+ node = node_get_by_nickname(nodename, NNF_NO_WARN_UNNAMED);
+ tor_free(nodename);
+ if (!node ||
+ (allow_nodes && !routerset_contains_node(allow_nodes, node)) ||
+ routerset_contains_node(exclude_nodes, node) ||
+ !hostname_in_track_host_exits(options, address)) {
+ /* We don't know this one, or we want to be rid of it. */
+ addressmap_ent_remove(address, ent);
+ MAP_DEL_CURRENT(address);
+ }
+ } STRMAP_FOREACH_END;
+}
+
+/** Return true iff <b>address</b> is one that we are configured to
+ * automap on resolve according to <b>options</b>. */
+int
+addressmap_address_should_automap(const char *address,
+ const or_options_t *options)
+{
+ const smartlist_t *suffix_list = options->AutomapHostsSuffixes;
+
+ if (!suffix_list)
+ return 0;
+
+ SMARTLIST_FOREACH_BEGIN(suffix_list, const char *, suffix) {
+ if (!strcmp(suffix, "."))
+ return 1;
+ if (!strcasecmpend(address, suffix))
+ return 1;
+ } SMARTLIST_FOREACH_END(suffix);
+ return 0;
+}
+
+/** Remove all AUTOMAP mappings from the addressmap: those whose source
+ * address no longer matches AutomapHostsSuffixes, those that are no longer
+ * allowed because AutomapHostsOnResolve is disabled, and those whose
+ * target address is no longer in the virtual network. */
+void
+addressmap_clear_invalid_automaps(const or_options_t *options)
+{
+ int clear_all = !options->AutomapHostsOnResolve;
+ const smartlist_t *suffixes = options->AutomapHostsSuffixes;
+
+ if (!addressmap)
+ return;
+
+ if (!suffixes)
+ clear_all = 1; /* This should be impossible, but let's be sure. */
+
+ STRMAP_FOREACH_MODIFY(addressmap, src_address, addressmap_entry_t *, ent) {
+ int remove_this = clear_all;
+ if (ent->source != ADDRMAPSRC_AUTOMAP)
+ continue; /* not an automap mapping. */
+
+ if (!remove_this) {
+ remove_this = ! addressmap_address_should_automap(src_address, options);
+ }
+
+ if (!remove_this && ! address_is_in_virtual_range(ent->new_address))
+ remove_this = 1;
+
+ if (remove_this) {
+ addressmap_ent_remove(src_address, ent);
+ MAP_DEL_CURRENT(src_address);
+ }
+ } STRMAP_FOREACH_END;
+}
+
+/** Remove all entries from the addressmap that were set via the
+ * configuration file or the command line. */
+void
+addressmap_clear_configured(void)
+{
+ addressmap_get_mappings(NULL, 0, 0, 0);
+}
+
+/** Remove all entries from the addressmap that are set to expire, ever. */
+void
+addressmap_clear_transient(void)
+{
+ addressmap_get_mappings(NULL, 2, TIME_MAX, 0);
+}
+
+/** Clean out entries from the addressmap cache that were
+ * added long enough ago that they are no longer valid.
+ */
+void
+addressmap_clean(time_t now)
+{
+ addressmap_get_mappings(NULL, 2, now, 0);
+}
+
+/** Free all the elements in the addressmap, and free the addressmap
+ * itself. */
+void
+addressmap_free_all(void)
+{
+ strmap_free(addressmap, addressmap_ent_free_void);
+ addressmap = NULL;
+
+ strmap_free(virtaddress_reversemap, addressmap_virtaddress_ent_free_void);
+ virtaddress_reversemap = NULL;
+}
+
+/** Try to find a match for AddressMap expressions that use
+ * wildcard notation such as '*.c.d *.e.f' (so 'a.c.d' will map to 'a.e.f') or
+ * '*.c.d a.b.c' (so 'a.c.d' will map to a.b.c).
+ * Return the matching entry in AddressMap or NULL if no match is found.
+ * For expressions such as '*.c.d *.e.f', truncate <b>address</b> 'a.c.d'
+ * to 'a' before we return the matching AddressMap entry.
+ *
+ * This function does not handle the case where a pattern of the form "*.c.d"
+ * matches the address c.d -- that's done by the main addressmap_rewrite
+ * function.
+ */
+static addressmap_entry_t *
+addressmap_match_superdomains(char *address)
+{
+ addressmap_entry_t *val;
+ char *cp;
+
+ cp = address;
+ while ((cp = strchr(cp, '.'))) {
+ /* cp now points to a suffix of address that begins with a . */
+ val = strmap_get_lc(addressmap, cp+1);
+ if (val && val->src_wildcard) {
+ if (val->dst_wildcard)
+ *cp = '\0';
+ return val;
+ }
+ ++cp;
+ }
+ return NULL;
+}
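The loop above walks the address one label at a time, treating each remaining suffix as a possible wildcard key. A minimal standalone sketch of the same suffix-walking technique, with a hypothetical one-entry rule table standing in for the strmap lookup:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the wildcard table: a single "*.c.d" rule. */
static const char *wildcard_key = "c.d";

int
main(void)
{
  char address[] = "a.c.d";
  char *cp = address;
  while ((cp = strchr(cp, '.'))) {
    /* cp points at a '.'; the candidate superdomain starts just after it. */
    if (!strcmp(cp + 1, wildcard_key)) {
      printf("'%s' matches the wildcard rule '*.%s'\n", address, wildcard_key);
      return 0;
    }
    ++cp;
  }
  printf("no wildcard match for '%s'\n", address);
  return 0;
}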
+
+/** Look at address, and rewrite it until it doesn't want any
+ * more rewrites; but don't get into an infinite loop.
+ * Don't write more than maxlen chars into address. Return true if the
+ * address changed; false otherwise. Set *<b>expires_out</b> to the
+ * expiry time of the result, or to <b>time_max</b> if the result does
+ * not expire.
+ *
+ * If <b>exit_source_out</b> is non-null, we set it as follows. If the
+ * address starts out as a non-exit address, and we remap it to an .exit
+ * address at any point, then set *<b>exit_source_out</b> to the
+ * address_entry_source_t of the first such rule. Set *<b>exit_source_out</b>
+ * to ADDRMAPSRC_NONE if there is no such rewrite, or if the original address
+ * was a .exit.
+ */
+int
+addressmap_rewrite(char *address, size_t maxlen,
+ unsigned flags,
+ time_t *expires_out,
+ addressmap_entry_source_t *exit_source_out)
+{
+ addressmap_entry_t *ent;
+ int rewrites;
+ time_t expires = TIME_MAX;
+ addressmap_entry_source_t exit_source = ADDRMAPSRC_NONE;
+ char *addr_orig = tor_strdup(address);
+ char *log_addr_orig = NULL;
+
+ /* We use a loop here to limit the total number of rewrites we do,
+ * so that we can't hit an infinite loop. */
+ for (rewrites = 0; rewrites < 16; rewrites++) {
+ int exact_match = 0;
+ log_addr_orig = tor_strdup(escaped_safe_str_client(address));
+
+ /* First check to see if there's an exact match for this address */
+ ent = strmap_get(addressmap, address);
+
+ if (!ent || !ent->new_address) {
+ /* And if we don't have an exact match, try to check whether
+ * we have a pattern-based match.
+ */
+ ent = addressmap_match_superdomains(address);
+ } else {
+ if (ent->src_wildcard && !ent->dst_wildcard &&
+ !strcasecmp(address, ent->new_address)) {
+ /* This is a rule like "rewrite *.example.com to example.com", and we
+ * just got "example.com". Instead of calling it an infinite loop,
+ * call it complete. */
+ goto done;
+ }
+ exact_match = 1;
+ }
+
+ if (!ent || !ent->new_address) {
+ /* We still have no match at all. We're done! */
+ goto done;
+ }
+
+    /* Check whether the flags we were passed tell us not to use this
+     * mapping. */
+ switch (ent->source) {
+ case ADDRMAPSRC_DNS:
+ {
+ sa_family_t f;
+ tor_addr_t tmp;
+ f = tor_addr_parse(&tmp, ent->new_address);
+ if (f == AF_INET && !(flags & AMR_FLAG_USE_IPV4_DNS))
+ goto done;
+ else if (f == AF_INET6 && !(flags & AMR_FLAG_USE_IPV6_DNS))
+ goto done;
+ }
+ break;
+ case ADDRMAPSRC_CONTROLLER:
+ case ADDRMAPSRC_TORRC:
+ if (!(flags & AMR_FLAG_USE_MAPADDRESS))
+ goto done;
+ break;
+ case ADDRMAPSRC_AUTOMAP:
+ if (!(flags & AMR_FLAG_USE_AUTOMAP))
+ goto done;
+ break;
+ case ADDRMAPSRC_TRACKEXIT:
+ if (!(flags & AMR_FLAG_USE_TRACKEXIT))
+ goto done;
+ break;
+ case ADDRMAPSRC_NONE:
+ default:
+ log_warn(LD_BUG, "Unknown addrmap source value %d. Ignoring it.",
+ (int) ent->source);
+ goto done;
+ }
+
+ /* Now fill in the address with the new address. That might be via
+ * appending some new stuff to the end, or via just replacing it. */
+ if (ent->dst_wildcard && !exact_match) {
+ strlcat(address, ".", maxlen);
+ strlcat(address, ent->new_address, maxlen);
+ } else {
+ strlcpy(address, ent->new_address, maxlen);
+ }
+
+ /* Is this now a .exit address? If so, remember where we got it.*/
+ if (!strcmpend(address, ".exit") &&
+ strcmpend(addr_orig, ".exit") &&
+ exit_source == ADDRMAPSRC_NONE) {
+ exit_source = ent->source;
+ }
+
+ log_info(LD_APP, "Addressmap: rewriting %s to %s",
+ log_addr_orig, escaped_safe_str_client(address));
+ if (ent->expires > 1 && ent->expires < expires)
+ expires = ent->expires;
+
+ tor_free(log_addr_orig);
+ }
+ log_warn(LD_CONFIG,
+ "Loop detected: we've rewritten %s 16 times! Using it as-is.",
+ escaped_safe_str_client(address));
+ /* it's fine to rewrite a rewrite, but don't loop forever */
+
+ done:
+ tor_free(addr_orig);
+ tor_free(log_addr_orig);
+ if (exit_source_out)
+ *exit_source_out = exit_source;
+ if (expires_out)
+ *expires_out = expires;
+ return (rewrites > 0);
+}
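A caller-side sketch of this entry point, assuming Tor's internal headers, an initialized addressmap, and illustrative variable names; the flag shown restricts rewriting to MapAddress/torrc rules only:

/* Sketch: rewrite a client-supplied hostname in place. */
char addr[256];
time_t expires = TIME_MAX;
addressmap_entry_source_t exit_source = ADDRMAPSRC_NONE;

strlcpy(addr, "www.example.com", sizeof(addr));
if (addressmap_rewrite(addr, sizeof(addr), AMR_FLAG_USE_MAPADDRESS,
                       &expires, &exit_source)) {
  /* At least one mapping applied; addr now holds the rewritten target. */
  log_info(LD_APP, "Rewrote request to %s", safe_str_client(addr));
}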
+
+/** If we have a cached reverse DNS entry for the address stored in the
+ * <b>maxlen</b>-byte buffer <b>address</b> (typically, a dotted quad) then
+ * rewrite to the cached value and return 1. Otherwise return 0. Set
+ * *<b>expires_out</b> to the expiry time of the result, or to <b>time_max</b>
+ * if the result does not expire. */
+int
+addressmap_rewrite_reverse(char *address, size_t maxlen, unsigned flags,
+ time_t *expires_out)
+{
+ char *s, *cp;
+ addressmap_entry_t *ent;
+ int r = 0;
+ {
+ sa_family_t f;
+ tor_addr_t tmp;
+ f = tor_addr_parse(&tmp, address);
+ if (f == AF_INET && !(flags & AMR_FLAG_USE_IPV4_DNS))
+ return 0;
+ else if (f == AF_INET6 && !(flags & AMR_FLAG_USE_IPV6_DNS))
+ return 0;
+    /* FFFF we should reverse-map virtual addresses even if we haven't
+     * enabled DNS caching. */
+ }
+
+ tor_asprintf(&s, "REVERSE[%s]", address);
+ ent = strmap_get(addressmap, s);
+ if (ent) {
+ cp = tor_strdup(escaped_safe_str_client(ent->new_address));
+ log_info(LD_APP, "Rewrote reverse lookup %s -> %s",
+ escaped_safe_str_client(s), cp);
+ tor_free(cp);
+ strlcpy(address, ent->new_address, maxlen);
+ r = 1;
+ }
+
+ if (expires_out)
+ *expires_out = (ent && ent->expires > 1) ? ent->expires : TIME_MAX;
+
+ tor_free(s);
+ return r;
+}
+
+/** Return 1 if <b>address</b> is already registered, else return 0. If the
+ * address is already registered, <b>update_expiry</b> is nonzero, and the
+ * mapping was created by TrackHostExits, push the mapping's expiry time
+ * forward to now + update_expiry seconds. */
+int
+addressmap_have_mapping(const char *address, int update_expiry)
+{
+ addressmap_entry_t *ent;
+ if (!(ent=strmap_get_lc(addressmap, address)))
+ return 0;
+ if (update_expiry && ent->source==ADDRMAPSRC_TRACKEXIT)
+ ent->expires=time(NULL) + update_expiry;
+ return 1;
+}
+
+/** Register a request to map <b>address</b> to <b>new_address</b>,
+ * which will expire at <b>expires</b> (or 0 if it never expires because it
+ * came from the config file, 1 if it never expires because it came from the
+ * controller, 2 if it never expires because it is a virtual address mapping
+ * from the controller).
+ *
+ * <b>new_address</b> should be a newly dup'ed string, which we'll use or
+ * free as appropriate. We will leave <b>address</b> alone.
+ *
+ * If <b>wildcard_addr</b> is true, then the mapping will match any address
+ * equal to <b>address</b>, or any address ending with a period followed by
+ * <b>address</b>. If <b>wildcard_addr</b> and <b>wildcard_new_addr</b> are
+ * both true, the mapping will rewrite addresses that end with
+ * ".<b>address</b>" into ones that end with ".<b>new_address</b>".
+ *
+ * If <b>new_address</b> is NULL, or <b>new_address</b> is equal to
+ * <b>address</b> and <b>wildcard_addr</b> is equal to
+ * <b>wildcard_new_addr</b>, remove any mappings that exist from
+ * <b>address</b>.
+ *
+ * It is an error to set <b>wildcard_new_addr</b> if <b>wildcard_addr</b> is
+ * not set. */
+void
+addressmap_register(const char *address, char *new_address, time_t expires,
+ addressmap_entry_source_t source,
+ const int wildcard_addr,
+ const int wildcard_new_addr)
+{
+ addressmap_entry_t *ent;
+
+ if (wildcard_new_addr)
+ tor_assert(wildcard_addr);
+
+ ent = strmap_get(addressmap, address);
+ if (!new_address || (!strcasecmp(address,new_address) &&
+ wildcard_addr == wildcard_new_addr)) {
+ /* Remove the mapping, if any. */
+ tor_free(new_address);
+ if (ent) {
+ addressmap_ent_remove(address,ent);
+ strmap_remove(addressmap, address);
+ }
+ return;
+ }
+ if (!ent) { /* make a new one and register it */
+ ent = tor_malloc_zero(sizeof(addressmap_entry_t));
+ strmap_set(addressmap, address, ent);
+ } else if (ent->new_address) { /* we need to clean up the old mapping. */
+ if (expires > 1) {
+ log_info(LD_APP,"Temporary addressmap ('%s' to '%s') not performed, "
+ "since it's already mapped to '%s'",
+ safe_str_client(address),
+ safe_str_client(new_address),
+ safe_str_client(ent->new_address));
+ tor_free(new_address);
+ return;
+ }
+ if (address_is_in_virtual_range(ent->new_address) &&
+ expires != 2) {
+      /* XXX This isn't the perfect test; we want to avoid removing
+       * mappings set from the control interface as virtual mappings. */
+ addressmap_virtaddress_remove(address, ent);
+ }
+ tor_free(ent->new_address);
+ } /* else { we have an in-progress resolve with no mapping. } */
+
+ ent->new_address = new_address;
+ ent->expires = expires==2 ? 1 : expires;
+ ent->num_resolve_failures = 0;
+ ent->source = source;
+ ent->src_wildcard = wildcard_addr ? 1 : 0;
+ ent->dst_wildcard = wildcard_new_addr ? 1 : 0;
+
+ log_info(LD_CONFIG, "Addressmap: (re)mapped '%s' to '%s'",
+ safe_str_client(address),
+ safe_str_client(ent->new_address));
+ control_event_address_mapped(address, ent->new_address, expires, NULL, 1);
+}
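For example, a torrc line such as "MapAddress *.example.onion *.example.com" would reach this function roughly as in the sketch below (the actual call site lives in the config/controller code; the arguments shown are illustrative):

/* Sketch: a permanent (expires==0) wildcard-to-wildcard mapping from torrc.
 * The keys are registered without their leading "*." */
addressmap_register("example.onion",            /* address */
                    tor_strdup("example.com"),  /* new_address; ownership
                                                 * passes to the addressmap */
                    0, ADDRMAPSRC_TORRC,
                    1 /* wildcard_addr */, 1 /* wildcard_new_addr */);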
+
+/** An attempt to resolve <b>address</b> failed at some OR.
+ * Increment the number of resolve failures we have on record
+ * for it, and then return that number.
+ */
+int
+client_dns_incr_failures(const char *address)
+{
+ addressmap_entry_t *ent = strmap_get(addressmap, address);
+ if (!ent) {
+ ent = tor_malloc_zero(sizeof(addressmap_entry_t));
+ ent->expires = time(NULL) + MAX_DNS_ENTRY_AGE;
+ strmap_set(addressmap,address,ent);
+ }
+ if (ent->num_resolve_failures < SHRT_MAX)
+ ++ent->num_resolve_failures; /* don't overflow */
+ log_info(LD_APP, "Address %s now has %d resolve failures.",
+ safe_str_client(address),
+ ent->num_resolve_failures);
+ return ent->num_resolve_failures;
+}
+
+/** If <b>address</b> is in the client DNS addressmap, reset
+ * the number of resolve failures we have on record for it.
+ * This is used when we fail a stream because it won't resolve:
+ * otherwise future attempts on that address will only try once.
+ */
+void
+client_dns_clear_failures(const char *address)
+{
+ addressmap_entry_t *ent = strmap_get(addressmap, address);
+ if (ent)
+ ent->num_resolve_failures = 0;
+}
+
+/** Record the fact that <b>address</b> resolved to <b>name</b>.
+ * We can now use this in subsequent streams via addressmap_rewrite()
+ * so we can more correctly choose an exit that will allow <b>address</b>.
+ *
+ * If <b>exitname</b> is defined, then append ".exitname.exit" to the
+ * addresses before registering the mapping.
+ *
+ * If <b>ttl</b> is nonnegative, the mapping will be valid for
+ * <b>ttl</b> seconds; otherwise, we use the default.
+ */
+static void
+client_dns_set_addressmap_impl(entry_connection_t *for_conn,
+ const char *address, const char *name,
+ const char *exitname,
+ int ttl)
+{
+ char *extendedaddress=NULL, *extendedval=NULL;
+ (void)for_conn;
+
+ tor_assert(address);
+ tor_assert(name);
+
+ if (ttl<0)
+ ttl = DEFAULT_DNS_TTL;
+ else
+ ttl = dns_clip_ttl(ttl);
+
+ if (exitname) {
+    /* XXXX this fails to handle attempts to get an exit address of
+     * google.com.digest[=~]nickname.exit; we need a syntax for this that
+     * won't make strict RFC952-compliant applications (like us) barf. */
+ tor_asprintf(&extendedaddress,
+ "%s.%s.exit", address, exitname);
+ tor_asprintf(&extendedval,
+ "%s.%s.exit", name, exitname);
+ } else {
+ tor_asprintf(&extendedaddress,
+ "%s", address);
+ tor_asprintf(&extendedval,
+ "%s", name);
+ }
+ addressmap_register(extendedaddress, extendedval,
+ time(NULL) + ttl, ADDRMAPSRC_DNS, 0, 0);
+ tor_free(extendedaddress);
+}
+
+/** Record the fact that <b>address</b> resolved to <b>val</b>.
+ * We can now use this in subsequent streams via addressmap_rewrite()
+ * so we can more correctly choose an exit that will allow <b>address</b>.
+ *
+ * If <b>exitname</b> is defined, then append ".exitname.exit" to the
+ * addresses before registering the mapping.
+ *
+ * If <b>ttl</b> is nonnegative, the mapping will be valid for
+ * <b>ttl</b> seconds; otherwise, we use the default.
+ */
+void
+client_dns_set_addressmap(entry_connection_t *for_conn,
+ const char *address,
+ const tor_addr_t *val,
+ const char *exitname,
+ int ttl)
+{
+ tor_addr_t addr_tmp;
+ char valbuf[TOR_ADDR_BUF_LEN];
+
+ tor_assert(address);
+ tor_assert(val);
+
+ if (tor_addr_parse(&addr_tmp, address) >= 0)
+ return; /* If address was an IP address already, don't add a mapping. */
+
+ if (tor_addr_family(val) == AF_INET) {
+ if (! for_conn->entry_cfg.cache_ipv4_answers)
+ return;
+ } else if (tor_addr_family(val) == AF_INET6) {
+ if (! for_conn->entry_cfg.cache_ipv6_answers)
+ return;
+ }
+
+ if (! tor_addr_to_str(valbuf, val, sizeof(valbuf), 1))
+ return;
+
+ client_dns_set_addressmap_impl(for_conn, address, valbuf, exitname, ttl);
+}
+
+/** Add a cache entry noting that <b>address</b> (ordinarily a dotted quad)
+ * resolved via a RESOLVE_PTR request to the hostname <b>v</b>.
+ *
+ * If <b>exitname</b> is defined, then append ".exitname.exit" to the
+ * addresses before registering the mapping.
+ *
+ * If <b>ttl</b> is nonnegative, the mapping will be valid for
+ * <b>ttl</b> seconds; otherwise, we use the default.
+ */
+void
+client_dns_set_reverse_addressmap(entry_connection_t *for_conn,
+ const char *address, const char *v,
+ const char *exitname,
+ int ttl)
+{
+ char *s = NULL;
+ {
+ tor_addr_t tmp_addr;
+ sa_family_t f = tor_addr_parse(&tmp_addr, address);
+ if ((f == AF_INET && ! for_conn->entry_cfg.cache_ipv4_answers) ||
+ (f == AF_INET6 && ! for_conn->entry_cfg.cache_ipv6_answers))
+ return;
+ }
+ tor_asprintf(&s, "REVERSE[%s]", address);
+ client_dns_set_addressmap_impl(for_conn, s, v, exitname, ttl);
+ tor_free(s);
+}
+
+/* By default, we hand out 127.192.0.1 through 127.254.254.254.
+ * These addresses should map to localhost, so even if the
+ * application accidentally tried to connect to them directly (not
+ * via Tor), it wouldn't get too far astray.
+ *
+ * These options are configured by parse_virtual_addr_network().
+ */
+
+static virtual_addr_conf_t virtaddr_conf_ipv4;
+static virtual_addr_conf_t virtaddr_conf_ipv6;
+
+/** Read a netmask of the form 127.192.0.0/10 from "val", and check whether
+ * it's a valid set of virtual addresses to hand out in response to MAPADDRESS
+ * requests. Return 0 on success; set *msg (if provided) to a newly allocated
+ * string and return -1 on failure. If validate_only is false, sets the
+ * actual virtual address range to the parsed value. */
+int
+parse_virtual_addr_network(const char *val, sa_family_t family,
+ int validate_only,
+ char **msg)
+{
+ const int ipv6 = (family == AF_INET6);
+ tor_addr_t addr;
+ maskbits_t bits;
+ const int max_prefix_bits = ipv6 ? 104 : 16;
+ virtual_addr_conf_t *conf = ipv6 ? &virtaddr_conf_ipv6 : &virtaddr_conf_ipv4;
+
+ if (!val || val[0] == '\0') {
+ if (msg)
+ tor_asprintf(msg, "Value not present (%s) after VirtualAddressNetwork%s",
+ val?"Empty":"NULL", ipv6?"IPv6":"");
+ return -1;
+ }
+ if (tor_addr_parse_mask_ports(val, 0, &addr, &bits, NULL, NULL) < 0) {
+ if (msg)
+ tor_asprintf(msg, "Error parsing VirtualAddressNetwork%s %s",
+ ipv6?"IPv6":"", val);
+ return -1;
+ }
+ if (tor_addr_family(&addr) != family) {
+ if (msg)
+ tor_asprintf(msg, "Incorrect address type for VirtualAddressNetwork%s",
+ ipv6?"IPv6":"");
+ return -1;
+ }
+#if 0
+ if (port_min != 1 || port_max != 65535) {
+ if (msg)
+ tor_asprintf(msg, "Can't specify ports on VirtualAddressNetwork%s",
+ ipv6?"IPv6":"");
+ return -1;
+ }
+#endif /* 0 */
+
+ if (bits > max_prefix_bits) {
+ if (msg)
+ tor_asprintf(msg, "VirtualAddressNetwork%s expects a /%d "
+ "network or larger",ipv6?"IPv6":"", max_prefix_bits);
+ return -1;
+ }
+
+ if (validate_only)
+ return 0;
+
+ tor_addr_copy(&conf->addr, &addr);
+ conf->bits = bits;
+
+ return 0;
+}
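For instance, a config-handling caller might first validate a VirtualAddrNetwork value and only then install it, roughly as below (a sketch; error handling abbreviated):

char *msg = NULL;
/* validate_only=1: only check the syntax and prefix length. */
if (parse_virtual_addr_network("127.192.0.0/10", AF_INET, 1, &msg) < 0) {
  log_warn(LD_CONFIG, "Bad virtual address network: %s", msg);
  tor_free(msg);
} else {
  /* validate_only=0 installs 127.192.0.0/10 as the range handed out
   * for IPv4 automapped/virtual addresses. */
  parse_virtual_addr_network("127.192.0.0/10", AF_INET, 0, &msg);
}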
+
+/**
+ * Return true iff <b>address</b> is likely to have been returned by
+ * addressmap_get_virtual_address().
+ **/
+int
+address_is_in_virtual_range(const char *address)
+{
+ tor_addr_t addr;
+ tor_assert(address);
+ if (!strcasecmpend(address, ".virtual")) {
+ return 1;
+ } else if (tor_addr_parse(&addr, address) >= 0) {
+ const virtual_addr_conf_t *conf = (tor_addr_family(&addr) == AF_INET6) ?
+ &virtaddr_conf_ipv6 : &virtaddr_conf_ipv4;
+ if (tor_addr_compare_masked(&addr, &conf->addr, conf->bits, CMP_EXACT)==0)
+ return 1;
+ }
+ return 0;
+}
+
+/** Return a random address conforming to the virtual address configuration
+ * in <b>conf</b>.
+ */
+STATIC void
+get_random_virtual_addr(const virtual_addr_conf_t *conf, tor_addr_t *addr_out)
+{
+ uint8_t tmp[4];
+ const uint8_t *addr_bytes;
+ uint8_t bytes[16];
+ const int ipv6 = tor_addr_family(&conf->addr) == AF_INET6;
+ const int total_bytes = ipv6 ? 16 : 4;
+
+ tor_assert(conf->bits <= total_bytes * 8);
+
+ /* Set addr_bytes to the bytes of the virtual network, in host order */
+ if (ipv6) {
+ addr_bytes = tor_addr_to_in6_addr8(&conf->addr);
+ } else {
+ set_uint32(tmp, tor_addr_to_ipv4n(&conf->addr));
+ addr_bytes = tmp;
+ }
+
+ /* Get an appropriate number of random bytes. */
+ crypto_rand((char*)bytes, total_bytes);
+
+ /* Now replace the first "conf->bits" bits of 'bytes' with addr_bytes*/
+ if (conf->bits >= 8)
+ memcpy(bytes, addr_bytes, conf->bits / 8);
+ if (conf->bits & 7) {
+ uint8_t mask = 0xff >> (conf->bits & 7);
+ bytes[conf->bits/8] &= mask;
+ bytes[conf->bits/8] |= addr_bytes[conf->bits/8] & ~mask;
+ }
+
+ if (ipv6)
+ tor_addr_from_ipv6_bytes(addr_out, (char*) bytes);
+ else
+ tor_addr_from_ipv4n(addr_out, get_uint32(bytes));
+
+ tor_assert(tor_addr_compare_masked(addr_out, &conf->addr,
+ conf->bits, CMP_EXACT)==0);
+}
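The masking step above keeps the first conf->bits bits from the configured network and leaves the remaining bits random. A standalone sketch of the same byte-level splice for a hypothetical 127.192.0.0/10 network, using fixed bytes in place of random ones:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
  const unsigned bits = 10;                    /* e.g. 127.192.0.0/10 */
  const uint8_t network[4] = { 127, 192, 0, 0 };
  uint8_t out[4] = { 0xaa, 0xbb, 0xcc, 0xdd }; /* stand-in for random bytes */

  /* Copy the whole bytes of the prefix... */
  memcpy(out, network, bits / 8);
  /* ...then splice the remaining prefix bits into the next byte. */
  if (bits & 7) {
    uint8_t mask = 0xff >> (bits & 7);          /* low 6 bits set for /10 */
    out[bits / 8] &= mask;                      /* keep the random low bits */
    out[bits / 8] |= network[bits / 8] & ~mask; /* take the network high bits */
  }
  printf("%u.%u.%u.%u\n", out[0], out[1], out[2], out[3]);
  return 0;
}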
+
+/** Return a newly allocated string holding an address of <b>type</b>
+ * (one of RESOLVED_TYPE_{IPV4|IPV6|HOSTNAME}) that has not yet been
+ * mapped, and that is very unlikely to be the address of any real host.
+ *
+ * May return NULL if we have run out of virtual addresses.
+ */
+static char *
+addressmap_get_virtual_address(int type)
+{
+ char buf[64];
+ tor_assert(addressmap);
+
+ if (type == RESOLVED_TYPE_HOSTNAME) {
+ char rand_bytes[10];
+ do {
+ crypto_rand(rand_bytes, sizeof(rand_bytes));
+ base32_encode(buf,sizeof(buf),rand_bytes,sizeof(rand_bytes));
+ strlcat(buf, ".virtual", sizeof(buf));
+ } while (strmap_get(addressmap, buf));
+ return tor_strdup(buf);
+ } else if (type == RESOLVED_TYPE_IPV4 || type == RESOLVED_TYPE_IPV6) {
+ const int ipv6 = (type == RESOLVED_TYPE_IPV6);
+ const virtual_addr_conf_t *conf = ipv6 ?
+ &virtaddr_conf_ipv6 : &virtaddr_conf_ipv4;
+
+ /* Don't try more than 1000 times. This gives us P < 1e-9 for
+ * failing to get a good address so long as the address space is
+ * less than ~97.95% full. That's always going to be true under
+ * sensible circumstances for an IPv6 /10, and it's going to be
+ * true for an IPv4 /10 as long as we've handed out less than
+ * 4.08 million addresses. */
+ uint32_t attempts = 1000;
+
+ tor_addr_t addr;
+
+ while (attempts--) {
+ get_random_virtual_addr(conf, &addr);
+
+ if (!ipv6) {
+ /* Don't hand out any .0 or .255 address. */
+ const uint32_t a = tor_addr_to_ipv4h(&addr);
+ if ((a & 0xff) == 0 || (a & 0xff) == 0xff)
+ continue;
+ }
+
+ tor_addr_to_str(buf, &addr, sizeof(buf), 1);
+ if (!strmap_get(addressmap, buf)) {
+ /* XXXX This code is to make sure I didn't add an undecorated version
+ * by mistake. I hope it's needless. */
+ char tmp[TOR_ADDR_BUF_LEN];
+ tor_addr_to_str(tmp, &addr, sizeof(tmp), 0);
+ if (strmap_get(addressmap, tmp)) {
+ // LCOV_EXCL_START
+ log_warn(LD_BUG, "%s wasn't in the addressmap, but %s was.",
+ buf, tmp);
+ continue;
+ // LCOV_EXCL_STOP
+ }
+
+ return tor_strdup(buf);
+ }
+ }
+ log_warn(LD_CONFIG, "Ran out of virtual addresses!");
+ return NULL;
+ } else {
+ // LCOV_EXCL_START
+ log_warn(LD_BUG, "Called with unsupported address type (%d)", type);
+ return NULL;
+ // LCOV_EXCL_STOP
+ }
+}
+
+/** A controller has requested that we map some address of type
+ * <b>type</b> to the address <b>new_address</b>. Choose an address
+ * that is unlikely to be used, and map it, and return it in a newly
+ * allocated string. If another address of the same type is already
+ * mapped to <b>new_address</b>, try to return a copy of that address.
+ *
+ * The string in <b>new_address</b> may be freed or inserted into a map
+ * as appropriate. May return NULL if we are out of virtual addresses.
+ **/
+const char *
+addressmap_register_virtual_address(int type, char *new_address)
+{
+ char **addrp;
+ virtaddress_entry_t *vent;
+ int vent_needs_to_be_added = 0;
+
+ tor_assert(new_address);
+ tor_assert(addressmap);
+ tor_assert(virtaddress_reversemap);
+
+ vent = strmap_get(virtaddress_reversemap, new_address);
+ if (!vent) {
+ vent = tor_malloc_zero(sizeof(virtaddress_entry_t));
+ vent_needs_to_be_added = 1;
+ }
+
+ if (type == RESOLVED_TYPE_IPV4)
+ addrp = &vent->ipv4_address;
+ else if (type == RESOLVED_TYPE_IPV6)
+ addrp = &vent->ipv6_address;
+ else
+ addrp = &vent->hostname_address;
+
+ if (*addrp) {
+ addressmap_entry_t *ent = strmap_get(addressmap, *addrp);
+ if (ent && ent->new_address &&
+ !strcasecmp(new_address, ent->new_address)) {
+ tor_free(new_address);
+ tor_assert(!vent_needs_to_be_added);
+ return *addrp;
+ } else {
+ log_warn(LD_BUG,
+ "Internal confusion: I thought that '%s' was mapped to by "
+ "'%s', but '%s' really maps to '%s'. This is a harmless bug.",
+ safe_str_client(new_address),
+ safe_str_client(*addrp),
+ safe_str_client(*addrp),
+ ent?safe_str_client(ent->new_address):"(nothing)");
+ }
+ }
+
+ tor_free(*addrp);
+ *addrp = addressmap_get_virtual_address(type);
+ if (!*addrp) {
+ tor_free(vent);
+ tor_free(new_address);
+ return NULL;
+ }
+ log_info(LD_APP, "Registering map from %s to %s", *addrp, new_address);
+ if (vent_needs_to_be_added)
+ strmap_set(virtaddress_reversemap, new_address, vent);
+ addressmap_register(*addrp, new_address, 2, ADDRMAPSRC_AUTOMAP, 0, 0);
+
+ /* FFFF register corresponding reverse mapping. */
+
+#if 0
+ {
+ /* Try to catch possible bugs */
+ addressmap_entry_t *ent;
+ ent = strmap_get(addressmap, *addrp);
+ tor_assert(ent);
+ tor_assert(!strcasecmp(ent->new_address,new_address));
+ vent = strmap_get(virtaddress_reversemap, new_address);
+ tor_assert(vent);
+ tor_assert(!strcasecmp(*addrp,
+ (type == RESOLVED_TYPE_IPV4) ?
+ vent->ipv4_address : vent->hostname_address));
+ log_info(LD_APP, "Map from %s to %s okay.",
+ safe_str_client(*addrp),
+ safe_str_client(new_address));
+ }
+#endif /* 0 */
+
+ return *addrp;
+}
+
+/** Return 1 if <b>address</b> has funny characters in it like colons. Return
+ * 0 if it's fine, or if we're configured to allow it anyway. <b>client</b>
+ * should be true if we're using this address as a client; false if we're
+ * using it as a server.
+ */
+int
+address_is_invalid_destination(const char *address, int client)
+{
+ if (client) {
+ if (get_options()->AllowNonRFC953Hostnames)
+ return 0;
+ } else {
+ if (get_options()->ServerDNSAllowNonRFC953Hostnames)
+ return 0;
+ }
+
+ /* It might be an IPv6 address! */
+ {
+ tor_addr_t a;
+ if (tor_addr_parse(&a, address) >= 0)
+ return 0;
+ }
+
+ while (*address) {
+ if (TOR_ISALNUM(*address) ||
+ *address == '-' ||
+ *address == '.' ||
+ *address == '_') /* Underscore is not allowed, but Windows does it
+ * sometimes, just to thumb its nose at the IETF. */
+ ++address;
+ else
+ return 1;
+ }
+ return 0;
+}
+
+/** Iterate over all address mappings which have expiry times between
+ * min_expires and max_expires, inclusive. If sl is provided, add an
+ * "old-addr new-addr expiry" string to sl for each mapping, omitting
+ * the expiry time if want_expiry is false. If sl is NULL, remove the
+ * mappings.
+ */
+void
+addressmap_get_mappings(smartlist_t *sl, time_t min_expires,
+ time_t max_expires, int want_expiry)
+{
+ strmap_iter_t *iter;
+ const char *key;
+ void *val_;
+ addressmap_entry_t *val;
+
+ if (!addressmap)
+ addressmap_init();
+
+ for (iter = strmap_iter_init(addressmap); !strmap_iter_done(iter); ) {
+ strmap_iter_get(iter, &key, &val_);
+ val = val_;
+ if (val->expires >= min_expires && val->expires <= max_expires) {
+ if (!sl) {
+ iter = strmap_iter_next_rmv(addressmap,iter);
+ addressmap_ent_remove(key, val);
+ continue;
+ } else if (val->new_address) {
+ const char *src_wc = val->src_wildcard ? "*." : "";
+ const char *dst_wc = val->dst_wildcard ? "*." : "";
+ if (want_expiry) {
+ if (val->expires < 3 || val->expires == TIME_MAX)
+ smartlist_add_asprintf(sl, "%s%s %s%s NEVER",
+ src_wc, key, dst_wc, val->new_address);
+ else {
+ char isotime[ISO_TIME_LEN+1];
+ format_iso_time(isotime, val->expires);
+ smartlist_add_asprintf(sl, "%s%s %s%s \"%s\"",
+ src_wc, key, dst_wc, val->new_address,
+ isotime);
+ }
+ } else {
+ smartlist_add_asprintf(sl, "%s%s %s%s",
+ src_wc, key, dst_wc, val->new_address);
+ }
+ }
+ }
+ iter = strmap_iter_next(addressmap,iter);
+ }
+}
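A hedged caller sketch (Tor's smartlist API assumed) that lists every mapping set in the configuration file, i.e. those whose expires field is 0, in the "old-addr new-addr expiry" form described above:

smartlist_t *entries = smartlist_new();
addressmap_get_mappings(entries, 0, 0, 1);   /* expires==0: torrc mappings */
SMARTLIST_FOREACH(entries, char *, line, {
  log_info(LD_CONFIG, "mapping: %s", line);  /* e.g. "a.com b.com NEVER" */
  tor_free(line);
});
smartlist_free(entries);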
diff --git a/src/feature/client/addressmap.h b/src/feature/client/addressmap.h
new file mode 100644
index 0000000000..b0db5c8b4e
--- /dev/null
+++ b/src/feature/client/addressmap.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_ADDRESSMAP_H
+#define TOR_ADDRESSMAP_H
+
+#include "lib/testsupport/testsupport.h"
+
+void addressmap_init(void);
+void addressmap_clear_excluded_trackexithosts(const or_options_t *options);
+void addressmap_clear_invalid_automaps(const or_options_t *options);
+void addressmap_clean(time_t now);
+void addressmap_clear_configured(void);
+void addressmap_clear_transient(void);
+void addressmap_free_all(void);
+#define AMR_FLAG_USE_IPV4_DNS (1u<<0)
+#define AMR_FLAG_USE_IPV6_DNS (1u<<1)
+#define AMR_FLAG_USE_MAPADDRESS (1u<<2)
+#define AMR_FLAG_USE_AUTOMAP (1u<<3)
+#define AMR_FLAG_USE_TRACKEXIT (1u<<4)
+int addressmap_rewrite(char *address, size_t maxlen, unsigned flags,
+ time_t *expires_out,
+ addressmap_entry_source_t *exit_source_out);
+int addressmap_rewrite_reverse(char *address, size_t maxlen, unsigned flags,
+ time_t *expires_out);
+int addressmap_have_mapping(const char *address, int update_timeout);
+
+void addressmap_register(const char *address, char *new_address,
+ time_t expires, addressmap_entry_source_t source,
+ const int address_wildcard,
+ const int new_address_wildcard);
+int parse_virtual_addr_network(const char *val,
+ sa_family_t family, int validate_only,
+ char **msg);
+int client_dns_incr_failures(const char *address);
+void client_dns_clear_failures(const char *address);
+void client_dns_set_addressmap(entry_connection_t *for_conn,
+ const char *address, const tor_addr_t *val,
+ const char *exitname, int ttl);
+const char *addressmap_register_virtual_address(int type, char *new_address);
+void addressmap_get_mappings(smartlist_t *sl, time_t min_expires,
+ time_t max_expires, int want_expiry);
+int address_is_in_virtual_range(const char *addr);
+void clear_trackexithost_mappings(const char *exitname);
+void client_dns_set_reverse_addressmap(entry_connection_t *for_conn,
+ const char *address, const char *v,
+ const char *exitname, int ttl);
+int addressmap_address_should_automap(const char *address,
+ const or_options_t *options);
+
+#ifdef ADDRESSMAP_PRIVATE
+typedef struct virtual_addr_conf_t {
+ tor_addr_t addr;
+ maskbits_t bits;
+} virtual_addr_conf_t;
+
+STATIC void get_random_virtual_addr(const virtual_addr_conf_t *conf,
+ tor_addr_t *addr_out);
+#endif /* defined(ADDRESSMAP_PRIVATE) */
+
+#endif /* !defined(TOR_ADDRESSMAP_H) */
+
diff --git a/src/feature/client/bridges.c b/src/feature/client/bridges.c
new file mode 100644
index 0000000000..ca0a13f2a0
--- /dev/null
+++ b/src/feature/client/bridges.c
@@ -0,0 +1,937 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file bridges.c
+ * \brief Code to manage bridges and bridge selection.
+ *
+ * Bridges are fixed entry nodes, used for censorship circumvention.
+ **/
+
+#define TOR_BRIDGES_PRIVATE
+
+#include "or/or.h"
+#include "or/bridges.h"
+#include "or/circuitbuild.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/directory.h"
+#include "or/entrynodes.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerset.h"
+#include "or/transports.h"
+
+#include "or/extend_info_st.h"
+#include "or/node_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerstatus_st.h"
+
+/** Information about a configured bridge. Currently this just matches the
+ * ones in the torrc file, but one day we may be able to learn about new
+ * bridges on our own, and remember them in the state file. */
+struct bridge_info_t {
+ /** Address and port of the bridge, as configured by the user.*/
+ tor_addr_port_t addrport_configured;
+ /** Address of the bridge. */
+ tor_addr_t addr;
+ /** TLS port for the bridge. */
+ uint16_t port;
+ /** Boolean: We are re-parsing our bridge list, and we are going to remove
+ * this one if we don't find it in the list of configured bridges. */
+ unsigned marked_for_removal : 1;
+ /** Expected identity digest, or all zero bytes if we don't know what the
+ * digest should be. */
+ char identity[DIGEST_LEN];
+
+ /** Name of pluggable transport protocol taken from its config line. */
+ char *transport_name;
+
+ /** When should we next try to fetch a descriptor for this bridge? */
+ download_status_t fetch_status;
+
+ /** A smartlist of k=v values to be passed to the SOCKS proxy, if
+ transports are used for this bridge. */
+ smartlist_t *socks_args;
+};
+
+#define bridge_free(bridge) \
+ FREE_AND_NULL(bridge_info_t, bridge_free_, (bridge))
+
+static void bridge_free_(bridge_info_t *bridge);
+static void rewrite_node_address_for_bridge(const bridge_info_t *bridge,
+ node_t *node);
+
+/** A list of configured bridges. Whenever we actually get a descriptor
+ * for one, we add it as an entry guard. Note that the order of bridges
+ * in this list does not necessarily correspond to the order of bridges
+ * in the torrc. */
+static smartlist_t *bridge_list = NULL;
+
+/** Mark every entry of the bridge list to be removed on our next call to
+ * sweep_bridge_list unless it has first been un-marked. */
+void
+mark_bridge_list(void)
+{
+ if (!bridge_list)
+ bridge_list = smartlist_new();
+ SMARTLIST_FOREACH(bridge_list, bridge_info_t *, b,
+ b->marked_for_removal = 1);
+}
+
+/** Remove every entry of the bridge list that was marked with
+ * mark_bridge_list if it has not subsequently been un-marked. */
+void
+sweep_bridge_list(void)
+{
+ if (!bridge_list)
+ bridge_list = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, b) {
+ if (b->marked_for_removal) {
+ SMARTLIST_DEL_CURRENT(bridge_list, b);
+ bridge_free(b);
+ }
+ } SMARTLIST_FOREACH_END(b);
+}
+
+/** Initialize the bridge list to empty, creating it if needed. */
+STATIC void
+clear_bridge_list(void)
+{
+ if (!bridge_list)
+ bridge_list = smartlist_new();
+ SMARTLIST_FOREACH(bridge_list, bridge_info_t *, b, bridge_free(b));
+ smartlist_clear(bridge_list);
+}
+
+/** Free the bridge <b>bridge</b>. */
+static void
+bridge_free_(bridge_info_t *bridge)
+{
+ if (!bridge)
+ return;
+
+ tor_free(bridge->transport_name);
+ if (bridge->socks_args) {
+ SMARTLIST_FOREACH(bridge->socks_args, char*, s, tor_free(s));
+ smartlist_free(bridge->socks_args);
+ }
+
+ tor_free(bridge);
+}
+
+/** Return a list of all the configured bridges, as bridge_info_t pointers. */
+const smartlist_t *
+bridge_list_get(void)
+{
+ if (!bridge_list)
+ bridge_list = smartlist_new();
+ return bridge_list;
+}
+
+/**
+ * Given a <b>bridge</b>, return a pointer to its RSA identity digest, or
+ * NULL if we don't know one for it.
+ */
+const uint8_t *
+bridge_get_rsa_id_digest(const bridge_info_t *bridge)
+{
+ tor_assert(bridge);
+ if (tor_digest_is_zero(bridge->identity))
+ return NULL;
+ else
+ return (const uint8_t *) bridge->identity;
+}
+
+/**
+ * Given a <b>bridge</b>, return a pointer to its configured addr:port
+ * combination.
+ */
+const tor_addr_port_t *
+bridge_get_addr_port(const bridge_info_t *bridge)
+{
+ tor_assert(bridge);
+ return &bridge->addrport_configured;
+}
+
+/** If we have a bridge configured whose digest matches <b>digest</b>, or a
+ * bridge with no known digest whose address matches any of the
+ * tor_addr_port_t's in <b>orports</b>, return that bridge. Else return
+ * NULL. */
+STATIC bridge_info_t *
+get_configured_bridge_by_orports_digest(const char *digest,
+ const smartlist_t *orports)
+{
+ if (!bridge_list)
+ return NULL;
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, bridge)
+ {
+ if (tor_digest_is_zero(bridge->identity)) {
+ SMARTLIST_FOREACH_BEGIN(orports, tor_addr_port_t *, ap)
+ {
+ if (tor_addr_compare(&bridge->addr, &ap->addr, CMP_EXACT) == 0 &&
+ bridge->port == ap->port)
+ return bridge;
+ }
+ SMARTLIST_FOREACH_END(ap);
+ }
+ if (digest && tor_memeq(bridge->identity, digest, DIGEST_LEN))
+ return bridge;
+ }
+ SMARTLIST_FOREACH_END(bridge);
+ return NULL;
+}
+
+/** If we have a bridge configured whose digest matches <b>digest</b>, or a
+ * bridge with no known digest whose address matches <b>addr</b>:<b>port</b>,
+ * return that bridge. Else return NULL. If <b>digest</b> is NULL, check for
+ * address/port matches only. */
+bridge_info_t *
+get_configured_bridge_by_addr_port_digest(const tor_addr_t *addr,
+ uint16_t port,
+ const char *digest)
+{
+ if (!bridge_list)
+ return NULL;
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, bridge)
+ {
+ if ((tor_digest_is_zero(bridge->identity) || digest == NULL) &&
+ !tor_addr_compare(&bridge->addr, addr, CMP_EXACT) &&
+ bridge->port == port)
+ return bridge;
+ if (digest && tor_memeq(bridge->identity, digest, DIGEST_LEN))
+ return bridge;
+ }
+ SMARTLIST_FOREACH_END(bridge);
+ return NULL;
+}
+
+/**
+ * As get_configured_bridge_by_addr_port_digest(), but require that the
+ * address match <b>addr</b>:<b>port</b>, and that the ID digest match
+ * <b>digest</b>. (The other function will ignore the address if the
+ * digest matches.)
+ */
+bridge_info_t *
+get_configured_bridge_by_exact_addr_port_digest(const tor_addr_t *addr,
+ uint16_t port,
+ const char *digest)
+{
+ if (!bridge_list)
+ return NULL;
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, bridge) {
+ if (!tor_addr_compare(&bridge->addr, addr, CMP_EXACT) &&
+ bridge->port == port) {
+
+ if (digest && tor_memeq(bridge->identity, digest, DIGEST_LEN))
+ return bridge;
+ else if (!digest || tor_digest_is_zero(bridge->identity))
+ return bridge;
+ }
+
+ } SMARTLIST_FOREACH_END(bridge);
+ return NULL;
+}
+
+/** If we have a bridge configured whose digest matches <b>digest</b>, or a
+ * bridge with no known digest whose address matches <b>addr</b>:<b>port</b>,
+ * return 1. Else return 0. If <b>digest</b> is NULL, check for
+ * address/port matches only. */
+int
+addr_is_a_configured_bridge(const tor_addr_t *addr,
+ uint16_t port,
+ const char *digest)
+{
+ tor_assert(addr);
+ return get_configured_bridge_by_addr_port_digest(addr, port, digest) ? 1 : 0;
+}
+
+/** If we have a bridge configured whose digest matches
+ * <b>ei->identity_digest</b>, or a bridge with no known digest whose address
+ * matches <b>ei->addr</b>:<b>ei->port</b>, return 1. Else return 0.
+ * If <b>ei->onion_key</b> is NULL, check for address/port matches only. */
+int
+extend_info_is_a_configured_bridge(const extend_info_t *ei)
+{
+ const char *digest = ei->onion_key ? ei->identity_digest : NULL;
+ return addr_is_a_configured_bridge(&ei->addr, ei->port, digest);
+}
+
+/** Wrapper around get_configured_bridge_by_addr_port_digest() to look
+ * it up via router descriptor <b>ri</b>. */
+static bridge_info_t *
+get_configured_bridge_by_routerinfo(const routerinfo_t *ri)
+{
+ bridge_info_t *bi = NULL;
+ smartlist_t *orports = router_get_all_orports(ri);
+ bi = get_configured_bridge_by_orports_digest(ri->cache_info.identity_digest,
+ orports);
+ SMARTLIST_FOREACH(orports, tor_addr_port_t *, p, tor_free(p));
+ smartlist_free(orports);
+ return bi;
+}
+
+/** Return 1 if <b>ri</b> is one of our known bridges, else 0. */
+int
+routerinfo_is_a_configured_bridge(const routerinfo_t *ri)
+{
+ return get_configured_bridge_by_routerinfo(ri) ? 1 : 0;
+}
+
+/** Return 1 if <b>node</b> is one of our configured bridges, else 0. */
+int
+node_is_a_configured_bridge(const node_t *node)
+{
+ int retval = 0;
+ smartlist_t *orports = node_get_all_orports(node);
+ retval = get_configured_bridge_by_orports_digest(node->identity,
+ orports) != NULL;
+ SMARTLIST_FOREACH(orports, tor_addr_port_t *, p, tor_free(p));
+ smartlist_free(orports);
+ return retval;
+}
+
+/** We made a connection to a router at <b>addr</b>:<b>port</b>
+ * without knowing its digest. Its digest turned out to be <b>digest</b>.
+ * If it was a bridge, and we still don't know its digest, record it.
+ */
+void
+learned_router_identity(const tor_addr_t *addr, uint16_t port,
+ const char *digest,
+ const ed25519_public_key_t *ed_id)
+{
+ // XXXX prop220 use ed_id here, once there is some way to specify
+ (void)ed_id;
+ int learned = 0;
+ bridge_info_t *bridge =
+ get_configured_bridge_by_exact_addr_port_digest(addr, port, digest);
+ if (bridge && tor_digest_is_zero(bridge->identity)) {
+ memcpy(bridge->identity, digest, DIGEST_LEN);
+ learned = 1;
+ }
+ /* XXXX prop220 remember bridge ed25519 identities -- add a field */
+#if 0
+ if (bridge && ed_id &&
+ ed25519_public_key_is_zero(&bridge->ed25519_identity) &&
+ !ed25519_public_key_is_zero(ed_id)) {
+ memcpy(&bridge->ed25519_identity, ed_id, sizeof(*ed_id));
+ learned = 1;
+ }
+#endif /* 0 */
+ if (learned) {
+ char *transport_info = NULL;
+ const char *transport_name =
+ find_transport_name_by_bridge_addrport(addr, port);
+ if (transport_name)
+ tor_asprintf(&transport_info, " (with transport '%s')", transport_name);
+
+ // XXXX prop220 log both fingerprints.
+ log_notice(LD_DIR, "Learned fingerprint %s for bridge %s%s.",
+ hex_str(digest, DIGEST_LEN), fmt_addrport(addr, port),
+ transport_info ? transport_info : "");
+ tor_free(transport_info);
+ entry_guard_learned_bridge_identity(&bridge->addrport_configured,
+ (const uint8_t *)digest);
+ }
+}
+
+/** Return true if <b>bridge</b> has the same identity digest as
+ * <b>digest</b>. If <b>digest</b> is NULL, it matches
+ * bridges with unspecified identity digests. */
+static int
+bridge_has_digest(const bridge_info_t *bridge, const char *digest)
+{
+ if (digest)
+ return tor_memeq(digest, bridge->identity, DIGEST_LEN);
+ else
+ return tor_digest_is_zero(bridge->identity);
+}
+
+/** We are about to add a new bridge at <b>addr</b>:<b>port</b>, with optional
+ * <b>digest</b> and <b>transport_name</b>. Mark for removal any previously
+ * existing bridge with the same address and port, and warn the user as
+ * appropriate.
+ */
+STATIC void
+bridge_resolve_conflicts(const tor_addr_t *addr, uint16_t port,
+ const char *digest, const char *transport_name)
+{
+ /* Iterate the already-registered bridge list:
+
+ If you find a bridge with the same address and port, mark it for
+ removal. It doesn't make sense to have two active bridges with
+ the same IP:PORT. If the bridge in question has a different
+ digest or transport than <b>digest</b>/<b>transport_name</b>,
+ it's probably a misconfiguration and we should warn the user.
+ */
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, bridge) {
+ if (bridge->marked_for_removal)
+ continue;
+
+ if (tor_addr_eq(&bridge->addr, addr) && (bridge->port == port)) {
+
+ bridge->marked_for_removal = 1;
+
+ if (!bridge_has_digest(bridge, digest) ||
+ strcmp_opt(bridge->transport_name, transport_name)) {
+ /* warn the user */
+ char *bridge_description_new, *bridge_description_old;
+ tor_asprintf(&bridge_description_new, "%s:%s:%s",
+ fmt_addrport(addr, port),
+ digest ? hex_str(digest, DIGEST_LEN) : "",
+ transport_name ? transport_name : "");
+ tor_asprintf(&bridge_description_old, "%s:%s:%s",
+ fmt_addrport(&bridge->addr, bridge->port),
+ tor_digest_is_zero(bridge->identity) ?
+ "" : hex_str(bridge->identity,DIGEST_LEN),
+ bridge->transport_name ? bridge->transport_name : "");
+
+ log_warn(LD_GENERAL,"Tried to add bridge '%s', but we found a conflict"
+ " with the already registered bridge '%s'. We will discard"
+ " the old bridge and keep '%s'. If this is not what you"
+ " wanted, please change your configuration file accordingly.",
+ bridge_description_new, bridge_description_old,
+ bridge_description_new);
+
+ tor_free(bridge_description_new);
+ tor_free(bridge_description_old);
+ }
+ }
+ } SMARTLIST_FOREACH_END(bridge);
+}
+
+/** Return True if we have a bridge that uses a transport with name
+ * <b>transport_name</b>. */
+MOCK_IMPL(int,
+transport_is_needed, (const char *transport_name))
+{
+ if (!bridge_list)
+ return 0;
+
+ SMARTLIST_FOREACH_BEGIN(bridge_list, const bridge_info_t *, bridge) {
+ if (bridge->transport_name &&
+ !strcmp(bridge->transport_name, transport_name))
+ return 1;
+ } SMARTLIST_FOREACH_END(bridge);
+
+ return 0;
+}
+
+/** Register the bridge information in <b>bridge_line</b> to the
+ * bridge subsystem. Steals reference of <b>bridge_line</b>. */
+void
+bridge_add_from_config(bridge_line_t *bridge_line)
+{
+ bridge_info_t *b;
+
+ // XXXX prop220 add a way to specify ed25519 ID to bridge_line_t.
+
+ { /* Log the bridge we are about to register: */
+ log_debug(LD_GENERAL, "Registering bridge at %s (transport: %s) (%s)",
+ fmt_addrport(&bridge_line->addr, bridge_line->port),
+ bridge_line->transport_name ?
+ bridge_line->transport_name : "no transport",
+ tor_digest_is_zero(bridge_line->digest) ?
+ "no key listed" : hex_str(bridge_line->digest, DIGEST_LEN));
+
+ if (bridge_line->socks_args) { /* print socks arguments */
+ int i = 0;
+
+ tor_assert(smartlist_len(bridge_line->socks_args) > 0);
+
+ log_debug(LD_GENERAL, "Bridge uses %d SOCKS arguments:",
+ smartlist_len(bridge_line->socks_args));
+ SMARTLIST_FOREACH(bridge_line->socks_args, const char *, arg,
+ log_debug(LD_CONFIG, "%d: %s", ++i, arg));
+ }
+ }
+
+ bridge_resolve_conflicts(&bridge_line->addr,
+ bridge_line->port,
+ bridge_line->digest,
+ bridge_line->transport_name);
+
+ b = tor_malloc_zero(sizeof(bridge_info_t));
+ tor_addr_copy(&b->addrport_configured.addr, &bridge_line->addr);
+ b->addrport_configured.port = bridge_line->port;
+ tor_addr_copy(&b->addr, &bridge_line->addr);
+ b->port = bridge_line->port;
+ memcpy(b->identity, bridge_line->digest, DIGEST_LEN);
+ if (bridge_line->transport_name)
+ b->transport_name = bridge_line->transport_name;
+ b->fetch_status.schedule = DL_SCHED_BRIDGE;
+ b->fetch_status.increment_on = DL_SCHED_INCREMENT_ATTEMPT;
+ /* We can't reset the bridge's download status here, because UseBridges
+ * might be 0 now, and it might be changed to 1 much later. */
+ b->socks_args = bridge_line->socks_args;
+ if (!bridge_list)
+ bridge_list = smartlist_new();
+
+ tor_free(bridge_line); /* Deallocate bridge_line now. */
+
+ smartlist_add(bridge_list, b);
+}
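As a sketch of how a torrc line like "Bridge obfs4 192.0.2.1:443 <fingerprint>" arrives here: the config parser builds a bridge_line_t (defined elsewhere; only the fields used above are relied on) and hands ownership to this function. The construction below is illustrative, not the real parsing code:

bridge_line_t *line = tor_malloc_zero(sizeof(bridge_line_t));
tor_addr_parse(&line->addr, "192.0.2.1");
line->port = 443;
line->transport_name = tor_strdup("obfs4");
/* line->digest stays all-zero here: logged as "no key listed" above. */
bridge_add_from_config(line);   /* takes ownership of line */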
+
+/** If <b>digest</b> is one of our known bridges, return it. */
+STATIC bridge_info_t *
+find_bridge_by_digest(const char *digest)
+{
+ if (! bridge_list)
+ return NULL;
+ SMARTLIST_FOREACH(bridge_list, bridge_info_t *, bridge,
+ {
+ if (tor_memeq(bridge->identity, digest, DIGEST_LEN))
+ return bridge;
+ });
+ return NULL;
+}
+
+/** Given the <b>addr</b> and <b>port</b> of a bridge, if that bridge
+ * supports a pluggable transport, return its name. Otherwise, return
+ * NULL. */
+const char *
+find_transport_name_by_bridge_addrport(const tor_addr_t *addr, uint16_t port)
+{
+ if (!bridge_list)
+ return NULL;
+
+ SMARTLIST_FOREACH_BEGIN(bridge_list, const bridge_info_t *, bridge) {
+ if (tor_addr_eq(&bridge->addr, addr) &&
+ (bridge->port == port))
+ return bridge->transport_name;
+ } SMARTLIST_FOREACH_END(bridge);
+
+ return NULL;
+}
+
+/** If <b>addr</b> and <b>port</b> match the address and port of a
+ * bridge of ours that uses pluggable transports, place its transport
+ * in <b>transport</b>.
+ *
+ * Return 0 on success (found a transport, or found a bridge with no
+ * transport, or found no bridge); return -1 if we should be using a
+ * transport, but the transport could not be found.
+ */
+int
+get_transport_by_bridge_addrport(const tor_addr_t *addr, uint16_t port,
+ const transport_t **transport)
+{
+ *transport = NULL;
+ if (!bridge_list)
+ return 0;
+
+ SMARTLIST_FOREACH_BEGIN(bridge_list, const bridge_info_t *, bridge) {
+ if (tor_addr_eq(&bridge->addr, addr) &&
+ (bridge->port == port)) { /* bridge matched */
+ if (bridge->transport_name) { /* it also uses pluggable transports */
+ *transport = transport_get_by_name(bridge->transport_name);
+ if (*transport == NULL) { /* it uses pluggable transports, but
+ the transport could not be found! */
+ return -1;
+ }
+ return 0;
+ } else { /* bridge matched, but it doesn't use transports. */
+ break;
+ }
+ }
+ } SMARTLIST_FOREACH_END(bridge);
+
+ *transport = NULL;
+ return 0;
+}
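A caller sketch distinguishing the three outcomes documented above, assuming addr and port already hold the candidate bridge's address:

const transport_t *transport = NULL;
if (get_transport_by_bridge_addrport(&addr, port, &transport) < 0) {
  /* The bridge is configured with a transport we have not registered. */
  log_warn(LD_GENERAL, "Transport for this bridge is missing.");
} else if (transport) {
  /* Bridge found, and it uses the pluggable transport in *transport. */
} else {
  /* Either no matching bridge, or the bridge uses no transport. */
}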
+
+/** Return a smartlist containing all the SOCKS arguments that we
+ * should pass to the SOCKS proxy for the bridge at <b>addr</b>:<b>port</b>,
+ * or NULL if there is no such bridge or it has no SOCKS arguments. */
+const smartlist_t *
+get_socks_args_by_bridge_addrport(const tor_addr_t *addr, uint16_t port)
+{
+ bridge_info_t *bridge = get_configured_bridge_by_addr_port_digest(addr,
+ port,
+ NULL);
+ return bridge ? bridge->socks_args : NULL;
+}
+
+/** We need to ask <b>bridge</b> for its server descriptor. */
+static void
+launch_direct_bridge_descriptor_fetch(bridge_info_t *bridge)
+{
+ const or_options_t *options = get_options();
+ circuit_guard_state_t *guard_state = NULL;
+
+ if (connection_get_by_type_addr_port_purpose(
+ CONN_TYPE_DIR, &bridge->addr, bridge->port,
+ DIR_PURPOSE_FETCH_SERVERDESC))
+ return; /* it's already on the way */
+
+ if (routerset_contains_bridge(options->ExcludeNodes, bridge)) {
+ download_status_mark_impossible(&bridge->fetch_status);
+ log_warn(LD_APP, "Not using bridge at %s: it is in ExcludeNodes.",
+ safe_str_client(fmt_and_decorate_addr(&bridge->addr)));
+ return;
+ }
+
+ /* Until we get a descriptor for the bridge, we only know one address for
+ * it. */
+ if (!fascist_firewall_allows_address_addr(&bridge->addr, bridge->port,
+ FIREWALL_OR_CONNECTION, 0, 0)) {
+ log_notice(LD_CONFIG, "Tried to fetch a descriptor directly from a "
+ "bridge, but that bridge is not reachable through our "
+ "firewall.");
+ return;
+ }
+
+ /* If we already have a node_t for this bridge, rewrite its address now. */
+ node_t *node = node_get_mutable_by_id(bridge->identity);
+ if (node) {
+ rewrite_node_address_for_bridge(bridge, node);
+ }
+
+ tor_addr_port_t bridge_addrport;
+ memcpy(&bridge_addrport.addr, &bridge->addr, sizeof(tor_addr_t));
+ bridge_addrport.port = bridge->port;
+
+ guard_state = get_guard_state_for_bridge_desc_fetch(bridge->identity);
+
+ directory_request_t *req =
+ directory_request_new(DIR_PURPOSE_FETCH_SERVERDESC);
+ directory_request_set_or_addr_port(req, &bridge_addrport);
+ directory_request_set_directory_id_digest(req, bridge->identity);
+ directory_request_set_router_purpose(req, ROUTER_PURPOSE_BRIDGE);
+ directory_request_set_resource(req, "authority.z");
+ if (guard_state) {
+ directory_request_set_guard_state(req, guard_state);
+ }
+ directory_initiate_request(req);
+ directory_request_free(req);
+}
+
+/** Fetching the bridge descriptor from the bridge authority returned a
+ * "not found". Fall back to trying a direct fetch. */
+void
+retry_bridge_descriptor_fetch_directly(const char *digest)
+{
+ bridge_info_t *bridge = find_bridge_by_digest(digest);
+ if (!bridge)
+ return; /* not found? oh well. */
+
+ launch_direct_bridge_descriptor_fetch(bridge);
+}
+
+/** For each bridge in our list for which we don't currently have a
+ * descriptor, fetch a new copy of its descriptor -- either directly
+ * from the bridge or via a bridge authority. */
+void
+fetch_bridge_descriptors(const or_options_t *options, time_t now)
+{
+ int num_bridge_auths = get_n_authorities(BRIDGE_DIRINFO);
+ int ask_bridge_directly;
+ int can_use_bridge_authority;
+
+ if (!bridge_list)
+ return;
+
+ /* If we still have unconfigured managed proxies, don't go and
+ connect to a bridge. */
+ if (pt_proxies_configuration_pending())
+ return;
+
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, bridge)
+ {
+ /* This resets the download status on first use */
+ if (!download_status_is_ready(&bridge->fetch_status, now))
+ continue; /* don't bother, no need to retry yet */
+ if (routerset_contains_bridge(options->ExcludeNodes, bridge)) {
+ download_status_mark_impossible(&bridge->fetch_status);
+ log_warn(LD_APP, "Not using bridge at %s: it is in ExcludeNodes.",
+ safe_str_client(fmt_and_decorate_addr(&bridge->addr)));
+ continue;
+ }
+
+    /* Schedule the next attempt.
+     * We can't increment after a failure, because sometimes we use the
+     * bridge authority, and sometimes we contact the bridge directly. */
+ download_status_increment_attempt(
+ &bridge->fetch_status,
+ safe_str_client(fmt_and_decorate_addr(&bridge->addr)),
+ now);
+
+ can_use_bridge_authority = !tor_digest_is_zero(bridge->identity) &&
+ num_bridge_auths;
+ ask_bridge_directly = !can_use_bridge_authority ||
+ !options->UpdateBridgesFromAuthority;
+ log_debug(LD_DIR, "ask_bridge_directly=%d (%d, %d, %d)",
+ ask_bridge_directly, tor_digest_is_zero(bridge->identity),
+ !options->UpdateBridgesFromAuthority, !num_bridge_auths);
+
+ if (ask_bridge_directly &&
+ !fascist_firewall_allows_address_addr(&bridge->addr, bridge->port,
+ FIREWALL_OR_CONNECTION, 0,
+ 0)) {
+ log_notice(LD_DIR, "Bridge at '%s' isn't reachable by our "
+ "firewall policy. %s.",
+ fmt_addrport(&bridge->addr, bridge->port),
+ can_use_bridge_authority ?
+ "Asking bridge authority instead" : "Skipping");
+ if (can_use_bridge_authority)
+ ask_bridge_directly = 0;
+ else
+ continue;
+ }
+
+ if (ask_bridge_directly) {
+ /* we need to ask the bridge itself for its descriptor. */
+ launch_direct_bridge_descriptor_fetch(bridge);
+ } else {
+ /* We have a digest and we want to ask an authority. We could
+ * combine all the requests into one, but that may give more
+ * hints to the bridge authority than we want to give. */
+ char resource[10 + HEX_DIGEST_LEN];
+ memcpy(resource, "fp/", 3);
+ base16_encode(resource+3, HEX_DIGEST_LEN+1,
+ bridge->identity, DIGEST_LEN);
+ memcpy(resource+3+HEX_DIGEST_LEN, ".z", 3);
+ log_info(LD_DIR, "Fetching bridge info '%s' from bridge authority.",
+ resource);
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
+ ROUTER_PURPOSE_BRIDGE, resource, 0, DL_WANT_AUTHORITY);
+ }
+ }
+ SMARTLIST_FOREACH_END(bridge);
+}
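+
+/* For reference, a worked example of the resource string built above
+ * (digest value invented): the resource is "fp/" followed by the
+ * hex-encoded identity digest and a ".z" suffix requesting the compressed
+ * document. For a hypothetical digest of twenty 0xAA bytes this would be
+ * "fp/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA.z", which is why the buffer
+ * is sized 10 + HEX_DIGEST_LEN: "fp/" and ".z" plus a NUL terminator fit
+ * comfortably around the HEX_DIGEST_LEN-character digest. */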
+
+/** If our <b>bridge</b> is configured with a different address than
+ * the bridge advertises in <b>node</b>, rewrite the routerinfo
+ * we received to use the address we meant to use. (This also lets us
+ * handle multihomed bridges a little better.)
+ */
+static void
+rewrite_node_address_for_bridge(const bridge_info_t *bridge, node_t *node)
+{
+ /* XXXX move this function. */
+ /* XXXX overridden addresses should really live in the node_t, so that the
+ * routerinfo_t and the microdesc_t can be immutable. But we can only
+ * do that safely if we know that no function that connects to an OR
+ * does so through an address from any source other than node_get_addr().
+ */
+ tor_addr_t addr;
+ const or_options_t *options = get_options();
+
+ if (node->ri) {
+ routerinfo_t *ri = node->ri;
+ tor_addr_from_ipv4h(&addr, ri->addr);
+ if ((!tor_addr_compare(&bridge->addr, &addr, CMP_EXACT) &&
+ bridge->port == ri->or_port) ||
+ (!tor_addr_compare(&bridge->addr, &ri->ipv6_addr, CMP_EXACT) &&
+ bridge->port == ri->ipv6_orport)) {
+ /* they match, so no need to do anything */
+ } else {
+ if (tor_addr_family(&bridge->addr) == AF_INET) {
+ ri->addr = tor_addr_to_ipv4h(&bridge->addr);
+ ri->or_port = bridge->port;
+ log_info(LD_DIR,
+ "Adjusted bridge routerinfo for '%s' to match configured "
+ "address %s:%d.",
+ ri->nickname, fmt_addr32(ri->addr), ri->or_port);
+ } else if (tor_addr_family(&bridge->addr) == AF_INET6) {
+ tor_addr_copy(&ri->ipv6_addr, &bridge->addr);
+ ri->ipv6_orport = bridge->port;
+ log_info(LD_DIR,
+ "Adjusted bridge routerinfo for '%s' to match configured "
+ "address %s.",
+ ri->nickname, fmt_addrport(&ri->ipv6_addr, ri->ipv6_orport));
+ } else {
+ log_err(LD_BUG, "Address family not supported: %d.",
+ tor_addr_family(&bridge->addr));
+ return;
+ }
+ }
+
+ if (options->ClientPreferIPv6ORPort == -1) {
+ /* Mark which address to use based on which bridge_t we got. */
+ node->ipv6_preferred = (tor_addr_family(&bridge->addr) == AF_INET6 &&
+ !tor_addr_is_null(&node->ri->ipv6_addr));
+ } else {
+ /* Mark which address to use based on user preference */
+ node->ipv6_preferred = (fascist_firewall_prefer_ipv6_orport(options) &&
+ !tor_addr_is_null(&node->ri->ipv6_addr));
+ }
+
+ /* XXXipv6 we lack support for falling back to another address for
+ the same relay, warn the user */
+ if (!tor_addr_is_null(&ri->ipv6_addr)) {
+ tor_addr_port_t ap;
+ node_get_pref_orport(node, &ap);
+ log_notice(LD_CONFIG,
+ "Bridge '%s' has both an IPv4 and an IPv6 address. "
+ "Will prefer using its %s address (%s) based on %s.",
+ ri->nickname,
+ node->ipv6_preferred ? "IPv6" : "IPv4",
+ fmt_addrport(&ap.addr, ap.port),
+ options->ClientPreferIPv6ORPort == -1 ?
+ "the configured Bridge address" :
+ "ClientPreferIPv6ORPort");
+ }
+ }
+ if (node->rs) {
+ routerstatus_t *rs = node->rs;
+ tor_addr_from_ipv4h(&addr, rs->addr);
+
+ if ((!tor_addr_compare(&bridge->addr, &addr, CMP_EXACT) &&
+ bridge->port == rs->or_port) ||
+ (!tor_addr_compare(&bridge->addr, &rs->ipv6_addr, CMP_EXACT) &&
+ bridge->port == rs->ipv6_orport)) {
+ /* they match, so no need to do anything */
+ } else {
+ if (tor_addr_family(&bridge->addr) == AF_INET) {
+ rs->addr = tor_addr_to_ipv4h(&bridge->addr);
+ rs->or_port = bridge->port;
+ log_info(LD_DIR,
+ "Adjusted bridge routerstatus for '%s' to match "
+ "configured address %s.",
+ rs->nickname, fmt_addrport(&bridge->addr, rs->or_port));
+ /* set IPv6 preferences even if there is no ri */
+ } else if (tor_addr_family(&bridge->addr) == AF_INET6) {
+ tor_addr_copy(&rs->ipv6_addr, &bridge->addr);
+ rs->ipv6_orport = bridge->port;
+ log_info(LD_DIR,
+ "Adjusted bridge routerstatus for '%s' to match configured"
+ " address %s.",
+ rs->nickname, fmt_addrport(&rs->ipv6_addr, rs->ipv6_orport));
+ } else {
+ log_err(LD_BUG, "Address family not supported: %d.",
+ tor_addr_family(&bridge->addr));
+ return;
+ }
+ }
+
+ if (options->ClientPreferIPv6ORPort == -1) {
+ /* Mark which address to use based on which bridge_t we got. */
+ node->ipv6_preferred = (tor_addr_family(&bridge->addr) == AF_INET6 &&
+ !tor_addr_is_null(&node->rs->ipv6_addr));
+ } else {
+ /* Mark which address to use based on user preference */
+ node->ipv6_preferred = (fascist_firewall_prefer_ipv6_orport(options) &&
+ !tor_addr_is_null(&node->rs->ipv6_addr));
+ }
+
+ /* XXXipv6 we lack support for falling back to another address for
+ the same relay, warn the user */
+ if (!tor_addr_is_null(&rs->ipv6_addr)) {
+ tor_addr_port_t ap;
+ node_get_pref_orport(node, &ap);
+ log_notice(LD_CONFIG,
+ "Bridge '%s' has both an IPv4 and an IPv6 address. "
+ "Will prefer using its %s address (%s) based on %s.",
+ rs->nickname,
+ node->ipv6_preferred ? "IPv6" : "IPv4",
+ fmt_addrport(&ap.addr, ap.port),
+ options->ClientPreferIPv6ORPort == -1 ?
+ "the configured Bridge address" :
+ "ClientPreferIPv6ORPort");
+ }
+ }
+}
+
+/** We just learned a descriptor for a bridge. See if that
+ * digest is in our entry guard list, and add it if not. */
+void
+learned_bridge_descriptor(routerinfo_t *ri, int from_cache)
+{
+ tor_assert(ri);
+ tor_assert(ri->purpose == ROUTER_PURPOSE_BRIDGE);
+ if (get_options()->UseBridges) {
+ /* Retry directory downloads whenever we get a bridge descriptor:
+ * - when bootstrapping, and
+ * - when we aren't sure if any of our bridges are reachable.
+ * Keep on retrying until we have at least one reachable bridge. */
+ int first = num_bridges_usable(0) < 1;
+ bridge_info_t *bridge = get_configured_bridge_by_routerinfo(ri);
+ time_t now = time(NULL);
+ router_set_status(ri->cache_info.identity_digest, 1);
+
+ if (bridge) { /* if we actually want to use this one */
+ node_t *node;
+ /* it's here; schedule its re-fetch for a long time from now. */
+ if (!from_cache) {
+ /* This schedules the re-fetch at a constant interval, which produces
+ * a pattern of bridge traffic. But it's better than trying all
+         * configured bridges several times in the first few minutes. */
+ download_status_reset(&bridge->fetch_status);
+ }
+
+ node = node_get_mutable_by_id(ri->cache_info.identity_digest);
+ tor_assert(node);
+ rewrite_node_address_for_bridge(bridge, node);
+ if (tor_digest_is_zero(bridge->identity)) {
+        memcpy(bridge->identity, ri->cache_info.identity_digest, DIGEST_LEN);
+ log_notice(LD_DIR, "Learned identity %s for bridge at %s:%d",
+ hex_str(bridge->identity, DIGEST_LEN),
+ fmt_and_decorate_addr(&bridge->addr),
+ (int) bridge->port);
+ }
+ entry_guard_learned_bridge_identity(&bridge->addrport_configured,
+ (const uint8_t*)ri->cache_info.identity_digest);
+
+ log_notice(LD_DIR, "new bridge descriptor '%s' (%s): %s", ri->nickname,
+ from_cache ? "cached" : "fresh", router_describe(ri));
+ /* If we didn't have a reachable bridge before this one, try directory
+ * documents again. */
+ if (first) {
+ routerlist_retry_directory_downloads(now);
+ }
+ }
+ }
+}
+
+/** Return a smartlist containing all bridge identity digests */
+MOCK_IMPL(smartlist_t *,
+list_bridge_identities, (void))
+{
+ smartlist_t *result = NULL;
+ char *digest_tmp;
+
+ if (get_options()->UseBridges && bridge_list) {
+ result = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, b) {
+ digest_tmp = tor_malloc(DIGEST_LEN);
+ memcpy(digest_tmp, b->identity, DIGEST_LEN);
+ smartlist_add(result, digest_tmp);
+ } SMARTLIST_FOREACH_END(b);
+ }
+
+ return result;
+}
+
+/** Get the download status for a bridge descriptor given its identity */
+MOCK_IMPL(download_status_t *,
+get_bridge_dl_status_by_id, (const char *digest))
+{
+ download_status_t *dl = NULL;
+
+ if (digest && get_options()->UseBridges && bridge_list) {
+ SMARTLIST_FOREACH_BEGIN(bridge_list, bridge_info_t *, b) {
+ if (tor_memeq(digest, b->identity, DIGEST_LEN)) {
+ dl = &(b->fetch_status);
+ break;
+ }
+ } SMARTLIST_FOREACH_END(b);
+ }
+
+ return dl;
+}
+
+/** Release all storage held in bridges.c */
+void
+bridges_free_all(void)
+{
+ clear_bridge_list();
+ smartlist_free(bridge_list);
+ bridge_list = NULL;
+}
+
diff --git a/src/feature/client/bridges.h b/src/feature/client/bridges.h
new file mode 100644
index 0000000000..70588c1b91
--- /dev/null
+++ b/src/feature/client/bridges.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file bridges.h
+ * \brief Header file for bridges.c.
+ **/
+
+#ifndef TOR_BRIDGES_H
+#define TOR_BRIDGES_H
+
+struct bridge_line_t;
+struct ed25519_public_key_t;
+
+/* Opaque handle to a configured bridge */
+typedef struct bridge_info_t bridge_info_t;
+
+void mark_bridge_list(void);
+void sweep_bridge_list(void);
+const smartlist_t *bridge_list_get(void);
+const uint8_t *bridge_get_rsa_id_digest(const bridge_info_t *bridge);
+const tor_addr_port_t * bridge_get_addr_port(const bridge_info_t *bridge);
+bridge_info_t *get_configured_bridge_by_addr_port_digest(
+ const tor_addr_t *addr,
+ uint16_t port,
+ const char *digest);
+bridge_info_t *get_configured_bridge_by_exact_addr_port_digest(
+ const tor_addr_t *addr,
+ uint16_t port,
+ const char *digest);
+
+int addr_is_a_configured_bridge(const tor_addr_t *addr, uint16_t port,
+ const char *digest);
+int extend_info_is_a_configured_bridge(const extend_info_t *ei);
+int routerinfo_is_a_configured_bridge(const routerinfo_t *ri);
+int node_is_a_configured_bridge(const node_t *node);
+void learned_router_identity(const tor_addr_t *addr, uint16_t port,
+ const char *digest,
+ const struct ed25519_public_key_t *ed_id);
+
+void bridge_add_from_config(struct bridge_line_t *bridge_line);
+void retry_bridge_descriptor_fetch_directly(const char *digest);
+void fetch_bridge_descriptors(const or_options_t *options, time_t now);
+void learned_bridge_descriptor(routerinfo_t *ri, int from_cache);
+const smartlist_t *get_socks_args_by_bridge_addrport(const tor_addr_t *addr,
+ uint16_t port);
+
+int any_bridges_dont_support_microdescriptors(void);
+
+const char *find_transport_name_by_bridge_addrport(const tor_addr_t *addr,
+ uint16_t port);
+struct transport_t;
+int get_transport_by_bridge_addrport(const tor_addr_t *addr, uint16_t port,
+ const struct transport_t **transport);
+
+MOCK_DECL(int, transport_is_needed, (const char *transport_name));
+int validate_pluggable_transports_config(void);
+
+MOCK_DECL(smartlist_t *, list_bridge_identities, (void));
+MOCK_DECL(download_status_t *, get_bridge_dl_status_by_id,
+ (const char *digest));
+
+void bridges_free_all(void);
+
+#ifdef TOR_BRIDGES_PRIVATE
+STATIC void clear_bridge_list(void);
+STATIC bridge_info_t *find_bridge_by_digest(const char *digest);
+STATIC bridge_info_t *get_configured_bridge_by_orports_digest(
+ const char *digest,
+ const smartlist_t *orports);
+STATIC void bridge_resolve_conflicts(const tor_addr_t *addr,
+ uint16_t port,
+ const char *digest,
+ const char *transport_name);
+#endif /* defined(TOR_BRIDGES_PRIVATE) */
+
+#endif /* !defined(TOR_BRIDGES_H) */
diff --git a/src/feature/client/circpathbias.c b/src/feature/client/circpathbias.c
new file mode 100644
index 0000000000..32b3212d3f
--- /dev/null
+++ b/src/feature/client/circpathbias.c
@@ -0,0 +1,1578 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circpathbias.c
+ *
+ * \brief Code to track success/failure rates of circuits built through
+ * different Tor nodes, in an attempt to detect attacks where
+ * an attacker deliberately causes circuits to fail until the client
+ * chooses a path they like.
+ *
+ * This code is currently configured in a warning-only mode, though false
+ * positives appear to be rare in practice. There is also support for
+ * disabling really bad guards, but it's quite experimental and may have bad
+ * anonymity effects.
+ *
+ * The information here is associated with the entry_guard_t object for
+ * each guard, and stored persistently in the state file.
+ */
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/circpathbias.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "or/circuitstats.h"
+#include "or/connection_edge.h"
+#include "or/config.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/entrynodes.h"
+#include "or/networkstatus.h"
+#include "or/relay.h"
+#include "lib/math/fp.h"
+#include "lib/math/laplace.h"
+
+#include "or/cell_st.h"
+#include "or/cpath_build_state_st.h"
+#include "or/crypt_path_st.h"
+#include "or/extend_info_st.h"
+#include "or/origin_circuit_st.h"
+
+static void pathbias_count_successful_close(origin_circuit_t *circ);
+static void pathbias_count_collapse(origin_circuit_t *circ);
+static void pathbias_count_use_failed(origin_circuit_t *circ);
+static void pathbias_measure_use_rate(entry_guard_t *guard);
+static void pathbias_measure_close_rate(entry_guard_t *guard);
+static void pathbias_scale_use_rates(entry_guard_t *guard);
+static void pathbias_scale_close_rates(entry_guard_t *guard);
+static int entry_guard_inc_circ_attempt_count(entry_guard_t *guard);
+
+/** Increment the number of times we have attempted to extend a circuit to
+ * <b>guard</b>, first checking if the failure rate is high enough that
+ * we should eliminate the guard. Return -1 if the guard looks no good;
+ * return 0 if the guard looks fine.
+ */
+static int
+entry_guard_inc_circ_attempt_count(entry_guard_t *guard)
+{
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ entry_guards_changed();
+
+ pathbias_measure_close_rate(guard);
+
+ if (pb->path_bias_disabled)
+ return -1;
+
+ pathbias_scale_close_rates(guard);
+ pb->circ_attempts++;
+
+ log_info(LD_CIRC, "Got success count %f/%f for guard %s",
+ pb->circ_successes, pb->circ_attempts,
+ entry_guard_describe(guard));
+ return 0;
+}
+
+/** The minimum number of circuit attempts before we start
+ * thinking about warning about path bias and dropping guards */
+static int
+pathbias_get_min_circs(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_MIN_CIRC 150
+ if (options->PathBiasCircThreshold >= 5)
+ return options->PathBiasCircThreshold;
+ else
+ return networkstatus_get_param(NULL, "pb_mincircs",
+ DFLT_PATH_BIAS_MIN_CIRC,
+ 5, INT32_MAX);
+}
+
+/** The circuit success rate below which we issue a notice */
+static double
+pathbias_get_notice_rate(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_NOTICE_PCT 70
+ if (options->PathBiasNoticeRate >= 0.0)
+ return options->PathBiasNoticeRate;
+ else
+ return networkstatus_get_param(NULL, "pb_noticepct",
+ DFLT_PATH_BIAS_NOTICE_PCT, 0, 100)/100.0;
+}
+
+/** The circuit success rate below which we issue a warn */
+static double
+pathbias_get_warn_rate(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_WARN_PCT 50
+ if (options->PathBiasWarnRate >= 0.0)
+ return options->PathBiasWarnRate;
+ else
+ return networkstatus_get_param(NULL, "pb_warnpct",
+ DFLT_PATH_BIAS_WARN_PCT, 0, 100)/100.0;
+}
+
+/* XXXX I'd like to have this be static again, but entrynodes.c needs it. */
+/**
+ * The extreme rate is the rate at which we would drop the guard,
+ * if pb_dropguard is also set. Otherwise we just warn.
+ */
+double
+pathbias_get_extreme_rate(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_EXTREME_PCT 30
+ if (options->PathBiasExtremeRate >= 0.0)
+ return options->PathBiasExtremeRate;
+ else
+ return networkstatus_get_param(NULL, "pb_extremepct",
+ DFLT_PATH_BIAS_EXTREME_PCT, 0, 100)/100.0;
+}
+
+/* XXXX I'd like to have this be static again, but entrynodes.c needs it. */
+/**
+ * If 1, we actually disable use of guards that fall below
+ * the extreme_pct.
+ */
+int
+pathbias_get_dropguards(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_DROP_GUARDS 0
+ if (options->PathBiasDropGuards >= 0)
+ return options->PathBiasDropGuards;
+ else
+ return networkstatus_get_param(NULL, "pb_dropguards",
+ DFLT_PATH_BIAS_DROP_GUARDS, 0, 1);
+}
+
+/**
+ * This is the number of circuits at which we scale our
+ * counts by mult_factor/scale_factor. Note that this count is
+ * not exact: we only perform the scaling when it would cause
+ * no integer truncation.
+ */
+static int
+pathbias_get_scale_threshold(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_SCALE_THRESHOLD 300
+ if (options->PathBiasScaleThreshold >= 10)
+ return options->PathBiasScaleThreshold;
+ else
+ return networkstatus_get_param(NULL, "pb_scalecircs",
+ DFLT_PATH_BIAS_SCALE_THRESHOLD, 10,
+ INT32_MAX);
+}
+
+/**
+ * Compute the path bias scaling ratio from the consensus
+ * parameters pb_multfactor/pb_scalefactor.
+ *
+ * Returns a value in (0, 1.0] which we multiply our pathbias
+ * counts with to scale them down.
+ */
+static double
+pathbias_get_scale_ratio(const or_options_t *options)
+{
+ /*
+ * The scale factor is the denominator for our scaling
+ * of circuit counts for our path bias window.
+ *
+ * Note that our use of doubles for the path bias state
+ * file means that powers of 2 work best here.
+ */
+ int denominator = networkstatus_get_param(NULL, "pb_scalefactor",
+ 2, 2, INT32_MAX);
+ (void) options;
+ /**
+ * The mult factor is the numerator for our scaling
+ * of circuit counts for our path bias window. It
+ * allows us to scale by fractions.
+ */
+ return networkstatus_get_param(NULL, "pb_multfactor",
+ 1, 1, denominator)/((double)denominator);
+}
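+
+/* Worked example (parameter values assumed, not taken from any particular
+ * consensus): with pb_scalefactor=2 and pb_multfactor=1, this function
+ * returns 1/2 = 0.5. When a guard's counts are scaled by that ratio, e.g.
+ * 300 attempts and 270 successes become 150 and 135, the observed success
+ * rate (90%) is preserved while the window of remembered history shrinks. */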
+
+/** The minimum number of circuit usage attempts before we start
+ * thinking about warning about path use bias and dropping guards */
+static int
+pathbias_get_min_use(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_MIN_USE 20
+ if (options->PathBiasUseThreshold >= 3)
+ return options->PathBiasUseThreshold;
+ else
+ return networkstatus_get_param(NULL, "pb_minuse",
+ DFLT_PATH_BIAS_MIN_USE,
+ 3, INT32_MAX);
+}
+
+/** The circuit use success rate below which we issue a notice */
+static double
+pathbias_get_notice_use_rate(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_NOTICE_USE_PCT 80
+ if (options->PathBiasNoticeUseRate >= 0.0)
+ return options->PathBiasNoticeUseRate;
+ else
+ return networkstatus_get_param(NULL, "pb_noticeusepct",
+ DFLT_PATH_BIAS_NOTICE_USE_PCT,
+ 0, 100)/100.0;
+}
+
+/**
+ * The extreme use rate is the rate at which we would drop the guard,
+ * if pb_dropguard is also set. Otherwise we just warn.
+ */
+double
+pathbias_get_extreme_use_rate(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_EXTREME_USE_PCT 60
+ if (options->PathBiasExtremeUseRate >= 0.0)
+ return options->PathBiasExtremeUseRate;
+ else
+ return networkstatus_get_param(NULL, "pb_extremeusepct",
+ DFLT_PATH_BIAS_EXTREME_USE_PCT,
+ 0, 100)/100.0;
+}
+
+/**
+ * This is the number of circuits at which we scale our
+ * use counts by mult_factor/scale_factor. Note that this count is
+ * not exact: we only perform the scaling when it would cause
+ * no integer truncation.
+ */
+static int
+pathbias_get_scale_use_threshold(const or_options_t *options)
+{
+#define DFLT_PATH_BIAS_SCALE_USE_THRESHOLD 100
+ if (options->PathBiasScaleUseThreshold >= 10)
+ return options->PathBiasScaleUseThreshold;
+ else
+ return networkstatus_get_param(NULL, "pb_scaleuse",
+ DFLT_PATH_BIAS_SCALE_USE_THRESHOLD,
+ 10, INT32_MAX);
+}
+
+/**
+ * Convert a Guard's path state to string.
+ */
+const char *
+pathbias_state_to_string(path_state_t state)
+{
+ switch (state) {
+ case PATH_STATE_NEW_CIRC:
+ return "new";
+ case PATH_STATE_BUILD_ATTEMPTED:
+ return "build attempted";
+ case PATH_STATE_BUILD_SUCCEEDED:
+ return "build succeeded";
+ case PATH_STATE_USE_ATTEMPTED:
+ return "use attempted";
+ case PATH_STATE_USE_SUCCEEDED:
+ return "use succeeded";
+ case PATH_STATE_USE_FAILED:
+ return "use failed";
+ case PATH_STATE_ALREADY_COUNTED:
+ return "already counted";
+ }
+
+ return "unknown";
+}
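+
+/* Reading aid (summarizing the counting functions below, not new behavior):
+ * a counted circuit normally advances through
+ *
+ *   new -> build attempted -> build succeeded -> use attempted
+ *       -> use succeeded -> already counted (set at close)
+ *
+ * pathbias_mark_use_rollback() can move a circuit from "use succeeded" back
+ * to "use attempted" when a stream detaches, and pathbias_check_close()
+ * marks every counted circuit "already counted" once its fate is decided. */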
+
+/**
+ * This function decides if a circuit has progressed far enough to count
+ * as a circuit "attempt". As long as end-to-end tagging is possible,
+ * we assume the adversary will use it over hop-to-hop failure. Therefore,
+ * we only need to account for bias at the last hop. This should make us
+ * much more resilient to ambient circuit failure, and also make that
+ * failure easier to measure (we only need to measure Exit failure rates).
+ */
+static int
+pathbias_is_new_circ_attempt(origin_circuit_t *circ)
+{
+#define N2N_TAGGING_IS_POSSIBLE
+#ifdef N2N_TAGGING_IS_POSSIBLE
+ /* cpath is a circular list. We want circs with more than one hop,
+ * and the second hop must be waiting for keys still (it's just
+ * about to get them). */
+ return circ->cpath &&
+ circ->cpath->next != circ->cpath &&
+ circ->cpath->next->state == CPATH_STATE_AWAITING_KEYS;
+#else /* !(defined(N2N_TAGGING_IS_POSSIBLE)) */
+ /* If tagging attacks are no longer possible, we probably want to
+ * count bias from the first hop. However, one could argue that
+ * timing-based tagging is still more useful than per-hop failure.
+ * In which case, we'd never want to use this.
+ */
+ return circ->cpath &&
+ circ->cpath->state == CPATH_STATE_AWAITING_KEYS;
+#endif /* defined(N2N_TAGGING_IS_POSSIBLE) */
+}
+
+/**
+ * Decide if the path bias code should count a circuit.
+ *
+ * @returns 1 if we should count it, 0 otherwise.
+ */
+static int
+pathbias_should_count(origin_circuit_t *circ)
+{
+#define PATHBIAS_COUNT_INTERVAL (600)
+ static ratelim_t count_limit =
+ RATELIM_INIT(PATHBIAS_COUNT_INTERVAL);
+ char *rate_msg = NULL;
+
+ /* We can't do path bias accounting without entry guards.
+ * Testing and controller circuits also have no guards.
+ *
+ * We also don't count server-side rends, because their
+ * endpoint could be chosen maliciously.
+ * Similarly, we can't count client-side intro attempts,
+ * because clients can be manipulated into connecting to
+ * malicious intro points. */
+ if (get_options()->UseEntryGuards == 0 ||
+ circ->base_.purpose == CIRCUIT_PURPOSE_TESTING ||
+ circ->base_.purpose == CIRCUIT_PURPOSE_CONTROLLER ||
+ circ->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND ||
+ circ->base_.purpose == CIRCUIT_PURPOSE_S_REND_JOINED ||
+ (circ->base_.purpose >= CIRCUIT_PURPOSE_C_INTRODUCING &&
+ circ->base_.purpose <= CIRCUIT_PURPOSE_C_INTRODUCE_ACKED)) {
+
+    /* Check to see if the shouldcount result has changed due to an
+     * unexpected purpose change that would affect our results.
+ *
+ * The reason we check the path state too here is because for the
+ * cannibalized versions of these purposes, we count them as successful
+ * before their purpose change.
+ */
+ if (circ->pathbias_shouldcount == PATHBIAS_SHOULDCOUNT_COUNTED
+ && circ->path_state != PATH_STATE_ALREADY_COUNTED) {
+ log_info(LD_BUG,
+ "Circuit %d is now being ignored despite being counted "
+ "in the past. Purpose is %s, path state is %s",
+ circ->global_identifier,
+ circuit_purpose_to_string(circ->base_.purpose),
+ pathbias_state_to_string(circ->path_state));
+ }
+ circ->pathbias_shouldcount = PATHBIAS_SHOULDCOUNT_IGNORED;
+ return 0;
+ }
+
+ /* Completely ignore one hop circuits */
+ if (circ->build_state->onehop_tunnel ||
+ circ->build_state->desired_path_len == 1) {
+ /* Check for inconsistency */
+ if (circ->build_state->desired_path_len != 1 ||
+ !circ->build_state->onehop_tunnel) {
+ if ((rate_msg = rate_limit_log(&count_limit, approx_time()))) {
+ log_info(LD_BUG,
+ "One-hop circuit has length %d. Path state is %s. "
+ "Circuit is a %s currently %s.%s",
+ circ->build_state->desired_path_len,
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state),
+ rate_msg);
+ tor_free(rate_msg);
+ }
+ tor_fragile_assert();
+ }
+
+    /* Check to see if the shouldcount result has changed due to an
+     * unexpected change that would affect our results */
+ if (circ->pathbias_shouldcount == PATHBIAS_SHOULDCOUNT_COUNTED) {
+ log_info(LD_BUG,
+ "One-hop circuit %d is now being ignored despite being counted "
+ "in the past. Purpose is %s, path state is %s",
+ circ->global_identifier,
+ circuit_purpose_to_string(circ->base_.purpose),
+ pathbias_state_to_string(circ->path_state));
+ }
+ circ->pathbias_shouldcount = PATHBIAS_SHOULDCOUNT_IGNORED;
+ return 0;
+ }
+
+  /* Check to see if the shouldcount result has changed due to an
+   * unexpected purpose change that would affect our results */
+ if (circ->pathbias_shouldcount == PATHBIAS_SHOULDCOUNT_IGNORED) {
+ log_info(LD_BUG,
+ "Circuit %d is now being counted despite being ignored "
+ "in the past. Purpose is %s, path state is %s",
+ circ->global_identifier,
+ circuit_purpose_to_string(circ->base_.purpose),
+ pathbias_state_to_string(circ->path_state));
+ }
+ circ->pathbias_shouldcount = PATHBIAS_SHOULDCOUNT_COUNTED;
+
+ return 1;
+}
+
+/**
+ * Check our circuit state to see if this is a successful circuit attempt.
+ * If so, record it in the current guard's path bias circ_attempt count.
+ *
+ * Also check for several potential error cases for bug #6475.
+ */
+int
+pathbias_count_build_attempt(origin_circuit_t *circ)
+{
+#define CIRC_ATTEMPT_NOTICE_INTERVAL (600)
+ static ratelim_t circ_attempt_notice_limit =
+ RATELIM_INIT(CIRC_ATTEMPT_NOTICE_INTERVAL);
+ char *rate_msg = NULL;
+
+ if (!pathbias_should_count(circ)) {
+ return 0;
+ }
+
+ if (pathbias_is_new_circ_attempt(circ)) {
+ /* Help track down the real cause of bug #6475: */
+ if (circ->has_opened && circ->path_state != PATH_STATE_BUILD_ATTEMPTED) {
+ if ((rate_msg = rate_limit_log(&circ_attempt_notice_limit,
+ approx_time()))) {
+ log_info(LD_BUG,
+ "Opened circuit is in strange path state %s. "
+ "Circuit is a %s currently %s.%s",
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state),
+ rate_msg);
+ tor_free(rate_msg);
+ }
+ }
+
+    /* Don't re-count cannibalized circs. */
+ if (!circ->has_opened) {
+ entry_guard_t *guard = NULL;
+
+ if (circ->cpath && circ->cpath->extend_info) {
+ guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ } else if (circ->base_.n_chan) {
+ guard =
+ entry_guard_get_by_id_digest(circ->base_.n_chan->identity_digest);
+ }
+
+ if (guard) {
+ if (circ->path_state == PATH_STATE_NEW_CIRC) {
+ circ->path_state = PATH_STATE_BUILD_ATTEMPTED;
+
+ if (entry_guard_inc_circ_attempt_count(guard) < 0) {
+ /* Bogus guard; we already warned. */
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ } else {
+ if ((rate_msg = rate_limit_log(&circ_attempt_notice_limit,
+ approx_time()))) {
+ log_info(LD_BUG,
+ "Unopened circuit has strange path state %s. "
+ "Circuit is a %s currently %s.%s",
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state),
+ rate_msg);
+ tor_free(rate_msg);
+ }
+ }
+ } else {
+ if ((rate_msg = rate_limit_log(&circ_attempt_notice_limit,
+ approx_time()))) {
+ log_info(LD_CIRC,
+ "Unopened circuit has no known guard. "
+ "Circuit is a %s currently %s.%s",
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state),
+ rate_msg);
+ tor_free(rate_msg);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Check our circuit state to see if this is a successful circuit
+ * completion. If so, record it in the current guard's path bias
+ * success count.
+ *
+ * Also check for several potential error cases for bug #6475.
+ */
+void
+pathbias_count_build_success(origin_circuit_t *circ)
+{
+#define SUCCESS_NOTICE_INTERVAL (600)
+ static ratelim_t success_notice_limit =
+ RATELIM_INIT(SUCCESS_NOTICE_INTERVAL);
+ char *rate_msg = NULL;
+ entry_guard_t *guard = NULL;
+
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+ /* Don't count cannibalized/reused circs for path bias
+ * "build" success, since they get counted under "use" success. */
+ if (!circ->has_opened) {
+ if (circ->cpath && circ->cpath->extend_info) {
+ guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ }
+
+ if (guard) {
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ if (circ->path_state == PATH_STATE_BUILD_ATTEMPTED) {
+ circ->path_state = PATH_STATE_BUILD_SUCCEEDED;
+ pb->circ_successes++;
+ entry_guards_changed();
+
+ log_info(LD_CIRC, "Got success count %f/%f for guard %s",
+ pb->circ_successes, pb->circ_attempts,
+ entry_guard_describe(guard));
+ } else {
+ if ((rate_msg = rate_limit_log(&success_notice_limit,
+ approx_time()))) {
+ log_info(LD_BUG,
+ "Succeeded circuit is in strange path state %s. "
+ "Circuit is a %s currently %s.%s",
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state),
+ rate_msg);
+ tor_free(rate_msg);
+ }
+ }
+
+ if (pb->circ_attempts < pb->circ_successes) {
+ log_notice(LD_BUG, "Unexpectedly high successes counts (%f/%f) "
+ "for guard %s",
+ pb->circ_successes, pb->circ_attempts,
+ entry_guard_describe(guard));
+ }
+ /* In rare cases, CIRCUIT_PURPOSE_TESTING can get converted to
+ * CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT and have no guards here.
+ * No need to log that case. */
+ } else if (circ->base_.purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ if ((rate_msg = rate_limit_log(&success_notice_limit,
+ approx_time()))) {
+ log_info(LD_CIRC,
+ "Completed circuit has no known guard. "
+ "Circuit is a %s currently %s.%s",
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state),
+ rate_msg);
+ tor_free(rate_msg);
+ }
+ }
+ } else {
+ if (circ->path_state < PATH_STATE_BUILD_SUCCEEDED) {
+ if ((rate_msg = rate_limit_log(&success_notice_limit,
+ approx_time()))) {
+ log_info(LD_BUG,
+ "Opened circuit is in strange path state %s. "
+ "Circuit is a %s currently %s.%s",
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state),
+ rate_msg);
+ tor_free(rate_msg);
+ }
+ }
+ }
+}
+
+/**
+ * Record an attempt to use a circuit. Changes the circuit's
+ * path state and updates its guard's usage counter.
+ *
+ * Used for path bias usage accounting.
+ */
+void
+pathbias_count_use_attempt(origin_circuit_t *circ)
+{
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+ if (circ->path_state < PATH_STATE_BUILD_SUCCEEDED) {
+ log_notice(LD_BUG,
+ "Used circuit is in strange path state %s. "
+ "Circuit is a %s currently %s.",
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state));
+ } else if (circ->path_state < PATH_STATE_USE_ATTEMPTED) {
+ entry_guard_t *guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ if (guard) {
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ pathbias_measure_use_rate(guard);
+ pathbias_scale_use_rates(guard);
+ pb->use_attempts++;
+ entry_guards_changed();
+
+ log_debug(LD_CIRC,
+ "Marked circuit %d (%f/%f) as used for guard %s.",
+ circ->global_identifier,
+ pb->use_successes, pb->use_attempts,
+ entry_guard_describe(guard));
+ }
+
+ circ->path_state = PATH_STATE_USE_ATTEMPTED;
+ } else {
+ /* Harmless but educational log message */
+ log_info(LD_CIRC,
+ "Used circuit %d is already in path state %s. "
+ "Circuit is a %s currently %s.",
+ circ->global_identifier,
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state));
+ }
+
+ return;
+}
+
+/**
+ * Check that the circuit's path state is appropriate and mark it as
+ * successfully used. Used for path bias usage accounting.
+ *
+ * We don't actually increment the guard's counters until
+ * pathbias_check_close(), because the circuit can still transition
+ * back to PATH_STATE_USE_ATTEMPTED if a stream fails later (this
+ * is done so we can probe the circuit for liveness at close).
+ */
+void
+pathbias_mark_use_success(origin_circuit_t *circ)
+{
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+ if (circ->path_state < PATH_STATE_USE_ATTEMPTED) {
+ log_notice(LD_BUG,
+ "Used circuit %d is in strange path state %s. "
+ "Circuit is a %s currently %s.",
+ circ->global_identifier,
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state));
+
+ pathbias_count_use_attempt(circ);
+ }
+
+ /* We don't do any accounting at the guard until actual circuit close */
+ circ->path_state = PATH_STATE_USE_SUCCEEDED;
+
+ return;
+}
+
+/**
+ * If a stream ever detaches from a circuit in a retriable way,
+ * we need to mark this circuit as still needing either another
+ * successful stream, or in need of a probe.
+ *
+ * An adversary could let the first stream request succeed (ie the
+ * resolve), but then tag and timeout the remainder (via cell
+ * dropping), forcing them on new circuits.
+ *
+ * Rolling back the state will cause us to probe such circuits, which
+ * should lead to probe failures in the event of such tagging due to
+ * either unrecognized cells coming in while we wait for the probe,
+ * or the cipher state getting out of sync in the case of dropped cells.
+ */
+void
+pathbias_mark_use_rollback(origin_circuit_t *circ)
+{
+ if (circ->path_state == PATH_STATE_USE_SUCCEEDED) {
+ log_info(LD_CIRC,
+ "Rolling back pathbias use state to 'attempted' for detached "
+ "circuit %d", circ->global_identifier);
+ circ->path_state = PATH_STATE_USE_ATTEMPTED;
+ }
+}
+
+/**
+ * Actually count a circuit success towards a guard's usage counters
+ * if the path state is appropriate.
+ */
+static void
+pathbias_count_use_success(origin_circuit_t *circ)
+{
+ entry_guard_t *guard;
+
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+ if (circ->path_state != PATH_STATE_USE_SUCCEEDED) {
+ log_notice(LD_BUG,
+ "Successfully used circuit %d is in strange path state %s. "
+ "Circuit is a %s currently %s.",
+ circ->global_identifier,
+ pathbias_state_to_string(circ->path_state),
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state));
+ } else {
+ guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ if (guard) {
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ pb->use_successes++;
+ entry_guards_changed();
+
+ if (pb->use_attempts < pb->use_successes) {
+ log_notice(LD_BUG, "Unexpectedly high use successes counts (%f/%f) "
+ "for guard %s",
+ pb->use_successes, pb->use_attempts,
+ entry_guard_describe(guard));
+ }
+
+ log_debug(LD_CIRC,
+ "Marked circuit %d (%f/%f) as used successfully for guard %s",
+ circ->global_identifier, pb->use_successes,
+ pb->use_attempts,
+ entry_guard_describe(guard));
+ }
+ }
+
+ return;
+}
+
+/**
+ * Send a probe down a circuit that the client attempted to use,
+ * but for which the stream timed out/failed. The probe is a
+ * RELAY_BEGIN cell with a 0.a.b.c destination address, which
+ * the exit will reject and reply back, echoing that address.
+ *
+ * We send such probes because it is possible to bias a user's
+ * paths simply by causing timeouts, and these timeouts cannot be
+ * distinguished from unresponsive servers.
+ *
+ * The probe is sent at the end of the circuit lifetime for two
+ * reasons: to prevent cryptographic taggers from being able to
+ * drop cells to cause timeouts, and to prevent easy recognition
+ * of probes before any real client traffic happens.
+ *
+ * Returns -1 if we couldn't probe, 0 otherwise.
+ */
+static int
+pathbias_send_usable_probe(circuit_t *circ)
+{
+ /* Based on connection_ap_handshake_send_begin() */
+ char payload[CELL_PAYLOAD_SIZE];
+ int payload_len;
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ crypt_path_t *cpath_layer = NULL;
+ char *probe_nonce = NULL;
+
+ tor_assert(ocirc);
+
+ cpath_layer = ocirc->cpath->prev;
+
+ if (cpath_layer->state != CPATH_STATE_OPEN) {
+ /* This can happen for cannibalized circuits. Their
+ * last hop isn't yet open */
+ log_info(LD_CIRC,
+ "Got pathbias probe request for unopened circuit %d. "
+ "Opened %d, len %d", ocirc->global_identifier,
+ ocirc->has_opened, ocirc->build_state->desired_path_len);
+ return -1;
+ }
+
+ /* We already went down this road. */
+ if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING &&
+ ocirc->pathbias_probe_id) {
+ log_info(LD_CIRC,
+ "Got pathbias probe request for circuit %d with "
+ "outstanding probe", ocirc->global_identifier);
+ return -1;
+ }
+
+ /* Can't probe if the channel isn't open */
+ if (circ->n_chan == NULL ||
+ (!CHANNEL_IS_OPEN(circ->n_chan)
+ && !CHANNEL_IS_MAINT(circ->n_chan))) {
+ log_info(LD_CIRC,
+ "Skipping pathbias probe for circuit %d: Channel is not open.",
+ ocirc->global_identifier);
+ return -1;
+ }
+
+ circuit_change_purpose(circ, CIRCUIT_PURPOSE_PATH_BIAS_TESTING);
+
+ /* Update timestamp for when circuit_expire_building() should kill us */
+ tor_gettimeofday(&circ->timestamp_began);
+
+ /* Generate a random address for the nonce */
+ crypto_rand((char*)&ocirc->pathbias_probe_nonce,
+ sizeof(ocirc->pathbias_probe_nonce));
+ ocirc->pathbias_probe_nonce &= 0x00ffffff;
+ probe_nonce = tor_dup_ip(ocirc->pathbias_probe_nonce);
+
+  tor_snprintf(payload, RELAY_PAYLOAD_SIZE, "%s:25", probe_nonce);
+ payload_len = (int)strlen(payload)+1;
+
+ // XXX: need this? Can we assume ipv4 will always be supported?
+ // If not, how do we tell?
+ //if (payload_len <= RELAY_PAYLOAD_SIZE - 4 && edge_conn->begincell_flags) {
+ // set_uint32(payload + payload_len, htonl(edge_conn->begincell_flags));
+ // payload_len += 4;
+ //}
+
+ /* Generate+Store stream id, make sure it's non-zero */
+ ocirc->pathbias_probe_id = get_unique_stream_id_by_circ(ocirc);
+
+ if (ocirc->pathbias_probe_id==0) {
+ log_warn(LD_CIRC,
+ "Ran out of stream IDs on circuit %u during "
+ "pathbias probe attempt.", ocirc->global_identifier);
+ tor_free(probe_nonce);
+ return -1;
+ }
+
+ log_info(LD_CIRC,
+ "Sending pathbias testing cell to %s:25 on stream %d for circ %d.",
+ probe_nonce, ocirc->pathbias_probe_id, ocirc->global_identifier);
+ tor_free(probe_nonce);
+
+ /* Send a test relay cell */
+ if (relay_send_command_from_edge(ocirc->pathbias_probe_id, circ,
+ RELAY_COMMAND_BEGIN, payload,
+ payload_len, cpath_layer) < 0) {
+ log_notice(LD_CIRC,
+ "Failed to send pathbias probe cell on circuit %d.",
+ ocirc->global_identifier);
+ return -1;
+ }
+
+ /* Mark it freshly dirty so it doesn't get expired in the meantime */
+ circ->timestamp_dirty = time(NULL);
+
+ return 0;
+}
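+
+/* Concrete illustration (nonce value invented): because the nonce is masked
+ * with 0x00ffffff above, its top byte is zero, so tor_dup_ip() renders an
+ * address in 0.0.0.0/8 -- a nonce of 0x000a0b0c becomes "0.10.11.12" and the
+ * probe payload is "0.10.11.12:25". The exit rejects the RELAY_BEGIN for
+ * such an address, and pathbias_check_probe_response() below verifies that
+ * the returned END cell (reason END_STREAM_REASON_EXITPOLICY) echoes that
+ * same address back to us. */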
+
+/**
+ * Check the response to a pathbias probe, to ensure the
+ * cell is recognized and the nonce and other probe
+ * characteristics are as expected.
+ *
+ * If the response is valid, return 0. Otherwise return < 0.
+ */
+int
+pathbias_check_probe_response(circuit_t *circ, const cell_t *cell)
+{
+ /* Based on connection_edge_process_relay_cell() */
+ relay_header_t rh;
+ int reason;
+ uint32_t ipv4_host;
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+
+ tor_assert(cell);
+ tor_assert(ocirc);
+ tor_assert(circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING);
+
+ relay_header_unpack(&rh, cell->payload);
+
+ reason = rh.length > 0 ?
+ get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC;
+
+ if (rh.command == RELAY_COMMAND_END &&
+ reason == END_STREAM_REASON_EXITPOLICY &&
+ ocirc->pathbias_probe_id == rh.stream_id) {
+
+ /* Check length+extract host: It is in network order after the reason code.
+ * See connection_edge_end(). */
+ if (rh.length < 9) { /* reason+ipv4+dns_ttl */
+ log_notice(LD_PROTOCOL,
+ "Short path bias probe response length field (%d).", rh.length);
+ return - END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ ipv4_host = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+1));
+
+ /* Check nonce */
+ if (ipv4_host == ocirc->pathbias_probe_nonce) {
+ pathbias_mark_use_success(ocirc);
+ circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED);
+ log_info(LD_CIRC,
+ "Got valid path bias probe back for circ %d, stream %d.",
+ ocirc->global_identifier, ocirc->pathbias_probe_id);
+ return 0;
+ } else {
+ log_notice(LD_CIRC,
+ "Got strange probe value 0x%x vs 0x%x back for circ %d, "
+ "stream %d.", ipv4_host, ocirc->pathbias_probe_nonce,
+ ocirc->global_identifier, ocirc->pathbias_probe_id);
+ return -1;
+ }
+ }
+ log_info(LD_CIRC,
+ "Got another cell back back on pathbias probe circuit %d: "
+ "Command: %d, Reason: %d, Stream-id: %d",
+ ocirc->global_identifier, rh.command, reason, rh.stream_id);
+ return -1;
+}
+
+/**
+ * Check if a circuit was used and/or closed successfully.
+ *
+ * If we attempted to use the circuit to carry a stream but failed
+ * for whatever reason, or if the circuit mysteriously died before
+ * we could attach any streams, record these two cases.
+ *
+ * If we *have* successfully used the circuit, or it appears to
+ * have been closed by us locally, count it as a success.
+ *
+ * Returns 0 if we're done making decisions with the circ,
+ * or -1 if we want to probe it first.
+ */
+int
+pathbias_check_close(origin_circuit_t *ocirc, int reason)
+{
+ circuit_t *circ = &ocirc->base_;
+
+ if (!pathbias_should_count(ocirc)) {
+ return 0;
+ }
+
+ switch (ocirc->path_state) {
+ /* If the circuit was closed after building, but before use, we need
+ * to ensure we were the ones who tried to close it (and not a remote
+ * actor). */
+ case PATH_STATE_BUILD_SUCCEEDED:
+ if (reason & END_CIRC_REASON_FLAG_REMOTE) {
+ /* Remote circ close reasons on an unused circuit all could be bias */
+ log_info(LD_CIRC,
+ "Circuit %d remote-closed without successful use for reason %d. "
+ "Circuit purpose %d currently %d,%s. Len %d.",
+ ocirc->global_identifier,
+ reason, circ->purpose, ocirc->has_opened,
+ circuit_state_to_string(circ->state),
+ ocirc->build_state->desired_path_len);
+ pathbias_count_collapse(ocirc);
+ } else if ((reason & ~END_CIRC_REASON_FLAG_REMOTE)
+ == END_CIRC_REASON_CHANNEL_CLOSED &&
+ circ->n_chan &&
+ circ->n_chan->reason_for_closing
+ != CHANNEL_CLOSE_REQUESTED) {
+ /* If we didn't close the channel ourselves, it could be bias */
+ /* XXX: Only count bias if the network is live?
+ * What about clock jumps/suspends? */
+ log_info(LD_CIRC,
+ "Circuit %d's channel closed without successful use for reason "
+ "%d, channel reason %d. Circuit purpose %d currently %d,%s. Len "
+ "%d.", ocirc->global_identifier,
+ reason, circ->n_chan->reason_for_closing,
+ circ->purpose, ocirc->has_opened,
+ circuit_state_to_string(circ->state),
+ ocirc->build_state->desired_path_len);
+ pathbias_count_collapse(ocirc);
+ } else {
+ pathbias_count_successful_close(ocirc);
+ }
+ break;
+
+ /* If we tried to use a circuit but failed, we should probe it to ensure
+ * it has not been tampered with. */
+ case PATH_STATE_USE_ATTEMPTED:
+ /* XXX: Only probe and/or count failure if the network is live?
+ * What about clock jumps/suspends? */
+ if (pathbias_send_usable_probe(circ) == 0)
+ return -1;
+ else
+ pathbias_count_use_failed(ocirc);
+
+ /* Any circuit where there were attempted streams but no successful
+ * streams could be bias */
+ log_info(LD_CIRC,
+ "Circuit %d closed without successful use for reason %d. "
+ "Circuit purpose %d currently %d,%s. Len %d.",
+ ocirc->global_identifier,
+ reason, circ->purpose, ocirc->has_opened,
+ circuit_state_to_string(circ->state),
+ ocirc->build_state->desired_path_len);
+ break;
+
+ case PATH_STATE_USE_SUCCEEDED:
+ pathbias_count_successful_close(ocirc);
+ pathbias_count_use_success(ocirc);
+ break;
+
+ case PATH_STATE_USE_FAILED:
+ pathbias_count_use_failed(ocirc);
+ break;
+
+ case PATH_STATE_NEW_CIRC:
+ case PATH_STATE_BUILD_ATTEMPTED:
+ case PATH_STATE_ALREADY_COUNTED:
+ default:
+ // Other states are uninteresting. No stats to count.
+ break;
+ }
+
+ ocirc->path_state = PATH_STATE_ALREADY_COUNTED;
+
+ return 0;
+}
+
+/**
+ * Count a successfully closed circuit.
+ */
+static void
+pathbias_count_successful_close(origin_circuit_t *circ)
+{
+ entry_guard_t *guard = NULL;
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+ if (circ->cpath && circ->cpath->extend_info) {
+ guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ }
+
+ if (guard) {
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ /* In the long run: circuit_success ~= successful_circuit_close +
+ * circ_failure + stream_failure */
+ pb->successful_circuits_closed++;
+ entry_guards_changed();
+ } else if (circ->base_.purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ /* In rare cases, CIRCUIT_PURPOSE_TESTING can get converted to
+ * CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT and have no guards here.
+ * No need to log that case. */
+ log_info(LD_CIRC,
+ "Successfully closed circuit has no known guard. "
+ "Circuit is a %s currently %s",
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state));
+ }
+}
+
+/**
+ * Count a circuit that fails after it is built, but before it can
+ * carry any traffic.
+ *
+ * This is needed because there are ways to destroy a
+ * circuit after it has successfully completed. Right now, this is
+ * used for purely informational/debugging purposes.
+ */
+static void
+pathbias_count_collapse(origin_circuit_t *circ)
+{
+ entry_guard_t *guard = NULL;
+
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+ if (circ->cpath && circ->cpath->extend_info) {
+ guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ }
+
+ if (guard) {
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ pb->collapsed_circuits++;
+ entry_guards_changed();
+ } else if (circ->base_.purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ /* In rare cases, CIRCUIT_PURPOSE_TESTING can get converted to
+ * CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT and have no guards here.
+ * No need to log that case. */
+ log_info(LD_CIRC,
+ "Destroyed circuit has no known guard. "
+ "Circuit is a %s currently %s",
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state));
+ }
+}
+
+/**
+ * Count a known failed circuit (because we could not probe it).
+ *
+ * This counter is informational.
+ */
+static void
+pathbias_count_use_failed(origin_circuit_t *circ)
+{
+ entry_guard_t *guard = NULL;
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+ if (circ->cpath && circ->cpath->extend_info) {
+ guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ }
+
+ if (guard) {
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ pb->unusable_circuits++;
+ entry_guards_changed();
+ } else if (circ->base_.purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ /* In rare cases, CIRCUIT_PURPOSE_TESTING can get converted to
+ * CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT and have no guards here.
+ * No need to log that case. */
+ /* XXX note cut-and-paste code in this function compared to nearby
+ * functions. Would be nice to refactor. -RD */
+ log_info(LD_CIRC,
+ "Stream-failing circuit has no known guard. "
+ "Circuit is a %s currently %s",
+ circuit_purpose_to_string(circ->base_.purpose),
+ circuit_state_to_string(circ->base_.state));
+ }
+}
+
+/**
+ * Count timeouts for path bias log messages.
+ *
+ * These counts are purely informational.
+ */
+void
+pathbias_count_timeout(origin_circuit_t *circ)
+{
+ entry_guard_t *guard = NULL;
+
+ if (!pathbias_should_count(circ)) {
+ return;
+ }
+
+  /* Hidden service circs can actually be used
+   * successfully and then time out later (because
+   * the other side declines to use them). */
+ if (circ->path_state == PATH_STATE_USE_SUCCEEDED) {
+ return;
+ }
+
+ if (circ->cpath && circ->cpath->extend_info) {
+ guard = entry_guard_get_by_id_digest(
+ circ->cpath->extend_info->identity_digest);
+ }
+
+ if (guard) {
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ pb->timeouts++;
+ entry_guards_changed();
+ }
+}
+
+/**
+ * Helper function to count all of the currently opened circuits
+ * for a guard that are in a given path state range. The state
+ * range is inclusive on both ends.
+ */
+static int
+pathbias_count_circs_in_states(entry_guard_t *guard,
+ path_state_t from,
+ path_state_t to)
+{
+ int open_circuits = 0;
+
+ /* Count currently open circuits. Give them the benefit of the doubt. */
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ origin_circuit_t *ocirc = NULL;
+ if (!CIRCUIT_IS_ORIGIN(circ) || /* didn't originate here */
+ circ->marked_for_close) /* already counted */
+ continue;
+
+ ocirc = TO_ORIGIN_CIRCUIT(circ);
+
+ if (!ocirc->cpath || !ocirc->cpath->extend_info)
+ continue;
+
+ if (ocirc->path_state >= from &&
+ ocirc->path_state <= to &&
+ pathbias_should_count(ocirc) &&
+ fast_memeq(entry_guard_get_rsa_id_digest(guard),
+ ocirc->cpath->extend_info->identity_digest,
+ DIGEST_LEN)) {
+ log_debug(LD_CIRC, "Found opened circuit %d in path_state %s",
+ ocirc->global_identifier,
+ pathbias_state_to_string(ocirc->path_state));
+ open_circuits++;
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+
+ return open_circuits;
+}
+
+/**
+ * Return the number of circuits counted as successfully closed for
+ * this guard.
+ *
+ * Also add in the currently open circuits to give them the benefit
+ * of the doubt.
+ */
+double
+pathbias_get_close_success_count(entry_guard_t *guard)
+{
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ return pb->successful_circuits_closed +
+ pathbias_count_circs_in_states(guard,
+ PATH_STATE_BUILD_SUCCEEDED,
+ PATH_STATE_USE_SUCCEEDED);
+}
+
+/**
+ * Return the number of circuits counted as successfully used
+ * for this guard.
+ *
+ * Also add in the currently open circuits that we are attempting
+ * to use to give them the benefit of the doubt.
+ */
+double
+pathbias_get_use_success_count(entry_guard_t *guard)
+{
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ return pb->use_successes +
+ pathbias_count_circs_in_states(guard,
+ PATH_STATE_USE_ATTEMPTED,
+ PATH_STATE_USE_SUCCEEDED);
+}
+
+/**
+ * Check the path bias use rate against our consensus parameter limits.
+ *
+ * Emits a log message if the use success rates are too low.
+ *
+ * If pathbias_get_dropguards() is set, we also disable the use of
+ * very failure prone guards.
+ */
+static void
+pathbias_measure_use_rate(entry_guard_t *guard)
+{
+ const or_options_t *options = get_options();
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ if (pb->use_attempts > pathbias_get_min_use(options)) {
+ /* Note: We rely on the < comparison here to allow us to set a 0
+ * rate and disable the feature entirely. If refactoring, don't
+ * change to <= */
+ if (pathbias_get_use_success_count(guard)/pb->use_attempts
+ < pathbias_get_extreme_use_rate(options)) {
+ /* Dropping is currently disabled by default. */
+ if (pathbias_get_dropguards(options)) {
+ if (!pb->path_bias_disabled) {
+ log_warn(LD_CIRC,
+ "Your Guard %s is failing to carry an extremely large "
+ "amount of stream on its circuits. "
+ "To avoid potential route manipulation attacks, Tor has "
+ "disabled use of this guard. "
+ "Use counts are %ld/%ld. Success counts are %ld/%ld. "
+ "%ld circuits completed, %ld were unusable, %ld collapsed, "
+ "and %ld timed out. "
+ "For reference, your timeout cutoff is %ld seconds.",
+ entry_guard_describe(guard),
+ tor_lround(pathbias_get_use_success_count(guard)),
+ tor_lround(pb->use_attempts),
+ tor_lround(pathbias_get_close_success_count(guard)),
+ tor_lround(pb->circ_attempts),
+ tor_lround(pb->circ_successes),
+ tor_lround(pb->unusable_circuits),
+ tor_lround(pb->collapsed_circuits),
+ tor_lround(pb->timeouts),
+ tor_lround(get_circuit_build_close_time_ms()/1000));
+ pb->path_bias_disabled = 1;
+ return;
+ }
+ } else if (!pb->path_bias_use_extreme) {
+ pb->path_bias_use_extreme = 1;
+ log_warn(LD_CIRC,
+ "Your Guard %s is failing to carry an extremely large "
+ "amount of streams on its circuits. "
+ "This could indicate a route manipulation attack, network "
+ "overload, bad local network connectivity, or a bug. "
+ "Use counts are %ld/%ld. Success counts are %ld/%ld. "
+ "%ld circuits completed, %ld were unusable, %ld collapsed, "
+ "and %ld timed out. "
+ "For reference, your timeout cutoff is %ld seconds.",
+ entry_guard_describe(guard),
+ tor_lround(pathbias_get_use_success_count(guard)),
+ tor_lround(pb->use_attempts),
+ tor_lround(pathbias_get_close_success_count(guard)),
+ tor_lround(pb->circ_attempts),
+ tor_lround(pb->circ_successes),
+ tor_lround(pb->unusable_circuits),
+ tor_lround(pb->collapsed_circuits),
+ tor_lround(pb->timeouts),
+ tor_lround(get_circuit_build_close_time_ms()/1000));
+ }
+ } else if (pathbias_get_use_success_count(guard)/pb->use_attempts
+ < pathbias_get_notice_use_rate(options)) {
+ if (!pb->path_bias_use_noticed) {
+ pb->path_bias_use_noticed = 1;
+ log_notice(LD_CIRC,
+ "Your Guard %s is failing to carry more streams on its "
+ "circuits than usual. "
+ "Most likely this means the Tor network is overloaded "
+ "or your network connection is poor. "
+ "Use counts are %ld/%ld. Success counts are %ld/%ld. "
+ "%ld circuits completed, %ld were unusable, %ld collapsed, "
+ "and %ld timed out. "
+ "For reference, your timeout cutoff is %ld seconds.",
+ entry_guard_describe(guard),
+ tor_lround(pathbias_get_use_success_count(guard)),
+ tor_lround(pb->use_attempts),
+ tor_lround(pathbias_get_close_success_count(guard)),
+ tor_lround(pb->circ_attempts),
+ tor_lround(pb->circ_successes),
+ tor_lround(pb->unusable_circuits),
+ tor_lround(pb->collapsed_circuits),
+ tor_lround(pb->timeouts),
+ tor_lround(get_circuit_build_close_time_ms()/1000));
+ }
+ }
+ }
+}
+
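+/*
+ * Worked example of the use-rate check above (illustrative numbers, not
+ * the real consensus defaults): suppose the minimum use count is 20, the
+ * "extreme" use rate is 0.3, and the "notice" use rate is 0.8. With
+ * use_attempts = 50 and a use success count of 12, the ratio 12/50 = 0.24
+ * falls below 0.3, so we emit the "extreme" warning (or disable the guard,
+ * if dropping is enabled). With 30 successes, 30/50 = 0.6 clears 0.3 but
+ * not 0.8, so we emit the "notice" message once instead.
+ */
+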
+/**
+ * Check the path bias circuit close status rates against our consensus
+ * parameter limits.
+ *
+ * Emits a log message if the circuit close success rates are too low.
+ *
+ * If pathbias_get_dropguards() is set, we also disable the use of
+ * very failure prone guards.
+ *
+ * XXX: This function shares log messages and checks similar to those in
+ * pathbias_measure_use_rate(). It may be possible to combine them
+ * eventually, especially if we can ever remove the need for 3
+ * levels of closure warns (if the overall circuit failure rate
+ * goes down with ntor). One way to do so would be to multiply
+ * the build rate with the use rate to get an idea of the total
+ * fraction of the total network paths the user is able to use.
+ * See ticket #8159.
+ */
+static void
+pathbias_measure_close_rate(entry_guard_t *guard)
+{
+ const or_options_t *options = get_options();
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ if (pb->circ_attempts > pathbias_get_min_circs(options)) {
+ /* Note: We rely on the < comparison here to allow us to set a 0
+ * rate and disable the feature entirely. If refactoring, don't
+ * change to <= */
+ if (pathbias_get_close_success_count(guard)/pb->circ_attempts
+ < pathbias_get_extreme_rate(options)) {
+ /* Dropping is currently disabled by default. */
+ if (pathbias_get_dropguards(options)) {
+ if (!pb->path_bias_disabled) {
+ log_warn(LD_CIRC,
+ "Your Guard %s is failing an extremely large "
+ "amount of circuits. "
+ "To avoid potential route manipulation attacks, Tor has "
+ "disabled use of this guard. "
+ "Success counts are %ld/%ld. Use counts are %ld/%ld. "
+ "%ld circuits completed, %ld were unusable, %ld collapsed, "
+ "and %ld timed out. "
+ "For reference, your timeout cutoff is %ld seconds.",
+ entry_guard_describe(guard),
+ tor_lround(pathbias_get_close_success_count(guard)),
+ tor_lround(pb->circ_attempts),
+ tor_lround(pathbias_get_use_success_count(guard)),
+ tor_lround(pb->use_attempts),
+ tor_lround(pb->circ_successes),
+ tor_lround(pb->unusable_circuits),
+ tor_lround(pb->collapsed_circuits),
+ tor_lround(pb->timeouts),
+ tor_lround(get_circuit_build_close_time_ms()/1000));
+ pb->path_bias_disabled = 1;
+ return;
+ }
+ } else if (!pb->path_bias_extreme) {
+ pb->path_bias_extreme = 1;
+ log_warn(LD_CIRC,
+ "Your Guard %s is failing an extremely large "
+ "amount of circuits. "
+ "This could indicate a route manipulation attack, "
+ "extreme network overload, or a bug. "
+ "Success counts are %ld/%ld. Use counts are %ld/%ld. "
+ "%ld circuits completed, %ld were unusable, %ld collapsed, "
+ "and %ld timed out. "
+ "For reference, your timeout cutoff is %ld seconds.",
+ entry_guard_describe(guard),
+ tor_lround(pathbias_get_close_success_count(guard)),
+ tor_lround(pb->circ_attempts),
+ tor_lround(pathbias_get_use_success_count(guard)),
+ tor_lround(pb->use_attempts),
+ tor_lround(pb->circ_successes),
+ tor_lround(pb->unusable_circuits),
+ tor_lround(pb->collapsed_circuits),
+ tor_lround(pb->timeouts),
+ tor_lround(get_circuit_build_close_time_ms()/1000));
+ }
+ } else if (pathbias_get_close_success_count(guard)/pb->circ_attempts
+ < pathbias_get_warn_rate(options)) {
+ if (!pb->path_bias_warned) {
+ pb->path_bias_warned = 1;
+ log_warn(LD_CIRC,
+ "Your Guard %s is failing a very large "
+ "amount of circuits. "
+ "Most likely this means the Tor network is "
+ "overloaded, but it could also mean an attack against "
+ "you or potentially the guard itself. "
+ "Success counts are %ld/%ld. Use counts are %ld/%ld. "
+ "%ld circuits completed, %ld were unusable, %ld collapsed, "
+ "and %ld timed out. "
+ "For reference, your timeout cutoff is %ld seconds.",
+ entry_guard_describe(guard),
+ tor_lround(pathbias_get_close_success_count(guard)),
+ tor_lround(pb->circ_attempts),
+ tor_lround(pathbias_get_use_success_count(guard)),
+ tor_lround(pb->use_attempts),
+ tor_lround(pb->circ_successes),
+ tor_lround(pb->unusable_circuits),
+ tor_lround(pb->collapsed_circuits),
+ tor_lround(pb->timeouts),
+ tor_lround(get_circuit_build_close_time_ms()/1000));
+ }
+ } else if (pathbias_get_close_success_count(guard)/pb->circ_attempts
+ < pathbias_get_notice_rate(options)) {
+ if (!pb->path_bias_noticed) {
+ pb->path_bias_noticed = 1;
+ log_notice(LD_CIRC,
+ "Your Guard %s is failing more circuits than "
+ "usual. "
+ "Most likely this means the Tor network is overloaded. "
+ "Success counts are %ld/%ld. Use counts are %ld/%ld. "
+ "%ld circuits completed, %ld were unusable, %ld collapsed, "
+ "and %ld timed out. "
+ "For reference, your timeout cutoff is %ld seconds.",
+ entry_guard_describe(guard),
+ tor_lround(pathbias_get_close_success_count(guard)),
+ tor_lround(pb->circ_attempts),
+ tor_lround(pathbias_get_use_success_count(guard)),
+ tor_lround(pb->use_attempts),
+ tor_lround(pb->circ_successes),
+ tor_lround(pb->unusable_circuits),
+ tor_lround(pb->collapsed_circuits),
+ tor_lround(pb->timeouts),
+ tor_lround(get_circuit_build_close_time_ms()/1000));
+ }
+ }
+ }
+}
+
+/**
+ * This function scales the path bias circuit close rates if we have
+ * more data than the scaling threshold. This allows us to
+ * be more sensitive to recent measurements.
+ *
+ * XXX: The attempt count transfer stuff here might be done
+ * better by keeping separate pending counters that get
+ * transferred at circuit close. See ticket #8160.
+ */
+static void
+pathbias_scale_close_rates(entry_guard_t *guard)
+{
+ const or_options_t *options = get_options();
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ /* If we get a ton of circuits, just scale everything down */
+ if (pb->circ_attempts > pathbias_get_scale_threshold(options)) {
+ double scale_ratio = pathbias_get_scale_ratio(options);
+ int opened_attempts = pathbias_count_circs_in_states(guard,
+ PATH_STATE_BUILD_ATTEMPTED, PATH_STATE_BUILD_ATTEMPTED);
+ int opened_built = pathbias_count_circs_in_states(guard,
+ PATH_STATE_BUILD_SUCCEEDED,
+ PATH_STATE_USE_FAILED);
+ /* Verify that the counts are sane before and after scaling */
+ int counts_are_sane = (pb->circ_attempts >= pb->circ_successes);
+
+ pb->circ_attempts -= (opened_attempts+opened_built);
+ pb->circ_successes -= opened_built;
+
+ pb->circ_attempts *= scale_ratio;
+ pb->circ_successes *= scale_ratio;
+ pb->timeouts *= scale_ratio;
+ pb->successful_circuits_closed *= scale_ratio;
+ pb->collapsed_circuits *= scale_ratio;
+ pb->unusable_circuits *= scale_ratio;
+
+ pb->circ_attempts += (opened_attempts+opened_built);
+ pb->circ_successes += opened_built;
+
+ entry_guards_changed();
+
+ log_info(LD_CIRC,
+ "Scaled pathbias counts to (%f,%f)/%f (%d/%d open) for guard "
+ "%s",
+ pb->circ_successes, pb->successful_circuits_closed,
+ pb->circ_attempts, opened_built, opened_attempts,
+ entry_guard_describe(guard));
+
+ /* Have the counts just become invalid by this scaling attempt? */
+ if (counts_are_sane && pb->circ_attempts < pb->circ_successes) {
+ log_notice(LD_BUG,
+ "Scaling has mangled pathbias counts to %f/%f (%d/%d open) "
+ "for guard %s",
+ pb->circ_successes, pb->circ_attempts, opened_built,
+ opened_attempts,
+ entry_guard_describe(guard));
+ }
+ }
+}
+
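+/*
+ * Worked example of the scaling above (illustrative numbers, not the real
+ * consensus defaults): suppose circ_attempts = 320 and circ_successes = 300
+ * when a scale threshold of 300 is crossed with scale_ratio = 0.5, and 20
+ * circuits are currently open (10 still building, 10 built). The open
+ * circuits are set aside first (attempts 320-20 = 300, successes
+ * 300-10 = 290), the remainder is scaled by 0.5 (150 and 145), and the
+ * open circuits are added back (170 and 155). Open circuits therefore keep
+ * full weight until they close and are counted normally.
+ */
+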
+/**
+ * This function scales the path bias use rates if we have
+ * more data than the scaling threshold. This allows us to be more
+ * sensitive to recent measurements.
+ *
+ * XXX: The attempt count transfer stuff here might be done
+ * better by keeping separate pending counters that get
+ * transferred at circuit close. See ticket #8160.
+ */
+void
+pathbias_scale_use_rates(entry_guard_t *guard)
+{
+ const or_options_t *options = get_options();
+ guard_pathbias_t *pb = entry_guard_get_pathbias_state(guard);
+
+ /* If we get a ton of circuits, just scale everything down */
+ if (pb->use_attempts > pathbias_get_scale_use_threshold(options)) {
+ double scale_ratio = pathbias_get_scale_ratio(options);
+ int opened_attempts = pathbias_count_circs_in_states(guard,
+ PATH_STATE_USE_ATTEMPTED, PATH_STATE_USE_SUCCEEDED);
+ /* Verify that the counts are sane before and after scaling */
+ int counts_are_sane = (pb->use_attempts >= pb->use_successes);
+
+ pb->use_attempts -= opened_attempts;
+
+ pb->use_attempts *= scale_ratio;
+ pb->use_successes *= scale_ratio;
+
+ pb->use_attempts += opened_attempts;
+
+ log_info(LD_CIRC,
+ "Scaled pathbias use counts to %f/%f (%d open) for guard %s",
+ pb->use_successes, pb->use_attempts, opened_attempts,
+ entry_guard_describe(guard));
+
+ /* Have the counts just become invalid by this scaling attempt? */
+ if (counts_are_sane && pb->use_attempts < pb->use_successes) {
+ log_notice(LD_BUG,
+ "Scaling has mangled pathbias usage counts to %f/%f "
+ "(%d open) for guard %s",
+ pb->use_successes, pb->use_attempts,
+ opened_attempts, entry_guard_describe(guard));
+ }
+
+ entry_guards_changed();
+ }
+}
diff --git a/src/feature/client/circpathbias.h b/src/feature/client/circpathbias.h
new file mode 100644
index 0000000000..c99d1277bb
--- /dev/null
+++ b/src/feature/client/circpathbias.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circpathbias.h
+ * \brief Header file for circpathbias.c.
+ **/
+
+#ifndef TOR_CIRCPATHBIAS_H
+#define TOR_CIRCPATHBIAS_H
+
+double pathbias_get_extreme_rate(const or_options_t *options);
+double pathbias_get_extreme_use_rate(const or_options_t *options);
+int pathbias_get_dropguards(const or_options_t *options);
+void pathbias_count_timeout(origin_circuit_t *circ);
+void pathbias_count_build_success(origin_circuit_t *circ);
+int pathbias_count_build_attempt(origin_circuit_t *circ);
+int pathbias_check_close(origin_circuit_t *circ, int reason);
+int pathbias_check_probe_response(circuit_t *circ, const cell_t *cell);
+void pathbias_count_use_attempt(origin_circuit_t *circ);
+void pathbias_mark_use_success(origin_circuit_t *circ);
+void pathbias_mark_use_rollback(origin_circuit_t *circ);
+const char *pathbias_state_to_string(enum path_state_t state);
+
+#endif /* !defined(TOR_CIRCPATHBIAS_H) */
diff --git a/src/feature/client/dnsserv.c b/src/feature/client/dnsserv.c
new file mode 100644
index 0000000000..6e75254239
--- /dev/null
+++ b/src/feature/client/dnsserv.c
@@ -0,0 +1,415 @@
+/* Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file dnsserv.c
+ * \brief Implements client-side DNS proxy server code.
+ *
+ * When a user enables the DNSPort configuration option to have their local
+ * Tor client handle DNS requests, this module handles it. It functions as a
+ * "DNS Server" on the client side, which client applications use.
+ *
+ * Inbound DNS requests are represented as entry_connection_t here (since
+ * that's how Tor represents client-side streams), which are kept associated
+ * with an evdns_server_request structure as exposed by Libevent's
+ * evdns code.
+ *
+ * Upon receiving a DNS request, libevent calls our evdns_server_callback()
+ * function here, which causes this module to create an entry_connection_t
+ * request as appropriate. Later, when that request is answered,
+ * connection_edge.c calls dnsserv_resolved() so we can finish up and tell the
+ * DNS client.
+ **/
+
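+/*
+ * Usage sketch (with an arbitrary, illustrative port): putting
+ * "DNSPort 9053" in the torrc lets a local application resolve names
+ * through Tor, e.g.
+ *
+ *     dig -p 9053 @127.0.0.1 torproject.org
+ *
+ * Such a query arrives here as an evdns request and is answered once
+ * connection_edge.c calls dnsserv_resolved().
+ */
+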
+#include "or/or.h"
+#include "or/dnsserv.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/control.h"
+#include "or/main.h"
+#include "or/policies.h"
+
+#include "or/control_connection_st.h"
+#include "or/entry_connection_st.h"
+#include "or/listener_connection_st.h"
+#include "or/socks_request_st.h"
+#include "lib/evloop/compat_libevent.h"
+
+#include <event2/dns.h>
+#include <event2/dns_compat.h>
+/* XXXX this implies we want an improved evdns */
+#include <event2/dns_struct.h>
+
+/** Helper function: called by evdns whenever the client sends a request to our
+ * DNSPort. We need to eventually answer the request <b>req</b>.
+ */
+static void
+evdns_server_callback(struct evdns_server_request *req, void *data_)
+{
+ const listener_connection_t *listener = data_;
+ entry_connection_t *entry_conn;
+ edge_connection_t *conn;
+ int i = 0;
+ struct evdns_server_question *q = NULL, *supported_q = NULL;
+ struct sockaddr_storage addr;
+ struct sockaddr *sa;
+ int addrlen;
+ tor_addr_t tor_addr;
+ uint16_t port;
+ int err = DNS_ERR_NONE;
+ char *q_name;
+
+ tor_assert(req);
+
+ log_info(LD_APP, "Got a new DNS request!");
+
+ req->flags |= 0x80; /* set RA */
+
+ /* First, check whether the requesting address matches our SOCKSPolicy. */
+ if ((addrlen = evdns_server_request_get_requesting_addr(req,
+ (struct sockaddr*)&addr, (socklen_t)sizeof(addr))) < 0) {
+ log_warn(LD_APP, "Couldn't get requesting address.");
+ evdns_server_request_respond(req, DNS_ERR_SERVERFAILED);
+ return;
+ }
+ (void) addrlen;
+ sa = (struct sockaddr*) &addr;
+ if (tor_addr_from_sockaddr(&tor_addr, sa, &port)<0) {
+ log_warn(LD_APP, "Requesting address wasn't recognized.");
+ evdns_server_request_respond(req, DNS_ERR_SERVERFAILED);
+ return;
+ }
+
+ if (!socks_policy_permits_address(&tor_addr)) {
+ log_warn(LD_APP, "Rejecting DNS request from disallowed IP.");
+ evdns_server_request_respond(req, DNS_ERR_REFUSED);
+ return;
+ }
+
+ /* Now, let's find the first actual question of a type we can answer in this
+ * DNS request. It makes us a little noncompliant to act like this; we
+ * should fix that eventually if it turns out to make a difference for
+ * anybody. */
+ if (req->nquestions == 0) {
+ log_info(LD_APP, "No questions in DNS request; sending back nil reply.");
+ evdns_server_request_respond(req, 0);
+ return;
+ }
+ if (req->nquestions > 1) {
+ log_info(LD_APP, "Got a DNS request with more than one question; I only "
+ "handle one question at a time for now. Skipping the extras.");
+ }
+ for (i = 0; i < req->nquestions; ++i) {
+ if (req->questions[i]->dns_question_class != EVDNS_CLASS_INET)
+ continue;
+ switch (req->questions[i]->type) {
+ case EVDNS_TYPE_A:
+ case EVDNS_TYPE_AAAA:
+ case EVDNS_TYPE_PTR:
+ /* We always pick the first one of these questions, if there is
+ one. */
+ if (! supported_q)
+ supported_q = req->questions[i];
+ break;
+ default:
+ break;
+ }
+ }
+ if (supported_q)
+ q = supported_q;
+ if (!q) {
+ log_info(LD_APP, "None of the questions we got were ones we're willing "
+ "to support. Sending NOTIMPL.");
+ evdns_server_request_respond(req, DNS_ERR_NOTIMPL);
+ return;
+ }
+
+ /* Make sure the name isn't too long: This should be impossible, I think. */
+ if (err == DNS_ERR_NONE && strlen(q->name) > MAX_SOCKS_ADDR_LEN-1)
+ err = DNS_ERR_FORMAT;
+
+ if (err != DNS_ERR_NONE || !supported_q) {
+ /* We got an error? There's no question we're willing to answer? Then
+ * send back an answer immediately; we're done. */
+ evdns_server_request_respond(req, err);
+ return;
+ }
+
+ /* Make a new dummy AP connection, and attach the request to it. */
+ entry_conn = entry_connection_new(CONN_TYPE_AP, AF_INET);
+ conn = ENTRY_TO_EDGE_CONN(entry_conn);
+ CONNECTION_AP_EXPECT_NONPENDING(entry_conn);
+ TO_CONN(conn)->state = AP_CONN_STATE_RESOLVE_WAIT;
+ conn->is_dns_request = 1;
+
+ tor_addr_copy(&TO_CONN(conn)->addr, &tor_addr);
+ TO_CONN(conn)->port = port;
+ TO_CONN(conn)->address = tor_addr_to_str_dup(&tor_addr);
+
+ if (q->type == EVDNS_TYPE_A || q->type == EVDNS_TYPE_AAAA ||
+ q->type == EVDNS_QTYPE_ALL) {
+ entry_conn->socks_request->command = SOCKS_COMMAND_RESOLVE;
+ } else {
+ tor_assert(q->type == EVDNS_TYPE_PTR);
+ entry_conn->socks_request->command = SOCKS_COMMAND_RESOLVE_PTR;
+ }
+
+ /* This connection serves our DNSPort, so mark it as a DNS request. */
+ entry_conn->entry_cfg.dns_request = 1;
+ if (q->type == EVDNS_TYPE_A || q->type == EVDNS_QTYPE_ALL) {
+ entry_conn->entry_cfg.ipv4_traffic = 1;
+ entry_conn->entry_cfg.ipv6_traffic = 0;
+ entry_conn->entry_cfg.prefer_ipv6 = 0;
+ } else if (q->type == EVDNS_TYPE_AAAA) {
+ entry_conn->entry_cfg.ipv4_traffic = 0;
+ entry_conn->entry_cfg.ipv6_traffic = 1;
+ entry_conn->entry_cfg.prefer_ipv6 = 1;
+ }
+
+ strlcpy(entry_conn->socks_request->address, q->name,
+ sizeof(entry_conn->socks_request->address));
+
+ entry_conn->socks_request->listener_type = listener->base_.type;
+ entry_conn->dns_server_request = req;
+ entry_conn->entry_cfg.isolation_flags = listener->entry_cfg.isolation_flags;
+ entry_conn->entry_cfg.session_group = listener->entry_cfg.session_group;
+ entry_conn->nym_epoch = get_signewnym_epoch();
+
+ if (connection_add(ENTRY_TO_CONN(entry_conn)) < 0) {
+ log_warn(LD_APP, "Couldn't register dummy connection for DNS request");
+ evdns_server_request_respond(req, DNS_ERR_SERVERFAILED);
+ connection_free_(ENTRY_TO_CONN(entry_conn));
+ return;
+ }
+
+ control_event_stream_status(entry_conn, STREAM_EVENT_NEW_RESOLVE, 0);
+
+ /* Now, unless a controller asked us to leave streams unattached,
+ * throw the connection over to get rewritten (which will
+ * answer it immediately if it's in the cache, or completely bogus, or
+ * automapped), and then attached to a circuit. */
+ log_info(LD_APP, "Passing request for %s to rewrite_and_attach.",
+ escaped_safe_str_client(q->name));
+ q_name = tor_strdup(q->name); /* q could be freed in rewrite_and_attach */
+ connection_ap_rewrite_and_attach_if_allowed(entry_conn, NULL, NULL);
+ /* Now, the connection is marked if it was bad. */
+
+ log_info(LD_APP, "Passed request for %s to rewrite_and_attach_if_allowed.",
+ escaped_safe_str_client(q_name));
+ tor_free(q_name);
+}
+
+/** Helper function: called whenever the client sends a resolve request to our
+ * controller. We need to eventually answer the request <b>req</b>.
+ * Returns 0 if the controller will be getting (or has gotten) an event in
+ * response; -1 if we couldn't launch the request.
+ */
+int
+dnsserv_launch_request(const char *name, int reverse,
+ control_connection_t *control_conn)
+{
+ entry_connection_t *entry_conn;
+ edge_connection_t *conn;
+ char *q_name;
+
+ /* Make a new dummy AP connection, and attach the request to it. */
+ entry_conn = entry_connection_new(CONN_TYPE_AP, AF_INET);
+ entry_conn->entry_cfg.dns_request = 1;
+ conn = ENTRY_TO_EDGE_CONN(entry_conn);
+ CONNECTION_AP_EXPECT_NONPENDING(entry_conn);
+ conn->base_.state = AP_CONN_STATE_RESOLVE_WAIT;
+
+ tor_addr_copy(&TO_CONN(conn)->addr, &control_conn->base_.addr);
+#ifdef AF_UNIX
+ /*
+ * The control connection can be AF_UNIX and if so tor_addr_to_str_dup will
+ * unhelpfully say "<unknown address type>"; say "(Tor_internal)"
+ * instead.
+ */
+ if (control_conn->base_.socket_family == AF_UNIX) {
+ TO_CONN(conn)->port = 0;
+ TO_CONN(conn)->address = tor_strdup("(Tor_internal)");
+ } else {
+ TO_CONN(conn)->port = control_conn->base_.port;
+ TO_CONN(conn)->address = tor_addr_to_str_dup(&control_conn->base_.addr);
+ }
+#else /* !(defined(AF_UNIX)) */
+ TO_CONN(conn)->port = control_conn->base_.port;
+ TO_CONN(conn)->address = tor_addr_to_str_dup(&control_conn->base_.addr);
+#endif /* defined(AF_UNIX) */
+
+ if (reverse)
+ entry_conn->socks_request->command = SOCKS_COMMAND_RESOLVE_PTR;
+ else
+ entry_conn->socks_request->command = SOCKS_COMMAND_RESOLVE;
+
+ conn->is_dns_request = 1;
+
+ strlcpy(entry_conn->socks_request->address, name,
+ sizeof(entry_conn->socks_request->address));
+
+ entry_conn->socks_request->listener_type = CONN_TYPE_CONTROL_LISTENER;
+ entry_conn->original_dest_address = tor_strdup(name);
+ entry_conn->entry_cfg.session_group = SESSION_GROUP_CONTROL_RESOLVE;
+ entry_conn->nym_epoch = get_signewnym_epoch();
+ entry_conn->entry_cfg.isolation_flags = ISO_DEFAULT;
+
+ if (connection_add(TO_CONN(conn))<0) {
+ log_warn(LD_APP, "Couldn't register dummy connection for RESOLVE request");
+ connection_free_(TO_CONN(conn));
+ return -1;
+ }
+
+ control_event_stream_status(entry_conn, STREAM_EVENT_NEW_RESOLVE, 0);
+
+ /* Now, unless a controller asked us to leave streams unattached,
+ * throw the connection over to get rewritten (which will
+ * answer it immediately if it's in the cache, or completely bogus, or
+ * automapped), and then attached to a circuit. */
+ log_info(LD_APP, "Passing request for %s to rewrite_and_attach.",
+ escaped_safe_str_client(name));
+ q_name = tor_strdup(name); /* copy the name for the log message below */
+ connection_ap_rewrite_and_attach_if_allowed(entry_conn, NULL, NULL);
+ /* Now, the connection is marked if it was bad. */
+
+ log_info(LD_APP, "Passed request for %s to rewrite_and_attach_if_allowed.",
+ escaped_safe_str_client(q_name));
+ tor_free(q_name);
+ return 0;
+}
+
+/** If there is a pending request on <b>conn</b> that's waiting for an answer,
+ * send back an error and free the request. */
+void
+dnsserv_reject_request(entry_connection_t *conn)
+{
+ if (conn->dns_server_request) {
+ evdns_server_request_respond(conn->dns_server_request,
+ DNS_ERR_SERVERFAILED);
+ conn->dns_server_request = NULL;
+ }
+}
+
+/** Look up the original name that corresponds to 'addr' in req. We use this
+ * to preserve case in order to facilitate clients using 0x20-hacks to avoid
+ * DNS poisoning. */
+static const char *
+evdns_get_orig_address(const struct evdns_server_request *req,
+ int rtype, const char *addr)
+{
+ int i, type;
+
+ switch (rtype) {
+ case RESOLVED_TYPE_IPV4:
+ type = EVDNS_TYPE_A;
+ break;
+ case RESOLVED_TYPE_HOSTNAME:
+ type = EVDNS_TYPE_PTR;
+ break;
+ case RESOLVED_TYPE_IPV6:
+ type = EVDNS_TYPE_AAAA;
+ break;
+ case RESOLVED_TYPE_ERROR:
+ case RESOLVED_TYPE_ERROR_TRANSIENT:
+ /* Addr doesn't matter, since we're not sending it back in the reply.*/
+ return addr;
+ default:
+ tor_fragile_assert();
+ return addr;
+ }
+
+ for (i = 0; i < req->nquestions; ++i) {
+ const struct evdns_server_question *q = req->questions[i];
+ if (q->type == type && !strcasecmp(q->name, addr))
+ return q->name;
+ }
+ return addr;
+}
+
+/** Tell the dns request waiting for an answer on <b>conn</b> that we have an
+ * answer of type <b>answer_type</b> (RESOLVED_TYPE_IPV4/IPV6/ERROR), of length
+ * <b>answer_len</b>, in <b>answer</b>, with TTL <b>ttl</b>. Doesn't do
+ * any caching; that's handled elsewhere. */
+void
+dnsserv_resolved(entry_connection_t *conn,
+ int answer_type,
+ size_t answer_len,
+ const char *answer,
+ int ttl)
+{
+ struct evdns_server_request *req = conn->dns_server_request;
+ const char *name;
+ int err = DNS_ERR_NONE;
+ if (!req)
+ return;
+ name = evdns_get_orig_address(req, answer_type,
+ conn->socks_request->address);
+
+ /* XXXX Re-do; this is dumb. */
+ if (ttl < 60)
+ ttl = 60;
+
+ /* The evdns interface is: add a bunch of reply items (corresponding to one
+ * or more of the questions in the request); then, call
+ * evdns_server_request_respond. */
+ if (answer_type == RESOLVED_TYPE_IPV6) {
+ evdns_server_request_add_aaaa_reply(req,
+ name,
+ 1, answer, ttl);
+ } else if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4 &&
+ conn->socks_request->command == SOCKS_COMMAND_RESOLVE) {
+ evdns_server_request_add_a_reply(req,
+ name,
+ 1, answer, ttl);
+ } else if (answer_type == RESOLVED_TYPE_HOSTNAME &&
+ answer_len < 256 &&
+ conn->socks_request->command == SOCKS_COMMAND_RESOLVE_PTR) {
+ char *ans = tor_strndup(answer, answer_len);
+ evdns_server_request_add_ptr_reply(req, NULL,
+ name,
+ ans, ttl);
+ tor_free(ans);
+ } else if (answer_type == RESOLVED_TYPE_ERROR) {
+ err = DNS_ERR_NOTEXIST;
+ } else { /* answer_type == RESOLVED_TYPE_ERROR_TRANSIENT */
+ err = DNS_ERR_SERVERFAILED;
+ }
+
+ evdns_server_request_respond(req, err);
+
+ conn->dns_server_request = NULL;
+}
+
+/** Set up the evdns server port for the UDP socket on <b>conn</b>, which
+ * must be an AP_DNS_LISTENER */
+void
+dnsserv_configure_listener(connection_t *conn)
+{
+ listener_connection_t *listener_conn;
+ tor_assert(conn);
+ tor_assert(SOCKET_OK(conn->s));
+ tor_assert(conn->type == CONN_TYPE_AP_DNS_LISTENER);
+
+ listener_conn = TO_LISTENER_CONN(conn);
+ listener_conn->dns_server_port =
+ tor_evdns_add_server_port(conn->s, 0, evdns_server_callback,
+ listener_conn);
+}
+
+/** Free the evdns server port for <b>conn</b>, which must be an
+ * AP_DNS_LISTENER. */
+void
+dnsserv_close_listener(connection_t *conn)
+{
+ listener_connection_t *listener_conn;
+ tor_assert(conn);
+ tor_assert(conn->type == CONN_TYPE_AP_DNS_LISTENER);
+
+ listener_conn = TO_LISTENER_CONN(conn);
+
+ if (listener_conn->dns_server_port) {
+ evdns_close_server_port(listener_conn->dns_server_port);
+ listener_conn->dns_server_port = NULL;
+ }
+}
diff --git a/src/feature/client/dnsserv.h b/src/feature/client/dnsserv.h
new file mode 100644
index 0000000000..afdde3a342
--- /dev/null
+++ b/src/feature/client/dnsserv.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file dnsserv.h
+ * \brief Header file for dnsserv.c.
+ **/
+
+#ifndef TOR_DNSSERV_H
+#define TOR_DNSSERV_H
+
+void dnsserv_configure_listener(connection_t *conn);
+void dnsserv_close_listener(connection_t *conn);
+void dnsserv_resolved(entry_connection_t *conn,
+ int answer_type,
+ size_t answer_len,
+ const char *answer,
+ int ttl);
+void dnsserv_reject_request(entry_connection_t *conn);
+int dnsserv_launch_request(const char *name, int is_reverse,
+ control_connection_t *control_conn);
+
+#endif /* !defined(TOR_DNSSERV_H) */
+
diff --git a/src/feature/client/entrynodes.c b/src/feature/client/entrynodes.c
new file mode 100644
index 0000000000..ba9c30f8b3
--- /dev/null
+++ b/src/feature/client/entrynodes.c
@@ -0,0 +1,3694 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file entrynodes.c
+ * \brief Code to manage our fixed first nodes for various functions.
+ *
+ * Entry nodes can be guards (for general use) or bridges (for censorship
+ * circumvention).
+ *
+ * In general, we use entry guards to prevent traffic-sampling attacks:
+ * if we chose every circuit independently, an adversary controlling
+ * some fraction of paths on the network would observe a sample of every
+ * user's traffic. Using guards gives users a chance of not being
+ * profiled.
+ *
+ * The current entry guard selection code is designed to try to avoid
+ * _ever_ trying every guard on the network, to try to stick to guards
+ * that we've used before, to handle hostile/broken networks, and
+ * to behave sanely when the network goes up and down.
+ *
+ * Our algorithm works as follows: First, we maintain a SAMPLE of guards
+ * we've seen in the networkstatus consensus. We maintain this sample
+ * over time, and store it persistently; it is chosen without reference
+ * to our configuration or firewall rules. Guards remain in the sample
+ * as they enter and leave the consensus. We expand this sample as
+ * needed, up to a maximum size.
+ *
+ * As a subset of the sample, we maintain a FILTERED SET of the guards
+ * that we would be willing to use if we could connect to them. The
+ * filter removes all the guards that we're excluding because they're
+ * bridges (or not bridges), because we have restrictive firewall rules,
+ * because of ExcludeNodes, because of path bias restrictions,
+ * because they're absent from the network at present, and so on.
+ *
+ * As a subset of the filtered set, we keep a REACHABLE FILTERED SET
+ * (also called a "usable filtered set") of those guards that we call
+ * "reachable" or "maybe reachable". A guard is reachable if we've
+ * connected to it more recently than we've failed. A guard is "maybe
+ * reachable" if we have never tried to connect to it, or if we
+ * failed to connect to it so long ago that we no longer think our
+ * failure means it's down.
+ *
+ * As a persistent ordered list whose elements are taken from the
+ * sampled set, we track a CONFIRMED GUARDS LIST. A guard becomes
+ * confirmed when we successfully build a circuit through it, and decide
+ * to use that circuit. We order the guards on this list by the order
+ * in which they became confirmed.
+ *
+ * And as a final group, we have an ordered list of PRIMARY GUARDS,
+ * whose elements are taken from the filtered set. We prefer
+ * confirmed guards to non-confirmed guards for this list, and place
+ * other restrictions on it. The primary guards are the ones that we
+ * connect to "when nothing is wrong" -- circuits through them can be used
+ * immediately.
+ *
+ * To build circuits, we take a primary guard if possible -- or a
+ * reachable filtered confirmed guard if no primary guard is possible --
+ * or a random reachable filtered guard otherwise. If the guard is
+ * primary, we can use the circuit immediately on success. Otherwise,
+ * the guard is now "pending" -- we won't use its circuit unless all
+ * of the circuits we're trying to build through better guards have
+ * definitely failed.
+ *
+ * While we're building circuits, we track a little "guard state" for
+ * each circuit. We use this to keep track of whether the circuit is
+ * one that we can use as soon as it's done, or whether it's one that
+ * we should keep around to see if we can do better. In the latter case,
+ * a periodic call to entry_guards_upgrade_waiting_circuits() will
+ * eventually upgrade it.
+ **/
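+/*
+ * A compressed sketch of the selection order described above (pseudocode
+ * only; the actual selection logic lives later in this file):
+ *
+ *   if (some primary guard is usable)
+ *     use it; a circuit built through it is usable immediately
+ *   else if (some confirmed guard is in the reachable filtered set)
+ *     use it; the circuit waits as "pending" until better guards fail
+ *   else
+ *     use a random guard from the reachable filtered set; also "pending"
+ */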
+/* DOCDOC -- expand this.
+ *
+ * Information invariants:
+ *
+ * [x] whenever a guard becomes unreachable, clear its usable_filtered flag.
+ *
+ * [x] Whenever a guard becomes reachable or maybe-reachable, if its filtered
+ * flag is set, set its usable_filtered flag.
+ *
+ * [x] Whenever we get a new consensus, call update_from_consensus(). (LATER.)
+ *
+ * [x] Whenever the configuration changes in a relevant way, update the
+ * filtered/usable flags. (LATER.)
+ *
+ * [x] Whenever we add a guard to the sample, make sure its filtered/usable
+ * flags are set as possible.
+ *
+ * [x] Whenever we remove a guard from the sample, remove it from the primary
+ * and confirmed lists.
+ *
+ * [x] When we make a guard confirmed, update the primary list.
+ *
+ * [x] When we make a guard filtered or unfiltered, update the primary list.
+ *
+ * [x] When we are about to pick a guard, make sure that the primary list is
+ * full.
+ *
+ * [x] Before calling sample_reachable_filtered_entry_guards(), make sure
+ * that the filtered, primary, and confirmed flags are up-to-date.
+ *
+ * [x] Call entry_guard_consider_retry every time we are about to check
+ * is_usable_filtered or is_reachable, and every time we set
+ * is_filtered to 1.
+ *
+ * [x] Call entry_guards_changed_for_guard_selection() whenever we update
+ * a persistent field.
+ */
+
+#define ENTRYNODES_PRIVATE
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/bridges.h"
+#include "or/circpathbias.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "or/circuitstats.h"
+#include "or/config.h"
+#include "or/confparse.h"
+#include "or/connection.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/directory.h"
+#include "or/entrynodes.h"
+#include "or/main.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "or/routerset.h"
+#include "or/transports.h"
+#include "or/statefile.h"
+#include "lib/math/fp.h"
+#include "lib/encoding/confline.h"
+
+#include "or/node_st.h"
+#include "or/origin_circuit_st.h"
+#include "or/or_state_st.h"
+
+#include "lib/crypt_ops/digestset.h"
+
+/** A list of existing guard selection contexts. */
+static smartlist_t *guard_contexts = NULL;
+/** The currently enabled guard selection context. */
+static guard_selection_t *curr_guard_context = NULL;
+
+/** A value of 1 means that at least one context has changed,
+ * and those changes need to be flushed to disk. */
+static int entry_guards_dirty = 0;
+
+static void entry_guard_set_filtered_flags(const or_options_t *options,
+ guard_selection_t *gs,
+ entry_guard_t *guard);
+static void pathbias_check_use_success_count(entry_guard_t *guard);
+static void pathbias_check_close_success_count(entry_guard_t *guard);
+static int node_is_possible_guard(const node_t *node);
+static int node_passes_guard_filter(const or_options_t *options,
+ const node_t *node);
+static entry_guard_t *entry_guard_add_to_sample_impl(guard_selection_t *gs,
+ const uint8_t *rsa_id_digest,
+ const char *nickname,
+ const tor_addr_port_t *bridge_addrport);
+static entry_guard_t *get_sampled_guard_by_bridge_addr(guard_selection_t *gs,
+ const tor_addr_port_t *addrport);
+static int entry_guard_obeys_restriction(const entry_guard_t *guard,
+ const entry_guard_restriction_t *rst);
+
+/** Return non-zero if we should apply guardfraction information found in
+ * the consensus. A specific consensus can be specified with the
+ * <b>ns</b> argument; if NULL, the most recent one will be picked. */
+int
+should_apply_guardfraction(const networkstatus_t *ns)
+{
+ /* We need to check the corresponding torrc option and the consensus
+ * parameter if we need to. */
+ const or_options_t *options = get_options();
+
+ /* If UseGuardFraction is 'auto' then check the same-named consensus
+ * parameter. If the consensus parameter is not present, default to
+ * "off". */
+ if (options->UseGuardFraction == -1) {
+ return networkstatus_get_param(ns, "UseGuardFraction",
+ 0, /* default to "off" */
+ 0, 1);
+ }
+
+ return options->UseGuardFraction;
+}
+
+/** Return true iff we know a preferred descriptor for <b>guard</b> */
+static int
+guard_has_descriptor(const entry_guard_t *guard)
+{
+ const node_t *node = node_get_by_id(guard->identity);
+ if (!node)
+ return 0;
+ return node_has_preferred_descriptor(node, 1);
+}
+
+/**
+ * Try to determine the correct type for a selection named "name",
+ * if <b>type</b> is GS_TYPE_INFER.
+ */
+STATIC guard_selection_type_t
+guard_selection_infer_type(guard_selection_type_t type,
+ const char *name)
+{
+ if (type == GS_TYPE_INFER) {
+ if (!strcmp(name, "bridges"))
+ type = GS_TYPE_BRIDGE;
+ else if (!strcmp(name, "restricted"))
+ type = GS_TYPE_RESTRICTED;
+ else
+ type = GS_TYPE_NORMAL;
+ }
+ return type;
+}
+
+/**
+ * Allocate and return a new guard_selection_t, with the name <b>name</b>.
+ */
+STATIC guard_selection_t *
+guard_selection_new(const char *name,
+ guard_selection_type_t type)
+{
+ guard_selection_t *gs;
+
+ type = guard_selection_infer_type(type, name);
+
+ gs = tor_malloc_zero(sizeof(*gs));
+ gs->name = tor_strdup(name);
+ gs->type = type;
+ gs->sampled_entry_guards = smartlist_new();
+ gs->confirmed_entry_guards = smartlist_new();
+ gs->primary_entry_guards = smartlist_new();
+
+ return gs;
+}
+
+/**
+ * Return the guard selection called <b>name</b>. If there is none, and
+ * <b>create_if_absent</b> is true, then create and return it. If there
+ * is none, and <b>create_if_absent</b> is false, then return NULL.
+ */
+STATIC guard_selection_t *
+get_guard_selection_by_name(const char *name,
+ guard_selection_type_t type,
+ int create_if_absent)
+{
+ if (!guard_contexts) {
+ guard_contexts = smartlist_new();
+ }
+ SMARTLIST_FOREACH_BEGIN(guard_contexts, guard_selection_t *, gs) {
+ if (!strcmp(gs->name, name))
+ return gs;
+ } SMARTLIST_FOREACH_END(gs);
+
+ if (! create_if_absent)
+ return NULL;
+
+ log_debug(LD_GUARD, "Creating a guard selection called %s", name);
+ guard_selection_t *new_selection = guard_selection_new(name, type);
+ smartlist_add(guard_contexts, new_selection);
+
+ return new_selection;
+}
+
+/**
+ * Allocate the first guard context that we're planning to use,
+ * and make it the current context.
+ */
+static void
+create_initial_guard_context(void)
+{
+ tor_assert(! curr_guard_context);
+ if (!guard_contexts) {
+ guard_contexts = smartlist_new();
+ }
+ guard_selection_type_t type = GS_TYPE_INFER;
+ const char *name = choose_guard_selection(
+ get_options(),
+ networkstatus_get_live_consensus(approx_time()),
+ NULL,
+ &type);
+ tor_assert(name); // "name" can only be NULL if we had an old name.
+ tor_assert(type != GS_TYPE_INFER);
+ log_notice(LD_GUARD, "Starting with guard context \"%s\"", name);
+ curr_guard_context = get_guard_selection_by_name(name, type, 1);
+}
+
+/** Get current default guard_selection_t, creating it if necessary */
+guard_selection_t *
+get_guard_selection_info(void)
+{
+ if (!curr_guard_context) {
+ create_initial_guard_context();
+ }
+
+ return curr_guard_context;
+}
+
+/** Return a statically allocated human-readable description of <b>guard</b>
+ */
+const char *
+entry_guard_describe(const entry_guard_t *guard)
+{
+ static char buf[256];
+ tor_snprintf(buf, sizeof(buf),
+ "%s ($%s)",
+ strlen(guard->nickname) ? guard->nickname : "[bridge]",
+ hex_str(guard->identity, DIGEST_LEN));
+ return buf;
+}
+
+/** Return <b>guard</b>'s 20-byte RSA identity digest */
+const char *
+entry_guard_get_rsa_id_digest(const entry_guard_t *guard)
+{
+ return guard->identity;
+}
+
+/** Return the pathbias state associated with <b>guard</b>. */
+guard_pathbias_t *
+entry_guard_get_pathbias_state(entry_guard_t *guard)
+{
+ return &guard->pb;
+}
+
+HANDLE_IMPL(entry_guard, entry_guard_t, ATTR_UNUSED STATIC)
+
+/** Return a time between 'now' and 'max_backdate' seconds in the past,
+ * chosen uniformly at random. We use this before recording persistent
+ * dates, so that we aren't leaking exactly when we recorded it.
+ */
+MOCK_IMPL(STATIC time_t,
+randomize_time,(time_t now, time_t max_backdate))
+{
+ tor_assert(max_backdate > 0);
+
+ time_t earliest = now - max_backdate;
+ time_t latest = now;
+ if (earliest <= 0)
+ earliest = 1;
+ if (latest <= earliest)
+ latest = earliest + 1;
+
+ return crypto_rand_time_range(earliest, latest);
+}
+
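+/* For example (hypothetical values): randomize_time(t, 30*86400) returns a
+ * time drawn uniformly between t minus 30 days and t, clamped to stay
+ * positive. */
+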
+/**
+ * @name parameters for networkstatus algorithm
+ *
+ * These parameters are taken from the consensus; some are overrideable in
+ * the torrc.
+ */
+/**@{*/
+/**
+ * We never let our sampled guard set grow larger than this fraction
+ * of the guards on the network.
+ */
+STATIC double
+get_max_sample_threshold(void)
+{
+ int32_t pct =
+ networkstatus_get_param(NULL, "guard-max-sample-threshold-percent",
+ DFLT_MAX_SAMPLE_THRESHOLD_PERCENT,
+ 1, 100);
+ return pct / 100.0;
+}
+/**
+ * We never let our sampled guard set grow larger than this number.
+ */
+STATIC int
+get_max_sample_size_absolute(void)
+{
+ return (int) networkstatus_get_param(NULL, "guard-max-sample-size",
+ DFLT_MAX_SAMPLE_SIZE,
+ 1, INT32_MAX);
+}
+/**
+ * We always try to make our sample contain at least this many guards.
+ */
+STATIC int
+get_min_filtered_sample_size(void)
+{
+ return networkstatus_get_param(NULL, "guard-min-filtered-sample-size",
+ DFLT_MIN_FILTERED_SAMPLE_SIZE,
+ 1, INT32_MAX);
+}
+/**
+ * If a guard is unlisted for this many days in a row, we remove it.
+ */
+STATIC int
+get_remove_unlisted_guards_after_days(void)
+{
+ return networkstatus_get_param(NULL,
+ "guard-remove-unlisted-guards-after-days",
+ DFLT_REMOVE_UNLISTED_GUARDS_AFTER_DAYS,
+ 1, 365*10);
+}
+/**
+ * We remove unconfirmed guards from the sample after this many days,
+ * regardless of whether they are listed or unlisted.
+ */
+STATIC int
+get_guard_lifetime(void)
+{
+ if (get_options()->GuardLifetime >= 86400)
+ return get_options()->GuardLifetime;
+ int32_t days;
+ days = networkstatus_get_param(NULL,
+ "guard-lifetime-days",
+ DFLT_GUARD_LIFETIME_DAYS, 1, 365*10);
+ return days * 86400;
+}
+/**
+ * We remove confirmed guards from the sample if they were sampled
+ * GUARD_LIFETIME_DAYS ago and confirmed this many days ago.
+ */
+STATIC int
+get_guard_confirmed_min_lifetime(void)
+{
+ if (get_options()->GuardLifetime >= 86400)
+ return get_options()->GuardLifetime;
+ int32_t days;
+ days = networkstatus_get_param(NULL, "guard-confirmed-min-lifetime-days",
+ DFLT_GUARD_CONFIRMED_MIN_LIFETIME_DAYS,
+ 1, 365*10);
+ return days * 86400;
+}
+/**
+ * How many guards do we try to keep on our primary guard list?
+ */
+STATIC int
+get_n_primary_guards(void)
+{
+ /* If the user has explicitly configured the number of primary guards, do
+ * what the user wishes to do */
+ const int configured_primaries = get_options()->NumPrimaryGuards;
+ if (configured_primaries) {
+ return configured_primaries;
+ }
+
+ /* Otherwise, check for the consensus parameter, and if that's not set
+ * either, just use the default value. */
+ return networkstatus_get_param(NULL,
+ "guard-n-primary-guards",
+ DFLT_N_PRIMARY_GUARDS, 1, INT32_MAX);
+}
+/**
+ * Return the number of live primary guards we should look at when
+ * making a circuit.
+ */
+STATIC int
+get_n_primary_guards_to_use(guard_usage_t usage)
+{
+ int configured;
+ const char *param_name;
+ int param_default;
+
+ /* If the user has explicitly configured the number of guards, use
+ that. Otherwise, fall back to the default value. */
+ if (usage == GUARD_USAGE_DIRGUARD) {
+ configured = get_options()->NumDirectoryGuards;
+ param_name = "guard-n-primary-dir-guards-to-use";
+ param_default = DFLT_N_PRIMARY_DIR_GUARDS_TO_USE;
+ } else {
+ configured = get_options()->NumEntryGuards;
+ param_name = "guard-n-primary-guards-to-use";
+ param_default = DFLT_N_PRIMARY_GUARDS_TO_USE;
+ }
+ if (configured >= 1) {
+ return configured;
+ }
+ return networkstatus_get_param(NULL,
+ param_name, param_default, 1, INT32_MAX);
+}
+/**
+ * If we haven't successfully built or used a circuit in this long, then
+ * consider that the internet is probably down.
+ */
+STATIC int
+get_internet_likely_down_interval(void)
+{
+ return networkstatus_get_param(NULL, "guard-internet-likely-down-interval",
+ DFLT_INTERNET_LIKELY_DOWN_INTERVAL,
+ 1, INT32_MAX);
+}
+/**
+ * If we're trying to connect to a nonprimary guard for at least this
+ * many seconds, and we haven't gotten the connection to work, we will treat
+ * lower-priority guards as usable.
+ */
+STATIC int
+get_nonprimary_guard_connect_timeout(void)
+{
+ return networkstatus_get_param(NULL,
+ "guard-nonprimary-guard-connect-timeout",
+ DFLT_NONPRIMARY_GUARD_CONNECT_TIMEOUT,
+ 1, INT32_MAX);
+}
+/**
+ * If a circuit has been sitting around in 'waiting for better guard' state
+ * for at least this long, we'll expire it.
+ */
+STATIC int
+get_nonprimary_guard_idle_timeout(void)
+{
+ return networkstatus_get_param(NULL,
+ "guard-nonprimary-guard-idle-timeout",
+ DFLT_NONPRIMARY_GUARD_IDLE_TIMEOUT,
+ 1, INT32_MAX);
+}
+/**
+ * If our configuration retains fewer than this fraction of guards from the
+ * torrc, we are in a restricted setting.
+ */
+STATIC double
+get_meaningful_restriction_threshold(void)
+{
+ int32_t pct = networkstatus_get_param(NULL,
+ "guard-meaningful-restriction-percent",
+ DFLT_MEANINGFUL_RESTRICTION_PERCENT,
+ 1, INT32_MAX);
+ return pct / 100.0;
+}
+/**
+ * If our configuration retains fewer than this fraction of guards from the
+ * torrc, we are in an extremely restricted setting, and should warn.
+ */
+STATIC double
+get_extreme_restriction_threshold(void)
+{
+ int32_t pct = networkstatus_get_param(NULL,
+ "guard-extreme-restriction-percent",
+ DFLT_EXTREME_RESTRICTION_PERCENT,
+ 1, INT32_MAX);
+ return pct / 100.0;
+}
+
+/* Mark <b>guard</b> as maybe reachable again. */
+static void
+mark_guard_maybe_reachable(entry_guard_t *guard)
+{
+ if (guard->is_reachable != GUARD_REACHABLE_NO) {
+ return;
+ }
+
+ /* Note that we do not clear failing_since: this guard is now only
+ * _maybe-reachable_. */
+ guard->is_reachable = GUARD_REACHABLE_MAYBE;
+ if (guard->is_filtered_guard)
+ guard->is_usable_filtered_guard = 1;
+}
+
+/**
+ * Called when the network comes up after having seemed to be down for
+ * a while: Mark the primary guards as maybe-reachable so that we'll
+ * try them again.
+ */
+STATIC void
+mark_primary_guards_maybe_reachable(guard_selection_t *gs)
+{
+ tor_assert(gs);
+
+ if (!gs->primary_guards_up_to_date)
+ entry_guards_update_primary(gs);
+
+ SMARTLIST_FOREACH_BEGIN(gs->primary_entry_guards, entry_guard_t *, guard) {
+ mark_guard_maybe_reachable(guard);
+ } SMARTLIST_FOREACH_END(guard);
+}
+
+/* Called when we exhaust all guards in our sampled set: Marks all guards as
+ maybe-reachable so that we'll try them again. */
+static void
+mark_all_guards_maybe_reachable(guard_selection_t *gs)
+{
+ tor_assert(gs);
+
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ mark_guard_maybe_reachable(guard);
+ } SMARTLIST_FOREACH_END(guard);
+}
+
+/**@}*/
+
+/**
+ * Given our options and our list of nodes, return the name of the
+ * guard selection that we should use. Return NULL for "use the
+ * same selection you were using before".
+ */
+STATIC const char *
+choose_guard_selection(const or_options_t *options,
+ const networkstatus_t *live_ns,
+ const guard_selection_t *old_selection,
+ guard_selection_type_t *type_out)
+{
+ tor_assert(options);
+ tor_assert(type_out);
+
+ if (options->UseBridges) {
+ *type_out = GS_TYPE_BRIDGE;
+ return "bridges";
+ }
+
+ if (! live_ns) {
+ /* without a networkstatus, we can't tell any more than that. */
+ *type_out = GS_TYPE_NORMAL;
+ return "default";
+ }
+
+ const smartlist_t *nodes = nodelist_get_list();
+ int n_guards = 0, n_passing_filter = 0;
+ SMARTLIST_FOREACH_BEGIN(nodes, const node_t *, node) {
+ if (node_is_possible_guard(node)) {
+ ++n_guards;
+ if (node_passes_guard_filter(options, node)) {
+ ++n_passing_filter;
+ }
+ }
+ } SMARTLIST_FOREACH_END(node);
+
+ /* We use separate 'high' and 'low' thresholds here to prevent flapping
+ * back and forth */
+ const int meaningful_threshold_high =
+ (int)(n_guards * get_meaningful_restriction_threshold() * 1.05);
+ const int meaningful_threshold_mid =
+ (int)(n_guards * get_meaningful_restriction_threshold());
+ const int meaningful_threshold_low =
+ (int)(n_guards * get_meaningful_restriction_threshold() * .95);
+ const int extreme_threshold =
+ (int)(n_guards * get_extreme_restriction_threshold());
+
+ /*
+ If we have no previous selection, then we're "restricted" iff we are
+ below the meaningful restriction threshold. That's easy enough.
+
+ But if we _do_ have a previous selection, we make it a little
+ "sticky": we only move from "restricted" to "default" when we find
+ that we're above the threshold plus 5%, and we only move from
+ "default" to "restricted" when we're below the threshold minus 5%.
+ That should prevent us from flapping back and forth if we happen to
+ be hovering very close to the default.
+
+ The extreme threshold is for warning only.
+ */
+
+ static int have_warned_extreme_threshold = 0;
+ if (n_guards &&
+ n_passing_filter < extreme_threshold &&
+ ! have_warned_extreme_threshold) {
+ have_warned_extreme_threshold = 1;
+ const double exclude_frac =
+ (n_guards - n_passing_filter) / (double)n_guards;
+ log_warn(LD_GUARD, "Your configuration excludes %d%% of all possible "
+ "guards. That's likely to make you stand out from the "
+ "rest of the world.", (int)(exclude_frac * 100));
+ }
+
+ /* Easy case: no previous selection. Just check if we are in restricted or
+ normal guard selection. */
+ if (old_selection == NULL) {
+ if (n_passing_filter >= meaningful_threshold_mid) {
+ *type_out = GS_TYPE_NORMAL;
+ return "default";
+ } else {
+ *type_out = GS_TYPE_RESTRICTED;
+ return "restricted";
+ }
+ }
+
+ /* Trickier case: we do have a previous guard selection context. */
+ tor_assert(old_selection);
+
+ /* Use high and low thresholds to decide guard selection, and if we fall in
+ the middle then keep the current guard selection context. */
+ if (n_passing_filter >= meaningful_threshold_high) {
+ *type_out = GS_TYPE_NORMAL;
+ return "default";
+ } else if (n_passing_filter < meaningful_threshold_low) {
+ *type_out = GS_TYPE_RESTRICTED;
+ return "restricted";
+ } else {
+ /* we are in the middle: maintain previous guard selection */
+ *type_out = old_selection->type;
+ return old_selection->name;
+ }
+}
+
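+/*
+ * Worked example of the thresholds above (illustrative numbers): with
+ * n_guards = 1000 and a meaningful-restriction threshold of 20%, the mid,
+ * high, and low cutoffs are 200, 210, and 190 guards passing the filter.
+ * With no previous selection we compare against 200; a selection already
+ * in "default" only drops to "restricted" below 190, and one already in
+ * "restricted" only returns to "default" at 210 or above, which prevents
+ * flapping near the boundary.
+ */
+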
+/**
+ * Check whether we should switch from our current guard selection to a
+ * different one. If so, switch and return 1. Return 0 otherwise.
+ *
+ * On a 1 return, the caller should mark all currently live circuits unusable
+ * for new streams, by calling circuit_mark_all_unused_circs() and
+ * circuit_mark_all_dirty_circs_as_unusable().
+ */
+int
+update_guard_selection_choice(const or_options_t *options)
+{
+ if (!curr_guard_context) {
+ create_initial_guard_context();
+ return 1;
+ }
+
+ guard_selection_type_t type = GS_TYPE_INFER;
+ const char *new_name = choose_guard_selection(
+ options,
+ networkstatus_get_live_consensus(approx_time()),
+ curr_guard_context,
+ &type);
+ tor_assert(new_name);
+ tor_assert(type != GS_TYPE_INFER);
+
+ const char *cur_name = curr_guard_context->name;
+ if (! strcmp(cur_name, new_name)) {
+ log_debug(LD_GUARD,
+ "Staying with guard context \"%s\" (no change)", new_name);
+ return 0; // No change
+ }
+
+ log_notice(LD_GUARD, "Switching to guard context \"%s\" (was using \"%s\")",
+ new_name, cur_name);
+ guard_selection_t *new_guard_context;
+ new_guard_context = get_guard_selection_by_name(new_name, type, 1);
+ tor_assert(new_guard_context);
+ tor_assert(new_guard_context != curr_guard_context);
+ curr_guard_context = new_guard_context;
+
+ return 1;
+}
+
+/**
+ * Return true iff <b>node</b> has all the flags needed for us to consider it
+ * a possible guard when sampling guards.
+ */
+static int
+node_is_possible_guard(const node_t *node)
+{
+ /* The "GUARDS" set is all nodes in the nodelist for which this predicate
+ * holds. */
+
+ tor_assert(node);
+ return (node->is_possible_guard &&
+ node->is_stable &&
+ node->is_fast &&
+ node->is_valid &&
+ node_is_dir(node) &&
+ !router_digest_is_me(node->identity));
+}
+
+/**
+ * Return the sampled guard with the RSA identity digest <b>rsa_id</b>, or
+ * NULL if we don't have one. */
+STATIC entry_guard_t *
+get_sampled_guard_with_id(guard_selection_t *gs,
+ const uint8_t *rsa_id)
+{
+ tor_assert(gs);
+ tor_assert(rsa_id);
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ if (tor_memeq(guard->identity, rsa_id, DIGEST_LEN))
+ return guard;
+ } SMARTLIST_FOREACH_END(guard);
+ return NULL;
+}
+
+/** If <b>gs</b> contains a sampled entry guard matching <b>bridge</b>,
+ * return that guard. Otherwise return NULL. */
+static entry_guard_t *
+get_sampled_guard_for_bridge(guard_selection_t *gs,
+ const bridge_info_t *bridge)
+{
+ const uint8_t *id = bridge_get_rsa_id_digest(bridge);
+ const tor_addr_port_t *addrport = bridge_get_addr_port(bridge);
+ entry_guard_t *guard;
+ if (BUG(!addrport))
+ return NULL; // LCOV_EXCL_LINE
+ guard = get_sampled_guard_by_bridge_addr(gs, addrport);
+ if (! guard || (id && tor_memneq(id, guard->identity, DIGEST_LEN)))
+ return NULL;
+ else
+ return guard;
+}
+
+/** If we know a bridge_info_t matching <b>guard</b>, return that
+ * bridge. Otherwise return NULL. */
+static bridge_info_t *
+get_bridge_info_for_guard(const entry_guard_t *guard)
+{
+ const uint8_t *identity = NULL;
+ if (! tor_digest_is_zero(guard->identity)) {
+ identity = (const uint8_t *)guard->identity;
+ }
+ if (BUG(guard->bridge_addr == NULL))
+ return NULL;
+
+ return get_configured_bridge_by_exact_addr_port_digest(
+ &guard->bridge_addr->addr,
+ guard->bridge_addr->port,
+ (const char*)identity);
+}
+
+/**
+ * Return true iff we have a sampled guard with the RSA identity digest
+ * <b>rsa_id</b>. */
+static inline int
+have_sampled_guard_with_id(guard_selection_t *gs, const uint8_t *rsa_id)
+{
+ return get_sampled_guard_with_id(gs, rsa_id) != NULL;
+}
+
+/**
+ * Allocate a new entry_guard_t object for <b>node</b>, add it to the
+ * sampled entry guards in <b>gs</b>, and return it. <b>node</b> must
+ * not currently be a sampled guard in <b>gs</b>.
+ */
+STATIC entry_guard_t *
+entry_guard_add_to_sample(guard_selection_t *gs,
+ const node_t *node)
+{
+ log_info(LD_GUARD, "Adding %s to the entry guard sample set.",
+ node_describe(node));
+
+ /* make sure that the guard is not already sampled. */
+ if (BUG(have_sampled_guard_with_id(gs, (const uint8_t*)node->identity)))
+ return NULL; // LCOV_EXCL_LINE
+
+ return entry_guard_add_to_sample_impl(gs,
+ (const uint8_t*)node->identity,
+ node_get_nickname(node),
+ NULL);
+}
+
+/**
+ * Backend: adds a new sampled guard to <b>gs</b>, with given identity,
+ * nickname, and ORPort. rsa_id_digest and bridge_addrport are optional, but
+ * we need one of them. nickname is optional. The caller is responsible for
+ * maintaining the size limit of the SAMPLED_GUARDS set.
+ */
+static entry_guard_t *
+entry_guard_add_to_sample_impl(guard_selection_t *gs,
+ const uint8_t *rsa_id_digest,
+ const char *nickname,
+ const tor_addr_port_t *bridge_addrport)
+{
+ const int GUARD_LIFETIME = get_guard_lifetime();
+ tor_assert(gs);
+
+ // XXXX #20827 take ed25519 identity here too.
+
+ /* Make sure we can actually identify the guard. */
+ if (BUG(!rsa_id_digest && !bridge_addrport))
+ return NULL; // LCOV_EXCL_LINE
+
+ entry_guard_t *guard = tor_malloc_zero(sizeof(entry_guard_t));
+
+ /* persistent fields */
+ guard->is_persistent = (rsa_id_digest != NULL);
+ guard->selection_name = tor_strdup(gs->name);
+ if (rsa_id_digest)
+ memcpy(guard->identity, rsa_id_digest, DIGEST_LEN);
+ if (nickname)
+ strlcpy(guard->nickname, nickname, sizeof(guard->nickname));
+ guard->sampled_on_date = randomize_time(approx_time(), GUARD_LIFETIME/10);
+ tor_free(guard->sampled_by_version);
+ guard->sampled_by_version = tor_strdup(VERSION);
+ guard->currently_listed = 1;
+ guard->confirmed_idx = -1;
+
+ /* non-persistent fields */
+ guard->is_reachable = GUARD_REACHABLE_MAYBE;
+ if (bridge_addrport)
+ guard->bridge_addr = tor_memdup(bridge_addrport, sizeof(*bridge_addrport));
+
+ smartlist_add(gs->sampled_entry_guards, guard);
+ guard->in_selection = gs;
+ entry_guard_set_filtered_flags(get_options(), gs, guard);
+ entry_guards_changed_for_guard_selection(gs);
+ return guard;
+}
+
+/**
+ * Add an entry guard to the "bridges" guard selection sample, with
+ * information taken from <b>bridge</b>. Return that entry guard.
+ */
+static entry_guard_t *
+entry_guard_add_bridge_to_sample(guard_selection_t *gs,
+ const bridge_info_t *bridge)
+{
+ const uint8_t *id_digest = bridge_get_rsa_id_digest(bridge);
+ const tor_addr_port_t *addrport = bridge_get_addr_port(bridge);
+
+ tor_assert(addrport);
+
+ /* make sure that the guard is not already sampled. */
+ if (BUG(get_sampled_guard_for_bridge(gs, bridge)))
+ return NULL; // LCOV_EXCL_LINE
+
+ return entry_guard_add_to_sample_impl(gs, id_digest, NULL, addrport);
+}
+
+/**
+ * Return the entry_guard_t in <b>gs</b> whose address is <b>addrport</b>,
+ * or NULL if none exists.
+*/
+static entry_guard_t *
+get_sampled_guard_by_bridge_addr(guard_selection_t *gs,
+ const tor_addr_port_t *addrport)
+{
+ if (! gs)
+ return NULL;
+ if (BUG(!addrport))
+ return NULL;
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, g) {
+ if (g->bridge_addr && tor_addr_port_eq(addrport, g->bridge_addr))
+ return g;
+ } SMARTLIST_FOREACH_END(g);
+ return NULL;
+}
+
+/** Update the guard subsystem's knowledge of the identity of the bridge
+ * at <b>addrport</b>. Idempotent.
+ */
+void
+entry_guard_learned_bridge_identity(const tor_addr_port_t *addrport,
+ const uint8_t *rsa_id_digest)
+{
+ guard_selection_t *gs = get_guard_selection_by_name("bridges",
+ GS_TYPE_BRIDGE,
+ 0);
+ if (!gs)
+ return;
+
+ entry_guard_t *g = get_sampled_guard_by_bridge_addr(gs, addrport);
+ if (!g)
+ return;
+
+ int make_persistent = 0;
+
+ if (tor_digest_is_zero(g->identity)) {
+ memcpy(g->identity, rsa_id_digest, DIGEST_LEN);
+ make_persistent = 1;
+ } else if (tor_memeq(g->identity, rsa_id_digest, DIGEST_LEN)) {
+ /* Nothing to see here; we learned something we already knew. */
+ if (BUG(! g->is_persistent))
+ make_persistent = 1;
+ } else {
+ char old_id[HEX_DIGEST_LEN+1];
+ base16_encode(old_id, sizeof(old_id), g->identity, sizeof(g->identity));
+ log_warn(LD_BUG, "We 'learned' an identity %s for a bridge at %s:%d, but "
+ "we already knew a different one (%s). Ignoring the new info as "
+ "possibly bogus.",
+ hex_str((const char *)rsa_id_digest, DIGEST_LEN),
+ fmt_and_decorate_addr(&addrport->addr), addrport->port,
+ old_id);
+ return; // redundant, but let's be clear: we're not making this persistent.
+ }
+
+ if (make_persistent) {
+ g->is_persistent = 1;
+ entry_guards_changed_for_guard_selection(gs);
+ }
+}
+
+/**
+ * Return the number of sampled guards in <b>gs</b> that are "filtered"
+ * (that is, we're willing to connect to them) and that are "usable"
+ * (that is, either "reachable" or "maybe reachable").
+ *
+ * If a restriction is provided in <b>rst</b>, do not count any guards that
+ * violate it.
+ */
+STATIC int
+num_reachable_filtered_guards(const guard_selection_t *gs,
+ const entry_guard_restriction_t *rst)
+{
+ int n_reachable_filtered_guards = 0;
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ entry_guard_consider_retry(guard);
+ if (! entry_guard_obeys_restriction(guard, rst))
+ continue;
+ if (guard->is_usable_filtered_guard)
+ ++n_reachable_filtered_guards;
+ } SMARTLIST_FOREACH_END(guard);
+ return n_reachable_filtered_guards;
+}
+
+/** Return the actual maximum size for the sample in <b>gs</b>,
+ * given that we know about <b>n_guards</b> total. */
+static int
+get_max_sample_size(guard_selection_t *gs,
+ int n_guards)
+{
+ const int using_bridges = (gs->type == GS_TYPE_BRIDGE);
+ const int min_sample = get_min_filtered_sample_size();
+
+ /* If we are in bridge mode, expand our sample set as needed without worrying
+ * about max size. We should respect the user's wishes to use many bridges if
+ * that's what they have specified in their configuration file. */
+ if (using_bridges)
+ return INT_MAX;
+
+ const int max_sample_by_pct = (int)(n_guards * get_max_sample_threshold());
+ const int max_sample_absolute = get_max_sample_size_absolute();
+ const int max_sample = MIN(max_sample_by_pct, max_sample_absolute);
+ if (max_sample < min_sample)
+ return min_sample;
+ else
+ return max_sample;
+}
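+
+/* Worked example (assuming the prop271 defaults of a 20% threshold, an
+ * absolute cap of 60, and a minimum filtered sample of 20 -- the live
+ * consensus may override any of these): with 7000 possible guards the
+ * percentage cap is 1400, so the absolute cap wins and we return 60; with
+ * only 50 possible guards the percentage cap is 10, which is below the
+ * minimum, so we return 20 instead. */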
+
+/**
+ * Return a smartlist of all the guards that are not currently
+ * members of the sample (GUARDS - SAMPLED_GUARDS). The elements of
+ * this list are node_t pointers in the non-bridge case, and
+ * bridge_info_t pointers in the bridge case. Set *<b>n_guards_out</b>
+ * to the number of guards that we found in GUARDS, including those
+ * that were already sampled.
+ */
+static smartlist_t *
+get_eligible_guards(const or_options_t *options,
+ guard_selection_t *gs,
+ int *n_guards_out)
+{
+ /* Construct eligible_guards as GUARDS - SAMPLED_GUARDS */
+ smartlist_t *eligible_guards = smartlist_new();
+ int n_guards = 0; // total size of "GUARDS"
+
+ if (gs->type == GS_TYPE_BRIDGE) {
+ const smartlist_t *bridges = bridge_list_get();
+ SMARTLIST_FOREACH_BEGIN(bridges, bridge_info_t *, bridge) {
+ ++n_guards;
+ if (NULL != get_sampled_guard_for_bridge(gs, bridge)) {
+ continue;
+ }
+ smartlist_add(eligible_guards, bridge);
+ } SMARTLIST_FOREACH_END(bridge);
+ } else {
+ const smartlist_t *nodes = nodelist_get_list();
+ const int n_sampled = smartlist_len(gs->sampled_entry_guards);
+
+ /* Build a bloom filter of our current guards: let's keep this O(N). */
+ digestset_t *sampled_guard_ids = digestset_new(n_sampled);
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, const entry_guard_t *,
+ guard) {
+ digestset_add(sampled_guard_ids, guard->identity);
+ } SMARTLIST_FOREACH_END(guard);
+
+ SMARTLIST_FOREACH_BEGIN(nodes, const node_t *, node) {
+ if (! node_is_possible_guard(node))
+ continue;
+ if (gs->type == GS_TYPE_RESTRICTED) {
+ /* In restricted mode, we apply the filter BEFORE sampling, so
+ * that we are sampling from the nodes that we might actually
+ * select. If we sampled first, we might wind up with a sample
+ * that didn't include any EntryNodes at all. */
+ if (! node_passes_guard_filter(options, node))
+ continue;
+ }
+ ++n_guards;
+ if (digestset_probably_contains(sampled_guard_ids, node->identity))
+ continue;
+ smartlist_add(eligible_guards, (node_t*)node);
+ } SMARTLIST_FOREACH_END(node);
+
+ /* Now we can free that bloom filter. */
+ digestset_free(sampled_guard_ids);
+ }
+
+ *n_guards_out = n_guards;
+ return eligible_guards;
+}
+
+/** Helper: given a smartlist of either bridge_info_t (if gs->type is
+ * GS_TYPE_BRIDGE) or node_t (otherwise), pick one that can be a guard,
+ * add it as a guard, remove it from the list, and return a new
+ * entry_guard_t. Return NULL on failure. */
+static entry_guard_t *
+select_and_add_guard_item_for_sample(guard_selection_t *gs,
+ smartlist_t *eligible_guards)
+{
+ entry_guard_t *added_guard;
+ if (gs->type == GS_TYPE_BRIDGE) {
+ const bridge_info_t *bridge = smartlist_choose(eligible_guards);
+ if (BUG(!bridge))
+ return NULL; // LCOV_EXCL_LINE
+ smartlist_remove(eligible_guards, bridge);
+ added_guard = entry_guard_add_bridge_to_sample(gs, bridge);
+ } else {
+ const node_t *node =
+ node_sl_choose_by_bandwidth(eligible_guards, WEIGHT_FOR_GUARD);
+ if (BUG(!node))
+ return NULL; // LCOV_EXCL_LINE
+ smartlist_remove(eligible_guards, node);
+ added_guard = entry_guard_add_to_sample(gs, node);
+ }
+
+ return added_guard;
+}
+
+/**
+ * Return true iff we need a consensus to update our guards, but we don't
+ * have one. (We can return 0 here either if the consensus is _not_ missing,
+ * or if we don't need a consensus because we're using bridges.)
+ */
+static int
+live_consensus_is_missing(const guard_selection_t *gs)
+{
+ tor_assert(gs);
+ if (gs->type == GS_TYPE_BRIDGE) {
+ /* We don't update bridges from the consensus; they aren't there. */
+ return 0;
+ }
+ return networkstatus_get_live_consensus(approx_time()) == NULL;
+}
+
+/**
+ * Add new guards to the sampled guards in <b>gs</b> until there are
+ * enough usable filtered guards, but never grow the sample beyond its
+ * maximum size. Return the last guard added, or NULL if none were
+ * added.
+ */
+STATIC entry_guard_t *
+entry_guards_expand_sample(guard_selection_t *gs)
+{
+ tor_assert(gs);
+ const or_options_t *options = get_options();
+
+ if (live_consensus_is_missing(gs)) {
+ log_info(LD_GUARD, "Not expanding the sample guard set; we have "
+ "no live consensus.");
+ return NULL;
+ }
+
+ int n_sampled = smartlist_len(gs->sampled_entry_guards);
+ entry_guard_t *added_guard = NULL;
+ int n_usable_filtered_guards = num_reachable_filtered_guards(gs, NULL);
+ int n_guards = 0;
+ smartlist_t *eligible_guards = get_eligible_guards(options, gs, &n_guards);
+
+ const int max_sample = get_max_sample_size(gs, n_guards);
+ const int min_filtered_sample = get_min_filtered_sample_size();
+
+ log_info(LD_GUARD, "Expanding the sample guard set. We have %d guards "
+ "in the sample, and %d eligible guards to extend it with.",
+ n_sampled, smartlist_len(eligible_guards));
+
+ while (n_usable_filtered_guards < min_filtered_sample) {
+ /* Has our sample grown too large to expand? */
+ if (n_sampled >= max_sample) {
+ log_info(LD_GUARD, "Not expanding the guard sample any further; "
+ "just hit the maximum sample threshold of %d",
+ max_sample);
+ goto done;
+ }
+
+ /* Did we run out of guards? */
+ if (smartlist_len(eligible_guards) == 0) {
+ /* LCOV_EXCL_START
+         As long as MAX_SAMPLE_THRESHOLD can't be adjusted to allow all
+         guards to be sampled, this can't be reached.
+ */
+ log_info(LD_GUARD, "Not expanding the guard sample any further; "
+ "just ran out of eligible guards");
+ goto done;
+ /* LCOV_EXCL_STOP */
+ }
+
+ /* Otherwise we can add at least one new guard. */
+ added_guard = select_and_add_guard_item_for_sample(gs, eligible_guards);
+ if (!added_guard)
+ goto done; // LCOV_EXCL_LINE -- only fails on BUG.
+
+ ++n_sampled;
+
+ if (added_guard->is_usable_filtered_guard)
+ ++n_usable_filtered_guards;
+ }
+
+ done:
+ smartlist_free(eligible_guards);
+ return added_guard;
+}
+
+/**
+ * Helper: <b>guard</b> has just been removed from the sampled guards:
+ * also remove it from primary and confirmed. */
+static void
+remove_guard_from_confirmed_and_primary_lists(guard_selection_t *gs,
+ entry_guard_t *guard)
+{
+ if (guard->is_primary) {
+ guard->is_primary = 0;
+ smartlist_remove_keeporder(gs->primary_entry_guards, guard);
+ } else {
+ if (BUG(smartlist_contains(gs->primary_entry_guards, guard))) {
+ smartlist_remove_keeporder(gs->primary_entry_guards, guard);
+ }
+ }
+
+ if (guard->confirmed_idx >= 0) {
+ smartlist_remove_keeporder(gs->confirmed_entry_guards, guard);
+ guard->confirmed_idx = -1;
+ guard->confirmed_on_date = 0;
+ } else {
+ if (BUG(smartlist_contains(gs->confirmed_entry_guards, guard))) {
+ // LCOV_EXCL_START
+ smartlist_remove_keeporder(gs->confirmed_entry_guards, guard);
+ // LCOV_EXCL_STOP
+ }
+ }
+}
+
+/** Return true iff <b>guard</b> is currently "listed" -- that is, it
+ * appears in the consensus, or as a configured bridge (as
+ * appropriate) */
+MOCK_IMPL(STATIC int,
+entry_guard_is_listed,(guard_selection_t *gs, const entry_guard_t *guard))
+{
+ if (gs->type == GS_TYPE_BRIDGE) {
+ return NULL != get_bridge_info_for_guard(guard);
+ } else {
+ const node_t *node = node_get_by_id(guard->identity);
+
+ return node && node_is_possible_guard(node);
+ }
+}
+
+/**
+ * Update the status of all sampled guards based on the arrival of a
+ * new consensus networkstatus document. This will include marking
+ * some guards as listed or unlisted, and removing expired guards. */
+STATIC void
+sampled_guards_update_from_consensus(guard_selection_t *gs)
+{
+ tor_assert(gs);
+ const int REMOVE_UNLISTED_GUARDS_AFTER =
+ (get_remove_unlisted_guards_after_days() * 86400);
+ const int unlisted_since_slop = REMOVE_UNLISTED_GUARDS_AFTER / 5;
+
+ // It's important to use only a live consensus here; we don't want to
+ // make changes based on anything expired or old.
+ if (live_consensus_is_missing(gs)) {
+ log_info(LD_GUARD, "Not updating the sample guard set; we have "
+ "no live consensus.");
+ return;
+ }
+ log_info(LD_GUARD, "Updating sampled guard status based on received "
+ "consensus.");
+
+ int n_changes = 0;
+
+ /* First: Update listed/unlisted. */
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ /* XXXX #20827 check ed ID too */
+ const int is_listed = entry_guard_is_listed(gs, guard);
+
+ if (is_listed && ! guard->currently_listed) {
+ ++n_changes;
+ guard->currently_listed = 1;
+ guard->unlisted_since_date = 0;
+ log_info(LD_GUARD, "Sampled guard %s is now listed again.",
+ entry_guard_describe(guard));
+ } else if (!is_listed && guard->currently_listed) {
+ ++n_changes;
+ guard->currently_listed = 0;
+ guard->unlisted_since_date = randomize_time(approx_time(),
+ unlisted_since_slop);
+ log_info(LD_GUARD, "Sampled guard %s is now unlisted.",
+ entry_guard_describe(guard));
+ } else if (is_listed && guard->currently_listed) {
+ log_debug(LD_GUARD, "Sampled guard %s is still listed.",
+ entry_guard_describe(guard));
+ } else {
+ tor_assert(! is_listed && ! guard->currently_listed);
+ log_debug(LD_GUARD, "Sampled guard %s is still unlisted.",
+ entry_guard_describe(guard));
+ }
+
+ /* Clean up unlisted_since_date, just in case. */
+ if (guard->currently_listed && guard->unlisted_since_date) {
+ ++n_changes;
+ guard->unlisted_since_date = 0;
+ log_warn(LD_BUG, "Sampled guard %s was listed, but with "
+ "unlisted_since_date set. Fixing.",
+ entry_guard_describe(guard));
+ } else if (!guard->currently_listed && ! guard->unlisted_since_date) {
+ ++n_changes;
+ guard->unlisted_since_date = randomize_time(approx_time(),
+ unlisted_since_slop);
+ log_warn(LD_BUG, "Sampled guard %s was unlisted, but with "
+ "unlisted_since_date unset. Fixing.",
+ entry_guard_describe(guard));
+ }
+ } SMARTLIST_FOREACH_END(guard);
+
+ const time_t remove_if_unlisted_since =
+ approx_time() - REMOVE_UNLISTED_GUARDS_AFTER;
+ const time_t maybe_remove_if_sampled_before =
+ approx_time() - get_guard_lifetime();
+ const time_t remove_if_confirmed_before =
+ approx_time() - get_guard_confirmed_min_lifetime();
+
+ /* Then: remove the ones that have been junk for too long */
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ int rmv = 0;
+
+ if (guard->currently_listed == 0 &&
+ guard->unlisted_since_date < remove_if_unlisted_since) {
+ /*
+ "We have a live consensus, and {IS_LISTED} is false, and
+ {FIRST_UNLISTED_AT} is over {REMOVE_UNLISTED_GUARDS_AFTER}
+ days in the past."
+ */
+ log_info(LD_GUARD, "Removing sampled guard %s: it has been unlisted "
+ "for over %d days", entry_guard_describe(guard),
+ get_remove_unlisted_guards_after_days());
+ rmv = 1;
+ } else if (guard->sampled_on_date < maybe_remove_if_sampled_before) {
+ /* We have a live consensus, and {ADDED_ON_DATE} is over
+ {GUARD_LIFETIME} ago, *and* {CONFIRMED_ON_DATE} is either
+ "never", or over {GUARD_CONFIRMED_MIN_LIFETIME} ago.
+ */
+ if (guard->confirmed_on_date == 0) {
+ rmv = 1;
+ log_info(LD_GUARD, "Removing sampled guard %s: it was sampled "
+ "over %d days ago, but never confirmed.",
+ entry_guard_describe(guard),
+ get_guard_lifetime() / 86400);
+ } else if (guard->confirmed_on_date < remove_if_confirmed_before) {
+ rmv = 1;
+ log_info(LD_GUARD, "Removing sampled guard %s: it was sampled "
+ "over %d days ago, and confirmed over %d days ago.",
+ entry_guard_describe(guard),
+ get_guard_lifetime() / 86400,
+ get_guard_confirmed_min_lifetime() / 86400);
+ }
+ }
+
+ if (rmv) {
+ ++n_changes;
+ SMARTLIST_DEL_CURRENT(gs->sampled_entry_guards, guard);
+ remove_guard_from_confirmed_and_primary_lists(gs, guard);
+ entry_guard_free(guard);
+ }
+ } SMARTLIST_FOREACH_END(guard);
+
+ if (n_changes) {
+ gs->primary_guards_up_to_date = 0;
+ entry_guards_update_filtered_sets(gs);
+ /* We don't need to rebuild the confirmed list right here -- we may have
+ * removed confirmed guards above, but we can't have added any new
+ * confirmed guards.
+ */
+ entry_guards_changed_for_guard_selection(gs);
+ }
+}
+
+/**
+ * Return true iff <b>node</b> is a Tor relay that we are configured to
+ * be able to connect to. */
+static int
+node_passes_guard_filter(const or_options_t *options,
+ const node_t *node)
+{
+ /* NOTE: Make sure that this function stays in sync with
+ * options_transition_affects_entry_guards */
+ if (routerset_contains_node(options->ExcludeNodes, node))
+ return 0;
+
+ if (options->EntryNodes &&
+ !routerset_contains_node(options->EntryNodes, node))
+ return 0;
+
+ if (!fascist_firewall_allows_node(node, FIREWALL_OR_CONNECTION, 0))
+ return 0;
+
+ if (node_is_a_configured_bridge(node))
+ return 0;
+
+ return 1;
+}
+
+/** Helper: Return true iff <b>bridge</b> passes our configuration
+ * filter -- if it is a relay that we are configured to be able to
+ * connect to. */
+static int
+bridge_passes_guard_filter(const or_options_t *options,
+ const bridge_info_t *bridge)
+{
+ tor_assert(bridge);
+ if (!bridge)
+ return 0;
+
+ if (routerset_contains_bridge(options->ExcludeNodes, bridge))
+ return 0;
+
+ /* Ignore entrynodes */
+ const tor_addr_port_t *addrport = bridge_get_addr_port(bridge);
+
+ if (!fascist_firewall_allows_address_addr(&addrport->addr,
+ addrport->port,
+ FIREWALL_OR_CONNECTION,
+ 0, 0))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * Return true iff <b>guard</b> is a Tor relay that we are configured to
+ * be able to connect to, and we haven't disabled it for omission from
+ * the consensus or path bias issues. */
+static int
+entry_guard_passes_filter(const or_options_t *options, guard_selection_t *gs,
+ entry_guard_t *guard)
+{
+ if (guard->currently_listed == 0)
+ return 0;
+ if (guard->pb.path_bias_disabled)
+ return 0;
+
+ if (gs->type == GS_TYPE_BRIDGE) {
+ const bridge_info_t *bridge = get_bridge_info_for_guard(guard);
+ if (bridge == NULL)
+ return 0;
+ return bridge_passes_guard_filter(options, bridge);
+ } else {
+ const node_t *node = node_get_by_id(guard->identity);
+ if (node == NULL) {
+ // This can happen when currently_listed is true, and we're not updating
+ // it because we don't have a live consensus.
+ return 0;
+ }
+
+ return node_passes_guard_filter(options, node);
+ }
+}
+
+/** Return true iff <b>guard</b> is in the same family as <b>node</b>.
+ */
+static int
+guard_in_node_family(const entry_guard_t *guard, const node_t *node)
+{
+ const node_t *guard_node = node_get_by_id(guard->identity);
+ if (guard_node) {
+ return nodes_in_same_family(guard_node, node);
+ } else {
+ /* If we don't have a node_t for the guard node, we might have
+     * a bridge_info_t for it. So let's check to see whether the bridge
+     * address has any family issues.
+ *
+ * (Strictly speaking, I believe this check is unnecessary, since we only
+ * use it to avoid the exit's family when building circuits, and we don't
+ * build multihop circuits until we have a routerinfo_t for the
+ * bridge... at which point, we'll also have a node_t for the
+ * bridge. Nonetheless, it seems wise to include it, in case our
+ * assumptions change down the road. -nickm.)
+ */
+ if (get_options()->EnforceDistinctSubnets && guard->bridge_addr) {
+ tor_addr_t node_addr;
+ node_get_addr(node, &node_addr);
+ if (addrs_in_same_network_family(&node_addr,
+ &guard->bridge_addr->addr)) {
+ return 1;
+ }
+ }
+ return 0;
+ }
+}
+
+/* Allocate and return a new exit guard restriction (where <b>exit_id</b> is of
+ * size DIGEST_LEN) */
+STATIC entry_guard_restriction_t *
+guard_create_exit_restriction(const uint8_t *exit_id)
+{
+ entry_guard_restriction_t *rst = NULL;
+ rst = tor_malloc_zero(sizeof(entry_guard_restriction_t));
+ rst->type = RST_EXIT_NODE;
+ memcpy(rst->exclude_id, exit_id, DIGEST_LEN);
+ return rst;
+}
+
+/** If we have fewer than this many possible usable guards, don't set
+ * MD-availability-based restrictions: we might blacklist all of them. */
+#define MIN_GUARDS_FOR_MD_RESTRICTION 10
+
+/** Return true if we should set md dirserver restrictions. We might not want
+ * to set those if our guard options are too restricted, since we don't want
+ * to blacklist all of them. */
+static int
+should_set_md_dirserver_restriction(void)
+{
+ const guard_selection_t *gs = get_guard_selection_info();
+ int num_usable_guards = num_reachable_filtered_guards(gs, NULL);
+
+ /* Don't set restriction if too few reachable filtered guards. */
+ if (num_usable_guards < MIN_GUARDS_FOR_MD_RESTRICTION) {
+ log_info(LD_GUARD, "Not setting md restriction: only %d"
+ " usable guards.", num_usable_guards);
+ return 0;
+ }
+
+ /* We have enough usable guards: set MD restriction */
+ return 1;
+}
+
+/** Allocate and return an outdated md guard restriction. Return NULL if no
+ * such restriction is needed. */
+STATIC entry_guard_restriction_t *
+guard_create_dirserver_md_restriction(void)
+{
+ entry_guard_restriction_t *rst = NULL;
+
+ if (!should_set_md_dirserver_restriction()) {
+ log_debug(LD_GUARD, "Not setting md restriction: too few "
+ "filtered guards.");
+ return NULL;
+ }
+
+ rst = tor_malloc_zero(sizeof(entry_guard_restriction_t));
+ rst->type = RST_OUTDATED_MD_DIRSERVER;
+
+ return rst;
+}
+
+/* Return True if <b>guard</b> obeys the exit restriction <b>rst</b>. */
+static int
+guard_obeys_exit_restriction(const entry_guard_t *guard,
+ const entry_guard_restriction_t *rst)
+{
+ tor_assert(rst->type == RST_EXIT_NODE);
+
+ // Exclude the exit ID and all of its family.
+ const node_t *node = node_get_by_id((const char*)rst->exclude_id);
+ if (node && guard_in_node_family(guard, node))
+ return 0;
+
+ return tor_memneq(guard->identity, rst->exclude_id, DIGEST_LEN);
+}
+
+/** Return True if <b>guard</b> should be used as a dirserver for fetching
+ * microdescriptors. */
+static int
+guard_obeys_md_dirserver_restriction(const entry_guard_t *guard)
+{
+ /* If this guard is an outdated dirserver, don't use it. */
+ if (microdesc_relay_is_outdated_dirserver(guard->identity)) {
+ log_info(LD_GENERAL, "Skipping %s dirserver: outdated",
+ hex_str(guard->identity, DIGEST_LEN));
+ return 0;
+ }
+
+ log_debug(LD_GENERAL, "%s dirserver obeys md restrictions",
+ hex_str(guard->identity, DIGEST_LEN));
+
+ return 1;
+}
+
+/**
+ * Return true iff <b>guard</b> obeys the restrictions defined in <b>rst</b>.
+ * (If <b>rst</b> is NULL, there are no restrictions.)
+ */
+static int
+entry_guard_obeys_restriction(const entry_guard_t *guard,
+ const entry_guard_restriction_t *rst)
+{
+ tor_assert(guard);
+ if (! rst)
+ return 1; // No restriction? No problem.
+
+ if (rst->type == RST_EXIT_NODE) {
+ return guard_obeys_exit_restriction(guard, rst);
+ } else if (rst->type == RST_OUTDATED_MD_DIRSERVER) {
+ return guard_obeys_md_dirserver_restriction(guard);
+ }
+
+ tor_assert_nonfatal_unreached();
+ return 0;
+}
+
+/**
+ * Update the <b>is_filtered_guard</b> and <b>is_usable_filtered_guard</b>
+ * flags on <b>guard</b>. */
+void
+entry_guard_set_filtered_flags(const or_options_t *options,
+ guard_selection_t *gs,
+ entry_guard_t *guard)
+{
+ unsigned was_filtered = guard->is_filtered_guard;
+ guard->is_filtered_guard = 0;
+ guard->is_usable_filtered_guard = 0;
+
+ if (entry_guard_passes_filter(options, gs, guard)) {
+ guard->is_filtered_guard = 1;
+
+ if (guard->is_reachable != GUARD_REACHABLE_NO)
+ guard->is_usable_filtered_guard = 1;
+
+ entry_guard_consider_retry(guard);
+ }
+ log_debug(LD_GUARD, "Updated sampled guard %s: filtered=%d; "
+ "reachable_filtered=%d.", entry_guard_describe(guard),
+ guard->is_filtered_guard, guard->is_usable_filtered_guard);
+
+ if (!bool_eq(was_filtered, guard->is_filtered_guard)) {
+ /* This guard might now be primary or nonprimary. */
+ gs->primary_guards_up_to_date = 0;
+ }
+}
+
+/**
+ * Update the <b>is_filtered_guard</b> and <b>is_usable_filtered_guard</b>
+ * flag on every guard in <b>gs</b>. */
+STATIC void
+entry_guards_update_filtered_sets(guard_selection_t *gs)
+{
+ const or_options_t *options = get_options();
+
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ entry_guard_set_filtered_flags(options, gs, guard);
+ } SMARTLIST_FOREACH_END(guard);
+}
+
+/**
+ * Return a random guard from the reachable filtered sample guards
+ * in <b>gs</b>, subject to the exclusion rules listed in <b>flags</b>.
+ * Return NULL if no such guard can be found.
+ *
+ * Make sure that the sample is big enough, and that all the filter flags
+ * are set correctly, before calling this function.
+ *
+ * If a restriction is provided in <b>rst</b>, do not return any guards that
+ * violate it.
+ **/
+STATIC entry_guard_t *
+sample_reachable_filtered_entry_guards(guard_selection_t *gs,
+ const entry_guard_restriction_t *rst,
+ unsigned flags)
+{
+ tor_assert(gs);
+ entry_guard_t *result = NULL;
+ const unsigned exclude_confirmed = flags & SAMPLE_EXCLUDE_CONFIRMED;
+ const unsigned exclude_primary = flags & SAMPLE_EXCLUDE_PRIMARY;
+ const unsigned exclude_pending = flags & SAMPLE_EXCLUDE_PENDING;
+ const unsigned no_update_primary = flags & SAMPLE_NO_UPDATE_PRIMARY;
+ const unsigned need_descriptor = flags & SAMPLE_EXCLUDE_NO_DESCRIPTOR;
+
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ entry_guard_consider_retry(guard);
+ } SMARTLIST_FOREACH_END(guard);
+
+ const int n_reachable_filtered = num_reachable_filtered_guards(gs, rst);
+
+ log_info(LD_GUARD, "Trying to sample a reachable guard: We know of %d "
+ "in the USABLE_FILTERED set.", n_reachable_filtered);
+
+ const int min_filtered_sample = get_min_filtered_sample_size();
+ if (n_reachable_filtered < min_filtered_sample) {
+ log_info(LD_GUARD, " (That isn't enough. Trying to expand the sample.)");
+ entry_guards_expand_sample(gs);
+ }
+
+ if (exclude_primary && !gs->primary_guards_up_to_date && !no_update_primary)
+ entry_guards_update_primary(gs);
+
+ /* Build the set of reachable filtered guards. */
+ smartlist_t *reachable_filtered_sample = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+    entry_guard_consider_retry(guard); // redundant, but cheap.
+ if (! entry_guard_obeys_restriction(guard, rst))
+ continue;
+ if (! guard->is_usable_filtered_guard)
+ continue;
+ if (exclude_confirmed && guard->confirmed_idx >= 0)
+ continue;
+ if (exclude_primary && guard->is_primary)
+ continue;
+ if (exclude_pending && guard->is_pending)
+ continue;
+ if (need_descriptor && !guard_has_descriptor(guard))
+ continue;
+ smartlist_add(reachable_filtered_sample, guard);
+ } SMARTLIST_FOREACH_END(guard);
+
+ log_info(LD_GUARD, " (After filters [%x], we have %d guards to consider.)",
+ flags, smartlist_len(reachable_filtered_sample));
+
+ if (smartlist_len(reachable_filtered_sample)) {
+ result = smartlist_choose(reachable_filtered_sample);
+ log_info(LD_GUARD, " (Selected %s.)",
+ result ? entry_guard_describe(result) : "<null>");
+ }
+ smartlist_free(reachable_filtered_sample);
+
+ return result;
+}
+
+/**
+ * Helper: compare two entry_guard_t by their confirmed_idx values.
+ * Used to sort the confirmed list.
+ */
+static int
+compare_guards_by_confirmed_idx(const void **a_, const void **b_)
+{
+ const entry_guard_t *a = *a_, *b = *b_;
+ if (a->confirmed_idx < b->confirmed_idx)
+ return -1;
+ else if (a->confirmed_idx > b->confirmed_idx)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * Find the confirmed guards from among the sampled guards in <b>gs</b>,
+ * and put them in confirmed_entry_guards in the correct
+ * order. Recalculate their indices.
+ */
+STATIC void
+entry_guards_update_confirmed(guard_selection_t *gs)
+{
+ smartlist_clear(gs->confirmed_entry_guards);
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ if (guard->confirmed_idx >= 0)
+ smartlist_add(gs->confirmed_entry_guards, guard);
+ } SMARTLIST_FOREACH_END(guard);
+
+ smartlist_sort(gs->confirmed_entry_guards, compare_guards_by_confirmed_idx);
+
+ int any_changed = 0;
+ SMARTLIST_FOREACH_BEGIN(gs->confirmed_entry_guards, entry_guard_t *, guard) {
+ if (guard->confirmed_idx != guard_sl_idx) {
+ any_changed = 1;
+ guard->confirmed_idx = guard_sl_idx;
+ }
+ } SMARTLIST_FOREACH_END(guard);
+
+ gs->next_confirmed_idx = smartlist_len(gs->confirmed_entry_guards);
+
+ if (any_changed) {
+ entry_guards_changed_for_guard_selection(gs);
+ }
+}
+
+/**
+ * Mark <b>guard</b> as a confirmed guard -- that is, one that we have
+ * connected to, and intend to use again.
+ */
+STATIC void
+make_guard_confirmed(guard_selection_t *gs, entry_guard_t *guard)
+{
+ if (BUG(guard->confirmed_on_date && guard->confirmed_idx >= 0))
+ return; // LCOV_EXCL_LINE
+
+ if (BUG(smartlist_contains(gs->confirmed_entry_guards, guard)))
+ return; // LCOV_EXCL_LINE
+
+ const int GUARD_LIFETIME = get_guard_lifetime();
+ guard->confirmed_on_date = randomize_time(approx_time(), GUARD_LIFETIME/10);
+
+ log_info(LD_GUARD, "Marking %s as a confirmed guard (index %d)",
+ entry_guard_describe(guard),
+ gs->next_confirmed_idx);
+
+ guard->confirmed_idx = gs->next_confirmed_idx++;
+ smartlist_add(gs->confirmed_entry_guards, guard);
+
+ // This confirmed guard might kick something else out of the primary
+ // guards.
+ gs->primary_guards_up_to_date = 0;
+
+ entry_guards_changed_for_guard_selection(gs);
+}
+
+/**
+ * Recalculate the list of primary guards (the ones we'd prefer to use) from
+ * the filtered sample and the confirmed list.
+ */
+STATIC void
+entry_guards_update_primary(guard_selection_t *gs)
+{
+ tor_assert(gs);
+
+ // prevent recursion. Recursion is potentially very bad here.
+ static int running = 0;
+ tor_assert(!running);
+ running = 1;
+
+ const int N_PRIMARY_GUARDS = get_n_primary_guards();
+
+ smartlist_t *new_primary_guards = smartlist_new();
+ smartlist_t *old_primary_guards = smartlist_new();
+ smartlist_add_all(old_primary_guards, gs->primary_entry_guards);
+
+ /* Set this flag now, to prevent the calls below from recursing. */
+ gs->primary_guards_up_to_date = 1;
+
+ /* First, can we fill it up with confirmed guards? */
+ SMARTLIST_FOREACH_BEGIN(gs->confirmed_entry_guards, entry_guard_t *, guard) {
+ if (smartlist_len(new_primary_guards) >= N_PRIMARY_GUARDS)
+ break;
+ if (! guard->is_filtered_guard)
+ continue;
+ guard->is_primary = 1;
+ smartlist_add(new_primary_guards, guard);
+ } SMARTLIST_FOREACH_END(guard);
+
+ /* Can we keep any older primary guards? First remove all the ones
+ * that we already kept. */
+ SMARTLIST_FOREACH_BEGIN(old_primary_guards, entry_guard_t *, guard) {
+ if (smartlist_contains(new_primary_guards, guard)) {
+ SMARTLIST_DEL_CURRENT_KEEPORDER(old_primary_guards, guard);
+ }
+ } SMARTLIST_FOREACH_END(guard);
+
+ /* Now add any that are still good. */
+ SMARTLIST_FOREACH_BEGIN(old_primary_guards, entry_guard_t *, guard) {
+ if (smartlist_len(new_primary_guards) >= N_PRIMARY_GUARDS)
+ break;
+ if (! guard->is_filtered_guard)
+ continue;
+ guard->is_primary = 1;
+ smartlist_add(new_primary_guards, guard);
+ SMARTLIST_DEL_CURRENT_KEEPORDER(old_primary_guards, guard);
+ } SMARTLIST_FOREACH_END(guard);
+
+ /* Mark the remaining previous primary guards as non-primary */
+ SMARTLIST_FOREACH_BEGIN(old_primary_guards, entry_guard_t *, guard) {
+ guard->is_primary = 0;
+ } SMARTLIST_FOREACH_END(guard);
+
+ /* Finally, fill out the list with sampled guards. */
+ while (smartlist_len(new_primary_guards) < N_PRIMARY_GUARDS) {
+ entry_guard_t *guard = sample_reachable_filtered_entry_guards(gs, NULL,
+ SAMPLE_EXCLUDE_CONFIRMED|
+ SAMPLE_EXCLUDE_PRIMARY|
+ SAMPLE_NO_UPDATE_PRIMARY);
+ if (!guard)
+ break;
+ guard->is_primary = 1;
+ smartlist_add(new_primary_guards, guard);
+ }
+
+#if 1
+ /* Debugging. */
+ SMARTLIST_FOREACH(gs->sampled_entry_guards, entry_guard_t *, guard, {
+ tor_assert_nonfatal(
+ bool_eq(guard->is_primary,
+ smartlist_contains(new_primary_guards, guard)));
+ });
+#endif /* 1 */
+
+ int any_change = 0;
+ if (smartlist_len(gs->primary_entry_guards) !=
+ smartlist_len(new_primary_guards)) {
+ any_change = 1;
+ } else {
+ SMARTLIST_FOREACH_BEGIN(gs->primary_entry_guards, entry_guard_t *, g) {
+ if (g != smartlist_get(new_primary_guards, g_sl_idx)) {
+ any_change = 1;
+ }
+ } SMARTLIST_FOREACH_END(g);
+ }
+
+ if (any_change) {
+ log_info(LD_GUARD, "Primary entry guards have changed. "
+ "New primary guard list is: ");
+ int n = smartlist_len(new_primary_guards);
+ SMARTLIST_FOREACH_BEGIN(new_primary_guards, entry_guard_t *, g) {
+ log_info(LD_GUARD, " %d/%d: %s%s%s",
+ g_sl_idx+1, n, entry_guard_describe(g),
+ g->confirmed_idx >= 0 ? " (confirmed)" : "",
+ g->is_filtered_guard ? "" : " (excluded by filter)");
+ } SMARTLIST_FOREACH_END(g);
+ }
+
+ smartlist_free(old_primary_guards);
+ smartlist_free(gs->primary_entry_guards);
+ gs->primary_entry_guards = new_primary_guards;
+ gs->primary_guards_up_to_date = 1;
+ running = 0;
+}
+
+/**
+ * Return the number of seconds after the last attempt at which we should
+ * retry a guard that has been failing since <b>failing_since</b>.
+ */
+static int
+get_retry_schedule(time_t failing_since, time_t now,
+ int is_primary)
+{
+ const unsigned SIX_HOURS = 6 * 3600;
+ const unsigned FOUR_DAYS = 4 * 86400;
+ const unsigned SEVEN_DAYS = 7 * 86400;
+
+ time_t tdiff;
+ if (now > failing_since) {
+ tdiff = now - failing_since;
+ } else {
+ tdiff = 0;
+ }
+
+ const struct {
+ time_t maximum; int primary_delay; int nonprimary_delay;
+ } delays[] = {
+ { SIX_HOURS, 10*60, 1*60*60 },
+ { FOUR_DAYS, 90*60, 4*60*60 },
+ { SEVEN_DAYS, 4*60*60, 18*60*60 },
+ { TIME_MAX, 9*60*60, 36*60*60 }
+ };
+
+ unsigned i;
+ for (i = 0; i < ARRAY_LENGTH(delays); ++i) {
+ if (tdiff <= delays[i].maximum) {
+ return is_primary ? delays[i].primary_delay : delays[i].nonprimary_delay;
+ }
+ }
+ /* LCOV_EXCL_START -- can't reach, since delays ends with TIME_MAX. */
+ tor_assert_nonfatal_unreached();
+ return 36*60*60;
+ /* LCOV_EXCL_STOP */
+}
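+
+/* Worked examples of the table above: a primary guard that has been failing
+ * for two hours is retried 10 minutes after the last attempt, while a
+ * non-primary guard failing for the same time waits an hour; after five days
+ * of failure the delays grow to 4 and 18 hours, and past a week they are
+ * 9 and 36 hours respectively. */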
+
+/**
+ * If <b>guard</b> is unreachable, consider whether enough time has passed
+ * to consider it maybe-reachable again.
+ */
+STATIC void
+entry_guard_consider_retry(entry_guard_t *guard)
+{
+ if (guard->is_reachable != GUARD_REACHABLE_NO)
+ return; /* No retry needed. */
+
+ const time_t now = approx_time();
+ const int delay =
+ get_retry_schedule(guard->failing_since, now, guard->is_primary);
+ const time_t last_attempt = guard->last_tried_to_connect;
+
+ if (BUG(last_attempt == 0) ||
+ now >= last_attempt + delay) {
+ /* We should mark this retriable. */
+ char tbuf[ISO_TIME_LEN+1];
+ format_local_iso_time(tbuf, last_attempt);
+ log_info(LD_GUARD, "Marked %s%sguard %s for possible retry, since we "
+ "haven't tried to use it since %s.",
+ guard->is_primary?"primary ":"",
+ guard->confirmed_idx>=0?"confirmed ":"",
+ entry_guard_describe(guard),
+ tbuf);
+
+ guard->is_reachable = GUARD_REACHABLE_MAYBE;
+ if (guard->is_filtered_guard)
+ guard->is_usable_filtered_guard = 1;
+ }
+}
+
+/** Tell the entry guards subsystem that we have confirmed that as of
+ * just now, we're on the internet. */
+void
+entry_guards_note_internet_connectivity(guard_selection_t *gs)
+{
+ gs->last_time_on_internet = approx_time();
+}
+
+/**
+ * Get a guard for use with a circuit. Prefer to pick a running primary
+ * guard; then a non-pending running filtered confirmed guard; then a
+ * non-pending runnable filtered guard. Update the
+ * <b>last_tried_to_connect</b> time and the <b>is_pending</b> fields of the
+ * guard as appropriate. Set <b>state_out</b> to the new guard-state
+ * of the circuit.
+ */
+STATIC entry_guard_t *
+select_entry_guard_for_circuit(guard_selection_t *gs,
+ guard_usage_t usage,
+ const entry_guard_restriction_t *rst,
+ unsigned *state_out)
+{
+ const int need_descriptor = (usage == GUARD_USAGE_TRAFFIC);
+ tor_assert(gs);
+ tor_assert(state_out);
+
+ if (!gs->primary_guards_up_to_date)
+ entry_guards_update_primary(gs);
+
+ int num_entry_guards = get_n_primary_guards_to_use(usage);
+ smartlist_t *usable_primary_guards = smartlist_new();
+
+ /* "If any entry in PRIMARY_GUARDS has {is_reachable} status of
+ <maybe> or <yes>, return the first such guard." */
+ SMARTLIST_FOREACH_BEGIN(gs->primary_entry_guards, entry_guard_t *, guard) {
+ entry_guard_consider_retry(guard);
+ if (! entry_guard_obeys_restriction(guard, rst))
+ continue;
+ if (guard->is_reachable != GUARD_REACHABLE_NO) {
+ if (need_descriptor && !guard_has_descriptor(guard)) {
+ continue;
+ }
+ *state_out = GUARD_CIRC_STATE_USABLE_ON_COMPLETION;
+ guard->last_tried_to_connect = approx_time();
+ smartlist_add(usable_primary_guards, guard);
+ if (smartlist_len(usable_primary_guards) >= num_entry_guards)
+ break;
+ }
+ } SMARTLIST_FOREACH_END(guard);
+
+ if (smartlist_len(usable_primary_guards)) {
+ entry_guard_t *guard = smartlist_choose(usable_primary_guards);
+ smartlist_free(usable_primary_guards);
+ log_info(LD_GUARD, "Selected primary guard %s for circuit.",
+ entry_guard_describe(guard));
+ return guard;
+ }
+ smartlist_free(usable_primary_guards);
+
+ /* "Otherwise, if the ordered intersection of {CONFIRMED_GUARDS}
+ and {USABLE_FILTERED_GUARDS} is nonempty, return the first
+ entry in that intersection that has {is_pending} set to
+ false." */
+ SMARTLIST_FOREACH_BEGIN(gs->confirmed_entry_guards, entry_guard_t *, guard) {
+ if (guard->is_primary)
+ continue; /* we already considered this one. */
+ if (! entry_guard_obeys_restriction(guard, rst))
+ continue;
+ entry_guard_consider_retry(guard);
+ if (guard->is_usable_filtered_guard && ! guard->is_pending) {
+ if (need_descriptor && !guard_has_descriptor(guard))
+ continue; /* not a bug */
+ guard->is_pending = 1;
+ guard->last_tried_to_connect = approx_time();
+ *state_out = GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD;
+ log_info(LD_GUARD, "No primary guards available. Selected confirmed "
+ "guard %s for circuit. Will try other guards before using "
+ "this circuit.",
+ entry_guard_describe(guard));
+ return guard;
+ }
+ } SMARTLIST_FOREACH_END(guard);
+
+ /* "Otherwise, if there is no such entry, select a member at
+ random from {USABLE_FILTERED_GUARDS}." */
+ {
+ entry_guard_t *guard;
+ unsigned flags = 0;
+ if (need_descriptor)
+ flags |= SAMPLE_EXCLUDE_NO_DESCRIPTOR;
+ guard = sample_reachable_filtered_entry_guards(gs,
+ rst,
+ SAMPLE_EXCLUDE_CONFIRMED |
+ SAMPLE_EXCLUDE_PRIMARY |
+ SAMPLE_EXCLUDE_PENDING |
+ flags);
+ if (guard == NULL) {
+ log_info(LD_GUARD, "Absolutely no sampled guards were available. "
+ "Marking all guards for retry and starting from top again.");
+ mark_all_guards_maybe_reachable(gs);
+ return NULL;
+ }
+ guard->is_pending = 1;
+ guard->last_tried_to_connect = approx_time();
+ *state_out = GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD;
+ log_info(LD_GUARD, "No primary or confirmed guards available. Selected "
+ "random guard %s for circuit. Will try other guards before "
+ "using this circuit.",
+ entry_guard_describe(guard));
+ return guard;
+ }
+}
+
+/**
+ * Note that we failed to connect to or build circuits through <b>guard</b>.
+ * Use with a guard returned by select_entry_guard_for_circuit().
+ */
+STATIC void
+entry_guards_note_guard_failure(guard_selection_t *gs,
+ entry_guard_t *guard)
+{
+ tor_assert(gs);
+
+ guard->is_reachable = GUARD_REACHABLE_NO;
+ guard->is_usable_filtered_guard = 0;
+
+ guard->is_pending = 0;
+ if (guard->failing_since == 0)
+ guard->failing_since = approx_time();
+
+ log_info(LD_GUARD, "Recorded failure for %s%sguard %s",
+ guard->is_primary?"primary ":"",
+ guard->confirmed_idx>=0?"confirmed ":"",
+ entry_guard_describe(guard));
+}
+
+/**
+ * Note that we successfully connected to, and built a circuit through
+ * <b>guard</b>. Given the old guard-state of the circuit in <b>old_state</b>,
+ * return the new guard-state of the circuit.
+ *
+ * Be aware: the circuit is only usable when its guard-state becomes
+ * GUARD_CIRC_STATE_COMPLETE.
+ **/
+STATIC unsigned
+entry_guards_note_guard_success(guard_selection_t *gs,
+ entry_guard_t *guard,
+ unsigned old_state)
+{
+ tor_assert(gs);
+
+ /* Save this, since we're about to overwrite it. */
+ const time_t last_time_on_internet = gs->last_time_on_internet;
+ gs->last_time_on_internet = approx_time();
+
+ guard->is_reachable = GUARD_REACHABLE_YES;
+ guard->failing_since = 0;
+ guard->is_pending = 0;
+ if (guard->is_filtered_guard)
+ guard->is_usable_filtered_guard = 1;
+
+ if (guard->confirmed_idx < 0) {
+ make_guard_confirmed(gs, guard);
+ if (!gs->primary_guards_up_to_date)
+ entry_guards_update_primary(gs);
+ }
+
+ unsigned new_state;
+ switch (old_state) {
+ case GUARD_CIRC_STATE_COMPLETE:
+ case GUARD_CIRC_STATE_USABLE_ON_COMPLETION:
+ new_state = GUARD_CIRC_STATE_COMPLETE;
+ break;
+ default:
+ tor_assert_nonfatal_unreached();
+ /* Fall through. */
+ case GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD:
+ if (guard->is_primary) {
+ /* XXXX #20832 -- I don't actually like this logic. It seems to make
+ * us a little more susceptible to evil-ISP attacks. The mitigations
+ * I'm thinking of, however, aren't local to this point, so I'll leave
+ * it alone. */
+ /* This guard may have become primary by virtue of being confirmed.
+ * If so, the circuit for it is now complete.
+ */
+ new_state = GUARD_CIRC_STATE_COMPLETE;
+ } else {
+ new_state = GUARD_CIRC_STATE_WAITING_FOR_BETTER_GUARD;
+ }
+ break;
+ }
+
+ if (! guard->is_primary) {
+ if (last_time_on_internet + get_internet_likely_down_interval()
+ < approx_time()) {
+ mark_primary_guards_maybe_reachable(gs);
+ }
+ }
+
+ log_info(LD_GUARD, "Recorded success for %s%sguard %s",
+ guard->is_primary?"primary ":"",
+ guard->confirmed_idx>=0?"confirmed ":"",
+ entry_guard_describe(guard));
+
+ return new_state;
+}
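+
+/* Concrete transitions implemented above: a circuit whose guard-state was
+ * <usable_on_completion> becomes <complete> as soon as it succeeds; one that
+ * was <usable_if_no_better_guard> becomes <complete> only if its guard is
+ * now primary (for example because this success just confirmed it), and
+ * otherwise moves to <waiting_for_better_guard>. */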
+
+/**
+ * Helper: Return true iff <b>a</b> has higher priority than <b>b</b>.
+ */
+STATIC int
+entry_guard_has_higher_priority(entry_guard_t *a, entry_guard_t *b)
+{
+ tor_assert(a && b);
+ if (a == b)
+ return 0;
+
+ /* Confirmed is always better than unconfirmed; lower index better
+ than higher */
+ if (a->confirmed_idx < 0) {
+ if (b->confirmed_idx >= 0)
+ return 0;
+ } else {
+ if (b->confirmed_idx < 0)
+ return 1;
+
+ /* Lower confirmed_idx is better than higher. */
+ return (a->confirmed_idx < b->confirmed_idx);
+ }
+
+ /* If we reach this point, both are unconfirmed. If one is pending, it
+ * has higher priority. */
+ if (a->is_pending) {
+ if (! b->is_pending)
+ return 1;
+
+ /* Both are pending: earlier last_tried_connect wins. */
+ return a->last_tried_to_connect < b->last_tried_to_connect;
+ } else {
+ if (b->is_pending)
+ return 0;
+
+ /* Neither is pending: priorities are equal. */
+ return 0;
+ }
+}
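+
+/* Example orderings under the rules above: a guard with confirmed_idx 2
+ * outranks one with confirmed_idx 7, and both outrank every unconfirmed
+ * guard; among unconfirmed guards, a pending one whose connection attempt
+ * started earlier outranks a pending one tried later, and two non-pending
+ * unconfirmed guards tie. */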
+
+/** Release all storage held in <b>restriction</b> */
+STATIC void
+entry_guard_restriction_free_(entry_guard_restriction_t *rst)
+{
+ tor_free(rst);
+}
+
+/**
+ * Release all storage held in <b>state</b>.
+ */
+void
+circuit_guard_state_free_(circuit_guard_state_t *state)
+{
+ if (!state)
+ return;
+ entry_guard_restriction_free(state->restrictions);
+ entry_guard_handle_free(state->guard);
+ tor_free(state);
+}
+
+/** Allocate and return a new circuit_guard_state_t to track the result
+ * of using <b>guard</b> for a given operation. */
+MOCK_IMPL(STATIC circuit_guard_state_t *,
+circuit_guard_state_new,(entry_guard_t *guard, unsigned state,
+ entry_guard_restriction_t *rst))
+{
+ circuit_guard_state_t *result;
+
+ result = tor_malloc_zero(sizeof(circuit_guard_state_t));
+ result->guard = entry_guard_handle_new(guard);
+ result->state = state;
+ result->state_set_at = approx_time();
+ result->restrictions = rst;
+
+ return result;
+}
+
+/**
+ * Pick a suitable entry guard for a circuit in <b>gs</b>, and place that
+ * guard in *<b>chosen_node_out</b>. Set *<b>guard_state_out</b> to an opaque
+ * state object that will record whether the circuit is ready to be used
+ * or not. Return 0 on success; on failure, return -1.
+ *
+ * If a restriction is provided in <b>rst</b>, do not return any guards that
+ * violate it, and remember that restriction in <b>guard_state_out</b> for
+ * later use. (Takes ownership of the <b>rst</b> object.)
+ */
+int
+entry_guard_pick_for_circuit(guard_selection_t *gs,
+ guard_usage_t usage,
+ entry_guard_restriction_t *rst,
+ const node_t **chosen_node_out,
+ circuit_guard_state_t **guard_state_out)
+{
+ tor_assert(gs);
+ tor_assert(chosen_node_out);
+ tor_assert(guard_state_out);
+ *chosen_node_out = NULL;
+ *guard_state_out = NULL;
+
+ unsigned state = 0;
+ entry_guard_t *guard =
+ select_entry_guard_for_circuit(gs, usage, rst, &state);
+ if (! guard)
+ goto fail;
+ if (BUG(state == 0))
+ goto fail;
+ const node_t *node = node_get_by_id(guard->identity);
+ // XXXX #20827 check Ed ID.
+ if (! node)
+ goto fail;
+ if (BUG(usage != GUARD_USAGE_DIRGUARD &&
+ !node_has_preferred_descriptor(node, 1)))
+ goto fail;
+
+ *chosen_node_out = node;
+ *guard_state_out = circuit_guard_state_new(guard, state, rst);
+
+ return 0;
+ fail:
+ entry_guard_restriction_free(rst);
+ return -1;
+}
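+
+/* Minimal usage sketch (illustrative only; error handling and circuit
+ * construction are elided, and the surrounding names are the caller's):
+ *
+ *   const node_t *guard_node = NULL;
+ *   circuit_guard_state_t *guard_state = NULL;
+ *   if (entry_guard_pick_for_circuit(get_guard_selection_info(),
+ *                                    GUARD_USAGE_TRAFFIC, NULL,
+ *                                    &guard_node, &guard_state) < 0)
+ *     return; // no usable guard right now
+ *   // ... try to build a circuit through guard_node ...
+ *   if (circuit_succeeded)
+ *     usable = entry_guard_succeeded(&guard_state);
+ *   else
+ *     entry_guard_failed(&guard_state);
+ */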
+
+/**
+ * Called by the circuit building module when a circuit has succeeded: informs
+ * the guards code that the guard in *<b>guard_state_p</b> is working, and
+ * advances the state of the guard module. On a GUARD_USABLE_NEVER return
+ * value, the circuit is broken and should not be used. On a GUARD_USABLE_NOW
+ * return value, the circuit is ready to use. On a GUARD_MAYBE_USABLE_LATER
+ * return value, the circuit should not be used until we find out whether
+ * preferred guards will work for us.
+ */
+guard_usable_t
+entry_guard_succeeded(circuit_guard_state_t **guard_state_p)
+{
+ if (BUG(*guard_state_p == NULL))
+ return GUARD_USABLE_NEVER;
+
+ entry_guard_t *guard = entry_guard_handle_get((*guard_state_p)->guard);
+ if (! guard || BUG(guard->in_selection == NULL))
+ return GUARD_USABLE_NEVER;
+
+ unsigned newstate =
+ entry_guards_note_guard_success(guard->in_selection, guard,
+ (*guard_state_p)->state);
+
+ (*guard_state_p)->state = newstate;
+ (*guard_state_p)->state_set_at = approx_time();
+
+ if (newstate == GUARD_CIRC_STATE_COMPLETE) {
+ return GUARD_USABLE_NOW;
+ } else {
+ return GUARD_MAYBE_USABLE_LATER;
+ }
+}
+
+/** Cancel the selection of *<b>guard_state_p</b> without declaring
+ * success or failure. It is safe to call this function if success or
+ * failure _has_ already been declared. */
+void
+entry_guard_cancel(circuit_guard_state_t **guard_state_p)
+{
+ if (BUG(*guard_state_p == NULL))
+ return;
+ entry_guard_t *guard = entry_guard_handle_get((*guard_state_p)->guard);
+ if (! guard)
+ return;
+
+ /* XXXX prop271 -- last_tried_to_connect_at will be erroneous here, but this
+ * function will only get called in "bug" cases anyway. */
+ guard->is_pending = 0;
+ circuit_guard_state_free(*guard_state_p);
+ *guard_state_p = NULL;
+}
+
+/**
+ * Called by the circuit building module when a circuit has failed:
+ * informs the guards code that the guard in *<b>guard_state_p</b> is
+ * not working, and advances the state of the guard module.
+ */
+void
+entry_guard_failed(circuit_guard_state_t **guard_state_p)
+{
+ if (BUG(*guard_state_p == NULL))
+ return;
+
+ entry_guard_t *guard = entry_guard_handle_get((*guard_state_p)->guard);
+ if (! guard || BUG(guard->in_selection == NULL))
+ return;
+
+ entry_guards_note_guard_failure(guard->in_selection, guard);
+
+ (*guard_state_p)->state = GUARD_CIRC_STATE_DEAD;
+ (*guard_state_p)->state_set_at = approx_time();
+}
+
+/**
+ * Run the entry_guard_failed() function on every circuit that is
+ * pending on <b>chan</b>.
+ */
+void
+entry_guard_chan_failed(channel_t *chan)
+{
+ if (!chan)
+ return;
+
+ smartlist_t *pending = smartlist_new();
+ circuit_get_all_pending_on_channel(pending, chan);
+ SMARTLIST_FOREACH_BEGIN(pending, circuit_t *, circ) {
+ if (!CIRCUIT_IS_ORIGIN(circ))
+ continue;
+
+ origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
+ if (origin_circ->guard_state) {
+ /* We might have no guard state if we didn't use a guard on this
+ * circuit (eg it's for a fallback directory). */
+ entry_guard_failed(&origin_circ->guard_state);
+ }
+ } SMARTLIST_FOREACH_END(circ);
+ smartlist_free(pending);
+}
+
+/**
+ * Return true iff every primary guard in <b>gs</b> is believed to
+ * be unreachable.
+ */
+STATIC int
+entry_guards_all_primary_guards_are_down(guard_selection_t *gs)
+{
+ tor_assert(gs);
+ if (!gs->primary_guards_up_to_date)
+ entry_guards_update_primary(gs);
+ SMARTLIST_FOREACH_BEGIN(gs->primary_entry_guards, entry_guard_t *, guard) {
+ entry_guard_consider_retry(guard);
+ if (guard->is_reachable != GUARD_REACHABLE_NO)
+ return 0;
+ } SMARTLIST_FOREACH_END(guard);
+ return 1;
+}
+
+/** Wrapper for entry_guard_has_higher_priority that compares the
+ * guard-priorities of a pair of circuits. Return 1 if <b>a</b> has higher
+ * priority than <b>b</b>.
+ *
+ * If a restriction is provided in <b>rst</b>, then do not consider
+ * <b>a</b> to have higher priority if it violates the restriction.
+ */
+static int
+circ_state_has_higher_priority(origin_circuit_t *a,
+ const entry_guard_restriction_t *rst,
+ origin_circuit_t *b)
+{
+ circuit_guard_state_t *state_a = origin_circuit_get_guard_state(a);
+ circuit_guard_state_t *state_b = origin_circuit_get_guard_state(b);
+
+ tor_assert(state_a);
+ tor_assert(state_b);
+
+ entry_guard_t *guard_a = entry_guard_handle_get(state_a->guard);
+ entry_guard_t *guard_b = entry_guard_handle_get(state_b->guard);
+
+ if (! guard_a) {
+ /* Unknown guard -- never higher priority. */
+ return 0;
+ } else if (! guard_b) {
+ /* Known guard -- higher priority than any unknown guard. */
+ return 1;
+ } else if (! entry_guard_obeys_restriction(guard_a, rst)) {
+ /* Restriction violated; guard_a cannot have higher priority. */
+ return 0;
+ } else {
+ /* Both known -- compare.*/
+ return entry_guard_has_higher_priority(guard_a, guard_b);
+ }
+}
+
+/**
+ * Look at all of the origin_circuit_t * objects in <b>all_circuits_in</b>,
+ * and see if any of them that were previously not ready to use for
+ * guard-related reasons are now ready to use. Place those circuits
+ * in <b>newly_complete_out</b>, and mark them COMPLETE.
+ *
+ * Return 1 if we upgraded any circuits, and 0 otherwise.
+ */
+int
+entry_guards_upgrade_waiting_circuits(guard_selection_t *gs,
+ const smartlist_t *all_circuits_in,
+ smartlist_t *newly_complete_out)
+{
+ tor_assert(gs);
+ tor_assert(all_circuits_in);
+ tor_assert(newly_complete_out);
+
+ if (! entry_guards_all_primary_guards_are_down(gs)) {
+ /* We only upgrade a waiting circuit if the primary guards are all
+ * down. */
+ log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits, "
+ "but not all primary guards were definitely down.");
+ return 0;
+ }
+
+ int n_waiting = 0;
+ int n_complete = 0;
+ int n_complete_blocking = 0;
+ origin_circuit_t *best_waiting_circuit = NULL;
+ smartlist_t *all_circuits = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(all_circuits_in, origin_circuit_t *, circ) {
+ // We filter out circuits that aren't ours, or which we can't
+ // reason about.
+ circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
+ if (state == NULL)
+ continue;
+ entry_guard_t *guard = entry_guard_handle_get(state->guard);
+ if (!guard || guard->in_selection != gs)
+ continue;
+
+ smartlist_add(all_circuits, circ);
+ } SMARTLIST_FOREACH_END(circ);
+
+ SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
+ circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
+ if (BUG(state == NULL))
+ continue;
+
+ if (state->state == GUARD_CIRC_STATE_WAITING_FOR_BETTER_GUARD) {
+ ++n_waiting;
+ if (! best_waiting_circuit ||
+ circ_state_has_higher_priority(circ, NULL, best_waiting_circuit)) {
+ best_waiting_circuit = circ;
+ }
+ }
+ } SMARTLIST_FOREACH_END(circ);
+
+ if (! best_waiting_circuit) {
+ log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits, "
+ "but didn't find any.");
+ goto no_change;
+ }
+
+ /* We'll need to keep track of what restrictions were used when picking this
+ * circuit, so that we don't allow any circuit without those restrictions to
+ * block it. */
+ const entry_guard_restriction_t *rst_on_best_waiting =
+ origin_circuit_get_guard_state(best_waiting_circuit)->restrictions;
+
+ /* First look at the complete circuits: Do any block this circuit? */
+ SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
+ /* "C2 "blocks" C1 if:
+ * C2 obeys all the restrictions that C1 had to obey, AND
+ * C2 has higher priority than C1, AND
+ * Either C2 is <complete>, or C2 is <waiting_for_better_guard>,
+ or C2 has been <usable_if_no_better_guard> for no more than
+ {NONPRIMARY_GUARD_CONNECT_TIMEOUT} seconds."
+ */
+ circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
+    if (BUG(state == NULL))
+ continue;
+ if (state->state != GUARD_CIRC_STATE_COMPLETE)
+ continue;
+ ++n_complete;
+ if (circ_state_has_higher_priority(circ, rst_on_best_waiting,
+ best_waiting_circuit))
+ ++n_complete_blocking;
+ } SMARTLIST_FOREACH_END(circ);
+
+ if (n_complete_blocking) {
+ log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits: found "
+ "%d complete and %d guard-stalled. At least one complete "
+ "circuit had higher priority, so not upgrading.",
+ n_complete, n_waiting);
+ goto no_change;
+ }
+
+ /* " * If any circuit C1 is <waiting_for_better_guard>, AND:
+ * All primary guards have reachable status of <no>.
+ * There is no circuit C2 that "blocks" C1.
+ Then, upgrade C1 to <complete>.""
+ */
+ int n_blockers_found = 0;
+ const time_t state_set_at_cutoff =
+ approx_time() - get_nonprimary_guard_connect_timeout();
+ SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
+ circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
+ if (BUG(state == NULL))
+ continue;
+ if (state->state != GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD)
+ continue;
+ if (state->state_set_at <= state_set_at_cutoff)
+ continue;
+ if (circ_state_has_higher_priority(circ, rst_on_best_waiting,
+ best_waiting_circuit))
+ ++n_blockers_found;
+ } SMARTLIST_FOREACH_END(circ);
+
+ if (n_blockers_found) {
+ log_debug(LD_GUARD, "Considered upgrading guard-stalled circuits: found "
+ "%d guard-stalled, but %d pending circuit(s) had higher "
+ "guard priority, so not upgrading.",
+ n_waiting, n_blockers_found);
+ goto no_change;
+ }
+
+ /* Okay. We have a best waiting circuit, and we aren't waiting for
+ anything better. Add all circuits with that priority to the
+ list, and call them COMPLETE. */
+ int n_succeeded = 0;
+ SMARTLIST_FOREACH_BEGIN(all_circuits, origin_circuit_t *, circ) {
+ circuit_guard_state_t *state = origin_circuit_get_guard_state(circ);
+ if (BUG(state == NULL))
+ continue;
+ if (circ != best_waiting_circuit && rst_on_best_waiting) {
+ /* Can't upgrade other circ with same priority as best; might
+ be blocked. */
+ continue;
+ }
+ if (state->state != GUARD_CIRC_STATE_WAITING_FOR_BETTER_GUARD)
+ continue;
+ if (circ_state_has_higher_priority(best_waiting_circuit, NULL, circ))
+ continue;
+
+ state->state = GUARD_CIRC_STATE_COMPLETE;
+ state->state_set_at = approx_time();
+ smartlist_add(newly_complete_out, circ);
+ ++n_succeeded;
+ } SMARTLIST_FOREACH_END(circ);
+
+ log_info(LD_GUARD, "Considered upgrading guard-stalled circuits: found "
+ "%d guard-stalled, %d complete. %d of the guard-stalled "
+ "circuit(s) had high enough priority to upgrade.",
+ n_waiting, n_complete, n_succeeded);
+
+ tor_assert_nonfatal(n_succeeded >= 1);
+ smartlist_free(all_circuits);
+ return 1;
+
+ no_change:
+ smartlist_free(all_circuits);
+ return 0;
+}
+
+/**
+ * Return true iff the circuit whose state is <b>guard_state</b> should
+ * expire.
+ */
+int
+entry_guard_state_should_expire(circuit_guard_state_t *guard_state)
+{
+ if (guard_state == NULL)
+ return 0;
+ const time_t expire_if_waiting_since =
+ approx_time() - get_nonprimary_guard_idle_timeout();
+ return (guard_state->state == GUARD_CIRC_STATE_WAITING_FOR_BETTER_GUARD
+ && guard_state->state_set_at < expire_if_waiting_since);
+}
+
+/**
+ * Update all derived pieces of the guard selection state in <b>gs</b>.
+ * Return true iff we should stop using all previously generated circuits.
+ */
+int
+entry_guards_update_all(guard_selection_t *gs)
+{
+ sampled_guards_update_from_consensus(gs);
+ entry_guards_update_filtered_sets(gs);
+ entry_guards_update_confirmed(gs);
+ entry_guards_update_primary(gs);
+ return 0;
+}
+
+/**
+ * Return a newly allocated string for encoding the persistent parts of
+ * <b>guard</b> to the state file.
+ */
+STATIC char *
+entry_guard_encode_for_state(entry_guard_t *guard)
+{
+ /*
+ * The meta-format we use is K=V K=V K=V... where K can be any
+   * characters except space and =, and V can be any characters except
+ * space. The order of entries is not allowed to matter.
+ * Unrecognized K=V entries are persisted; recognized but erroneous
+ * entries are corrected.
+ */
+
+ smartlist_t *result = smartlist_new();
+ char tbuf[ISO_TIME_LEN+1];
+
+ tor_assert(guard);
+
+ smartlist_add_asprintf(result, "in=%s", guard->selection_name);
+ smartlist_add_asprintf(result, "rsa_id=%s",
+ hex_str(guard->identity, DIGEST_LEN));
+ if (guard->bridge_addr) {
+ smartlist_add_asprintf(result, "bridge_addr=%s:%d",
+ fmt_and_decorate_addr(&guard->bridge_addr->addr),
+ guard->bridge_addr->port);
+ }
+ if (strlen(guard->nickname) && is_legal_nickname(guard->nickname)) {
+ smartlist_add_asprintf(result, "nickname=%s", guard->nickname);
+ }
+
+ format_iso_time_nospace(tbuf, guard->sampled_on_date);
+ smartlist_add_asprintf(result, "sampled_on=%s", tbuf);
+
+ if (guard->sampled_by_version) {
+ smartlist_add_asprintf(result, "sampled_by=%s",
+ guard->sampled_by_version);
+ }
+
+ if (guard->unlisted_since_date > 0) {
+ format_iso_time_nospace(tbuf, guard->unlisted_since_date);
+ smartlist_add_asprintf(result, "unlisted_since=%s", tbuf);
+ }
+
+ smartlist_add_asprintf(result, "listed=%d",
+ (int)guard->currently_listed);
+
+ if (guard->confirmed_idx >= 0) {
+ format_iso_time_nospace(tbuf, guard->confirmed_on_date);
+ smartlist_add_asprintf(result, "confirmed_on=%s", tbuf);
+
+ smartlist_add_asprintf(result, "confirmed_idx=%d", guard->confirmed_idx);
+ }
+
+ const double EPSILON = 1.0e-6;
+
+ /* Make a copy of the pathbias object, since we will want to update
+ some of its fields */
+ guard_pathbias_t *pb = tor_memdup(&guard->pb, sizeof(*pb));
+ pb->use_successes = pathbias_get_use_success_count(guard);
+ pb->successful_circuits_closed = pathbias_get_close_success_count(guard);
+
+ #define PB_FIELD(field) do { \
+ if (pb->field >= EPSILON) { \
+ smartlist_add_asprintf(result, "pb_" #field "=%f", pb->field); \
+ } \
+ } while (0)
+ PB_FIELD(use_attempts);
+ PB_FIELD(use_successes);
+ PB_FIELD(circ_attempts);
+ PB_FIELD(circ_successes);
+ PB_FIELD(successful_circuits_closed);
+ PB_FIELD(collapsed_circuits);
+ PB_FIELD(unusable_circuits);
+ PB_FIELD(timeouts);
+ tor_free(pb);
+#undef PB_FIELD
+
+ if (guard->extra_state_fields)
+ smartlist_add_strdup(result, guard->extra_state_fields);
+
+ char *joined = smartlist_join_strings(result, " ", 0, NULL);
+ SMARTLIST_FOREACH(result, char *, cp, tor_free(cp));
+ smartlist_free(result);
+
+ return joined;
+}
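+
+/* Purely illustrative example (hypothetical values): an encoded guard ends
+ * up in the state file as a single space-separated "Guard" line along the
+ * lines of
+ *
+ *   Guard in=default rsa_id=0123456789ABCDEF0123456789ABCDEF01234567
+ *     nickname=ExampleGuard sampled_on=2018-01-01T12:00:00
+ *     sampled_by=0.3.3.1 listed=1 confirmed_on=2018-01-05T00:00:00
+ *     confirmed_idx=0 pb_circ_attempts=5.000000 pb_circ_successes=5.000000
+ *
+ * (wrapped here for readability; the real value is one line).
+ * entry_guard_parse_from_state() below accepts such a line and preserves
+ * any keys it does not recognize. */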
+
+/**
+ * Given a string generated by entry_guard_encode_for_state(), parse it
+ * (if possible) and return an entry_guard_t object for it. Return NULL
+ * on complete failure.
+ */
+STATIC entry_guard_t *
+entry_guard_parse_from_state(const char *s)
+{
+ /* Unrecognized entries get put in here. */
+ smartlist_t *extra = smartlist_new();
+
+ /* These fields get parsed from the string. */
+ char *in = NULL;
+ char *rsa_id = NULL;
+ char *nickname = NULL;
+ char *sampled_on = NULL;
+ char *sampled_by = NULL;
+ char *unlisted_since = NULL;
+ char *listed = NULL;
+ char *confirmed_on = NULL;
+ char *confirmed_idx = NULL;
+ char *bridge_addr = NULL;
+
+ // pathbias
+ char *pb_use_attempts = NULL;
+ char *pb_use_successes = NULL;
+ char *pb_circ_attempts = NULL;
+ char *pb_circ_successes = NULL;
+ char *pb_successful_circuits_closed = NULL;
+ char *pb_collapsed_circuits = NULL;
+ char *pb_unusable_circuits = NULL;
+ char *pb_timeouts = NULL;
+
+ /* Split up the entries. Put the ones we know about in strings and the
+ * rest in "extra". */
+ {
+ smartlist_t *entries = smartlist_new();
+
+ strmap_t *vals = strmap_new(); // Maps keyword to location
+#define FIELD(f) \
+ strmap_set(vals, #f, &f);
+ FIELD(in);
+ FIELD(rsa_id);
+ FIELD(nickname);
+ FIELD(sampled_on);
+ FIELD(sampled_by);
+ FIELD(unlisted_since);
+ FIELD(listed);
+ FIELD(confirmed_on);
+ FIELD(confirmed_idx);
+ FIELD(bridge_addr);
+ FIELD(pb_use_attempts);
+ FIELD(pb_use_successes);
+ FIELD(pb_circ_attempts);
+ FIELD(pb_circ_successes);
+ FIELD(pb_successful_circuits_closed);
+ FIELD(pb_collapsed_circuits);
+ FIELD(pb_unusable_circuits);
+ FIELD(pb_timeouts);
+#undef FIELD
+
+ smartlist_split_string(entries, s, " ",
+ SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
+
+ SMARTLIST_FOREACH_BEGIN(entries, char *, entry) {
+ const char *eq = strchr(entry, '=');
+ if (!eq) {
+ smartlist_add(extra, entry);
+ continue;
+ }
+ char *key = tor_strndup(entry, eq-entry);
+ char **target = strmap_get(vals, key);
+ if (target == NULL || *target != NULL) {
+ /* unrecognized or already set */
+ smartlist_add(extra, entry);
+ tor_free(key);
+ continue;
+ }
+
+ *target = tor_strdup(eq+1);
+ tor_free(key);
+ tor_free(entry);
+ } SMARTLIST_FOREACH_END(entry);
+
+ smartlist_free(entries);
+ strmap_free(vals, NULL);
+ }
+
+ entry_guard_t *guard = tor_malloc_zero(sizeof(entry_guard_t));
+ guard->is_persistent = 1;
+
+ if (in == NULL) {
+ log_warn(LD_CIRC, "Guard missing 'in' field");
+ goto err;
+ }
+
+ guard->selection_name = in;
+ in = NULL;
+
+ if (rsa_id == NULL) {
+ log_warn(LD_CIRC, "Guard missing RSA ID field");
+ goto err;
+ }
+
+ /* Process the identity and nickname. */
+ if (base16_decode(guard->identity, sizeof(guard->identity),
+ rsa_id, strlen(rsa_id)) != DIGEST_LEN) {
+ log_warn(LD_CIRC, "Unable to decode guard identity %s", escaped(rsa_id));
+ goto err;
+ }
+
+ if (nickname) {
+ strlcpy(guard->nickname, nickname, sizeof(guard->nickname));
+ } else {
+ guard->nickname[0]='$';
+ base16_encode(guard->nickname+1, sizeof(guard->nickname)-1,
+ guard->identity, DIGEST_LEN);
+ }
+
+ if (bridge_addr) {
+ tor_addr_port_t res;
+ memset(&res, 0, sizeof(res));
+ int r = tor_addr_port_parse(LOG_WARN, bridge_addr,
+ &res.addr, &res.port, -1);
+ if (r == 0)
+ guard->bridge_addr = tor_memdup(&res, sizeof(res));
+ /* On error, we already warned. */
+ }
+
+ /* Process the various time fields. */
+
+#define HANDLE_TIME(field) do { \
+ if (field) { \
+ int r = parse_iso_time_nospace(field, &field ## _time); \
+ if (r < 0) { \
+ log_warn(LD_CIRC, "Unable to parse %s %s from guard", \
+ #field, escaped(field)); \
+ field##_time = -1; \
+ } \
+ } \
+ } while (0)
+
+ time_t sampled_on_time = 0;
+ time_t unlisted_since_time = 0;
+ time_t confirmed_on_time = 0;
+
+ HANDLE_TIME(sampled_on);
+ HANDLE_TIME(unlisted_since);
+ HANDLE_TIME(confirmed_on);
+
+ if (sampled_on_time <= 0)
+ sampled_on_time = approx_time();
+ if (unlisted_since_time < 0)
+ unlisted_since_time = 0;
+ if (confirmed_on_time < 0)
+ confirmed_on_time = 0;
+
+ #undef HANDLE_TIME
+
+ guard->sampled_on_date = sampled_on_time;
+ guard->unlisted_since_date = unlisted_since_time;
+ guard->confirmed_on_date = confirmed_on_time;
+
+ /* Take sampled_by_version verbatim. */
+ guard->sampled_by_version = sampled_by;
+ sampled_by = NULL; /* prevent free */
+
+ /* Listed is a boolean */
+ if (listed && strcmp(listed, "0"))
+ guard->currently_listed = 1;
+
+ /* The index is a nonnegative integer. */
+ guard->confirmed_idx = -1;
+ if (confirmed_idx) {
+ int ok=1;
+ long idx = tor_parse_long(confirmed_idx, 10, 0, INT_MAX, &ok, NULL);
+ if (! ok) {
+ log_warn(LD_GUARD, "Guard has invalid confirmed_idx %s",
+ escaped(confirmed_idx));
+ } else {
+ guard->confirmed_idx = (int)idx;
+ }
+ }
+
+ /* Anything we didn't recognize gets crammed together */
+ if (smartlist_len(extra) > 0) {
+ guard->extra_state_fields = smartlist_join_strings(extra, " ", 0, NULL);
+ }
+
+ /* initialize non-persistent fields */
+ guard->is_reachable = GUARD_REACHABLE_MAYBE;
+
+#define PB_FIELD(field) \
+ do { \
+ if (pb_ ## field) { \
+ int ok = 1; \
+ double r = tor_parse_double(pb_ ## field, 0.0, 1e9, &ok, NULL); \
+ if (! ok) { \
+ log_warn(LD_CIRC, "Guard has invalid pb_%s %s", \
+ #field, pb_ ## field); \
+ } else { \
+ guard->pb.field = r; \
+ } \
+ } \
+ } while (0)
+ PB_FIELD(use_attempts);
+ PB_FIELD(use_successes);
+ PB_FIELD(circ_attempts);
+ PB_FIELD(circ_successes);
+ PB_FIELD(successful_circuits_closed);
+ PB_FIELD(collapsed_circuits);
+ PB_FIELD(unusable_circuits);
+ PB_FIELD(timeouts);
+#undef PB_FIELD
+
+ pathbias_check_use_success_count(guard);
+ pathbias_check_close_success_count(guard);
+
+ /* We update everything on this guard later, after we've parsed
+ * everything. */
+
+ goto done;
+
+ err:
+ // only consider it an error if the guard state was totally unparseable.
+ entry_guard_free(guard);
+ guard = NULL;
+
+ done:
+ tor_free(in);
+ tor_free(rsa_id);
+ tor_free(nickname);
+ tor_free(sampled_on);
+ tor_free(sampled_by);
+ tor_free(unlisted_since);
+ tor_free(listed);
+ tor_free(confirmed_on);
+ tor_free(confirmed_idx);
+ tor_free(bridge_addr);
+ tor_free(pb_use_attempts);
+ tor_free(pb_use_successes);
+ tor_free(pb_circ_attempts);
+ tor_free(pb_circ_successes);
+ tor_free(pb_successful_circuits_closed);
+ tor_free(pb_collapsed_circuits);
+ tor_free(pb_unusable_circuits);
+ tor_free(pb_timeouts);
+
+ SMARTLIST_FOREACH(extra, char *, cp, tor_free(cp));
+ smartlist_free(extra);
+
+ return guard;
+}
+
+/**
+ * Replace the Guards entries in <b>state</b> with a list of all our sampled
+ * guards.
+ */
+static void
+entry_guards_update_guards_in_state(or_state_t *state)
+{
+ if (!guard_contexts)
+ return;
+ config_line_t *lines = NULL;
+ config_line_t **nextline = &lines;
+
+ SMARTLIST_FOREACH_BEGIN(guard_contexts, guard_selection_t *, gs) {
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ if (guard->is_persistent == 0)
+ continue;
+ *nextline = tor_malloc_zero(sizeof(config_line_t));
+ (*nextline)->key = tor_strdup("Guard");
+ (*nextline)->value = entry_guard_encode_for_state(guard);
+ nextline = &(*nextline)->next;
+ } SMARTLIST_FOREACH_END(guard);
+ } SMARTLIST_FOREACH_END(gs);
+
+ config_free_lines(state->Guard);
+ state->Guard = lines;
+}
+
+/**
+ * Replace our sampled guards from the Guards entries in <b>state</b>. Return 0
+ * on success, -1 on failure. (If <b>set</b> is false, replace nothing -- only
+ * check whether replacing would work.)
+ */
+static int
+entry_guards_load_guards_from_state(or_state_t *state, int set)
+{
+ const config_line_t *line = state->Guard;
+ int n_errors = 0;
+
+ if (!guard_contexts)
+ guard_contexts = smartlist_new();
+
+ /* Wipe all our existing guard info. (we shouldn't have any, but
+ * let's be safe.) */
+ if (set) {
+ SMARTLIST_FOREACH_BEGIN(guard_contexts, guard_selection_t *, gs) {
+ guard_selection_free(gs);
+ if (curr_guard_context == gs)
+ curr_guard_context = NULL;
+ SMARTLIST_DEL_CURRENT(guard_contexts, gs);
+ } SMARTLIST_FOREACH_END(gs);
+ }
+
+ for ( ; line != NULL; line = line->next) {
+ entry_guard_t *guard = entry_guard_parse_from_state(line->value);
+ if (guard == NULL) {
+ ++n_errors;
+ continue;
+ }
+ tor_assert(guard->selection_name);
+ if (!strcmp(guard->selection_name, "legacy")) {
+ ++n_errors;
+ entry_guard_free(guard);
+ continue;
+ }
+
+ if (set) {
+ guard_selection_t *gs;
+ gs = get_guard_selection_by_name(guard->selection_name,
+ GS_TYPE_INFER, 1);
+ tor_assert(gs);
+ smartlist_add(gs->sampled_entry_guards, guard);
+ guard->in_selection = gs;
+ } else {
+ entry_guard_free(guard);
+ }
+ }
+
+ if (set) {
+ SMARTLIST_FOREACH_BEGIN(guard_contexts, guard_selection_t *, gs) {
+ entry_guards_update_all(gs);
+ } SMARTLIST_FOREACH_END(gs);
+ }
+ return n_errors ? -1 : 0;
+}
+
+/** If <b>digest</b> matches the identity of any node in the
+ * entry_guards list for the provided guard selection state,
+ * return that guard. Else return NULL. */
+entry_guard_t *
+entry_guard_get_by_id_digest_for_guard_selection(guard_selection_t *gs,
+ const char *digest)
+{
+ return get_sampled_guard_with_id(gs, (const uint8_t*)digest);
+}
+
+/** Return the node_t associated with a single entry_guard_t. May
+ * return NULL if the guard is not currently in the consensus. */
+const node_t *
+entry_guard_find_node(const entry_guard_t *guard)
+{
+ tor_assert(guard);
+ return node_get_by_id(guard->identity);
+}
+
+/** If <b>digest</b> matches the identity of any node in the
+ * entry_guards list for the default guard selection state,
+ * return that guard. Else return NULL. */
+entry_guard_t *
+entry_guard_get_by_id_digest(const char *digest)
+{
+ return entry_guard_get_by_id_digest_for_guard_selection(
+ get_guard_selection_info(), digest);
+}
+
+/** We are about to connect to a bridge with identity <b>digest</b> to fetch
+ * its descriptor. Create a new guard state for this connection and return
+ * it. */
+circuit_guard_state_t *
+get_guard_state_for_bridge_desc_fetch(const char *digest)
+{
+ circuit_guard_state_t *guard_state = NULL;
+ entry_guard_t *guard = NULL;
+
+ guard = entry_guard_get_by_id_digest_for_guard_selection(
+ get_guard_selection_info(), digest);
+ if (!guard) {
+ return NULL;
+ }
+
+ /* Update the guard last_tried_to_connect time since it's checked by the
+ * guard subsystem. */
+ guard->last_tried_to_connect = approx_time();
+
+ /* Create the guard state */
+ guard_state = circuit_guard_state_new(guard,
+ GUARD_CIRC_STATE_USABLE_ON_COMPLETION,
+ NULL);
+
+ return guard_state;
+}
+
+/** Release all storage held by <b>e</b>. */
+STATIC void
+entry_guard_free_(entry_guard_t *e)
+{
+ if (!e)
+ return;
+ entry_guard_handles_clear(e);
+ tor_free(e->sampled_by_version);
+ tor_free(e->extra_state_fields);
+ tor_free(e->selection_name);
+ tor_free(e->bridge_addr);
+ tor_free(e);
+}
+
+/** Return 0 if we're fine adding arbitrary routers out of the
+ * directory to our entry guard list, or return 1 if we have a
+ * list already and we must stick to it.
+ */
+int
+entry_list_is_constrained(const or_options_t *options)
+{
+ // XXXX #21425 look at the current selection.
+ if (options->EntryNodes)
+ return 1;
+ if (options->UseBridges)
+ return 1;
+ return 0;
+}
+
+/** Return the number of bridges that have descriptors that are marked with
+ * purpose 'bridge' and are running. If use_maybe_reachable is
+ * true, include bridges that might be reachable in the count.
+ * Otherwise, only include bridges that have recently been
+ * found running in the count.
+ *
+ * We use this function to decide if we're ready to start building
+ * circuits through our bridges, or if we need to wait until the
+ * directory "server/authority" requests finish. */
+MOCK_IMPL(int,
+num_bridges_usable,(int use_maybe_reachable))
+{
+ int n_options = 0;
+
+ if (BUG(!get_options()->UseBridges)) {
+ return 0;
+ }
+ guard_selection_t *gs = get_guard_selection_info();
+ if (BUG(gs->type != GS_TYPE_BRIDGE)) {
+ return 0;
+ }
+
+ SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
+ /* Definitely not usable */
+ if (guard->is_reachable == GUARD_REACHABLE_NO)
+ continue;
+ /* If we want to be really sure the bridges will work, skip maybes */
+ if (!use_maybe_reachable && guard->is_reachable == GUARD_REACHABLE_MAYBE)
+ continue;
+ if (tor_digest_is_zero(guard->identity))
+ continue;
+ const node_t *node = node_get_by_id(guard->identity);
+ if (node && node->ri)
+ ++n_options;
+ } SMARTLIST_FOREACH_END(guard);
+
+ return n_options;
+}
+
+/** Check the pathbias use success count of <b>node</b> and disable it if it
+ * goes over our thresholds. */
+static void
+pathbias_check_use_success_count(entry_guard_t *node)
+{
+ const or_options_t *options = get_options();
+ const double EPSILON = 1.0e-9;
+
+ /* Note: We rely on the < comparison here to allow us to set a 0
+ * rate and disable the feature entirely. If refactoring, don't
+ * change to <= */
+ if (node->pb.use_attempts > EPSILON &&
+ pathbias_get_use_success_count(node)/node->pb.use_attempts
+ < pathbias_get_extreme_use_rate(options) &&
+ pathbias_get_dropguards(options)) {
+ node->pb.path_bias_disabled = 1;
+ log_info(LD_GENERAL,
+ "Path use bias is too high (%f/%f); disabling node %s",
+ pathbias_get_use_success_count(node), node->pb.use_attempts,
+ node->nickname);
+ }
+}
+
+/** Check the pathbias close count of <b>node</b> and disable it if it goes
+ * over our thresholds. */
+static void
+pathbias_check_close_success_count(entry_guard_t *node)
+{
+ const or_options_t *options = get_options();
+ const double EPSILON = 1.0e-9;
+
+ /* Note: We rely on the < comparison here to allow us to set a 0
+ * rate and disable the feature entirely. If refactoring, don't
+ * change to <= */
+ if (node->pb.circ_attempts > EPSILON &&
+ pathbias_get_close_success_count(node)/node->pb.circ_attempts
+ < pathbias_get_extreme_rate(options) &&
+ pathbias_get_dropguards(options)) {
+ node->pb.path_bias_disabled = 1;
+ log_info(LD_GENERAL,
+ "Path bias is too high (%f/%f); disabling node %s",
+ node->pb.circ_successes, node->pb.circ_attempts,
+ node->nickname);
+ }
+}
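+
+/* Illustrative numbers for the check above: with pb.circ_attempts == 100.0,
+ * a close-success count of 40.0, and an extreme rate of 0.60, the ratio
+ * 0.40 falls below the threshold, so the guard is disabled -- but only if
+ * pathbias_get_dropguards() reports that dropping guards is enabled. */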
+
+/** Parse <b>state</b> and learn about the entry guards it describes.
+ * If <b>set</b> is true, and there are no errors, replace the guard
+ * list in the default guard selection context with what we find.
+ * On success, return 0. On failure, alloc into *<b>msg</b> a string
+ * describing the error, and return -1.
+ */
+int
+entry_guards_parse_state(or_state_t *state, int set, char **msg)
+{
+ entry_guards_dirty = 0;
+ int r1 = entry_guards_load_guards_from_state(state, set);
+ entry_guards_dirty = 0;
+
+ if (r1 < 0) {
+ if (msg && *msg == NULL) {
+ *msg = tor_strdup("parsing error");
+ }
+ return -1;
+ }
+ return 0;
+}
+
+/** How long will we let a change in our guard nodes stay un-saved
+ * when we are trying to avoid disk writes? */
+#define SLOW_GUARD_STATE_FLUSH_TIME 600
+/** How long will we let a change in our guard nodes stay un-saved
+ * when we are not trying to avoid disk writes? */
+#define FAST_GUARD_STATE_FLUSH_TIME 30
+
+/** Our list of entry guards has changed for a particular guard selection
+ * context, or some element of one of our entry guards has changed for one.
+ * Write the changes to disk within the next few minutes.
+ */
+void
+entry_guards_changed_for_guard_selection(guard_selection_t *gs)
+{
+ time_t when;
+
+ tor_assert(gs != NULL);
+
+ entry_guards_dirty = 1;
+
+ if (get_options()->AvoidDiskWrites)
+ when = time(NULL) + SLOW_GUARD_STATE_FLUSH_TIME;
+ else
+ when = time(NULL) + FAST_GUARD_STATE_FLUSH_TIME;
+
+ /* or_state_save() will call entry_guards_update_state() and
+ entry_guards_update_guards_in_state()
+ */
+ or_state_mark_dirty(get_or_state(), when);
+}
+
+/** Our list of entry guards has changed for the default guard selection
+ * context, or some element of one of our entry guards has changed. Write
+ * the changes to disk within the next few minutes.
+ */
+void
+entry_guards_changed(void)
+{
+ entry_guards_changed_for_guard_selection(get_guard_selection_info());
+}
+
+/** If the entry guard info has not changed, do nothing and return.
+ * Otherwise, free the EntryGuards piece of <b>state</b> and create
+ * a new one out of the global entry_guards list, and then mark
+ * <b>state</b> dirty so it will get saved to disk.
+ */
+void
+entry_guards_update_state(or_state_t *state)
+{
+ entry_guards_dirty = 0;
+
+ // Handles all guard info.
+ entry_guards_update_guards_in_state(state);
+
+ entry_guards_dirty = 0;
+
+ if (!get_options()->AvoidDiskWrites)
+ or_state_mark_dirty(get_or_state(), 0);
+ entry_guards_dirty = 0;
+}
+
+/** Return true iff the circuit's guard can succeed, that is, can be used. */
+int
+entry_guard_could_succeed(const circuit_guard_state_t *guard_state)
+{
+ if (!guard_state) {
+ return 0;
+ }
+
+ entry_guard_t *guard = entry_guard_handle_get(guard_state->guard);
+ if (!guard || BUG(guard->in_selection == NULL)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Format a single entry guard in the format expected by the controller.
+ * Return a newly allocated string.
+ */
+STATIC char *
+getinfo_helper_format_single_entry_guard(const entry_guard_t *e)
+{
+ const char *status = NULL;
+ time_t when = 0;
+ const node_t *node;
+ char tbuf[ISO_TIME_LEN+1];
+ char nbuf[MAX_VERBOSE_NICKNAME_LEN+1];
+
+ /* This is going to be a bit tricky, since the status
+ * codes weren't really intended for prop271 guards.
+ *
+ * XXXX use a more appropriate format for exporting this information
+ */
+ if (e->confirmed_idx < 0) {
+ status = "never-connected";
+ } else if (! e->currently_listed) {
+ when = e->unlisted_since_date;
+ status = "unusable";
+ } else if (! e->is_filtered_guard) {
+ status = "unusable";
+ } else if (e->is_reachable == GUARD_REACHABLE_NO) {
+ when = e->failing_since;
+ status = "down";
+ } else {
+ status = "up";
+ }
+
+ node = entry_guard_find_node(e);
+ if (node) {
+ node_get_verbose_nickname(node, nbuf);
+ } else {
+ nbuf[0] = '$';
+ base16_encode(nbuf+1, sizeof(nbuf)-1, e->identity, DIGEST_LEN);
+ /* e->nickname field is not very reliable if we don't know about
+ * this router any longer; don't include it. */
+ }
+
+ char *result = NULL;
+ if (when) {
+ format_iso_time(tbuf, when);
+ tor_asprintf(&result, "%s %s %s\n", nbuf, status, tbuf);
+ } else {
+ tor_asprintf(&result, "%s %s\n", nbuf, status);
+ }
+ return result;
+}
+
+/** If <b>question</b> is the string "entry-guards", then dump
+ * to *<b>answer</b> a newly allocated string describing all of
+ * the nodes in the global entry_guards list. See control-spec.txt
+ * for details.
+ * For backward compatibility, we also handle the string "helper-nodes".
+ *
+ * XXX this should be totally redesigned after prop 271 too, and that's
+ * going to take some control spec work.
+ * */
+int
+getinfo_helper_entry_guards(control_connection_t *conn,
+ const char *question, char **answer,
+ const char **errmsg)
+{
+ guard_selection_t *gs = get_guard_selection_info();
+
+ tor_assert(gs != NULL);
+
+ (void) conn;
+ (void) errmsg;
+
+ if (!strcmp(question,"entry-guards") ||
+ !strcmp(question,"helper-nodes")) {
+ const smartlist_t *guards;
+ guards = gs->sampled_entry_guards;
+
+ smartlist_t *sl = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(guards, const entry_guard_t *, e) {
+ char *cp = getinfo_helper_format_single_entry_guard(e);
+ smartlist_add(sl, cp);
+ } SMARTLIST_FOREACH_END(e);
+ *answer = smartlist_join_strings(sl, "", 0, NULL);
+ SMARTLIST_FOREACH(sl, char *, c, tor_free(c));
+ smartlist_free(sl);
+ }
+ return 0;
+}
+
+/* Given the original bandwidth of a guard and its guardfraction,
+ * calculate how much bandwidth the guard should have as a guard and
+ * as a non-guard.
+ *
+ * Quoting from proposal 236:
+ *
+ * Let Wpf denote the weight from the 'bandwidth-weights' line a
+ * client would apply to N for position p if it had the guard
+ * flag, Wpn the weight if it did not have the guard flag, and B the
+ * measured bandwidth of N in the consensus. Then instead of choosing
+ * N for position p proportionally to Wpf*B or Wpn*B, clients should
+ * choose N proportionally to F*Wpf*B + (1-F)*Wpn*B.
+ *
+ * This function fills the <b>guardfraction_bw</b> structure. It sets
+ * <b>guard_bw</b> to F*B and <b>non_guard_bw</b> to (1-F)*B.
+ */
+void
+guard_get_guardfraction_bandwidth(guardfraction_bandwidth_t *guardfraction_bw,
+ int orig_bandwidth,
+ uint32_t guardfraction_percentage)
+{
+ double guardfraction_fraction;
+
+ /* Turn the percentage into a fraction. */
+ tor_assert(guardfraction_percentage <= 100);
+ guardfraction_fraction = guardfraction_percentage / 100.0;
+
+ long guard_bw = tor_lround(guardfraction_fraction * orig_bandwidth);
+ tor_assert(guard_bw <= INT_MAX);
+
+ guardfraction_bw->guard_bw = (int) guard_bw;
+
+ guardfraction_bw->non_guard_bw = orig_bandwidth - (int) guard_bw;
+}
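+
+/* Worked example (illustrative numbers): with orig_bandwidth == 10000 and
+ * guardfraction_percentage == 25, F is 0.25, so guard_bw is set to 2500
+ * and non_guard_bw to 7500; the two components always add back up to
+ * orig_bandwidth. */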
+
+/** Helper: Update the status of all entry guards, in whatever algorithm
+ * is used. Return true if we should stop using all previously generated
+ * circuits, by calling circuit_mark_all_unused_circs() and
+ * circuit_mark_all_dirty_circs_as_unusable().
+ */
+int
+guards_update_all(void)
+{
+ int mark_circuits = 0;
+ if (update_guard_selection_choice(get_options()))
+ mark_circuits = 1;
+
+ tor_assert(curr_guard_context);
+
+ if (entry_guards_update_all(curr_guard_context))
+ mark_circuits = 1;
+
+ return mark_circuits;
+}
+
+/** Helper: pick a guard for a circuit, with whatever algorithm is
+ used. */
+const node_t *
+guards_choose_guard(cpath_build_state_t *state,
+ uint8_t purpose,
+ circuit_guard_state_t **guard_state_out)
+{
+ const node_t *r = NULL;
+ const uint8_t *exit_id = NULL;
+ entry_guard_restriction_t *rst = NULL;
+
+ /* Only apply restrictions if we have a specific exit node in mind, and only
+ * if we are not doing vanguard circuits: we don't want to apply guard
+ * restrictions to vanguard circuits. */
+ if (state && !circuit_should_use_vanguards(purpose) &&
+ (exit_id = build_state_get_exit_rsa_id(state))) {
+ /* We're building to a targeted exit node, so that node can't be
+ * chosen as our guard for this circuit. Remember that fact in a
+ * restriction. */
+ rst = guard_create_exit_restriction(exit_id);
+ tor_assert(rst);
+ }
+ if (entry_guard_pick_for_circuit(get_guard_selection_info(),
+ GUARD_USAGE_TRAFFIC,
+ rst,
+ &r,
+ guard_state_out) < 0) {
+ tor_assert(r == NULL);
+ }
+ return r;
+}
+
+/** Remove all currently listed entry guards for a given guard selection
+ * context. This frees and replaces <b>gs</b>, so don't use <b>gs</b>
+ * after calling this function. */
+void
+remove_all_entry_guards_for_guard_selection(guard_selection_t *gs)
+{
+ // This function shouldn't exist. XXXX
+ tor_assert(gs != NULL);
+ char *old_name = tor_strdup(gs->name);
+ guard_selection_type_t old_type = gs->type;
+
+ SMARTLIST_FOREACH(gs->sampled_entry_guards, entry_guard_t *, entry, {
+ control_event_guard(entry->nickname, entry->identity, "DROPPED");
+ });
+
+ if (gs == curr_guard_context) {
+ curr_guard_context = NULL;
+ }
+
+ smartlist_remove(guard_contexts, gs);
+ guard_selection_free(gs);
+
+ gs = get_guard_selection_by_name(old_name, old_type, 1);
+ entry_guards_changed_for_guard_selection(gs);
+ tor_free(old_name);
+}
+
+/** Remove all currently listed entry guards, so new ones will be chosen.
+ *
+ * XXXX This function shouldn't exist -- it's meant to support the DROPGUARDS
+ * command, which is deprecated.
+ */
+void
+remove_all_entry_guards(void)
+{
+ remove_all_entry_guards_for_guard_selection(get_guard_selection_info());
+}
+
+/** Helper: pick a directory guard, with whatever algorithm is used. */
+const node_t *
+guards_choose_dirguard(uint8_t dir_purpose,
+ circuit_guard_state_t **guard_state_out)
+{
+ const node_t *r = NULL;
+ entry_guard_restriction_t *rst = NULL;
+
+ /* If we are fetching microdescs, don't query outdated dirservers. */
+ if (dir_purpose == DIR_PURPOSE_FETCH_MICRODESC) {
+ rst = guard_create_dirserver_md_restriction();
+ }
+
+ if (entry_guard_pick_for_circuit(get_guard_selection_info(),
+ GUARD_USAGE_DIRGUARD,
+ rst,
+ &r,
+ guard_state_out) < 0) {
+ tor_assert(r == NULL);
+ }
+ return r;
+}
+
+/**
+ * If we're running with a constrained guard set, then mark our primary
+ * guards as maybe-reachable so that we will retry them. Return 1 if we
+ * do; 0 if we don't.
+ */
+int
+guards_retry_optimistic(const or_options_t *options)
+{
+ if (! entry_list_is_constrained(options))
+ return 0;
+
+ mark_primary_guards_maybe_reachable(get_guard_selection_info());
+
+ return 1;
+}
+
+/**
+ * Check if we are missing any crucial dirinfo for the guard subsystem to
+ * work. Return NULL if everything went well, otherwise return a newly
+ * allocated string with an informative error message. In the latter case, use
+ * the general descriptor information <b>using_mds</b>, <b>num_present</b> and
+ * <b>num_usable</b> to improve the error message. */
+char *
+guard_selection_get_err_str_if_dir_info_missing(guard_selection_t *gs,
+ int using_mds,
+ int num_present, int num_usable)
+{
+ if (!gs->primary_guards_up_to_date)
+ entry_guards_update_primary(gs);
+
+ char *ret_str = NULL;
+ int n_missing_descriptors = 0;
+ int n_considered = 0;
+ int num_primary_to_check;
+
+ /* We want to check for the descriptor of at least the first two primary
+ * guards in our list, since these are the guards that we typically use for
+ * circuits. */
+ num_primary_to_check = get_n_primary_guards_to_use(GUARD_USAGE_TRAFFIC);
+ num_primary_to_check++;
+
+ SMARTLIST_FOREACH_BEGIN(gs->primary_entry_guards, entry_guard_t *, guard) {
+ entry_guard_consider_retry(guard);
+ if (guard->is_reachable == GUARD_REACHABLE_NO)
+ continue;
+ n_considered++;
+ if (!guard_has_descriptor(guard))
+ n_missing_descriptors++;
+ if (n_considered >= num_primary_to_check)
+ break;
+ } SMARTLIST_FOREACH_END(guard);
+
+ /* If we are not missing any descriptors, return NULL. */
+ if (!n_missing_descriptors) {
+ return NULL;
+ }
+
+ /* otherwise return a helpful error string */
+ tor_asprintf(&ret_str, "We're missing descriptors for %d/%d of our "
+ "primary entry guards (total %sdescriptors: %d/%d).",
+ n_missing_descriptors, num_primary_to_check,
+ using_mds?"micro":"", num_present, num_usable);
+
+ return ret_str;
+}
+
+/** As guard_selection_get_err_str_if_dir_info_missing(), but uses
+ * the default guard selection. */
+char *
+entry_guards_get_err_str_if_dir_info_missing(int using_mds,
+ int num_present, int num_usable)
+{
+ return guard_selection_get_err_str_if_dir_info_missing(
+ get_guard_selection_info(),
+ using_mds,
+ num_present, num_usable);
+}
+
+/** Free one guard selection context */
+STATIC void
+guard_selection_free_(guard_selection_t *gs)
+{
+ if (!gs) return;
+
+ tor_free(gs->name);
+
+ if (gs->sampled_entry_guards) {
+ SMARTLIST_FOREACH(gs->sampled_entry_guards, entry_guard_t *, e,
+ entry_guard_free(e));
+ smartlist_free(gs->sampled_entry_guards);
+ gs->sampled_entry_guards = NULL;
+ }
+
+ smartlist_free(gs->confirmed_entry_guards);
+ smartlist_free(gs->primary_entry_guards);
+
+ tor_free(gs);
+}
+
+/** Release all storage held by the list of entry guards and related
+ * memory structs. */
+void
+entry_guards_free_all(void)
+{
+ /* Null out the default */
+ curr_guard_context = NULL;
+ /* Free all the guard contexts */
+ if (guard_contexts != NULL) {
+ SMARTLIST_FOREACH_BEGIN(guard_contexts, guard_selection_t *, gs) {
+ guard_selection_free(gs);
+ } SMARTLIST_FOREACH_END(gs);
+ smartlist_free(guard_contexts);
+ guard_contexts = NULL;
+ }
+ circuit_build_times_free_timeouts(get_circuit_build_times_mutable());
+}
diff --git a/src/feature/client/entrynodes.h b/src/feature/client/entrynodes.h
new file mode 100644
index 0000000000..5f9b5bdcba
--- /dev/null
+++ b/src/feature/client/entrynodes.h
@@ -0,0 +1,639 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file entrynodes.h
+ * \brief Header file for entrynodes.c.
+ **/
+
+#ifndef TOR_ENTRYNODES_H
+#define TOR_ENTRYNODES_H
+
+#include "lib/container/handles.h"
+
+/* Forward declare for guard_selection_t; entrynodes.c has the real struct */
+typedef struct guard_selection_s guard_selection_t;
+
+/* Forward declare for entry_guard_t; the real declaration is private. */
+typedef struct entry_guard_t entry_guard_t;
+
+/* Forward declaration for circuit_guard_state_t; the real declaration is
+ private. */
+typedef struct circuit_guard_state_t circuit_guard_state_t;
+
+/* Forward declaration for entry_guard_restriction_t; the real declaration is
+ private. */
+typedef struct entry_guard_restriction_t entry_guard_restriction_t;
+
+/* Information about a guard's pathbias status.
+ * These fields are used in circpathbias.c to try to detect entry
+ * nodes that are failing circuits at a suspicious frequency.
+ */
+typedef struct guard_pathbias_t {
+ unsigned int path_bias_noticed : 1; /**< Did we alert the user about path
+ * bias for this node already? */
+ unsigned int path_bias_warned : 1; /**< Did we alert the user about path bias
+ * for this node already? */
+ unsigned int path_bias_extreme : 1; /**< Did we alert the user about path
+ * bias for this node already? */
+ unsigned int path_bias_disabled : 1; /**< Have we disabled this node because
+ * of path bias issues? */
+ unsigned int path_bias_use_noticed : 1; /**< Did we alert the user about path
+ * use bias for this node already? */
+ unsigned int path_bias_use_extreme : 1; /**< Did we alert the user about path
+ * use bias for this node already? */
+
+ double circ_attempts; /**< Number of circuits this guard has "attempted" */
+ double circ_successes; /**< Number of successfully built circuits using
+ * this guard as first hop. */
+ double successful_circuits_closed; /**< Number of circuits that carried
+ * streams successfully. */
+ double collapsed_circuits; /**< Number of fully built circuits that were
+ * remotely closed before any streams were
+ * attempted. */
+ double unusable_circuits; /**< Number of circuits for which streams were
+ * attempted, but none succeeded. */
+ double timeouts; /**< Number of 'right-censored' circuit timeouts for this
+ * guard. */
+ double use_attempts; /**< Number of circuits we tried to use with streams */
+ double use_successes; /**< Number of successfully used circuits using
+ * this guard as first hop. */
+} guard_pathbias_t;
+
+#if defined(ENTRYNODES_PRIVATE)
+#include "lib/crypt_ops/crypto_ed25519.h"
+
+/**
+ * @name values for entry_guard_t.is_reachable.
+ *
+ * See entry_guard_t.is_reachable for more information.
+ */
+/**@{*/
+#define GUARD_REACHABLE_NO 0
+#define GUARD_REACHABLE_YES 1
+#define GUARD_REACHABLE_MAYBE 2
+/**@}*/
+
+/** An entry_guard_t represents our information about a chosen long-term
+ * first hop, known as a "helper" node in the literature. We can't just
+ * use a node_t, since we want to remember these even when we
+ * don't have any directory info. */
+struct entry_guard_t {
+ HANDLE_ENTRY(entry_guard, entry_guard_t);
+
+ char nickname[MAX_HEX_NICKNAME_LEN+1];
+ char identity[DIGEST_LEN];
+ ed25519_public_key_t ed_id;
+
+ /**
+ * @name new guard selection algorithm fields.
+ *
+ * Only the new (prop271) algorithm uses these. For a more full
+ * description of the algorithm, see the module documentation for
+ * entrynodes.c
+ */
+ /**@{*/
+
+ /* == Persistent fields, present for all sampled guards. */
+ /** When was this guard added to the sample? */
+ time_t sampled_on_date;
+ /** Since what date has this guard been "unlisted"? A guard counts as
+ * unlisted if we have a live consensus that does not include it, or
+ * if we have a live consensus that does not include it as a usable
+ * guard. This field is zero when the guard is listed. */
+ time_t unlisted_since_date; // can be zero
+ /** What version of Tor added this guard to the sample? */
+ char *sampled_by_version;
+ /** Is this guard listed right now? If this is not set, then
+ * unlisted_since_date should be set too. */
+ unsigned currently_listed : 1;
+
+ /* == Persistent fields, for confirmed guards only */
+ /** When was this guard confirmed? (That is, when did we first use it
+ * successfully and decide to keep it?) This field is zero if this is not a
+ * confirmed guard. */
+ time_t confirmed_on_date; /* 0 if not confirmed */
+ /**
+ * In what order was this guard confirmed? Guards with lower indices
+ * appear earlier on the confirmed list. If the confirmed list is compacted,
+ * this field corresponds to the index of this guard on the confirmed list.
+ *
+ * This field is set to -1 if this guard is not confirmed.
+ */
+ int confirmed_idx; /* -1 if not confirmed; otherwise the order that this
+ * item should occur in the CONFIRMED_GUARDS ordered
+ * list */
+
+ /**
+ * Which selection does this guard belong to?
+ */
+ char *selection_name;
+
+ /** Bridges only: address of the bridge. */
+ tor_addr_port_t *bridge_addr;
+
+ /* ==== Non-persistent fields. */
+ /* == These are used by sampled guards */
+ /** When did we last decide to try using this guard for a circuit? 0 for
+ * "not since we started up." */
+ time_t last_tried_to_connect;
+ /** How reachable do we consider this guard to be? One of
+ * GUARD_REACHABLE_NO, GUARD_REACHABLE_YES, or GUARD_REACHABLE_MAYBE. */
+ unsigned is_reachable : 2;
+ /** Boolean: true iff this guard is pending. A pending guard is one
+ * that we have an in-progress circuit through, and which we do not plan
+ * to try again until it either succeeds or fails. Primary guards can
+ * never be pending. */
+ unsigned is_pending : 1;
+ /** If true, don't write this guard to disk. (Used for bridges with unknown
+ * identities) */
+ unsigned is_persistent : 1;
+ /** When did we get the earliest connection failure for this guard?
+ * We clear this field on a successful connect. We do _not_ clear it
+ * when we mark the guard as "MAYBE" reachable.
+ */
+ time_t failing_since;
+
+ /* == Set inclusion flags. */
+ /** If true, this guard is in the filtered set. The filtered set includes
+ * all sampled guards that our configuration allows us to use. */
+ unsigned is_filtered_guard : 1;
+ /** If true, this guard is in the usable filtered set. The usable filtered
+ * set includes all filtered guards that are not believed to be
+ * unreachable. (That is, those for which is_reachable is not
+ * GUARD_REACHABLE_NO) */
+ unsigned is_usable_filtered_guard : 1;
+ unsigned is_primary:1;
+
+ /** This string holds any fields that we are maintaining because
+ * we saw them in the state, even if we don't understand them. */
+ char *extra_state_fields;
+
+ /** Backpointer to the guard selection that this guard belongs to.
+ * The entry_guard_t must never outlive its guard_selection. */
+ guard_selection_t *in_selection;
+ /**@}*/
+
+ /** Path bias information for this guard. */
+ guard_pathbias_t pb;
+};
+
+/**
+ * Possible rules for a guard selection to follow
+ */
+typedef enum guard_selection_type_t {
+ /** Infer the type of this selection from its name. */
+ GS_TYPE_INFER=0,
+ /** Use the normal guard selection algorithm, taking our sample from the
+ * complete list of guards in the consensus. */
+ GS_TYPE_NORMAL=1,
+ /** Use the normal guard selection algorithm, taking our sample from the
+ * configured bridges, and allowing it to grow as large as all the configured
+ * bridges */
+ GS_TYPE_BRIDGE,
+ /** Use the normal guard selection algorithm, taking our sample from the
+ * set of filtered nodes. */
+ GS_TYPE_RESTRICTED,
+} guard_selection_type_t;
+
+/**
+ * All of the context for guard selection on a particular client.
+ *
+ * We maintain multiple guard selection contexts for a client, depending
+ * on aspects of its current configuration -- whether an extremely
+ * restrictive EntryNodes is used, whether UseBridges is enabled, and so
+ * on.
+ *
+ * See the module documentation for entrynodes.c for more information
+ * about guard selection algorithms.
+ */
+struct guard_selection_s {
+ /**
+ * The name for this guard-selection object. (Must not contain spaces).
+ */
+ char *name;
+
+ /**
+ * What rules does this guard-selection object follow?
+ */
+ guard_selection_type_t type;
+
+ /**
+ * A value of 1 means that primary_entry_guards is up-to-date with respect to
+ * the consensus and status info that we currently have; 0 means we need to
+ * recalculate it before using primary_entry_guards or the is_primary flag on
+ * any guard.
+ */
+ int primary_guards_up_to_date;
+
+ /**
+ * A list of the sampled entry guards, as entry_guard_t structures.
+ * Not in any particular order. When we 'sample' a guard, we are
+ * noting it as a possible guard to pick in the future. The use of
+ * sampling here prevents us from being forced by an attacker to try
+ * every guard on the network. This list is persistent.
+ */
+ smartlist_t *sampled_entry_guards;
+
+ /**
+ * Ordered list (from highest to lowest priority) of guards that we
+ * have successfully contacted and decided to use. Every member of
+ * this list is a member of sampled_entry_guards. Every member should
+ * have confirmed_on_date set, and have confirmed_idx greater than
+ * any earlier member of the list.
+ *
+ * This list is persistent. It is a subset of the elements in
+ * sampled_entry_guards, and its pointers point to elements of
+ * sampled_entry_guards.
+ */
+ smartlist_t *confirmed_entry_guards;
+
+ /**
+ * Ordered list (from highest to lowest priority) of guards that we
+ * are willing to use the most happily. These guards may or may not
+ * be confirmed yet. If we can use one of these guards, we are
+ * probably not on a network that is trying to restrict our guard
+ * choices.
+ *
+ * This list is a subset of the elements in
+ * sampled_entry_guards, and its pointers point to elements of
+ * sampled_entry_guards.
+ */
+ smartlist_t *primary_entry_guards;
+
+ /** When did we last successfully build a circuit or use a circuit? */
+ time_t last_time_on_internet;
+
+ /** What confirmed_idx value should the next-added member of
+ * confirmed_entry_guards receive? */
+ int next_confirmed_idx;
+
+};
+
+struct entry_guard_handle_t;
+
+/** Types of restrictions we impose when picking guard nodes */
+typedef enum guard_restriction_type_t {
+ /* Don't pick the same guard node as our exit node (or its family) */
+ RST_EXIT_NODE = 0,
+ /* Don't pick dirguards that have previously shown to be outdated */
+ RST_OUTDATED_MD_DIRSERVER = 1
+} guard_restriction_type_t;
+
+/**
+ * A restriction to remember which entry guards are off-limits for a given
+ * circuit.
+ *
+ * Note: This mechanism is NOT for recording which guards are never to be
+ * used: only which guards cannot be used on <em>one particular circuit</em>.
+ */
+struct entry_guard_restriction_t {
+ /* What type of restriction are we imposing? */
+ guard_restriction_type_t type;
+
+ /* In case of restriction type RST_EXIT_NODE, the guard's RSA identity
+ * digest must not equal this; and it must not be in the same family as any
+ * node with this digest. */
+ uint8_t exclude_id[DIGEST_LEN];
+};
+
+/**
+ * Per-circuit state to track whether we'll be able to use the circuit.
+ */
+struct circuit_guard_state_t {
+ /** Handle to the entry guard object for this circuit. */
+ struct entry_guard_handle_t *guard;
+ /** The time at which <b>state</b> last changed. */
+ time_t state_set_at;
+ /** One of GUARD_CIRC_STATE_* */
+ uint8_t state;
+
+ /**
+ * A set of restrictions that were placed on this guard when we selected it
+ * for this particular circuit. We need to remember the restrictions here,
+ * since any guard that breaks these restrictions will not block this
+ * circuit from becoming COMPLETE.
+ */
+ entry_guard_restriction_t *restrictions;
+};
+#endif /* defined(ENTRYNODES_PRIVATE) */
+
+/* Common entry points for old and new guard code */
+int guards_update_all(void);
+const node_t *guards_choose_guard(cpath_build_state_t *state,
+ uint8_t purpose,
+ circuit_guard_state_t **guard_state_out);
+const node_t *guards_choose_dirguard(uint8_t dir_purpose,
+ circuit_guard_state_t **guard_state_out);
+
+#if 1
+/* XXXX NM I would prefer that all of this stuff be private to
+ * entrynodes.c. */
+entry_guard_t *entry_guard_get_by_id_digest_for_guard_selection(
+ guard_selection_t *gs, const char *digest);
+entry_guard_t *entry_guard_get_by_id_digest(const char *digest);
+
+circuit_guard_state_t *
+get_guard_state_for_bridge_desc_fetch(const char *digest);
+
+void entry_guards_changed_for_guard_selection(guard_selection_t *gs);
+void entry_guards_changed(void);
+guard_selection_t * get_guard_selection_info(void);
+int num_live_entry_guards_for_guard_selection(
+ guard_selection_t *gs,
+ int for_directory);
+int num_live_entry_guards(int for_directory);
+#endif /* 1 */
+
+const node_t *entry_guard_find_node(const entry_guard_t *guard);
+const char *entry_guard_get_rsa_id_digest(const entry_guard_t *guard);
+const char *entry_guard_describe(const entry_guard_t *guard);
+guard_pathbias_t *entry_guard_get_pathbias_state(entry_guard_t *guard);
+
+/** Enum to specify how we're going to use a given guard, when we're picking
+ * one for immediate use. */
+typedef enum {
+ GUARD_USAGE_TRAFFIC = 0,
+ GUARD_USAGE_DIRGUARD = 1
+} guard_usage_t;
+
+#define circuit_guard_state_free(val) \
+ FREE_AND_NULL(circuit_guard_state_t, circuit_guard_state_free_, (val))
+
+void circuit_guard_state_free_(circuit_guard_state_t *state);
+int entry_guard_pick_for_circuit(guard_selection_t *gs,
+ guard_usage_t usage,
+ entry_guard_restriction_t *rst,
+ const node_t **chosen_node_out,
+ circuit_guard_state_t **guard_state_out);
+
+/* We just connected to an entry guard. What should we do with the circuit? */
+typedef enum {
+ GUARD_USABLE_NEVER = -1, /* Never use the circuit */
+ GUARD_MAYBE_USABLE_LATER = 0, /* Keep it. We might use it in the future */
+ GUARD_USABLE_NOW = 1, /* Use it right now */
+} guard_usable_t;
+
+guard_usable_t entry_guard_succeeded(circuit_guard_state_t **guard_state_p);
+void entry_guard_failed(circuit_guard_state_t **guard_state_p);
+void entry_guard_cancel(circuit_guard_state_t **guard_state_p);
+void entry_guard_chan_failed(channel_t *chan);
+int entry_guards_update_all(guard_selection_t *gs);
+int entry_guards_upgrade_waiting_circuits(guard_selection_t *gs,
+ const smartlist_t *all_circuits,
+ smartlist_t *newly_complete_out);
+int entry_guard_state_should_expire(circuit_guard_state_t *guard_state);
+void entry_guards_note_internet_connectivity(guard_selection_t *gs);
+
+int update_guard_selection_choice(const or_options_t *options);
+
+int entry_guard_could_succeed(const circuit_guard_state_t *guard_state);
+
+MOCK_DECL(int,num_bridges_usable,(int use_maybe_reachable));
+
+#ifdef ENTRYNODES_PRIVATE
+/**
+ * @name Default values for the parameters for the new (prop271) entry guard
+ * algorithm.
+ */
+/**@{*/
+/**
+ * We never let our sampled guard set grow larger than this percentage
+ * of the guards on the network.
+ */
+#define DFLT_MAX_SAMPLE_THRESHOLD_PERCENT 20
+/**
+ * We never let our sampled guard set grow larger than this number of
+ * guards.
+ */
+#define DFLT_MAX_SAMPLE_SIZE 60
+/**
+ * We always try to make our sample contain at least this many guards.
+ */
+#define DFLT_MIN_FILTERED_SAMPLE_SIZE 20
+/**
+ * If a guard is unlisted for this many days in a row, we remove it.
+ */
+#define DFLT_REMOVE_UNLISTED_GUARDS_AFTER_DAYS 20
+/**
+ * We remove unconfirmed guards from the sample after this many days,
+ * regardless of whether they are listed or unlisted.
+ */
+#define DFLT_GUARD_LIFETIME_DAYS 120
+/**
+ * We remove confirmed guards from the sample if they were sampled
+ * GUARD_LIFETIME_DAYS ago and confirmed this many days ago.
+ */
+#define DFLT_GUARD_CONFIRMED_MIN_LIFETIME_DAYS 60
+/**
+ * How many guards do we try to keep on our primary guard list?
+ */
+#define DFLT_N_PRIMARY_GUARDS 3
+/**
+ * Of the live guards on the primary guard list, how many do we consider when
+ * choosing a guard to use?
+ */
+#define DFLT_N_PRIMARY_GUARDS_TO_USE 1
+/**
+ * As DFLT_N_PRIMARY_GUARDS, but for choosing which directory guard to use.
+ */
+#define DFLT_N_PRIMARY_DIR_GUARDS_TO_USE 3
+/**
+ * If we haven't successfully built or used a circuit in this long, then
+ * consider that the internet is probably down.
+ */
+#define DFLT_INTERNET_LIKELY_DOWN_INTERVAL (10*60)
+/**
+ * If we're trying to connect to a nonprimary guard for at least this
+ * many seconds, and we haven't gotten the connection to work, we will treat
+ * lower-priority guards as usable.
+ */
+#define DFLT_NONPRIMARY_GUARD_CONNECT_TIMEOUT 15
+/**
+ * If a circuit has been sitting around in 'waiting for better guard' state
+ * for at least this long, we'll expire it.
+ */
+#define DFLT_NONPRIMARY_GUARD_IDLE_TIMEOUT (10*60)
+/**
+ * If our configuration retains fewer than this fraction of guards from the
+ * torrc, we are in a restricted setting.
+ */
+#define DFLT_MEANINGFUL_RESTRICTION_PERCENT 20
+/**
+ * If our configuration retains fewer than this fraction of guards from the
+ * torrc, we are in an extremely restricted setting, and should warn.
+ */
+#define DFLT_EXTREME_RESTRICTION_PERCENT 1
+/**@}*/
+
+STATIC double get_max_sample_threshold(void);
+STATIC int get_max_sample_size_absolute(void);
+STATIC int get_min_filtered_sample_size(void);
+STATIC int get_remove_unlisted_guards_after_days(void);
+STATIC int get_guard_lifetime(void);
+STATIC int get_guard_confirmed_min_lifetime(void);
+STATIC int get_n_primary_guards(void);
+STATIC int get_n_primary_guards_to_use(guard_usage_t usage);
+STATIC int get_internet_likely_down_interval(void);
+STATIC int get_nonprimary_guard_connect_timeout(void);
+STATIC int get_nonprimary_guard_idle_timeout(void);
+STATIC double get_meaningful_restriction_threshold(void);
+STATIC double get_extreme_restriction_threshold(void);
+
+HANDLE_DECL(entry_guard, entry_guard_t, STATIC)
+#define entry_guard_handle_free(h) \
+ FREE_AND_NULL(entry_guard_handle_t, entry_guard_handle_free_, (h))
+
+STATIC guard_selection_type_t guard_selection_infer_type(
+ guard_selection_type_t type_in,
+ const char *name);
+STATIC guard_selection_t *guard_selection_new(const char *name,
+ guard_selection_type_t type);
+STATIC guard_selection_t *get_guard_selection_by_name(
+ const char *name, guard_selection_type_t type, int create_if_absent);
+STATIC void guard_selection_free_(guard_selection_t *gs);
+#define guard_selection_free(gs) \
+ FREE_AND_NULL(guard_selection_t, guard_selection_free_, (gs))
+MOCK_DECL(STATIC int, entry_guard_is_listed,
+ (guard_selection_t *gs, const entry_guard_t *guard));
+STATIC const char *choose_guard_selection(const or_options_t *options,
+ const networkstatus_t *ns,
+ const guard_selection_t *old_selection,
+ guard_selection_type_t *type_out);
+STATIC entry_guard_t *get_sampled_guard_with_id(guard_selection_t *gs,
+ const uint8_t *rsa_id);
+
+MOCK_DECL(STATIC time_t, randomize_time, (time_t now, time_t max_backdate));
+
+MOCK_DECL(STATIC circuit_guard_state_t *,
+ circuit_guard_state_new,(entry_guard_t *guard, unsigned state,
+ entry_guard_restriction_t *rst));
+
+STATIC entry_guard_t *entry_guard_add_to_sample(guard_selection_t *gs,
+ const node_t *node);
+STATIC entry_guard_t *entry_guards_expand_sample(guard_selection_t *gs);
+STATIC char *entry_guard_encode_for_state(entry_guard_t *guard);
+STATIC entry_guard_t *entry_guard_parse_from_state(const char *s);
+#define entry_guard_free(e) \
+ FREE_AND_NULL(entry_guard_t, entry_guard_free_, (e))
+STATIC void entry_guard_free_(entry_guard_t *e);
+STATIC void entry_guards_update_filtered_sets(guard_selection_t *gs);
+STATIC int entry_guards_all_primary_guards_are_down(guard_selection_t *gs);
+/**
+ * @name Flags for sample_reachable_filtered_entry_guards()
+ */
+/**@{*/
+#define SAMPLE_EXCLUDE_CONFIRMED (1u<<0)
+#define SAMPLE_EXCLUDE_PRIMARY (1u<<1)
+#define SAMPLE_EXCLUDE_PENDING (1u<<2)
+#define SAMPLE_NO_UPDATE_PRIMARY (1u<<3)
+#define SAMPLE_EXCLUDE_NO_DESCRIPTOR (1u<<4)
+/**@}*/
+STATIC entry_guard_t *sample_reachable_filtered_entry_guards(
+ guard_selection_t *gs,
+ const entry_guard_restriction_t *rst,
+ unsigned flags);
+STATIC void entry_guard_consider_retry(entry_guard_t *guard);
+STATIC void make_guard_confirmed(guard_selection_t *gs, entry_guard_t *guard);
+STATIC void entry_guards_update_confirmed(guard_selection_t *gs);
+STATIC void entry_guards_update_primary(guard_selection_t *gs);
+STATIC int num_reachable_filtered_guards(const guard_selection_t *gs,
+ const entry_guard_restriction_t *rst);
+STATIC void sampled_guards_update_from_consensus(guard_selection_t *gs);
+/**
+ * @name Possible guard-states for a circuit.
+ */
+/**@{*/
+/** State for a circuit that can (so far as the guard subsystem is
+ * concerned) be used for actual traffic as soon as it is successfully
+ * opened. */
+#define GUARD_CIRC_STATE_USABLE_ON_COMPLETION 1
+/** State for a non-open circuit that we shouldn't use for actual
+ * traffic, when it completes, unless other circuits to preferable
+ * guards fail. */
+#define GUARD_CIRC_STATE_USABLE_IF_NO_BETTER_GUARD 2
+/** State for an open circuit that we shouldn't use for actual traffic
+ * unless other circuits to preferable guards fail. */
+#define GUARD_CIRC_STATE_WAITING_FOR_BETTER_GUARD 3
+/** State for a circuit that can (so far as the guard subsystem is
+ * concerned) be used for actual traffic. */
+#define GUARD_CIRC_STATE_COMPLETE 4
+/** State for a circuit that is unusable, and will not become usable. */
+#define GUARD_CIRC_STATE_DEAD 5
+/**@}*/
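+/* Informal sketch of the usual life cycle (the functions in entrynodes.c
+ * are authoritative): a circuit through a primary guard starts in
+ * USABLE_ON_COMPLETION and becomes COMPLETE when it opens; a circuit
+ * through a lower-priority guard starts in USABLE_IF_NO_BETTER_GUARD,
+ * becomes WAITING_FOR_BETTER_GUARD when it opens, and is later either
+ * upgraded to COMPLETE by entry_guards_upgrade_waiting_circuits() or
+ * expired. A guard failure moves the circuit to DEAD. */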
+STATIC void entry_guards_note_guard_failure(guard_selection_t *gs,
+ entry_guard_t *guard);
+STATIC entry_guard_t *select_entry_guard_for_circuit(guard_selection_t *gs,
+ guard_usage_t usage,
+ const entry_guard_restriction_t *rst,
+ unsigned *state_out);
+STATIC void mark_primary_guards_maybe_reachable(guard_selection_t *gs);
+STATIC unsigned entry_guards_note_guard_success(guard_selection_t *gs,
+ entry_guard_t *guard,
+ unsigned old_state);
+STATIC int entry_guard_has_higher_priority(entry_guard_t *a, entry_guard_t *b);
+STATIC char *getinfo_helper_format_single_entry_guard(const entry_guard_t *e);
+
+STATIC entry_guard_restriction_t *guard_create_exit_restriction(
+ const uint8_t *exit_id);
+
+STATIC entry_guard_restriction_t *guard_create_dirserver_md_restriction(void);
+
+STATIC void entry_guard_restriction_free_(entry_guard_restriction_t *rst);
+#define entry_guard_restriction_free(rst) \
+ FREE_AND_NULL(entry_guard_restriction_t, \
+ entry_guard_restriction_free_, (rst))
+
+#endif /* defined(ENTRYNODES_PRIVATE) */
+
+void remove_all_entry_guards_for_guard_selection(guard_selection_t *gs);
+void remove_all_entry_guards(void);
+
+struct bridge_info_t;
+void entry_guard_learned_bridge_identity(const tor_addr_port_t *addrport,
+ const uint8_t *rsa_id_digest);
+
+int entry_list_is_constrained(const or_options_t *options);
+int guards_retry_optimistic(const or_options_t *options);
+int entry_guards_parse_state_for_guard_selection(
+ guard_selection_t *gs, or_state_t *state, int set, char **msg);
+int entry_guards_parse_state(or_state_t *state, int set, char **msg);
+void entry_guards_update_state(or_state_t *state);
+int getinfo_helper_entry_guards(control_connection_t *conn,
+ const char *question, char **answer,
+ const char **errmsg);
+
+int entries_known_but_down(const or_options_t *options);
+void entries_retry_all(const or_options_t *options);
+
+char *entry_guards_get_err_str_if_dir_info_missing(int using_mds,
+ int num_present, int num_usable);
+char *guard_selection_get_err_str_if_dir_info_missing(guard_selection_t *gs,
+ int using_mds,
+ int num_present, int num_usable);
+
+void entry_guards_free_all(void);
+
+double pathbias_get_close_success_count(entry_guard_t *guard);
+double pathbias_get_use_success_count(entry_guard_t *guard);
+
+/** Contains the bandwidth of a relay as a guard and as a non-guard
+ * after the guardfraction has been considered. */
+typedef struct guardfraction_bandwidth_t {
+ /** Bandwidth as a guard after guardfraction has been considered. */
+ int guard_bw;
+ /** Bandwidth as a non-guard after guardfraction has been considered. */
+ int non_guard_bw;
+} guardfraction_bandwidth_t;
+
+int should_apply_guardfraction(const networkstatus_t *ns);
+
+void
+guard_get_guardfraction_bandwidth(guardfraction_bandwidth_t *guardfraction_bw,
+ int orig_bandwidth,
+ uint32_t guardfraction_percentage);
+
+#endif /* !defined(TOR_ENTRYNODES_H) */
diff --git a/src/feature/client/transports.c b/src/feature/client/transports.c
new file mode 100644
index 0000000000..1d3cb7b951
--- /dev/null
+++ b/src/feature/client/transports.c
@@ -0,0 +1,1738 @@
+/* Copyright (c) 2011-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file transports.c
+ * \brief Pluggable Transports related code.
+ *
+ * \details
+ * Each managed proxy is represented by a <b>managed_proxy_t</b>.
+ * Each managed proxy can support multiple transports.
+ * Each managed proxy gets configured through a multistep process.
+ *
+ * ::managed_proxy_list contains all the managed proxies this tor
+ * instance is supporting.
+ * In the ::managed_proxy_list there are ::unconfigured_proxies_n
+ * managed proxies that are still unconfigured.
+ *
+ * In every run_scheduled_event() tick, we attempt to launch and then
+ * configure the unconfigured managed proxies, using the configuration
+ * protocol defined in the 180_pluggable_transport.txt proposal. A
+ * managed proxy might need several ticks to get fully configured.
+ *
+ * When a managed proxy is fully configured, we register all its
+ * transports to the circuitbuild.c subsystem. At that point the
+ * transports are owned by the circuitbuild.c subsystem.
+ *
+ * When a managed proxy fails to follow the 180 configuration
+ * protocol, it gets marked as broken and gets destroyed.
+ *
+ * <b>In a little more detail:</b>
+ *
+ * While we are serially parsing torrc, we store all the transports
+ * that a proxy should spawn in its <em>transports_to_launch</em>
+ * element.
+ *
+ * When we finish reading the torrc, we spawn the managed proxy and
+ * expect {S,C}METHOD lines from its output. We add transports
+ * described by METHOD lines to its <em>transports</em> element, as
+ * transport_t structs.
+ *
+ * When the managed proxy stops spitting METHOD lines (signified by a
+ * '{S,C}METHODS DONE' message) we pass copies of its transports to
+ * the bridge subsystem. We keep copies of the 'transport_t's on the
+ * managed proxy to be able to associate the proxy with its
+ * transports, and we pass copies to the bridge subsystem so that
+ * transports can be associated with bridges.
+ * [ XXX We should try to see whether the two copies are really needed
+ * and maybe cut it into a single copy of the 'transport_t' shared
+ * between the managed proxy and the bridge subsystem. Preliminary
+ * analysis shows that both copies are needed with the current code
+ * logic, because of race conditions that can cause dangling
+ * pointers. ]
+ *
+ * <b>In even more detail, this is what happens when a config read
+ * (like a SIGHUP or a SETCONF) occurs:</b>
+ *
+ * We immediately destroy all unconfigured proxies (We shouldn't have
+ * unconfigured proxies in the first place, except when the config
+ * read happens immediately after tor is launched.).
+ *
+ * We mark all managed proxies and transports to signify that they
+ * must be removed if they are not used by the new torrc
+ * (we mark using the <b>marked_for_removal</b> element).
+ * We also mark all managed proxies to signify that they might need to
+ * be restarted so that they end up supporting all the transports the
+ * new torrc wants them to support
+ * (we mark using the <b>was_around_before_config_read</b> element).
+ * We also clear their <b>transports_to_launch</b> list so that we can
+ * put there the transports we need to launch according to the new
+ * torrc.
+ *
+ * We then start parsing torrc again.
+ *
+ * Every time we encounter a transport line using a managed proxy that
+ * was around before the config read, we cleanse that proxy from the
+ * removal mark. We also toggle the <b>check_if_restarts_needed</b>
+ * flag, so that on the next <b>pt_configure_remaining_proxies</b>
+ * tick, we investigate whether we need to restart the proxy so that
+ * it also spawns the new transports. If the post-config-read
+ * <b>transports_to_launch</b> list is identical to the pre-config-read
+ * one, it means that no changes were introduced to this proxy during
+ * the config read and no restart has to take place.
+ *
+ * During the post-config-read torrc parsing, we unmark all transports
+ * spawned by managed proxies that we find in our torrc.
+ * We do that so that if we don't need to restart a managed proxy, we
+ * can continue using its old transports normally.
+ * If we end up restarting the proxy, we destroy and unregister all
+ * old transports from the circuitbuild.c subsystem.
+ **/
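+
+/* Illustrative sketch of the configuration dialogue described above, for
+ * a client managed proxy (transport names, addresses and ports are
+ * hypothetical; the message keywords are the PROTO_* strings defined
+ * below):
+ *
+ *   tor -> proxy (environment):
+ *     TOR_PT_MANAGED_TRANSPORT_VER=1
+ *     TOR_PT_CLIENT_TRANSPORTS=obfs2,obfs3
+ *   proxy -> tor (stdout):
+ *     VERSION 1
+ *     CMETHOD obfs2 socks5 127.0.0.1:35713
+ *     CMETHOD obfs3 socks5 127.0.0.1:35714
+ *     CMETHODS DONE
+ */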
+
+#define PT_PRIVATE
+#include "or/or.h"
+#include "or/bridges.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/circuitbuild.h"
+#include "or/transports.h"
+#include "or/router.h"
+#include "or/statefile.h"
+#include "or/connection_or.h"
+#include "or/ext_orport.h"
+#include "or/control.h"
+
+#include "lib/process/env.h"
+#include "lib/process/subprocess.h"
+
+static process_environment_t *
+create_managed_proxy_environment(const managed_proxy_t *mp);
+
+static inline int proxy_configuration_finished(const managed_proxy_t *mp);
+
+static void handle_finished_proxy(managed_proxy_t *mp);
+static void parse_method_error(const char *line, int is_server_method);
+#define parse_server_method_error(l) parse_method_error(l, 1)
+#define parse_client_method_error(l) parse_method_error(l, 0)
+
+/** Managed proxy protocol strings */
+#define PROTO_ENV_ERROR "ENV-ERROR"
+#define PROTO_NEG_SUCCESS "VERSION"
+#define PROTO_NEG_FAIL "VERSION-ERROR no-version"
+#define PROTO_CMETHOD "CMETHOD"
+#define PROTO_SMETHOD "SMETHOD"
+#define PROTO_CMETHOD_ERROR "CMETHOD-ERROR"
+#define PROTO_SMETHOD_ERROR "SMETHOD-ERROR"
+#define PROTO_CMETHODS_DONE "CMETHODS DONE"
+#define PROTO_SMETHODS_DONE "SMETHODS DONE"
+#define PROTO_PROXY_DONE "PROXY DONE"
+#define PROTO_PROXY_ERROR "PROXY-ERROR"
+
+/** The first and, at the moment, only supported configuration
+ protocol version. */
+#define PROTO_VERSION_ONE 1
+
+/** A list of pluggable transports found in torrc. */
+static smartlist_t *transport_list = NULL;
+
+/** Returns a transport_t struct for a transport proxy supporting the
+ protocol <b>name</b> listening at <b>addr</b>:<b>port</b> using
+ SOCKS version <b>socks_ver</b>. */
+STATIC transport_t *
+transport_new(const tor_addr_t *addr, uint16_t port,
+ const char *name, int socks_ver,
+ const char *extra_info_args)
+{
+ transport_t *t = tor_malloc_zero(sizeof(transport_t));
+
+ tor_addr_copy(&t->addr, addr);
+ t->port = port;
+ t->name = tor_strdup(name);
+ t->socks_version = socks_ver;
+ if (extra_info_args)
+ t->extra_info_args = tor_strdup(extra_info_args);
+
+ return t;
+}
+
+/** Free the pluggable transport struct <b>transport</b>. */
+void
+transport_free_(transport_t *transport)
+{
+ if (!transport)
+ return;
+
+ tor_free(transport->name);
+ tor_free(transport->extra_info_args);
+ tor_free(transport);
+}
+
+/** Mark every entry of the transport list to be removed on our next call to
+ * sweep_transport_list unless it has first been un-marked. */
+void
+mark_transport_list(void)
+{
+ if (!transport_list)
+ transport_list = smartlist_new();
+ SMARTLIST_FOREACH(transport_list, transport_t *, t,
+ t->marked_for_removal = 1);
+}
+
+/** Remove every entry of the transport list that was marked with
+ * mark_transport_list if it has not subsequently been un-marked. */
+void
+sweep_transport_list(void)
+{
+ if (!transport_list)
+ transport_list = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(transport_list, transport_t *, t) {
+ if (t->marked_for_removal) {
+ SMARTLIST_DEL_CURRENT(transport_list, t);
+ transport_free(t);
+ }
+ } SMARTLIST_FOREACH_END(t);
+}
+
+/** Free every transport in the pluggable transports list and empty the
+ * list, creating it first if needed. */
+static void
+clear_transport_list(void)
+{
+ if (!transport_list)
+ transport_list = smartlist_new();
+ SMARTLIST_FOREACH(transport_list, transport_t *, t, transport_free(t));
+ smartlist_clear(transport_list);
+}
+
+/** Return a deep copy of <b>transport</b>. */
+static transport_t *
+transport_copy(const transport_t *transport)
+{
+ transport_t *new_transport = NULL;
+
+ tor_assert(transport);
+
+ new_transport = tor_malloc_zero(sizeof(transport_t));
+
+ new_transport->socks_version = transport->socks_version;
+ new_transport->name = tor_strdup(transport->name);
+ tor_addr_copy(&new_transport->addr, &transport->addr);
+ new_transport->port = transport->port;
+ new_transport->marked_for_removal = transport->marked_for_removal;
+
+ return new_transport;
+}
+
+/** Returns the transport in our transport list that has the name <b>name</b>.
+ * Else returns NULL. */
+MOCK_IMPL(transport_t *,
+transport_get_by_name,(const char *name))
+{
+ tor_assert(name);
+
+ if (!transport_list)
+ return NULL;
+
+ SMARTLIST_FOREACH_BEGIN(transport_list, transport_t *, transport) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ } SMARTLIST_FOREACH_END(transport);
+
+ return NULL;
+}
+
+/** Resolve any conflicts that the insertion of transport <b>t</b>
+ * might cause.
+ * Return 0 if <b>t</b> is OK and should be registered, 1 if there is
+ * a transport identical to <b>t</b> already registered and -1 if
+ * <b>t</b> cannot be added due to conflicts. */
+static int
+transport_resolve_conflicts(const transport_t *t)
+{
+ /* This is how we resolve transport conflicts:
+
+ If there is already a transport with the same name and addrport,
+ we either have duplicate torrc lines OR we are here post-HUP and
+ this transport was here pre-HUP as well. In any case, mark the
+ old transport so that it doesn't get removed and ignore the new
+ one. Our caller has to free the new transport so we return '1' to
+ signify this.
+
+ If there is already a transport with the same name but different
+ addrport:
+ * if it's marked for removal, it means that it either has a lower
+ priority than 't' in torrc (otherwise the mark would have been
+ cleared by the paragraph above), or it doesn't exist at all in
+ the post-HUP torrc. We destroy the old transport and register 't'.
+ * if it's *not* marked for removal, it means that it was newly
+ added in the post-HUP torrc or that it's of higher priority, in
+ this case we ignore 't'. */
+ transport_t *t_tmp = transport_get_by_name(t->name);
+ if (t_tmp) { /* same name */
+ if (tor_addr_eq(&t->addr, &t_tmp->addr) && (t->port == t_tmp->port)) {
+ /* same name *and* addrport */
+ t_tmp->marked_for_removal = 0;
+ return 1;
+ } else { /* same name but different addrport */
+ char *new_transport_addrport =
+ tor_strdup(fmt_addrport(&t->addr, t->port));
+ if (t_tmp->marked_for_removal) { /* marked for removal */
+ log_notice(LD_GENERAL, "You tried to add transport '%s' at '%s' "
+ "but there was already a transport marked for deletion at "
+ "'%s'. We deleted the old transport and registered the "
+ "new one.", t->name, new_transport_addrport,
+ fmt_addrport(&t_tmp->addr, t_tmp->port));
+ smartlist_remove(transport_list, t_tmp);
+ transport_free(t_tmp);
+ tor_free(new_transport_addrport);
+ } else { /* *not* marked for removal */
+ log_notice(LD_GENERAL, "You tried to add transport '%s' at '%s' "
+ "but the same transport already exists at '%s'. "
+ "Skipping.", t->name, new_transport_addrport,
+ fmt_addrport(&t_tmp->addr, t_tmp->port));
+ tor_free(new_transport_addrport);
+ return -1;
+ }
+ tor_free(new_transport_addrport);
+ }
+ }
+
+ return 0;
+}
+
+/** Add transport <b>t</b> to the internal list of pluggable
+ * transports.
+ * Returns 0 if the transport was added correctly, 1 if the same
+ * transport was already registered (in this case the caller must
+ * free the transport) and -1 if there was an error. */
+static int
+transport_add(transport_t *t)
+{
+ int r;
+ tor_assert(t);
+
+ r = transport_resolve_conflicts(t);
+
+ switch (r) {
+ case 0: /* should register transport */
+ if (!transport_list)
+ transport_list = smartlist_new();
+ smartlist_add(transport_list, t);
+ return 0;
+ default: /* let our caller know the return code */
+ return r;
+ }
+}
+
+/** Remember a new pluggable transport proxy at <b>addr</b>:<b>port</b>.
+ * <b>name</b> is set to the name of the protocol this proxy uses.
+ * <b>socks_ver</b> is set to the SOCKS version of the proxy. */
+MOCK_IMPL(int,
+transport_add_from_config, (const tor_addr_t *addr, uint16_t port,
+ const char *name, int socks_ver))
+{
+ transport_t *t = transport_new(addr, port, name, socks_ver, NULL);
+
+ int r = transport_add(t);
+
+ switch (r) {
+ case -1:
+ default:
+ log_notice(LD_GENERAL, "Could not add transport %s at %s. Skipping.",
+ t->name, fmt_addrport(&t->addr, t->port));
+ transport_free(t);
+ return -1;
+ case 1:
+ log_info(LD_GENERAL, "Successfully registered transport %s at %s.",
+ t->name, fmt_addrport(&t->addr, t->port));
+ transport_free(t); /* an identical transport is already registered */
+ return 0;
+ case 0:
+ log_info(LD_GENERAL, "Successfully registered transport %s at %s.",
+ t->name, fmt_addrport(&t->addr, t->port));
+ return 0;
+ }
+}
+
+/** List of all managed proxies (configured and unconfigured). */
+static smartlist_t *managed_proxy_list = NULL;
+/** Number of still unconfigured proxies. */
+static int unconfigured_proxies_n = 0;
+/** Boolean: True iff we might need to restart some proxies. */
+static int check_if_restarts_needed = 0;
+
+/** Return true if there are still unconfigured managed proxies, or proxies
+ * that need restarting. */
+int
+pt_proxies_configuration_pending(void)
+{
+ return unconfigured_proxies_n || check_if_restarts_needed;
+}
+
+/** Assert that the unconfigured_proxies_n value correctly matches the number
+ * of proxies in a state other than PT_PROTO_COMPLETE. */
+static void
+assert_unconfigured_count_ok(void)
+{
+ int n_completed = 0;
+ if (!managed_proxy_list) {
+ tor_assert(unconfigured_proxies_n == 0);
+ return;
+ }
+
+ SMARTLIST_FOREACH(managed_proxy_list, managed_proxy_t *, mp, {
+ if (mp->conf_state == PT_PROTO_COMPLETED)
+ ++n_completed;
+ });
+
+ tor_assert(n_completed + unconfigured_proxies_n ==
+ smartlist_len(managed_proxy_list));
+}
+
+/** Return true if <b>mp</b> has the same argv as <b>proxy_argv</b> */
+static int
+managed_proxy_has_argv(const managed_proxy_t *mp, char **proxy_argv)
+{
+ char **tmp1=proxy_argv;
+ char **tmp2=mp->argv;
+
+ tor_assert(tmp1);
+ tor_assert(tmp2);
+
+ while (*tmp1 && *tmp2) {
+ if (strcmp(*tmp1++, *tmp2++))
+ return 0;
+ }
+
+ if (!*tmp1 && !*tmp2)
+ return 1;
+
+ return 0;
+}
+
+/** Return a managed proxy with the same argv as <b>proxy_argv</b>.
+ * If no such managed proxy exists, return NULL. */
+static managed_proxy_t *
+get_managed_proxy_by_argv_and_type(char **proxy_argv, int is_server)
+{
+ if (!managed_proxy_list)
+ return NULL;
+
+ SMARTLIST_FOREACH_BEGIN(managed_proxy_list, managed_proxy_t *, mp) {
+ if (managed_proxy_has_argv(mp, proxy_argv) &&
+ mp->is_server == is_server)
+ return mp;
+ } SMARTLIST_FOREACH_END(mp);
+
+ return NULL;
+}
+
+/** Add <b>transport</b> to managed proxy <b>mp</b>. */
+static void
+add_transport_to_proxy(const char *transport, managed_proxy_t *mp)
+{
+ tor_assert(mp->transports_to_launch);
+ if (!smartlist_contains_string(mp->transports_to_launch, transport))
+ smartlist_add_strdup(mp->transports_to_launch, transport);
+}
+
+/** Called when a SIGHUP occurs. Returns true if managed proxy
+ * <b>mp</b> needs to be restarted after the SIGHUP, based on the new
+ * torrc. */
+static int
+proxy_needs_restart(const managed_proxy_t *mp)
+{
+ int ret = 1;
+ char* proxy_uri;
+
+ /* If the PT proxy config has changed, then all existing pluggable transports
+ * should be restarted.
+ */
+
+ proxy_uri = get_pt_proxy_uri();
+ if (strcmp_opt(proxy_uri, mp->proxy_uri) != 0)
+ goto needs_restart;
+
+ /* mp->transports_to_launch is populated with the names of the
+ transports that must be launched *after* the SIGHUP.
+ mp->transports is populated with the transports that were
+ launched *before* the SIGHUP.
+
+ Check if all the transports that need to be launched are already
+ launched: */
+
+ tor_assert(smartlist_len(mp->transports_to_launch) > 0);
+ tor_assert(mp->conf_state == PT_PROTO_COMPLETED);
+
+ if (smartlist_len(mp->transports_to_launch) != smartlist_len(mp->transports))
+ goto needs_restart;
+
+ SMARTLIST_FOREACH_BEGIN(mp->transports, const transport_t *, t) {
+ if (!smartlist_contains_string(mp->transports_to_launch, t->name))
+ goto needs_restart;
+
+ } SMARTLIST_FOREACH_END(t);
+
+ ret = 0;
+ needs_restart:
+ tor_free(proxy_uri);
+ return ret;
+}
+
+/** Managed proxy <b>mp</b> must be restarted. Do all the necessary
+ * preparations and then flag its state so that it will be relaunched
+ * in the next tick. */
+static void
+proxy_prepare_for_restart(managed_proxy_t *mp)
+{
+ transport_t *t_tmp = NULL;
+
+ tor_assert(mp->conf_state == PT_PROTO_COMPLETED);
+
+ /* destroy the process handle and terminate the process. */
+ tor_process_handle_destroy(mp->process_handle, 1);
+ mp->process_handle = NULL;
+
+ /* destroy all its registered transports, since we will no longer
+ use them. */
+ SMARTLIST_FOREACH_BEGIN(mp->transports, const transport_t *, t) {
+ t_tmp = transport_get_by_name(t->name);
+ if (t_tmp)
+ t_tmp->marked_for_removal = 1;
+ } SMARTLIST_FOREACH_END(t);
+ sweep_transport_list();
+
+ /* free the transport in mp->transports */
+ SMARTLIST_FOREACH(mp->transports, transport_t *, t, transport_free(t));
+ smartlist_clear(mp->transports);
+
+ /* Reset the proxy's HTTPS/SOCKS proxy */
+ tor_free(mp->proxy_uri);
+ mp->proxy_uri = get_pt_proxy_uri();
+ mp->proxy_supported = 0;
+
+ /* flag it as an infant proxy so that it gets launched on next tick */
+ mp->conf_state = PT_PROTO_INFANT;
+ unconfigured_proxies_n++;
+}
+
+/** Launch managed proxy <b>mp</b>. */
+static int
+launch_managed_proxy(managed_proxy_t *mp)
+{
+ int retval;
+
+ process_environment_t *env = create_managed_proxy_environment(mp);
+
+#ifdef _WIN32
+ /* Passing NULL as lpApplicationName makes Windows search for the .exe */
+ retval = tor_spawn_background(NULL,
+ (const char **)mp->argv,
+ env,
+ &mp->process_handle);
+#else /* !(defined(_WIN32)) */
+ retval = tor_spawn_background(mp->argv[0],
+ (const char **)mp->argv,
+ env,
+ &mp->process_handle);
+#endif /* defined(_WIN32) */
+
+ process_environment_free(env);
+
+ if (retval == PROCESS_STATUS_ERROR) {
+ log_warn(LD_GENERAL, "Managed proxy at '%s' failed at launch.",
+ mp->argv[0]);
+ return -1;
+ }
+
+ log_info(LD_CONFIG, "Managed proxy at '%s' has spawned with PID '%d'.",
+ mp->argv[0], tor_process_get_pid(mp->process_handle));
+
+ mp->conf_state = PT_PROTO_LAUNCHED;
+
+ return 0;
+}
+
+/** Check if any of the managed proxies we are currently trying to
+ * configure has anything new to say. */
+void
+pt_configure_remaining_proxies(void)
+{
+ int at_least_a_proxy_config_finished = 0;
+ smartlist_t *tmp = smartlist_new();
+
+ log_debug(LD_CONFIG, "Configuring remaining managed proxies (%d)!",
+ unconfigured_proxies_n);
+
+ /* Iterate over tmp, not managed_proxy_list, since configure_proxy can
+ * remove elements from managed_proxy_list. */
+ smartlist_add_all(tmp, managed_proxy_list);
+
+ assert_unconfigured_count_ok();
+
+ SMARTLIST_FOREACH_BEGIN(tmp, managed_proxy_t *, mp) {
+ tor_assert(mp->conf_state != PT_PROTO_BROKEN &&
+ mp->conf_state != PT_PROTO_FAILED_LAUNCH);
+
+ if (mp->was_around_before_config_read) {
+ /* This proxy is marked by a config read. Check whether we need
+ to restart it. */
+
+ mp->was_around_before_config_read = 0;
+
+ if (proxy_needs_restart(mp)) {
+ log_info(LD_GENERAL, "Preparing managed proxy '%s' for restart.",
+ mp->argv[0]);
+ proxy_prepare_for_restart(mp);
+ } else { /* it doesn't need to be restarted. */
+ log_info(LD_GENERAL, "Nothing changed for managed proxy '%s' after "
+ "HUP: not restarting.", mp->argv[0]);
+ }
+
+ continue;
+ }
+
+ /* If the proxy is not fully configured, try to configure it
+ further. */
+ if (!proxy_configuration_finished(mp))
+ if (configure_proxy(mp) == 1)
+ at_least_a_proxy_config_finished = 1;
+
+ } SMARTLIST_FOREACH_END(mp);
+
+ smartlist_free(tmp);
+ check_if_restarts_needed = 0;
+ assert_unconfigured_count_ok();
+
+ if (at_least_a_proxy_config_finished)
+ mark_my_descriptor_dirty("configured managed proxies");
+}
+
+/** Attempt to continue configuring managed proxy <b>mp</b>.
+ * Return 1 if the transport configuration finished, and return 0
+ * otherwise (if we still have more configuring to do for this
+ * proxy). */
+STATIC int
+configure_proxy(managed_proxy_t *mp)
+{
+ int configuration_finished = 0;
+ smartlist_t *proxy_output = NULL;
+ enum stream_status stream_status = 0;
+
+ /* if we haven't launched the proxy yet, do it now */
+ if (mp->conf_state == PT_PROTO_INFANT) {
+ if (launch_managed_proxy(mp) < 0) { /* launch fail */
+ mp->conf_state = PT_PROTO_FAILED_LAUNCH;
+ handle_finished_proxy(mp);
+ }
+ return 0;
+ }
+
+ tor_assert(mp->conf_state != PT_PROTO_INFANT);
+ tor_assert(mp->process_handle);
+
+ proxy_output =
+ tor_get_lines_from_handle(tor_process_get_stdout_pipe(mp->process_handle),
+ &stream_status);
+ if (!proxy_output) { /* failed to get input from proxy */
+ if (stream_status != IO_STREAM_EAGAIN) { /* bad stream status! */
+ mp->conf_state = PT_PROTO_BROKEN;
+ log_warn(LD_GENERAL, "The communication stream of managed proxy '%s' "
+ "is '%s'. Most probably the managed proxy stopped running. "
+ "This might be a bug of the managed proxy, a bug of Tor, or "
+ "a misconfiguration. Please enable logging on your managed "
+ "proxy and check the logs for errors.",
+ mp->argv[0], stream_status_to_string(stream_status));
+ }
+
+ goto done;
+ }
+
+ /* Handle lines. */
+ SMARTLIST_FOREACH_BEGIN(proxy_output, const char *, line) {
+ handle_proxy_line(line, mp);
+ if (proxy_configuration_finished(mp))
+ goto done;
+ } SMARTLIST_FOREACH_END(line);
+
+ done:
+ /* if the proxy finished configuring, exit the loop. */
+ if (proxy_configuration_finished(mp)) {
+ handle_finished_proxy(mp);
+ configuration_finished = 1;
+ }
+
+ if (proxy_output) {
+ SMARTLIST_FOREACH(proxy_output, char *, cp, tor_free(cp));
+ smartlist_free(proxy_output);
+ }
+
+ return configuration_finished;
+}
+
+/** Register the transports of server managed proxy <b>mp</b> to the state. */
+static void
+register_server_proxy(const managed_proxy_t *mp)
+{
+ tor_assert(mp->conf_state != PT_PROTO_COMPLETED);
+
+ SMARTLIST_FOREACH_BEGIN(mp->transports, transport_t *, t) {
+ save_transport_to_state(t->name, &t->addr, t->port);
+ log_notice(LD_GENERAL, "Registered server transport '%s' at '%s'",
+ t->name, fmt_addrport(&t->addr, t->port));
+ control_event_transport_launched("server", t->name, &t->addr, t->port);
+ } SMARTLIST_FOREACH_END(t);
+}
+
+/** Register all the transports supported by client managed proxy
+ * <b>mp</b> to the bridge subsystem. */
+static void
+register_client_proxy(const managed_proxy_t *mp)
+{
+ int r;
+
+ tor_assert(mp->conf_state != PT_PROTO_COMPLETED);
+
+ SMARTLIST_FOREACH_BEGIN(mp->transports, transport_t *, t) {
+ transport_t *transport_tmp = transport_copy(t);
+ r = transport_add(transport_tmp);
+ switch (r) {
+ case -1:
+ log_notice(LD_GENERAL, "Could not add transport %s. Skipping.", t->name);
+ transport_free(transport_tmp);
+ break;
+ case 0:
+ log_info(LD_GENERAL, "Successfully registered transport %s", t->name);
+ control_event_transport_launched("client", t->name, &t->addr, t->port);
+ break;
+ case 1:
+ log_info(LD_GENERAL, "Successfully registered transport %s", t->name);
+ control_event_transport_launched("client", t->name, &t->addr, t->port);
+ transport_free(transport_tmp);
+ break;
+ }
+ } SMARTLIST_FOREACH_END(t);
+}
+
+/** Register the transports of managed proxy <b>mp</b>. */
+static inline void
+register_proxy(const managed_proxy_t *mp)
+{
+ if (mp->is_server)
+ register_server_proxy(mp);
+ else
+ register_client_proxy(mp);
+}
+
+/** Free memory allocated by managed proxy <b>mp</b>. */
+STATIC void
+managed_proxy_destroy(managed_proxy_t *mp,
+ int also_terminate_process)
+{
+ SMARTLIST_FOREACH(mp->transports, transport_t *, t, transport_free(t));
+
+ /* free the transports smartlist */
+ smartlist_free(mp->transports);
+
+ /* free the transports_to_launch smartlist */
+ SMARTLIST_FOREACH(mp->transports_to_launch, char *, t, tor_free(t));
+ smartlist_free(mp->transports_to_launch);
+
+ /* remove it from the list of managed proxies */
+ if (managed_proxy_list)
+ smartlist_remove(managed_proxy_list, mp);
+
+ /* free the argv */
+ free_execve_args(mp->argv);
+
+ /* free the outgoing proxy URI */
+ tor_free(mp->proxy_uri);
+
+ tor_process_handle_destroy(mp->process_handle, also_terminate_process);
+ mp->process_handle = NULL;
+
+ tor_free(mp);
+}
+
+/** Convert the tor proxy options to a URI suitable for TOR_PT_PROXY.
+ * Return a newly allocated string containing the URI, or NULL if no
+ * proxy is set. */
+STATIC char *
+get_pt_proxy_uri(void)
+{
+ const or_options_t *options = get_options();
+ char *uri = NULL;
+
+ if (options->Socks4Proxy || options->Socks5Proxy || options->HTTPSProxy) {
+ char addr[TOR_ADDR_BUF_LEN+1];
+
+ if (options->Socks4Proxy) {
+ tor_addr_to_str(addr, &options->Socks4ProxyAddr, sizeof(addr), 1);
+ tor_asprintf(&uri, "socks4a://%s:%d", addr, options->Socks4ProxyPort);
+ } else if (options->Socks5Proxy) {
+ tor_addr_to_str(addr, &options->Socks5ProxyAddr, sizeof(addr), 1);
+ if (!options->Socks5ProxyUsername && !options->Socks5ProxyPassword) {
+ tor_asprintf(&uri, "socks5://%s:%d", addr, options->Socks5ProxyPort);
+ } else {
+ tor_asprintf(&uri, "socks5://%s:%s@%s:%d",
+ options->Socks5ProxyUsername,
+ options->Socks5ProxyPassword,
+ addr, options->Socks5ProxyPort);
+ }
+ } else if (options->HTTPSProxy) {
+ tor_addr_to_str(addr, &options->HTTPSProxyAddr, sizeof(addr), 1);
+ if (!options->HTTPSProxyAuthenticator) {
+ tor_asprintf(&uri, "http://%s:%d", addr, options->HTTPSProxyPort);
+ } else {
+ tor_asprintf(&uri, "http://%s@%s:%d", options->HTTPSProxyAuthenticator,
+ addr, options->HTTPSProxyPort);
+ }
+ }
+ }
+
+ return uri;
+}
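+
+/* Illustrative URIs produced by get_pt_proxy_uri() (addresses, ports and
+ * credentials are hypothetical):
+ *   Socks4Proxy -> "socks4a://192.0.2.1:1080"
+ *   Socks5Proxy -> "socks5://user:pass@192.0.2.1:1080"
+ *   HTTPSProxy  -> "http://user:pass@192.0.2.1:8080"
+ */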
+
+/** Handle a configured or broken managed proxy <b>mp</b>. */
+static void
+handle_finished_proxy(managed_proxy_t *mp)
+{
+ switch (mp->conf_state) {
+ case PT_PROTO_BROKEN: /* if broken: */
+ managed_proxy_destroy(mp, 1); /* annihilate it. */
+ break;
+ case PT_PROTO_FAILED_LAUNCH: /* if it failed before launching: */
+ managed_proxy_destroy(mp, 0); /* destroy it but don't terminate */
+ break;
+ case PT_PROTO_CONFIGURED: /* if configured correctly: */
+ if (mp->proxy_uri && !mp->proxy_supported) {
+ log_warn(LD_CONFIG, "Managed proxy '%s' did not configure the "
+ "specified outgoing proxy and will be terminated.",
+ mp->argv[0]);
+ managed_proxy_destroy(mp, 1); /* annihilate it. */
+ break;
+ }
+ register_proxy(mp); /* register its transports */
+ mp->conf_state = PT_PROTO_COMPLETED; /* and mark it as completed. */
+ break;
+ case PT_PROTO_INFANT:
+ case PT_PROTO_LAUNCHED:
+ case PT_PROTO_ACCEPTING_METHODS:
+ case PT_PROTO_COMPLETED:
+ default:
+ log_warn(LD_CONFIG, "Unexpected state '%d' of managed proxy '%s'.",
+ (int)mp->conf_state, mp->argv[0]);
+ tor_assert(0);
+ }
+
+ unconfigured_proxies_n--;
+}
+
+/** Return true if the configuration of the managed proxy <b>mp</b> is
+ finished. */
+static inline int
+proxy_configuration_finished(const managed_proxy_t *mp)
+{
+ return (mp->conf_state == PT_PROTO_CONFIGURED ||
+ mp->conf_state == PT_PROTO_BROKEN ||
+ mp->conf_state == PT_PROTO_FAILED_LAUNCH);
+}
+
+/** This function is called when a proxy sends an {S,C}METHODS DONE message. */
+static void
+handle_methods_done(const managed_proxy_t *mp)
+{
+ tor_assert(mp->transports);
+
+ if (smartlist_len(mp->transports) == 0)
+ log_notice(LD_GENERAL, "Managed proxy '%s' was spawned successfully, "
+ "but it didn't launch any pluggable transport listeners!",
+ mp->argv[0]);
+
+ log_info(LD_CONFIG, "%s managed proxy '%s' configuration completed!",
+ mp->is_server ? "Server" : "Client",
+ mp->argv[0]);
+}
+
+/** Handle a configuration protocol <b>line</b> received from a
+ * managed proxy <b>mp</b>. */
+STATIC void
+handle_proxy_line(const char *line, managed_proxy_t *mp)
+{
+ log_info(LD_GENERAL, "Got a line from managed proxy '%s': (%s)",
+ mp->argv[0], line);
+
+ if (!strcmpstart(line, PROTO_ENV_ERROR)) {
+ if (mp->conf_state != PT_PROTO_LAUNCHED)
+ goto err;
+
+ parse_env_error(line);
+ goto err;
+ } else if (!strcmpstart(line, PROTO_NEG_FAIL)) {
+ if (mp->conf_state != PT_PROTO_LAUNCHED)
+ goto err;
+
+ log_warn(LD_CONFIG, "Managed proxy could not pick a "
+ "configuration protocol version.");
+ goto err;
+ } else if (!strcmpstart(line, PROTO_NEG_SUCCESS)) {
+ if (mp->conf_state != PT_PROTO_LAUNCHED)
+ goto err;
+
+ if (parse_version(line,mp) < 0)
+ goto err;
+
+ tor_assert(mp->conf_protocol != 0);
+ mp->conf_state = PT_PROTO_ACCEPTING_METHODS;
+ return;
+ } else if (!strcmpstart(line, PROTO_CMETHODS_DONE)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ handle_methods_done(mp);
+
+ mp->conf_state = PT_PROTO_CONFIGURED;
+ return;
+ } else if (!strcmpstart(line, PROTO_SMETHODS_DONE)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ handle_methods_done(mp);
+
+ mp->conf_state = PT_PROTO_CONFIGURED;
+ return;
+ } else if (!strcmpstart(line, PROTO_CMETHOD_ERROR)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ parse_client_method_error(line);
+ goto err;
+ } else if (!strcmpstart(line, PROTO_SMETHOD_ERROR)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ parse_server_method_error(line);
+ goto err;
+ } else if (!strcmpstart(line, PROTO_CMETHOD)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ if (parse_cmethod_line(line, mp) < 0)
+ goto err;
+
+ return;
+ } else if (!strcmpstart(line, PROTO_SMETHOD)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ if (parse_smethod_line(line, mp) < 0)
+ goto err;
+
+ return;
+ } else if (!strcmpstart(line, PROTO_PROXY_DONE)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ if (mp->proxy_uri) {
+ mp->proxy_supported = 1;
+ return;
+ }
+
+ /* No proxy was configured; fall through and log this line as unexpected below. */
+ } else if (!strcmpstart(line, PROTO_PROXY_ERROR)) {
+ if (mp->conf_state != PT_PROTO_ACCEPTING_METHODS)
+ goto err;
+
+ parse_proxy_error(line);
+ goto err;
+ } else if (!strcmpstart(line, SPAWN_ERROR_MESSAGE)) {
+ /* managed proxy launch failed: parse error message to learn why. */
+ int retval, child_state, saved_errno;
+ retval = tor_sscanf(line, SPAWN_ERROR_MESSAGE "%x/%x",
+ &child_state, &saved_errno);
+ if (retval == 2) {
+ log_warn(LD_GENERAL,
+ "Could not launch managed proxy executable at '%s' ('%s').",
+ mp->argv[0], strerror(saved_errno));
+ } else { /* failed to parse error message */
+ log_warn(LD_GENERAL,"Could not launch managed proxy executable at '%s'.",
+ mp->argv[0]);
+ }
+
+ mp->conf_state = PT_PROTO_FAILED_LAUNCH;
+ return;
+ }
+
+ log_notice(LD_GENERAL, "Unknown line received by managed proxy (%s).", line);
+ return;
+
+ err:
+ mp->conf_state = PT_PROTO_BROKEN;
+ log_warn(LD_CONFIG, "Managed proxy at '%s' failed the configuration protocol"
+ " and will be destroyed.", mp->argv[0]);
+}
+
+/** Parses an ENV-ERROR <b>line</b> and warns the user accordingly. */
+STATIC void
+parse_env_error(const char *line)
+{
+ /* (Length of the protocol string) plus (a space) and (the first char of
+ the error message) */
+ if (strlen(line) < (strlen(PROTO_ENV_ERROR) + 2))
+ log_notice(LD_CONFIG, "Managed proxy sent us an %s without an error "
+ "message.", PROTO_ENV_ERROR);
+
+ log_warn(LD_CONFIG, "Managed proxy couldn't understand the "
+ "pluggable transport environment variables. (%s)",
+ line+strlen(PROTO_ENV_ERROR)+1);
+}
+
+/** Handles a VERSION <b>line</b>. Updates the configuration protocol
+ * version in <b>mp</b>. */
+STATIC int
+parse_version(const char *line, managed_proxy_t *mp)
+{
+ if (strlen(line) < (strlen(PROTO_NEG_SUCCESS) + 2)) {
+ log_warn(LD_CONFIG, "Managed proxy sent us malformed %s line.",
+ PROTO_NEG_SUCCESS);
+ return -1;
+ }
+
+ if (strcmp("1", line+strlen(PROTO_NEG_SUCCESS)+1)) { /* hardcoded temp */
+ log_warn(LD_CONFIG, "Managed proxy tried to negotiate on version '%s'. "
+ "We only support version '1'", line+strlen(PROTO_NEG_SUCCESS)+1);
+ return -1;
+ }
+
+ mp->conf_protocol = PROTO_VERSION_ONE; /* temp. till more versions appear */
+ return 0;
+}
+
+/** Parses {C,S}METHOD-ERROR <b>line</b> and warns the user
+ * accordingly. If <b>is_server</b> it is an SMETHOD-ERROR,
+ * otherwise it is a CMETHOD-ERROR. */
+static void
+parse_method_error(const char *line, int is_server)
+{
+ const char* error = is_server ?
+ PROTO_SMETHOD_ERROR : PROTO_CMETHOD_ERROR;
+
+ /* (Length of the protocol string) plus (a space) and (the first char of
+ the error message) */
+ if (strlen(line) < (strlen(error) + 2))
+ log_warn(LD_CONFIG, "Managed proxy sent us an %s without an error "
+ "message.", error);
+
+ log_warn(LD_CONFIG, "%s managed proxy encountered a method error. (%s)",
+ is_server ? "Server" : "Client",
+ line+strlen(error)+1);
+}
+
+/** A helper that implements the common logic of
+ * parse_{c,s}method_line(). If <b>is_smethod</b> is true, the line to
+ * parse is an SMETHOD line; otherwise it is a CMETHOD line. */
+static int
+parse_method_line_helper(const char *line,
+ managed_proxy_t *mp,
+ int is_smethod)
+{
+ int item_index = 0;
+ int r;
+
+ char *transport_name=NULL;
+ char *args_string=NULL;
+ char *addrport=NULL;
+ int socks_ver=PROXY_NONE;
+ char *address=NULL;
+ uint16_t port = 0;
+
+ const char *method_str = is_smethod ? PROTO_SMETHOD : PROTO_CMETHOD;
+ const int min_args_count = is_smethod ? 3 : 4;
+
+ tor_addr_t tor_addr;
+ transport_t *transport=NULL;
+ smartlist_t *items= smartlist_new();
+
+ smartlist_split_string(items, line, NULL,
+ SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, -1);
+ if (smartlist_len(items) < min_args_count) {
+ log_warn(LD_CONFIG, "Managed proxy sent us a %s line "
+ "with too few arguments.", method_str);
+ goto err;
+ }
+
+ tor_assert(!strcmp(smartlist_get(items, item_index),method_str));
+ ++item_index;
+
+ transport_name = smartlist_get(items,item_index);
+ ++item_index;
+ if (!string_is_C_identifier(transport_name)) {
+ log_warn(LD_CONFIG, "Transport name is not a C identifier (%s).",
+ transport_name);
+ goto err;
+ }
+
+ /** Check for the proxy method sent to us in the CMETHOD line. */
+ if (!is_smethod) {
+ const char *socks_ver_str = smartlist_get(items,item_index);
+ ++item_index;
+
+ if (!strcmp(socks_ver_str,"socks4")) {
+ socks_ver = PROXY_SOCKS4;
+ } else if (!strcmp(socks_ver_str,"socks5")) {
+ socks_ver = PROXY_SOCKS5;
+ } else {
+ log_warn(LD_CONFIG, "Client managed proxy sent us a proxy protocol "
+ "we don't recognize. (%s)", socks_ver_str);
+ goto err;
+ }
+ }
+
+ addrport = smartlist_get(items, item_index);
+ ++item_index;
+ if (tor_addr_port_split(LOG_WARN, addrport, &address, &port)<0) {
+ log_warn(LD_CONFIG, "Error parsing transport address '%s'", addrport);
+ goto err;
+ }
+
+ if (!port) {
+ log_warn(LD_CONFIG,
+ "Transport address '%s' has no port.", addrport);
+ goto err;
+ }
+
+ if (tor_addr_parse(&tor_addr, address) < 0) {
+ log_warn(LD_CONFIG, "Error parsing transport address '%s'", address);
+ goto err;
+ }
+
+ /** Check for options in the SMETHOD line. */
+ if (is_smethod && smartlist_len(items) > min_args_count) {
+ /* Seems like there are also some [options] in the SMETHOD line.
+ Let's see if we can parse them. */
+ char *options_string = smartlist_get(items, item_index);
+ log_debug(LD_CONFIG, "Got options_string: %s", options_string);
+ if (!strcmpstart(options_string, "ARGS:")) {
+ args_string = options_string+strlen("ARGS:");
+ log_debug(LD_CONFIG, "Got ARGS: %s", args_string);
+ }
+ }
+
+ transport = transport_new(&tor_addr, port, transport_name,
+ socks_ver, args_string);
+
+ smartlist_add(mp->transports, transport);
+
+ /** Logs info about line parsing success for client or server */
+ if (is_smethod) {
+ log_info(LD_CONFIG, "Server transport %s at %s:%d.",
+ transport_name, address, (int)port);
+ } else {
+ log_info(LD_CONFIG, "Transport %s at %s:%d with SOCKS %d. "
+ "Attached to managed proxy.",
+ transport_name, address, (int)port, socks_ver);
+ }
+
+ r=0;
+ goto done;
+
+ err:
+ r = -1;
+
+ done:
+ SMARTLIST_FOREACH(items, char*, s, tor_free(s));
+ smartlist_free(items);
+ tor_free(address);
+ return r;
+}
+
+/** Parses an SMETHOD <b>line</b> and if well-formed it registers the
+ * new transport in <b>mp</b>. */
+STATIC int
+parse_smethod_line(const char *line, managed_proxy_t *mp)
+{
+ /* Example of legit SMETHOD line:
+ SMETHOD obfs2 0.0.0.0:25612 ARGS:secret=supersekrit,key=superkey */
+ return parse_method_line_helper(line, mp, 1);
+}
+
+/** Parses a CMETHOD <b>line</b>, and if well-formed it registers
+ * the new transport in <b>mp</b>. */
+STATIC int
+parse_cmethod_line(const char *line, managed_proxy_t *mp)
+{
+ /* Example of legit CMETHOD line:
+ CMETHOD obfs2 socks5 127.0.0.1:35713 */
+ return parse_method_line_helper(line, mp, 0);
+}
+
+/** Parses a PROXY-ERROR <b>line</b> and warns the user accordingly. */
+STATIC void
+parse_proxy_error(const char *line)
+{
+ /* (Length of the protocol string) plus (a space) and (the first char of
+ the error message) */
+ if (strlen(line) < (strlen(PROTO_PROXY_ERROR) + 2))
+ log_notice(LD_CONFIG, "Managed proxy sent us an %s without an error "
+ "message.", PROTO_PROXY_ERROR);
+
+ log_warn(LD_CONFIG, "Managed proxy failed to configure the "
+ "pluggable transport's outgoing proxy. (%s)",
+ line+strlen(PROTO_PROXY_ERROR)+1);
+}
+
+/** Return a newly allocated string that tor should place in
+ * TOR_PT_SERVER_TRANSPORT_OPTIONS while configuring the server
+ * managed proxy in <b>mp</b>. Return NULL if no such options are found. */
+STATIC char *
+get_transport_options_for_server_proxy(const managed_proxy_t *mp)
+{
+ char *options_string = NULL;
+ smartlist_t *string_sl = smartlist_new();
+
+ tor_assert(mp->is_server);
+
+ /** Loop over the transports of the proxy. If we have options for
+ any of them, format them appropriately and place them in our
+ smartlist. Finally, join our smartlist to get the final
+ string. */
+ SMARTLIST_FOREACH_BEGIN(mp->transports_to_launch, const char *, transport) {
+ smartlist_t *options_tmp_sl = NULL;
+ options_tmp_sl = get_options_for_server_transport(transport);
+ if (!options_tmp_sl)
+ continue;
+
+ /** Loop over the options of this transport, escape them, and
+ place them in the smartlist. */
+ SMARTLIST_FOREACH_BEGIN(options_tmp_sl, const char *, options) {
+ char *escaped_opts = tor_escape_str_for_pt_args(options, ":;\\");
+ smartlist_add_asprintf(string_sl, "%s:%s",
+ transport, escaped_opts);
+ tor_free(escaped_opts);
+ } SMARTLIST_FOREACH_END(options);
+
+ SMARTLIST_FOREACH(options_tmp_sl, char *, c, tor_free(c));
+ smartlist_free(options_tmp_sl);
+ } SMARTLIST_FOREACH_END(transport);
+
+ if (smartlist_len(string_sl)) {
+ options_string = smartlist_join_strings(string_sl, ";", 0, NULL);
+ }
+
+ SMARTLIST_FOREACH(string_sl, char *, t, tor_free(t));
+ smartlist_free(string_sl);
+
+ return options_string;
+}
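+
+/* For example, with hypothetical per-transport options
+ *   obfs2: "shared-secret=abc"   scramblesuit: "password=XYZ"
+ * the string built above would be
+ *   "obfs2:shared-secret=abc;scramblesuit:password=XYZ".
+ */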
+
+/** Return the string that tor should place in TOR_PT_SERVER_BINDADDR
+ * while configuring the server managed proxy in <b>mp</b>. The
+ * string is stored in the heap, and it's the responsibility of
+ * the caller to deallocate it after its use. */
+static char *
+get_bindaddr_for_server_proxy(const managed_proxy_t *mp)
+{
+ char *bindaddr_result = NULL;
+ char *bindaddr_tmp = NULL;
+ smartlist_t *string_tmp = smartlist_new();
+
+ tor_assert(mp->is_server);
+
+ SMARTLIST_FOREACH_BEGIN(mp->transports_to_launch, char *, t) {
+ bindaddr_tmp = get_stored_bindaddr_for_server_transport(t);
+
+ smartlist_add_asprintf(string_tmp, "%s-%s", t, bindaddr_tmp);
+
+ tor_free(bindaddr_tmp);
+ } SMARTLIST_FOREACH_END(t);
+
+ bindaddr_result = smartlist_join_strings(string_tmp, ",", 0, NULL);
+
+ SMARTLIST_FOREACH(string_tmp, char *, t, tor_free(t));
+ smartlist_free(string_tmp);
+
+ return bindaddr_result;
+}
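+
+/* The returned string has the form "<transport>-<addr>:<port>" joined
+ * with commas, e.g. (hypothetical values)
+ * "obfs2-0.0.0.0:54321,obfs3-0.0.0.0:5555". */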
+
+/** Return a newly allocated process_environment_t * for <b>mp</b>'s
+ * process. */
+static process_environment_t *
+create_managed_proxy_environment(const managed_proxy_t *mp)
+{
+ const or_options_t *options = get_options();
+
+ /* Environment variables to be added to or set in mp's environment. */
+ smartlist_t *envs = smartlist_new();
+ /* XXXX The next time someone touches this code, shorten the name of
+ * set_environment_variable_in_smartlist, add a
+ * set_env_var_in_smartlist_asprintf function, and get rid of the
+ * silly extra envs smartlist. */
+
+ /* The final environment to be passed to mp. */
+ smartlist_t *merged_env_vars = get_current_process_environment_variables();
+
+ process_environment_t *env;
+
+ {
+ char *state_tmp = get_datadir_fname("pt_state/"); /* XXX temp */
+ smartlist_add_asprintf(envs, "TOR_PT_STATE_LOCATION=%s", state_tmp);
+ tor_free(state_tmp);
+ }
+
+ smartlist_add_strdup(envs, "TOR_PT_MANAGED_TRANSPORT_VER=1");
+
+ {
+ char *transports_to_launch =
+ smartlist_join_strings(mp->transports_to_launch, ",", 0, NULL);
+
+ smartlist_add_asprintf(envs,
+ mp->is_server ?
+ "TOR_PT_SERVER_TRANSPORTS=%s" :
+ "TOR_PT_CLIENT_TRANSPORTS=%s",
+ transports_to_launch);
+
+ tor_free(transports_to_launch);
+ }
+
+ if (mp->is_server) {
+ {
+ char *orport_tmp =
+ get_first_listener_addrport_string(CONN_TYPE_OR_LISTENER);
+ if (orport_tmp) {
+ smartlist_add_asprintf(envs, "TOR_PT_ORPORT=%s", orport_tmp);
+ tor_free(orport_tmp);
+ }
+ }
+
+ {
+ char *bindaddr_tmp = get_bindaddr_for_server_proxy(mp);
+ smartlist_add_asprintf(envs, "TOR_PT_SERVER_BINDADDR=%s", bindaddr_tmp);
+ tor_free(bindaddr_tmp);
+ }
+
+ {
+ char *server_transport_options =
+ get_transport_options_for_server_proxy(mp);
+ if (server_transport_options) {
+ smartlist_add_asprintf(envs, "TOR_PT_SERVER_TRANSPORT_OPTIONS=%s",
+ server_transport_options);
+ tor_free(server_transport_options);
+ }
+ }
+
+ /* XXXX Remove the '=' here once versions of obfsproxy which
+ * assert that this env var exists are sufficiently dead.
+ *
+ * (If we remove this line entirely, some joker will stick this
+ * variable in Tor's environment and crash PTs that try to parse
+ * it even when not run in server mode.) */
+
+ if (options->ExtORPort_lines) {
+ char *ext_or_addrport_tmp =
+ get_first_listener_addrport_string(CONN_TYPE_EXT_OR_LISTENER);
+ char *cookie_file_loc = get_ext_or_auth_cookie_file_name();
+
+ if (ext_or_addrport_tmp) {
+ smartlist_add_asprintf(envs, "TOR_PT_EXTENDED_SERVER_PORT=%s",
+ ext_or_addrport_tmp);
+ }
+ smartlist_add_asprintf(envs, "TOR_PT_AUTH_COOKIE_FILE=%s",
+ cookie_file_loc);
+
+ tor_free(ext_or_addrport_tmp);
+ tor_free(cookie_file_loc);
+
+ } else {
+ smartlist_add_asprintf(envs, "TOR_PT_EXTENDED_SERVER_PORT=");
+ }
+
+ /* All new versions of tor will keep stdin open, so PTs can use it
+ * as a reliable termination detection mechanism.
+ */
+ smartlist_add_asprintf(envs, "TOR_PT_EXIT_ON_STDIN_CLOSE=1");
+ } else {
+ /* If ClientTransportPlugin has a HTTPS/SOCKS proxy configured, set the
+ * TOR_PT_PROXY line.
+ */
+
+ if (mp->proxy_uri) {
+ smartlist_add_asprintf(envs, "TOR_PT_PROXY=%s", mp->proxy_uri);
+ }
+ }
+
+ SMARTLIST_FOREACH_BEGIN(envs, const char *, env_var) {
+ set_environment_variable_in_smartlist(merged_env_vars, env_var,
+ tor_free_, 1);
+ } SMARTLIST_FOREACH_END(env_var);
+
+ env = process_environment_make(merged_env_vars);
+
+ smartlist_free(envs);
+
+ SMARTLIST_FOREACH(merged_env_vars, void *, x, tor_free(x));
+ smartlist_free(merged_env_vars);
+
+ return env;
+}
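+
+/* Sketch of the variables a client managed proxy might end up with in
+ * addition to tor's own environment (values are hypothetical; server
+ * proxies additionally get the TOR_PT_ORPORT, TOR_PT_SERVER_* and
+ * related variables set above):
+ *   TOR_PT_STATE_LOCATION=<DataDirectory>/pt_state/
+ *   TOR_PT_MANAGED_TRANSPORT_VER=1
+ *   TOR_PT_CLIENT_TRANSPORTS=obfs2,obfs3
+ *   TOR_PT_PROXY=socks5://user:pass@192.0.2.1:1080
+ */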
+
+/** Create and return a new managed proxy for the transports listed in
+ * <b>with_transport_list</b>, using <b>proxy_argv</b>. Also, add it to
+ * the global managed proxy list. If
+ * <b>is_server</b> is true, it's a server managed proxy. Takes ownership of
+ * <b>proxy_argv</b>.
+ *
+ * Requires that proxy_argv have at least one element. */
+STATIC managed_proxy_t *
+managed_proxy_create(const smartlist_t *with_transport_list,
+ char **proxy_argv, int is_server)
+{
+ managed_proxy_t *mp = tor_malloc_zero(sizeof(managed_proxy_t));
+ mp->conf_state = PT_PROTO_INFANT;
+ mp->is_server = is_server;
+ mp->argv = proxy_argv;
+ mp->transports = smartlist_new();
+ mp->proxy_uri = get_pt_proxy_uri();
+
+ mp->transports_to_launch = smartlist_new();
+ SMARTLIST_FOREACH(with_transport_list, const char *, transport,
+ add_transport_to_proxy(transport, mp));
+
+ /* register the managed proxy */
+ if (!managed_proxy_list)
+ managed_proxy_list = smartlist_new();
+ smartlist_add(managed_proxy_list, mp);
+ unconfigured_proxies_n++;
+
+ assert_unconfigured_count_ok();
+
+ return mp;
+}
+
+/** Register proxy with <b>proxy_argv</b>, supporting transports in
+ * <b>transport_list</b>, to the managed proxy subsystem.
+ * If <b>is_server</b> is true, then the proxy is a server proxy.
+ *
+ * Takes ownership of proxy_argv.
+ *
+ * Requires that proxy_argv be a NULL-terminated array of command-line
+ * elements, containing at least one element.
+ **/
+MOCK_IMPL(void,
+pt_kickstart_proxy, (const smartlist_t *with_transport_list,
+ char **proxy_argv, int is_server))
+{
+ managed_proxy_t *mp=NULL;
+ transport_t *old_transport = NULL;
+
+ if (!proxy_argv || !proxy_argv[0]) {
+ return;
+ }
+
+ mp = get_managed_proxy_by_argv_and_type(proxy_argv, is_server);
+
+ if (!mp) { /* we haven't seen this proxy before */
+ managed_proxy_create(with_transport_list, proxy_argv, is_server);
+
+ } else { /* known proxy. add its transport to its transport list */
+ if (mp->was_around_before_config_read) {
+ /* If this managed proxy was around even before we read the
+ config this time, it means that it was already enabled before
+ and is not useless and should be kept. If it's marked for
+ removal, unmark it and make sure that we check whether it
+ needs to be restarted. */
+ if (mp->marked_for_removal) {
+ mp->marked_for_removal = 0;
+ check_if_restarts_needed = 1;
+ }
+
+ /* For each new transport, check if the managed proxy used to
+ support it before the SIGHUP. If that was the case, make sure
+ it doesn't get removed because we might reuse it. */
+ SMARTLIST_FOREACH_BEGIN(with_transport_list, const char *, transport) {
+ old_transport = transport_get_by_name(transport);
+ if (old_transport)
+ old_transport->marked_for_removal = 0;
+ } SMARTLIST_FOREACH_END(transport);
+ }
+
+ SMARTLIST_FOREACH(with_transport_list, const char *, transport,
+ add_transport_to_proxy(transport, mp));
+ free_execve_args(proxy_argv);
+ }
+}
+
+/** Frees the array of pointers in <b>arg</b> used as arguments to
+ execve(2). */
+STATIC void
+free_execve_args(char **arg)
+{
+ char **tmp = arg;
+ while (*tmp) /* use the fact that the last element of the array is a
+ NULL pointer to know when to stop freeing */
+ tor_free_(*tmp++);
+
+ tor_free(arg);
+}
+
+/** Tor will read its config.
+ * Prepare the managed proxy list so that proxies not used in the new
+ * config will shutdown, and proxies that need to spawn different
+ * transports will do so. */
+void
+pt_prepare_proxy_list_for_config_read(void)
+{
+ if (!managed_proxy_list)
+ return;
+
+ assert_unconfigured_count_ok();
+ SMARTLIST_FOREACH_BEGIN(managed_proxy_list, managed_proxy_t *, mp) {
+ /* Destroy unconfigured proxies. */
+ if (mp->conf_state != PT_PROTO_COMPLETED) {
+ SMARTLIST_DEL_CURRENT(managed_proxy_list, mp);
+ managed_proxy_destroy(mp, 1);
+ unconfigured_proxies_n--;
+ continue;
+ }
+
+ tor_assert(mp->conf_state == PT_PROTO_COMPLETED);
+
+ /* Mark all proxies for removal, and also note that they have been
+ here before the config read. */
+ mp->marked_for_removal = 1;
+ mp->was_around_before_config_read = 1;
+ SMARTLIST_FOREACH(mp->transports_to_launch, char *, t, tor_free(t));
+ smartlist_clear(mp->transports_to_launch);
+ } SMARTLIST_FOREACH_END(mp);
+
+ assert_unconfigured_count_ok();
+
+ tor_assert(unconfigured_proxies_n == 0);
+}
+
+/** Return a smartlist containing the ports where our pluggable
+ * transports are listening. */
+smartlist_t *
+get_transport_proxy_ports(void)
+{
+ smartlist_t *sl = NULL;
+
+ if (!managed_proxy_list)
+ return NULL;
+
+ /** XXX assume that external proxy ports have been forwarded
+ manually */
+ SMARTLIST_FOREACH_BEGIN(managed_proxy_list, const managed_proxy_t *, mp) {
+ if (!mp->is_server || mp->conf_state != PT_PROTO_COMPLETED)
+ continue;
+
+ if (!sl) sl = smartlist_new();
+
+ tor_assert(mp->transports);
+ SMARTLIST_FOREACH(mp->transports, const transport_t *, t,
+ smartlist_add_asprintf(sl, "%u:%u", t->port, t->port));
+
+ } SMARTLIST_FOREACH_END(mp);
+
+ return sl;
+}
+
+/** Return the pluggable transport string that we should display in
+ * our extra-info descriptor. If we shouldn't display such a string,
+ * or we have nothing to display, return NULL. The string is
+ * allocated on the heap and it's the responsibility of the caller to
+ * free it. */
+char *
+pt_get_extra_info_descriptor_string(void)
+{
+ char *the_string = NULL;
+ smartlist_t *string_chunks = NULL;
+
+ if (!managed_proxy_list)
+ return NULL;
+
+ string_chunks = smartlist_new();
+
+ /* For each managed proxy, add its transports to the chunks list. */
+ SMARTLIST_FOREACH_BEGIN(managed_proxy_list, const managed_proxy_t *, mp) {
+ if ((!mp->is_server) || (mp->conf_state != PT_PROTO_COMPLETED))
+ continue;
+
+ tor_assert(mp->transports);
+
+ SMARTLIST_FOREACH_BEGIN(mp->transports, const transport_t *, t) {
+ char *transport_args = NULL;
+
+ /* If the transport proxy returned "0.0.0.0" as its address, and
+ * we know our external IP address, use it. Otherwise, use the
+ * returned address. */
+ const char *addrport = NULL;
+ uint32_t external_ip_address = 0;
+ if (tor_addr_is_null(&t->addr) &&
+ router_pick_published_address(get_options(),
+ &external_ip_address, 0) >= 0) {
+ tor_addr_t addr;
+ tor_addr_from_ipv4h(&addr, external_ip_address);
+ addrport = fmt_addrport(&addr, t->port);
+ } else {
+ addrport = fmt_addrport(&t->addr, t->port);
+ }
+
+ /* If this transport has any arguments with it, prepend a space
+ to them so that we can add them to the transport line. */
+ if (t->extra_info_args)
+ tor_asprintf(&transport_args, " %s", t->extra_info_args);
+
+ smartlist_add_asprintf(string_chunks,
+ "transport %s %s%s",
+ t->name, addrport,
+ transport_args ? transport_args : "");
+ tor_free(transport_args);
+ } SMARTLIST_FOREACH_END(t);
+
+ } SMARTLIST_FOREACH_END(mp);
+
+ if (smartlist_len(string_chunks) == 0) {
+ smartlist_free(string_chunks);
+ return NULL;
+ }
+
+ /* Join all the chunks into the final string. */
+ the_string = smartlist_join_strings(string_chunks, "\n", 1, NULL);
+
+ SMARTLIST_FOREACH(string_chunks, char *, s, tor_free(s));
+ smartlist_free(string_chunks);
+
+ return the_string;
+}
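+
+/* Each chunk above becomes one extra-info "transport" line, e.g.
+ * (hypothetical address) "transport obfs2 203.0.113.5:25612", optionally
+ * followed by the transport's extra-info arguments. */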
+
+/** Stringify the SOCKS arguments in <b>socks_args</b> according to
+ * 180_pluggable_transport.txt. The string is allocated on the heap
+ * and it's the responsibility of the caller to free it after use. */
+char *
+pt_stringify_socks_args(const smartlist_t *socks_args)
+{
+ /* tmp place to store escaped socks arguments, so that we can
+ concatenate them up afterwards */
+ smartlist_t *sl_tmp = NULL;
+ char *escaped_string = NULL;
+ char *new_string = NULL;
+
+ tor_assert(socks_args);
+ tor_assert(smartlist_len(socks_args) > 0);
+
+ sl_tmp = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(socks_args, const char *, s) {
+ /* Escape ';' and '\'. */
+ escaped_string = tor_escape_str_for_pt_args(s, ";\\");
+ if (!escaped_string)
+ goto done;
+
+ smartlist_add(sl_tmp, escaped_string);
+ } SMARTLIST_FOREACH_END(s);
+
+ new_string = smartlist_join_strings(sl_tmp, ";", 0, NULL);
+
+ done:
+ SMARTLIST_FOREACH(sl_tmp, char *, s, tor_free(s));
+ smartlist_free(sl_tmp);
+
+ return new_string;
+}
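+
+/* For example, the hypothetical SOCKS args ["secret=ab;c", "dir=x\y"]
+ * would be stringified as "secret=ab\;c;dir=x\\y". */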
+
+/** Return a string of the SOCKS arguments that we should pass to the
+ * pluggable transports proxy in <b>addr</b>:<b>port</b> according to
+ * 180_pluggable_transport.txt. The string is allocated on the heap
+ * and it's the responsibility of the caller to free it after use. */
+char *
+pt_get_socks_args_for_proxy_addrport(const tor_addr_t *addr, uint16_t port)
+{
+ const smartlist_t *socks_args = NULL;
+
+ socks_args = get_socks_args_by_bridge_addrport(addr, port);
+ if (!socks_args)
+ return NULL;
+
+ return pt_stringify_socks_args(socks_args);
+}
+
+/** The tor config was read.
+ * Destroy all managed proxies that were marked by a previous call to
+ * prepare_proxy_list_for_config_read() and are not used by the new
+ * config. */
+void
+sweep_proxy_list(void)
+{
+ if (!managed_proxy_list)
+ return;
+ assert_unconfigured_count_ok();
+ SMARTLIST_FOREACH_BEGIN(managed_proxy_list, managed_proxy_t *, mp) {
+ if (mp->marked_for_removal) {
+ SMARTLIST_DEL_CURRENT(managed_proxy_list, mp);
+ managed_proxy_destroy(mp, 1);
+ }
+ } SMARTLIST_FOREACH_END(mp);
+ assert_unconfigured_count_ok();
+}
+
+/** Release all storage held by the pluggable transports subsystem. */
+void
+pt_free_all(void)
+{
+ if (transport_list) {
+ clear_transport_list();
+ smartlist_free(transport_list);
+ transport_list = NULL;
+ }
+
+ if (managed_proxy_list) {
+ /* If the proxy is in PT_PROTO_COMPLETED, it has registered its
+ transports and it's the duty of the circuitbuild.c subsystem to
+ free them. Otherwise, it hasn't registered its transports yet
+ and we should free them here. */
+ SMARTLIST_FOREACH(managed_proxy_list, managed_proxy_t *, mp, {
+ SMARTLIST_DEL_CURRENT(managed_proxy_list, mp);
+ managed_proxy_destroy(mp, 1);
+ });
+
+ smartlist_free(managed_proxy_list);
+ managed_proxy_list=NULL;
+ }
+}
+
+/** Return a newly allocated string equal to <b>string</b>, except that every
+ * character in <b>chars_to_escape</b> is preceded by a backslash. */
+char *
+tor_escape_str_for_pt_args(const char *string, const char *chars_to_escape)
+{
+ char *new_string = NULL;
+ char *new_cp = NULL;
+ size_t length, new_length;
+
+ tor_assert(string);
+
+ length = strlen(string);
+
+ if (!length) /* If we were given the empty string, return the same. */
+ return tor_strdup("");
+ /* (new_length > SIZE_MAX) => ((length * 2) + 1 > SIZE_MAX) =>
+ (length*2 > SIZE_MAX - 1) => (length > (SIZE_MAX - 1)/2) */
+ if (length > (SIZE_MAX - 1)/2) /* check for overflow */
+ return NULL;
+
+ /* this should be enough even if all characters must be escaped */
+ new_length = (length * 2) + 1;
+
+ new_string = new_cp = tor_malloc(new_length);
+
+ while (*string) {
+ if (strchr(chars_to_escape, *string))
+ *new_cp++ = '\\';
+
+ *new_cp++ = *string++;
+ }
+
+ *new_cp = '\0'; /* NUL-terminate the new string */
+
+ return new_string;
+}
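+
+/* For instance, escaping the hypothetical string "ab;c\d" with
+ * chars_to_escape ";\" (semicolon and backslash) returns "ab\;c\\d". */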
diff --git a/src/feature/client/transports.h b/src/feature/client/transports.h
new file mode 100644
index 0000000000..d304dcd485
--- /dev/null
+++ b/src/feature/client/transports.h
@@ -0,0 +1,147 @@
+/* Copyright (c) 2003-2004, Roger Dingledine
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file transports.h
+ * \brief Headers for transports.c
+ **/
+
+#ifndef TOR_TRANSPORTS_H
+#define TOR_TRANSPORTS_H
+
+/** Represents a pluggable transport used by a bridge. */
+typedef struct transport_t {
+ /** SOCKS version: One of PROXY_SOCKS4, PROXY_SOCKS5. */
+ int socks_version;
+ /** Name of pluggable transport protocol */
+ char *name;
+ /** The IP address where the transport bound and is waiting for
+ * connections. */
+ tor_addr_t addr;
+ /** Port of proxy */
+ uint16_t port;
+ /** Boolean: We are re-parsing our transport list, and we are going to remove
+ * this one if we don't find it in the list of configured transports. */
+ unsigned marked_for_removal : 1;
+ /** Arguments for this transport that must be written to the
+ extra-info descriptor. */
+ char *extra_info_args;
+} transport_t;
+
+void mark_transport_list(void);
+void sweep_transport_list(void);
+MOCK_DECL(int, transport_add_from_config,
+ (const tor_addr_t *addr, uint16_t port,
+ const char *name, int socks_ver));
+void transport_free_(transport_t *transport);
+#define transport_free(tr) FREE_AND_NULL(transport_t, transport_free_, (tr))
+
+MOCK_DECL(transport_t*, transport_get_by_name, (const char *name));
+
+MOCK_DECL(void, pt_kickstart_proxy,
+ (const smartlist_t *transport_list, char **proxy_argv,
+ int is_server));
+
+#define pt_kickstart_client_proxy(tl, pa) \
+ pt_kickstart_proxy(tl, pa, 0)
+#define pt_kickstart_server_proxy(tl, pa) \
+ pt_kickstart_proxy(tl, pa, 1)
+
+void pt_configure_remaining_proxies(void);
+
+int pt_proxies_configuration_pending(void);
+
+char *pt_get_extra_info_descriptor_string(void);
+
+void pt_free_all(void);
+
+void pt_prepare_proxy_list_for_config_read(void);
+void sweep_proxy_list(void);
+
+smartlist_t *get_transport_proxy_ports(void);
+char *pt_stringify_socks_args(const smartlist_t *socks_args);
+
+char *pt_get_socks_args_for_proxy_addrport(const tor_addr_t *addr,
+ uint16_t port);
+
+char *tor_escape_str_for_pt_args(const char *string,
+ const char *chars_to_escape);
+
+#ifdef PT_PRIVATE
+/** State of the managed proxy configuration protocol. */
+enum pt_proto_state {
+ PT_PROTO_INFANT, /* was just born */
+ PT_PROTO_LAUNCHED, /* was just launched */
+ PT_PROTO_ACCEPTING_METHODS, /* accepting methods */
+ PT_PROTO_CONFIGURED, /* configured successfully */
+ PT_PROTO_COMPLETED, /* configured and registered its transports */
+ PT_PROTO_BROKEN, /* broke during the protocol */
+ PT_PROTO_FAILED_LAUNCH /* failed while launching */
+};
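+
+/* Typical progression (see transports.c): PT_PROTO_INFANT ->
+ * PT_PROTO_LAUNCHED -> PT_PROTO_ACCEPTING_METHODS -> PT_PROTO_CONFIGURED
+ * -> PT_PROTO_COMPLETED, with PT_PROTO_FAILED_LAUNCH reached when the
+ * proxy fails to launch and PT_PROTO_BROKEN reached when it violates the
+ * configuration protocol. */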
+
+struct process_handle_t;
+
+/** Structure containing information of a managed proxy. */
+typedef struct {
+ enum pt_proto_state conf_state; /* the current configuration state */
+ char **argv; /* the cli arguments of this proxy */
+ int conf_protocol; /* the configuration protocol version used */
+
+ char *proxy_uri; /* the outgoing proxy in TOR_PT_PROXY URI format */
+ unsigned int proxy_supported : 1; /* the proxy honors TOR_PT_PROXY */
+
+ int is_server; /* is it a server proxy? */
+
+ /* A pointer to the process handle of this managed proxy. */
+ struct process_handle_t *process_handle;
+
+ int pid; /* The Process ID this managed proxy is using. */
+
+ /** Boolean: We are re-parsing our config, and we are going to
+ * remove this managed proxy if we don't find any transport
+ * plugins that use it. */
+ unsigned int marked_for_removal : 1;
+
+ /** Boolean: We got a SIGHUP while this proxy was running. We use
+ * this flag to signify that this proxy might need to be restarted
+ * so that it can listen for other transports according to the new
+ * torrc. */
+ unsigned int was_around_before_config_read : 1;
+
+ /* transports to-be-launched by this proxy */
+ smartlist_t *transports_to_launch;
+
+ /* The 'transports' list contains all the transports this proxy has
+ launched. */
+ smartlist_t *transports;
+} managed_proxy_t;
+
+STATIC transport_t *transport_new(const tor_addr_t *addr, uint16_t port,
+ const char *name, int socks_ver,
+ const char *extra_info_args);
+STATIC int parse_cmethod_line(const char *line, managed_proxy_t *mp);
+STATIC int parse_smethod_line(const char *line, managed_proxy_t *mp);
+
+STATIC int parse_version(const char *line, managed_proxy_t *mp);
+STATIC void parse_env_error(const char *line);
+STATIC void parse_proxy_error(const char *line);
+STATIC void handle_proxy_line(const char *line, managed_proxy_t *mp);
+STATIC char *get_transport_options_for_server_proxy(const managed_proxy_t *mp);
+
+STATIC void managed_proxy_destroy(managed_proxy_t *mp,
+ int also_terminate_process);
+
+STATIC managed_proxy_t *managed_proxy_create(const smartlist_t *transport_list,
+ char **proxy_argv, int is_server);
+
+STATIC int configure_proxy(managed_proxy_t *mp);
+
+STATIC char* get_pt_proxy_uri(void);
+
+STATIC void free_execve_args(char **arg);
+
+#endif /* defined(PT_PRIVATE) */
+
+#endif /* !defined(TOR_TRANSPORTS_H) */