Diffstat (limited to 'src/or')
-rw-r--r--  src/or/buffers.c          28
-rw-r--r--  src/or/channel.c           4
-rw-r--r--  src/or/channel.h           2
-rw-r--r--  src/or/circuitbuild.c      2
-rw-r--r--  src/or/circuitlist.c       6
-rw-r--r--  src/or/circuitmux.c       20
-rw-r--r--  src/or/circuitmux_ewma.c   8
-rw-r--r--  src/or/config.c          138
-rw-r--r--  src/or/connection.c      230
-rw-r--r--  src/or/connection.h       67
-rw-r--r--  src/or/control.c           8
-rw-r--r--  src/or/directory.c       479
-rw-r--r--  src/or/directory.h        31
-rw-r--r--  src/or/dirserv.c           4
-rw-r--r--  src/or/dns.c               6
-rw-r--r--  src/or/entrynodes.c        2
-rw-r--r--  src/or/eventdns_tor.h      3
-rw-r--r--  src/or/fp_pair.c           4
-rw-r--r--  src/or/geoip.c             4
-rw-r--r--  src/or/hibernate.c         2
-rw-r--r--  src/or/include.am          3
-rw-r--r--  src/or/keypin.c            8
-rw-r--r--  src/or/main.c             24
-rw-r--r--  src/or/microdesc.c         4
-rw-r--r--  src/or/networkstatus.c   358
-rw-r--r--  src/or/networkstatus.h     8
-rw-r--r--  src/or/nodelist.c         10
-rw-r--r--  src/or/or.h              157
-rw-r--r--  src/or/policies.c          2
-rw-r--r--  src/or/relay.c             4
-rw-r--r--  src/or/rendcommon.h        2
-rw-r--r--  src/or/rephist.c           6
-rw-r--r--  src/or/routerlist.c       47
-rw-r--r--  src/or/routerlist.h        9
-rw-r--r--  src/or/routerparse.c       6
-rw-r--r--  src/or/transports.c        6
36 files changed, 1403 insertions(+), 299 deletions(-)
diff --git a/src/or/buffers.c b/src/or/buffers.c
index cc2f6f409b..4696bec8f4 100644
--- a/src/or/buffers.c
+++ b/src/or/buffers.c
@@ -78,7 +78,7 @@ static int parse_socks_client(const uint8_t *data, size_t datalen,
/** Return the next character in <b>chunk</b> onto which data can be appended.
* If the chunk is full, this might be off the end of chunk->mem. */
-static INLINE char *
+static inline char *
CHUNK_WRITE_PTR(chunk_t *chunk)
{
return chunk->data + chunk->datalen;
@@ -86,7 +86,7 @@ CHUNK_WRITE_PTR(chunk_t *chunk)
/** Return the number of bytes that can be written onto <b>chunk</b> without
* running out of space. */
-static INLINE size_t
+static inline size_t
CHUNK_REMAINING_CAPACITY(const chunk_t *chunk)
{
return (chunk->mem + chunk->memlen) - (chunk->data + chunk->datalen);
@@ -94,7 +94,7 @@ CHUNK_REMAINING_CAPACITY(const chunk_t *chunk)
/** Move all bytes stored in <b>chunk</b> to the front of <b>chunk</b>->mem,
* to free up space at the end. */
-static INLINE void
+static inline void
chunk_repack(chunk_t *chunk)
{
if (chunk->datalen && chunk->data != &chunk->mem[0]) {
@@ -118,7 +118,7 @@ chunk_free_unchecked(chunk_t *chunk)
total_bytes_allocated_in_chunks -= CHUNK_ALLOC_SIZE(chunk->memlen);
tor_free(chunk);
}
-static INLINE chunk_t *
+static inline chunk_t *
chunk_new_with_alloc_size(size_t alloc)
{
chunk_t *ch;
@@ -136,7 +136,7 @@ chunk_new_with_alloc_size(size_t alloc)
/** Expand <b>chunk</b> until it can hold <b>sz</b> bytes, and return a
* new pointer to <b>chunk</b>. Old pointers are no longer valid. */
-static INLINE chunk_t *
+static inline chunk_t *
chunk_grow(chunk_t *chunk, size_t sz)
{
off_t offset;
@@ -165,7 +165,7 @@ chunk_grow(chunk_t *chunk, size_t sz)
/** Return the allocation size we'd like to use to hold <b>target</b>
* bytes. */
-static INLINE size_t
+static inline size_t
preferred_chunk_size(size_t target)
{
size_t sz = MIN_CHUNK_ALLOC;
@@ -255,7 +255,7 @@ buf_get_first_chunk_data(const buf_t *buf, const char **cp, size_t *sz)
#endif
/** Remove the first <b>n</b> bytes from buf. */
-static INLINE void
+static inline void
buf_remove_from_front(buf_t *buf, size_t n)
{
tor_assert(buf->datalen >= n);
@@ -452,7 +452,7 @@ buf_get_total_allocation(void)
* <b>chunk</b> (which must be on <b>buf</b>). If we get an EOF, set
* *<b>reached_eof</b> to 1. Return -1 on error, 0 on eof or blocking,
* and the number of bytes read otherwise. */
-static INLINE int
+static inline int
read_to_chunk(buf_t *buf, chunk_t *chunk, tor_socket_t fd, size_t at_most,
int *reached_eof, int *socket_error)
{
@@ -488,7 +488,7 @@ read_to_chunk(buf_t *buf, chunk_t *chunk, tor_socket_t fd, size_t at_most,
/** As read_to_chunk(), but return (negative) error code on error, blocking,
* or TLS, and the number of bytes read otherwise. */
-static INLINE int
+static inline int
read_to_chunk_tls(buf_t *buf, chunk_t *chunk, tor_tls_t *tls,
size_t at_most)
{
@@ -611,7 +611,7 @@ read_to_buf_tls(tor_tls_t *tls, size_t at_most, buf_t *buf)
* the bytes written from *<b>buf_flushlen</b>. Return the number of bytes
* written on success, 0 on blocking, -1 on failure.
*/
-static INLINE int
+static inline int
flush_chunk(tor_socket_t s, buf_t *buf, chunk_t *chunk, size_t sz,
size_t *buf_flushlen)
{
@@ -646,7 +646,7 @@ flush_chunk(tor_socket_t s, buf_t *buf, chunk_t *chunk, size_t sz,
* bytes written from *<b>buf_flushlen</b>. Return the number of bytes
* written on success, and a TOR_TLS error code on failure or blocking.
*/
-static INLINE int
+static inline int
flush_chunk_tls(tor_tls_t *tls, buf_t *buf, chunk_t *chunk,
size_t sz, size_t *buf_flushlen)
{
@@ -797,7 +797,7 @@ write_to_buf(const char *string, size_t string_len, buf_t *buf)
/** Helper: copy the first <b>string_len</b> bytes from <b>buf</b>
* onto <b>string</b>.
*/
-static INLINE void
+static inline void
peek_from_buf(char *string, size_t string_len, const buf_t *buf)
{
chunk_t *chunk;
@@ -842,7 +842,7 @@ fetch_from_buf(char *string, size_t string_len, buf_t *buf)
/** True iff the cell command <b>command</b> is one that implies a
* variable-length cell in Tor link protocol <b>linkproto</b>. */
-static INLINE int
+static inline int
cell_command_is_var_length(uint8_t command, int linkproto)
{
/* If linkproto is v2 (2), CELL_VERSIONS is the only variable-length cells
@@ -1083,7 +1083,7 @@ buf_find_pos_of_char(char ch, buf_pos_t *out)
/** Advance <b>pos</b> by a single character, if there are any more characters
* in the buffer. Returns 0 on success, -1 on failure. */
-static INLINE int
+static inline int
buf_pos_inc(buf_pos_t *pos)
{
++pos->pos;
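
Aside: the buffers.c hunks above (and the matching ones in the files that follow) are a mechanical sweep replacing the INLINE compatibility macro with the plain C99 inline keyword. A minimal sketch of the kind of shim such a macro typically wraps; the exact definition Tor used lives in its compat headers and is not part of this diff, so the MSVC branch below is an assumption:

#include <stddef.h>

/* Hypothetical compatibility shim of the sort an INLINE macro provides;
 * once C99 support is assumed everywhere, the keyword itself works and
 * the indirection can be dropped. */
#if defined(_MSC_VER) && !defined(__cplusplus)
#define INLINE __inline      /* older MSVC spelling (assumption) */
#else
#define INLINE inline        /* C99 and later */
#endif

/* After the sweep, call sites simply use the keyword directly: */
static inline size_t
chunk_capacity_example(size_t memlen, size_t used)
{
  return memlen - used;      /* illustrative only */
}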
diff --git a/src/or/channel.c b/src/or/channel.c
index 21522a5303..46e833854b 100644
--- a/src/or/channel.c
+++ b/src/or/channel.c
@@ -127,13 +127,13 @@ typedef struct channel_idmap_entry_s {
TOR_LIST_HEAD(channel_list_s, channel_s) channel_list;
} channel_idmap_entry_t;
-static INLINE unsigned
+static inline unsigned
channel_idmap_hash(const channel_idmap_entry_t *ent)
{
return (unsigned) siphash24g(ent->digest, DIGEST_LEN);
}
-static INLINE int
+static inline int
channel_idmap_eq(const channel_idmap_entry_t *a,
const channel_idmap_entry_t *b)
{
diff --git a/src/or/channel.h b/src/or/channel.h
index 2b38ca7e19..5fa2aa8ab7 100644
--- a/src/or/channel.h
+++ b/src/or/channel.h
@@ -531,7 +531,7 @@ channel_t * channel_next_with_digest(channel_t *chan);
CHANNEL_IS_OPEN(chan) || \
CHANNEL_IS_MAINT(chan))
-static INLINE int
+static inline int
channel_is_in_state(channel_t *chan, channel_state_t state)
{
return chan->state == state;
diff --git a/src/or/circuitbuild.c b/src/or/circuitbuild.c
index 933d70bd8b..719d27caa9 100644
--- a/src/or/circuitbuild.c
+++ b/src/or/circuitbuild.c
@@ -745,7 +745,7 @@ inform_testing_reachability(void)
/** Return true iff we should send a create_fast cell to start building a given
* circuit */
-static INLINE int
+static inline int
should_use_create_fast_for_circuit(origin_circuit_t *circ)
{
const or_options_t *options = get_options();
diff --git a/src/or/circuitlist.c b/src/or/circuitlist.c
index 15b8748158..dcbeb1e2bb 100644
--- a/src/or/circuitlist.c
+++ b/src/or/circuitlist.c
@@ -71,7 +71,7 @@ typedef struct chan_circid_circuit_map_t {
/** Helper for hash tables: compare the channel and circuit ID for a and
* b, and return less than, equal to, or greater than zero appropriately.
*/
-static INLINE int
+static inline int
chan_circid_entries_eq_(chan_circid_circuit_map_t *a,
chan_circid_circuit_map_t *b)
{
@@ -80,7 +80,7 @@ chan_circid_entries_eq_(chan_circid_circuit_map_t *a,
/** Helper: return a hash based on circuit ID and the pointer value of
* chan in <b>a</b>. */
-static INLINE unsigned int
+static inline unsigned int
chan_circid_entry_hash_(chan_circid_circuit_map_t *a)
{
/* Try to squeeze the siphash input into 8 bytes to save any extra siphash
@@ -1049,7 +1049,7 @@ circuit_get_by_global_id(uint32_t id)
* If <b>found_entry_out</b> is provided, set it to true if we have a
* placeholder entry for circid/chan, and leave it unset otherwise.
*/
-static INLINE circuit_t *
+static inline circuit_t *
circuit_get_by_circid_channel_impl(circid_t circ_id, channel_t *chan,
int *found_entry_out)
{
diff --git a/src/or/circuitmux.c b/src/or/circuitmux.c
index a77bffac90..94d1eb66e3 100644
--- a/src/or/circuitmux.c
+++ b/src/or/circuitmux.c
@@ -186,10 +186,10 @@ struct chanid_circid_muxinfo_t {
* Static function declarations
*/
-static INLINE int
+static inline int
chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
chanid_circid_muxinfo_t *b);
-static INLINE unsigned int
+static inline unsigned int
chanid_circid_entry_hash(chanid_circid_muxinfo_t *a);
static chanid_circid_muxinfo_t *
circuitmux_find_map_entry(circuitmux_t *cmux, circuit_t *circ);
@@ -199,12 +199,12 @@ circuitmux_make_circuit_active(circuitmux_t *cmux, circuit_t *circ,
static void
circuitmux_make_circuit_inactive(circuitmux_t *cmux, circuit_t *circ,
cell_direction_t direction);
-static INLINE void
+static inline void
circuitmux_move_active_circ_to_tail(circuitmux_t *cmux, circuit_t *circ,
cell_direction_t direction);
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_next_active_circ_p(circuitmux_t *cmux, circuit_t *circ);
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_prev_active_circ_p(circuitmux_t *cmux, circuit_t *circ);
static void circuitmux_assert_okay_pass_one(circuitmux_t *cmux);
static void circuitmux_assert_okay_pass_two(circuitmux_t *cmux);
@@ -226,7 +226,7 @@ static int64_t global_destroy_ctr = 0;
* used by circuitmux_notify_xmit_cells().
*/
-static INLINE void
+static inline void
circuitmux_move_active_circ_to_tail(circuitmux_t *cmux, circuit_t *circ,
cell_direction_t direction)
{
@@ -306,7 +306,7 @@ circuitmux_move_active_circ_to_tail(circuitmux_t *cmux, circuit_t *circ,
circuitmux_assert_okay_paranoid(cmux);
}
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_next_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
{
tor_assert(cmux);
@@ -319,7 +319,7 @@ circuitmux_next_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
}
}
-static INLINE circuit_t **
+static inline circuit_t **
circuitmux_prev_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
{
tor_assert(cmux);
@@ -338,7 +338,7 @@ circuitmux_prev_active_circ_p(circuitmux_t *cmux, circuit_t *circ)
* than zero appropriately.
*/
-static INLINE int
+static inline int
chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
chanid_circid_muxinfo_t *b)
{
@@ -349,7 +349,7 @@ chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
* Helper: return a hash based on circuit ID and channel ID in a.
*/
-static INLINE unsigned int
+static inline unsigned int
chanid_circid_entry_hash(chanid_circid_muxinfo_t *a)
{
return (((unsigned int)(a->circ_id) << 8) ^
diff --git a/src/or/circuitmux_ewma.c b/src/or/circuitmux_ewma.c
index 1c0318de06..0c61fb2ec4 100644
--- a/src/or/circuitmux_ewma.c
+++ b/src/or/circuitmux_ewma.c
@@ -115,7 +115,7 @@ TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *);
* if the cast is impossible.
*/
-static INLINE ewma_policy_data_t *
+static inline ewma_policy_data_t *
TO_EWMA_POL_DATA(circuitmux_policy_data_t *pol)
{
if (!pol) return NULL;
@@ -130,7 +130,7 @@ TO_EWMA_POL_DATA(circuitmux_policy_data_t *pol)
* and assert if the cast is impossible.
*/
-static INLINE ewma_policy_circ_data_t *
+static inline ewma_policy_circ_data_t *
TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *pol)
{
if (!pol) return NULL;
@@ -147,7 +147,7 @@ static int compare_cell_ewma_counts(const void *p1, const void *p2);
static unsigned cell_ewma_tick_from_timeval(const struct timeval *now,
double *remainder_out);
static circuit_t * cell_ewma_to_circuit(cell_ewma_t *ewma);
-static INLINE double get_scale_factor(unsigned from_tick, unsigned to_tick);
+static inline double get_scale_factor(unsigned from_tick, unsigned to_tick);
static cell_ewma_t * pop_first_cell_ewma(ewma_policy_data_t *pol);
static void remove_cell_ewma(ewma_policy_data_t *pol, cell_ewma_t *ewma);
static void scale_single_cell_ewma(cell_ewma_t *ewma, unsigned cur_tick);
@@ -644,7 +644,7 @@ cell_ewma_set_scale_factor(const or_options_t *options,
/** Return the multiplier necessary to convert the value of a cell sent in
* 'from_tick' to one sent in 'to_tick'. */
-static INLINE double
+static inline double
get_scale_factor(unsigned from_tick, unsigned to_tick)
{
/* This math can wrap around, but that's okay: unsigned overflow is
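
For context on the get_scale_factor() change above: the EWMA cell counter decays cell counts by a fixed per-tick factor, so converting a count recorded at from_tick into to_tick units is an exponentiation. A minimal stand-alone sketch of that idea, assuming the per-tick decay constant is precomputed from a halflife; the constant Tor actually uses is derived from the circuit-priority halflife configuration and is not shown in this hunk:

#include <math.h>
#include <stdio.h>

/* Hedged sketch of the EWMA rescaling behind get_scale_factor(): a cell
 * counted ticks_elapsed ticks ago is worth decay_per_tick^ticks_elapsed
 * of a cell counted now. */
static double
example_scale_factor(unsigned from_tick, unsigned to_tick,
                     double decay_per_tick)
{
  unsigned ticks_elapsed = to_tick - from_tick; /* may wrap; assume ordered */
  return pow(decay_per_tick, (double)ticks_elapsed);
}

int
main(void)
{
  /* With a halflife of 30 ticks, a count from 30 ticks ago weighs ~0.5. */
  double decay_per_tick = pow(0.5, 1.0 / 30.0);
  printf("%.3f\n", example_scale_factor(100, 130, decay_per_tick));
  return 0;
}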
diff --git a/src/or/config.c b/src/or/config.c
index d8e9296f45..9b570323df 100644
--- a/src/or/config.c
+++ b/src/or/config.c
@@ -252,6 +252,7 @@ static config_var_t option_vars_[] = {
V(ExtORPortCookieAuthFileGroupReadable, BOOL, "0"),
V(ExtraInfoStatistics, BOOL, "1"),
V(FallbackDir, LINELIST, NULL),
+ V(UseDefaultFallbackDirs, BOOL, "1"),
OBSOLETE("FallbackNetworkstatusFile"),
V(FascistFirewall, BOOL, "0"),
@@ -309,6 +310,7 @@ static config_var_t option_vars_[] = {
V(Socks5ProxyUsername, STRING, NULL),
V(Socks5ProxyPassword, STRING, NULL),
V(KeepalivePeriod, INTERVAL, "5 minutes"),
+ V(KeepBindCapabilities, AUTOBOOL, "auto"),
VAR("Log", LINELIST, Logs, NULL),
V(LogMessageDomains, BOOL, "0"),
V(LogTimeGranularity, MSEC_INTERVAL, "1 second"),
@@ -475,10 +477,40 @@ static config_var_t option_vars_[] = {
V(TestingClientConsensusDownloadSchedule, CSV_INTERVAL, "0, 0, 60, "
"300, 600, 1800, 3600, 3600, 3600, "
"10800, 21600, 43200"),
+ /* With the TestingClientBootstrapConsensus*Download* below:
+ * Clients with only authorities will try:
+ * - 3 authorities over 10 seconds, then wait 60 minutes.
+ * Clients with authorities and fallbacks will try:
+ * - 2 authorities and 4 fallbacks over 21 seconds, then wait 60 minutes.
+ * Clients will also retry when an application request arrives.
+ * After a number of failed requests, clients retry every 3 days + 1 hour.
+ *
+ * Clients used to try 2 authorities over 10 seconds, then wait for
+ * 60 minutes or an application request.
+ *
+ * When clients have authorities and fallbacks available, they use these
+ * schedules: (we stagger the times to avoid thundering herds) */
+ V(TestingClientBootstrapConsensusAuthorityDownloadSchedule, CSV_INTERVAL,
+ "10, 11, 3600, 10800, 25200, 54000, 111600, 262800" /* 3 days + 1 hour */),
+ V(TestingClientBootstrapConsensusFallbackDownloadSchedule, CSV_INTERVAL,
+ "0, 1, 4, 11, 3600, 10800, 25200, 54000, 111600, 262800"),
+ /* When clients only have authorities available, they use this schedule: */
+ V(TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule, CSV_INTERVAL,
+ "0, 3, 7, 3600, 10800, 25200, 54000, 111600, 262800"),
+ /* We don't want to overwhelm slow networks (or mirrors whose replies are
+ * blocked), but we also don't want to fail if only some mirrors are
+ * blackholed. Clients will try 3 directories simultaneously.
+ * (Relays never use simultaneous connections.) */
+ V(TestingClientBootstrapConsensusMaxInProgressTries, UINT, "3"),
V(TestingBridgeDownloadSchedule, CSV_INTERVAL, "3600, 900, 900, 3600"),
V(TestingClientMaxIntervalWithoutRequest, INTERVAL, "10 minutes"),
V(TestingDirConnectionMaxStall, INTERVAL, "5 minutes"),
V(TestingConsensusMaxDownloadTries, UINT, "8"),
+ /* Since we try connections rapidly and simultaneously, we can afford
+ * to give up earlier. (This protects against overloading directories.) */
+ V(TestingClientBootstrapConsensusMaxDownloadTries, UINT, "7"),
+ /* We want to give up much earlier if we're only using authorities. */
+ V(TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries, UINT, "4"),
V(TestingDescriptorMaxDownloadTries, UINT, "8"),
V(TestingMicrodescMaxDownloadTries, UINT, "8"),
V(TestingCertMaxDownloadTries, UINT, "8"),
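
One way to read the new bootstrap schedules above: each CSV entry is the delay, in seconds, before the next attempt on that schedule, so the cumulative sums give the timeline that the comment summarises as "2 authorities and 4 fallbacks over 21 seconds". A small stand-alone sketch that just prints those timelines from the first few entries quoted above (the real interleaving is driven by the download machinery later in this diff):

#include <stdio.h>

/* Print the bootstrap attempt timelines implied by the first entries of
 * the two schedules above: fallbacks at 0, 1, 5 and 16 seconds, and
 * authorities at 10 and 21 seconds. */
static void
print_timeline(const char *label, const int *delays, int n)
{
  int t = 0;
  for (int i = 0; i < n; ++i) {
    t += delays[i];
    printf("t=%2ds: try %s #%d\n", t, label, i + 1);
  }
}

int
main(void)
{
  const int fallback[] = { 0, 1, 4, 11 };
  const int authority[] = { 10, 11 };
  print_timeline("fallback", fallback, 4);
  print_timeline("authority", authority, 2);
  return 0;
}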
@@ -525,10 +557,18 @@ static const config_var_t testing_tor_network_defaults[] = {
"15, 20, 30, 60"),
V(TestingClientConsensusDownloadSchedule, CSV_INTERVAL, "0, 0, 5, 10, "
"15, 20, 30, 60"),
+ V(TestingClientBootstrapConsensusAuthorityDownloadSchedule, CSV_INTERVAL,
+ "0, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 16, 32, 60"),
+ V(TestingClientBootstrapConsensusFallbackDownloadSchedule, CSV_INTERVAL,
+ "0, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 16, 32, 60"),
+ V(TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule, CSV_INTERVAL,
+ "0, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 16, 32, 60"),
V(TestingBridgeDownloadSchedule, CSV_INTERVAL, "60, 30, 30, 60"),
V(TestingClientMaxIntervalWithoutRequest, INTERVAL, "5 seconds"),
V(TestingDirConnectionMaxStall, INTERVAL, "30 seconds"),
V(TestingConsensusMaxDownloadTries, UINT, "80"),
+ V(TestingClientBootstrapConsensusMaxDownloadTries, UINT, "80"),
+ V(TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries, UINT, "80"),
V(TestingDescriptorMaxDownloadTries, UINT, "80"),
V(TestingMicrodescMaxDownloadTries, UINT, "80"),
V(TestingCertMaxDownloadTries, UINT, "80"),
@@ -564,7 +604,8 @@ static int parse_ports(or_options_t *options, int validate_only,
char **msg_out, int *n_ports_out,
int *world_writable_control_socket);
static int check_server_ports(const smartlist_t *ports,
- const or_options_t *options);
+ const or_options_t *options,
+ int *num_low_ports_out);
static int validate_data_directory(or_options_t *options);
static int write_configuration_file(const char *fname,
@@ -994,6 +1035,7 @@ consider_adding_dir_servers(const or_options_t *options,
!smartlist_len(router_get_fallback_dir_servers()) || !old_options ||
!config_lines_eq(options->DirAuthorities, old_options->DirAuthorities) ||
!config_lines_eq(options->FallbackDir, old_options->FallbackDir) ||
+ (options->UseDefaultFallbackDirs != old_options->UseDefaultFallbackDirs) ||
!config_lines_eq(options->AlternateBridgeAuthority,
old_options->AlternateBridgeAuthority) ||
!config_lines_eq(options->AlternateDirAuthority,
@@ -1022,8 +1064,8 @@ consider_adding_dir_servers(const or_options_t *options,
type |= V3_DIRINFO | EXTRAINFO_DIRINFO | MICRODESC_DIRINFO;
/* Only add the default fallback directories when the DirAuthorities,
* AlternateDirAuthority, and FallbackDir directory config options
- * are set to their defaults. */
- if (!options->FallbackDir) {
+ * are set to their defaults, and when UseDefaultFallbackDirs is 1. */
+ if (!options->FallbackDir && options->UseDefaultFallbackDirs) {
add_default_fallback_dir_servers();
}
}
@@ -1048,6 +1090,9 @@ consider_adding_dir_servers(const or_options_t *options,
return 0;
}
+/* Helps determine flags to pass to switch_id. */
+static int have_low_ports = -1;
+
/** Fetch the active option list, and take actions based on it. All of the
* things we do should survive being done repeatedly. If present,
* <b>old_options</b> contains the previous value of the options.
@@ -1182,7 +1227,16 @@ options_act_reversible(const or_options_t *old_options, char **msg)
/* Setuid/setgid as appropriate */
if (options->User) {
- if (switch_id(options->User) != 0) {
+ tor_assert(have_low_ports != -1);
+ unsigned switch_id_flags = 0;
+ if (options->KeepBindCapabilities == 1) {
+ switch_id_flags |= SWITCH_ID_KEEP_BINDLOW;
+ switch_id_flags |= SWITCH_ID_WARN_IF_NO_CAPS;
+ }
+ if (options->KeepBindCapabilities == -1 && have_low_ports) {
+ switch_id_flags |= SWITCH_ID_KEEP_BINDLOW;
+ }
+ if (switch_id(options->User, switch_id_flags) != 0) {
/* No need to roll back, since you can't change the value. */
*msg = tor_strdup("Problem with User value. See logs for details.");
goto done;
@@ -3550,6 +3604,13 @@ options_validate(or_options_t *old_options, or_options_t *options,
if (validate_addr_policies(options, msg) < 0)
return -1;
+ /* If FallbackDir is set, we don't UseDefaultFallbackDirs */
+ if (options->UseDefaultFallbackDirs && options->FallbackDir) {
+ log_info(LD_CONFIG, "You have set UseDefaultFallbackDirs 1 and "
+ "FallbackDir(s). Ignoring UseDefaultFallbackDirs, and "
+ "using the FallbackDir(s) you have set.");
+ }
+
if (validate_dir_servers(options, old_options) < 0)
REJECT("Directory authority/fallback line did not parse. See logs "
"for details.");
@@ -3752,10 +3813,16 @@ options_validate(or_options_t *old_options, or_options_t *options,
CHECK_DEFAULT(TestingClientDownloadSchedule);
CHECK_DEFAULT(TestingServerConsensusDownloadSchedule);
CHECK_DEFAULT(TestingClientConsensusDownloadSchedule);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusAuthorityDownloadSchedule);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusFallbackDownloadSchedule);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule);
CHECK_DEFAULT(TestingBridgeDownloadSchedule);
CHECK_DEFAULT(TestingClientMaxIntervalWithoutRequest);
CHECK_DEFAULT(TestingDirConnectionMaxStall);
CHECK_DEFAULT(TestingConsensusMaxDownloadTries);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusMaxDownloadTries);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries);
+ CHECK_DEFAULT(TestingClientBootstrapConsensusMaxInProgressTries);
CHECK_DEFAULT(TestingDescriptorMaxDownloadTries);
CHECK_DEFAULT(TestingMicrodescMaxDownloadTries);
CHECK_DEFAULT(TestingCertMaxDownloadTries);
@@ -3830,11 +3897,41 @@ options_validate(or_options_t *old_options, or_options_t *options,
}
if (options->TestingConsensusMaxDownloadTries < 2) {
- REJECT("TestingConsensusMaxDownloadTries must be greater than 1.");
+ REJECT("TestingConsensusMaxDownloadTries must be greater than 2.");
} else if (options->TestingConsensusMaxDownloadTries > 800) {
COMPLAIN("TestingConsensusMaxDownloadTries is insanely high.");
}
+ if (options->TestingClientBootstrapConsensusMaxDownloadTries < 2) {
+ REJECT("TestingClientBootstrapConsensusMaxDownloadTries must be greater "
+ "than 2."
+ );
+ } else if (options->TestingClientBootstrapConsensusMaxDownloadTries > 800) {
+ COMPLAIN("TestingClientBootstrapConsensusMaxDownloadTries is insanely "
+ "high.");
+ }
+
+ if (options->TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries
+ < 2) {
+ REJECT("TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries must "
+ "be greater than 2."
+ );
+ } else if (
+ options->TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries
+ > 800) {
+ COMPLAIN("TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries is "
+ "insanely high.");
+ }
+
+ if (options->TestingClientBootstrapConsensusMaxInProgressTries < 1) {
+ REJECT("TestingClientBootstrapConsensusMaxInProgressTries must be greater "
+ "than 0.");
+ } else if (options->TestingClientBootstrapConsensusMaxInProgressTries
+ > 100) {
+ COMPLAIN("TestingClientBootstrapConsensusMaxInProgressTries is insanely "
+ "high.");
+ }
+
if (options->TestingDescriptorMaxDownloadTries < 2) {
REJECT("TestingDescriptorMaxDownloadTries must be greater than 1.");
} else if (options->TestingDescriptorMaxDownloadTries > 800) {
@@ -4014,6 +4111,12 @@ options_transition_allowed(const or_options_t *old,
return -1;
}
+ if (old->KeepBindCapabilities != new_val->KeepBindCapabilities) {
+ *msg = tor_strdup("While Tor is running, changing KeepBindCapabilities is "
+ "not allowed.");
+ return -1;
+ }
+
if (!opt_streq(old->SyslogIdentityTag, new_val->SyslogIdentityTag)) {
*msg = tor_strdup("While Tor is running, changing "
"SyslogIdentityTag is not allowed.");
@@ -6584,10 +6687,13 @@ parse_ports(or_options_t *options, int validate_only,
}
}
- if (check_server_ports(ports, options) < 0) {
+ int n_low_ports = 0;
+ if (check_server_ports(ports, options, &n_low_ports) < 0) {
*msg = tor_strdup("Misconfigured server ports");
goto err;
}
+ if (have_low_ports < 0)
+ have_low_ports = (n_low_ports > 0);
*n_ports_out = smartlist_len(ports);
@@ -6641,10 +6747,12 @@ parse_ports(or_options_t *options, int validate_only,
}
/** Given a list of <b>port_cfg_t</b> in <b>ports</b>, check them for internal
- * consistency and warn as appropriate. */
+ * consistency and warn as appropriate. Set *<b>n_low_ports_out</b> to the
+ * number of sub-1024 ports we will be binding. */
static int
check_server_ports(const smartlist_t *ports,
- const or_options_t *options)
+ const or_options_t *options,
+ int *n_low_ports_out)
{
int n_orport_advertised = 0;
int n_orport_advertised_ipv4 = 0;
@@ -6707,16 +6815,24 @@ check_server_ports(const smartlist_t *ports,
r = -1;
}
- if (n_low_port && options->AccountingMax) {
+ if (n_low_port && options->AccountingMax &&
+ (!have_capability_support() || options->KeepBindCapabilities == 0)) {
+ const char *extra = "";
+ if (options->KeepBindCapabilities == 0 && have_capability_support())
+ extra = ", and you have disabled KeepBindCapabilities.";
log_warn(LD_CONFIG,
"You have set AccountingMax to use hibernation. You have also "
- "chosen a low DirPort or OrPort. This combination can make Tor stop "
+ "chosen a low DirPort or OrPort%s."
+ "This combination can make Tor stop "
"working when it tries to re-attach the port after a period of "
"hibernation. Please choose a different port or turn off "
"hibernation unless you know this combination will work on your "
- "platform.");
+ "platform.", extra);
}
+ if (n_low_ports_out)
+ *n_low_ports_out = n_low_port;
+
return r;
}
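
A note on the "low port" count that check_server_ports() now reports back: on typical Unix systems, binding a port below 1024 needs root or, on Linux, the CAP_NET_BIND_SERVICE capability, which is why have_low_ports feeds the SWITCH_ID_KEEP_BINDLOW flag when User is set. A trivial sketch of the threshold being counted; the 1024 cutoff is the conventional privileged-port boundary, not a value taken from this diff:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: what "low port" means for the n_low_ports_out count. */
static bool
is_low_port(uint16_t port)
{
  return port > 0 && port < 1024;
}

int
main(void)
{
  printf("DirPort 80 low?  %d\n", is_low_port(80));    /* 1 */
  printf("ORPort 9001 low? %d\n", is_low_port(9001));  /* 0 */
  return 0;
}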
diff --git a/src/or/connection.c b/src/or/connection.c
index bff994d385..7df02b538c 100644
--- a/src/or/connection.c
+++ b/src/or/connection.c
@@ -1618,13 +1618,18 @@ connection_init_accepted_conn(connection_t *conn,
return 0;
}
-static int
-connection_connect_sockaddr(connection_t *conn,
+/** Take conn, make a nonblocking socket; try to connect to
+ * sa, binding to bindaddr if sa is not localhost. If fail, return -1 and if
+ * applicable put your best guess about errno into *<b>socket_error</b>.
+ * If connected return 1, if EAGAIN return 0.
+ */
+MOCK_IMPL(STATIC int,
+connection_connect_sockaddr,(connection_t *conn,
const struct sockaddr *sa,
socklen_t sa_len,
const struct sockaddr *bindaddr,
socklen_t bindaddr_len,
- int *socket_error)
+ int *socket_error))
{
tor_socket_t s;
int inprogress = 0;
@@ -4222,6 +4227,19 @@ connection_write_to_buf_impl_,(const char *string, size_t len,
}
}
+/** Return a connection_t * from get_connection_array() that satisfies test on
+ * var, and that is not marked for close. */
+#define CONN_GET_TEMPLATE(var, test) \
+ STMT_BEGIN \
+ smartlist_t *conns = get_connection_array(); \
+ SMARTLIST_FOREACH(conns, connection_t *, var, \
+ { \
+ if (var && (test) && !var->marked_for_close) \
+ return var; \
+ }); \
+ return NULL; \
+ STMT_END
+
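+
A word on the CONN_GET_TEMPLATE macro added above: it expands to a loop plus a return statement inside whichever lookup function invokes it, so each thin wrapper that follows only supplies its predicate. A self-contained sketch of the same pattern with made-up names (none of these identifiers are Tor's):

#include <stddef.h>
#include <stdio.h>

typedef struct item_t { int type; int closed; } item_t;

/* Statement-style search template: expands inside the caller and
 * returns from it, just like CONN_GET_TEMPLATE does. */
#define FIND_ITEM_TEMPLATE(arr, n, var, test)   \
  do {                                          \
    for (size_t i = 0; i < (n); ++i) {          \
      item_t *var = &(arr)[i];                  \
      if ((test) && !var->closed)               \
        return var;                             \
    }                                           \
    return NULL;                                \
  } while (0)

static item_t *
find_item_by_type(item_t *items, size_t n, int type)
{
  FIND_ITEM_TEMPLATE(items, n, it, it->type == type);
}

int
main(void)
{
  item_t items[] = { { 1, 1 }, { 2, 0 } };
  printf("found type 2: %s\n",
         find_item_by_type(items, 2, 2) ? "yes" : "no");
  return 0;
}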
/** Return a connection with given type, address, port, and purpose;
* or NULL if no such connection exists (or if all such connections are marked
* for close). */
@@ -4230,17 +4248,11 @@ connection_get_by_type_addr_port_purpose(int type,
const tor_addr_t *addr, uint16_t port,
int purpose)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->type == type &&
+ CONN_GET_TEMPLATE(conn,
+ (conn->type == type &&
tor_addr_eq(&conn->addr, addr) &&
conn->port == port &&
- conn->purpose == purpose &&
- !conn->marked_for_close)
- return conn;
- });
- return NULL;
+ conn->purpose == purpose));
}
/** Return the stream with id <b>id</b> if it is not already marked for
@@ -4249,13 +4261,7 @@ connection_get_by_type_addr_port_purpose(int type,
connection_t *
connection_get_by_global_id(uint64_t id)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->global_identifier == id)
- return conn;
- });
- return NULL;
+ CONN_GET_TEMPLATE(conn, conn->global_identifier == id);
}
/** Return a connection of type <b>type</b> that is not marked for close.
@@ -4263,13 +4269,7 @@ connection_get_by_global_id(uint64_t id)
connection_t *
connection_get_by_type(int type)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->type == type && !conn->marked_for_close)
- return conn;
- });
- return NULL;
+ CONN_GET_TEMPLATE(conn, conn->type == type);
}
/** Return a connection of type <b>type</b> that is in state <b>state</b>,
@@ -4278,13 +4278,7 @@ connection_get_by_type(int type)
connection_t *
connection_get_by_type_state(int type, int state)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH(conns, connection_t *, conn,
- {
- if (conn->type == type && conn->state == state && !conn->marked_for_close)
- return conn;
- });
- return NULL;
+ CONN_GET_TEMPLATE(conn, conn->type == type && conn->state == state);
}
/** Return a connection of type <b>type</b> that has rendquery equal
@@ -4295,55 +4289,142 @@ connection_t *
connection_get_by_type_state_rendquery(int type, int state,
const char *rendquery)
{
- smartlist_t *conns = get_connection_array();
-
tor_assert(type == CONN_TYPE_DIR ||
type == CONN_TYPE_AP || type == CONN_TYPE_EXIT);
tor_assert(rendquery);
- SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
- if (conn->type == type &&
- !conn->marked_for_close &&
- (!state || state == conn->state)) {
- if (type == CONN_TYPE_DIR &&
+ CONN_GET_TEMPLATE(conn,
+ (conn->type == type &&
+ (!state || state == conn->state)) &&
+ (
+ (type == CONN_TYPE_DIR &&
TO_DIR_CONN(conn)->rend_data &&
!rend_cmp_service_ids(rendquery,
TO_DIR_CONN(conn)->rend_data->onion_address))
- return conn;
- else if (CONN_IS_EDGE(conn) &&
+ ||
+ (CONN_IS_EDGE(conn) &&
TO_EDGE_CONN(conn)->rend_data &&
!rend_cmp_service_ids(rendquery,
TO_EDGE_CONN(conn)->rend_data->onion_address))
- return conn;
- }
- } SMARTLIST_FOREACH_END(conn);
- return NULL;
+ ));
}
+#define CONN_FIRST_AND_FREE_TEMPLATE(sl) \
+ STMT_BEGIN \
+ if (smartlist_len(sl) > 0) { \
+ void *first_item = smartlist_get(sl, 0); \
+ smartlist_free(sl); \
+ return first_item; \
+ } else { \
+ smartlist_free(sl); \
+ return NULL; \
+ } \
+ STMT_END
+
+
/** Return a directory connection (if any one exists) that is fetching
- * the item described by <b>state</b>/<b>resource</b> */
+ * the item described by <b>purpose</b>/<b>resource</b>, otherwise return NULL.
+ */
dir_connection_t *
-connection_dir_get_by_purpose_and_resource(int purpose,
+connection_dir_get_by_purpose_and_resource(
+ int purpose,
const char *resource)
{
- smartlist_t *conns = get_connection_array();
+ smartlist_t *conns = connection_dir_list_by_purpose_and_resource(
+ purpose,
+ resource);
+ CONN_FIRST_AND_FREE_TEMPLATE(conns);
+}
- SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
- dir_connection_t *dirconn;
- if (conn->type != CONN_TYPE_DIR || conn->marked_for_close ||
- conn->purpose != purpose)
- continue;
- dirconn = TO_DIR_CONN(conn);
- if (dirconn->requested_resource == NULL) {
- if (resource == NULL)
- return dirconn;
- } else if (resource) {
- if (0 == strcmp(resource, dirconn->requested_resource))
- return dirconn;
- }
- } SMARTLIST_FOREACH_END(conn);
+/** Return a new smartlist of dir_connection_t * from get_connection_array()
+ * that satisfy conn_test on connection_t *conn_var, and dirconn_test on
+ * dir_connection_t *dirconn_var. conn_var must be of CONN_TYPE_DIR and not
+ * marked for close to be included in the list. */
+#define DIR_CONN_LIST_TEMPLATE(conn_var, conn_test, \
+ dirconn_var, dirconn_test) \
+ STMT_BEGIN \
+ smartlist_t *conns = get_connection_array(); \
+ smartlist_t *dir_conns = smartlist_new(); \
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn_var) { \
+ if (conn_var && (conn_test) \
+ && conn_var->type == CONN_TYPE_DIR \
+ && !conn_var->marked_for_close) { \
+ dir_connection_t *dirconn_var = TO_DIR_CONN(conn_var); \
+ if (dirconn_var && (dirconn_test)) { \
+ smartlist_add(dir_conns, dirconn_var); \
+ } \
+ } \
+ } SMARTLIST_FOREACH_END(conn_var); \
+ return dir_conns; \
+ STMT_END
+
+/** Return a list of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>. If there are none,
+ * return an empty list. This list must be freed using smartlist_free,
+ * but the pointers in it must not be freed.
+ * Note that this list should not be cached, as the pointers in it can be
+ * freed if their connections close. */
+smartlist_t *
+connection_dir_list_by_purpose_and_resource(
+ int purpose,
+ const char *resource)
+{
+ DIR_CONN_LIST_TEMPLATE(conn,
+ conn->purpose == purpose,
+ dirconn,
+ 0 == strcmp_opt(resource,
+ dirconn->requested_resource));
+}
- return NULL;
+/** Return a directory connection (if any one exists) that is fetching
+ * the item described by <b>purpose</b>/<b>resource</b>/<b>state</b>,
+ * otherwise return NULL. */
+dir_connection_t *
+connection_dir_get_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ smartlist_t *conns =
+ connection_dir_list_by_purpose_resource_and_state(
+ purpose,
+ resource,
+ state);
+ CONN_FIRST_AND_FREE_TEMPLATE(conns);
+}
+
+#undef CONN_FIRST_AND_FREE_TEMPLATE
+
+/** Return a list of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>/<b>state</b>. If there are
+ * none, return an empty list. This list must be freed using smartlist_free,
+ * but the pointers in it must not be freed.
+ * Note that this list should not be cached, as the pointers in it can be
+ * freed if their connections close. */
+smartlist_t *
+connection_dir_list_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ DIR_CONN_LIST_TEMPLATE(conn,
+ conn->purpose == purpose && conn->state == state,
+ dirconn,
+ 0 == strcmp_opt(resource,
+ dirconn->requested_resource));
+}
+
+#undef DIR_CONN_LIST_TEMPLATE
+
+/** Return an arbitrary active OR connection that isn't <b>this_conn</b>.
+ *
+ * We use this to guess if we should tell the controller that we
+ * didn't manage to connect to any of our bridges. */
+static connection_t *
+connection_get_another_active_or_conn(const or_connection_t *this_conn)
+{
+ CONN_GET_TEMPLATE(conn,
+ conn != TO_CONN(this_conn) && conn->type == CONN_TYPE_OR);
}
/** Return 1 if there are any active OR connections apart from
@@ -4354,23 +4435,18 @@ connection_dir_get_by_purpose_and_resource(int purpose,
int
any_other_active_or_conns(const or_connection_t *this_conn)
{
- smartlist_t *conns = get_connection_array();
- SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
- if (conn == TO_CONN(this_conn)) { /* don't consider this conn */
- continue;
- }
-
- if (conn->type == CONN_TYPE_OR &&
- !conn->marked_for_close) {
- log_debug(LD_DIR, "%s: Found an OR connection: %s",
- __func__, conn->address);
- return 1;
- }
- } SMARTLIST_FOREACH_END(conn);
+ connection_t *conn = connection_get_another_active_or_conn(this_conn);
+ if (conn != NULL) {
+ log_debug(LD_DIR, "%s: Found an OR connection: %s",
+ __func__, conn->address);
+ return 1;
+ }
return 0;
}
+#undef CONN_GET_TEMPLATE
+
/** Return 1 if <b>conn</b> is a listener conn, else return 0. */
int
connection_is_listener(connection_t *conn)
diff --git a/src/or/connection.h b/src/or/connection.h
index 48929c3d4c..59ea6d898e 100644
--- a/src/or/connection.h
+++ b/src/or/connection.h
@@ -146,12 +146,12 @@ static void connection_write_to_buf(const char *string, size_t len,
/* DOCDOC connection_write_to_buf_zlib */
static void connection_write_to_buf_zlib(const char *string, size_t len,
dir_connection_t *conn, int done);
-static INLINE void
+static inline void
connection_write_to_buf(const char *string, size_t len, connection_t *conn)
{
connection_write_to_buf_impl_(string, len, conn, 0);
}
-static INLINE void
+static inline void
connection_write_to_buf_zlib(const char *string, size_t len,
dir_connection_t *conn, int done)
{
@@ -163,7 +163,7 @@ static size_t connection_get_inbuf_len(connection_t *conn);
/* DOCDOC connection_get_outbuf_len */
static size_t connection_get_outbuf_len(connection_t *conn);
-static INLINE size_t
+static inline size_t
connection_get_inbuf_len(connection_t *conn)
{
IF_HAS_BUFFEREVENT(conn, {
@@ -173,7 +173,7 @@ connection_get_inbuf_len(connection_t *conn)
}
}
-static INLINE size_t
+static inline size_t
connection_get_outbuf_len(connection_t *conn)
{
IF_HAS_BUFFEREVENT(conn, {
@@ -193,7 +193,57 @@ connection_t *connection_get_by_type_state(int type, int state);
connection_t *connection_get_by_type_state_rendquery(int type, int state,
const char *rendquery);
dir_connection_t *connection_dir_get_by_purpose_and_resource(
- int state, const char *resource);
+ int purpose,
+ const char *resource);
+dir_connection_t *connection_dir_get_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state);
+smartlist_t *connection_dir_list_by_purpose_and_resource(
+ int purpose,
+ const char *resource);
+smartlist_t *connection_dir_list_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state);
+
+#define CONN_LEN_AND_FREE_TEMPLATE(sl) \
+ STMT_BEGIN \
+ int len = smartlist_len(sl); \
+ smartlist_free(sl); \
+ return len; \
+ STMT_END
+
+/** Return a count of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>. */
+static inline int
+connection_dir_count_by_purpose_and_resource(
+ int purpose,
+ const char *resource)
+{
+ smartlist_t *conns = connection_dir_list_by_purpose_and_resource(
+ purpose,
+ resource);
+ CONN_LEN_AND_FREE_TEMPLATE(conns);
+}
+
+/** Return a count of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>/<b>state</b>. */
+static inline int
+connection_dir_count_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ smartlist_t *conns =
+ connection_dir_list_by_purpose_resource_and_state(
+ purpose,
+ resource,
+ state);
+ CONN_LEN_AND_FREE_TEMPLATE(conns);
+}
+
+#undef CONN_LEN_AND_FREE_TEMPLATE
int any_other_active_or_conns(const or_connection_t *this_conn);
@@ -239,6 +289,13 @@ void connection_buckets_note_empty_ts(uint32_t *timestamp_var,
int tokens_before,
size_t tokens_removed,
const struct timeval *tvnow);
+MOCK_DECL(STATIC int,connection_connect_sockaddr,
+ (connection_t *conn,
+ const struct sockaddr *sa,
+ socklen_t sa_len,
+ const struct sockaddr *bindaddr,
+ socklen_t bindaddr_len,
+ int *socket_error));
#endif
#endif
diff --git a/src/or/control.c b/src/or/control.c
index 34d03befa6..66182fe2a4 100644
--- a/src/or/control.c
+++ b/src/or/control.c
@@ -192,7 +192,7 @@ static void flush_queued_events_cb(evutil_socket_t fd, short what, void *arg);
/** Given a control event code for a message event, return the corresponding
* log severity. */
-static INLINE int
+static inline int
event_to_log_severity(int event)
{
switch (event) {
@@ -206,7 +206,7 @@ event_to_log_severity(int event)
}
/** Given a log severity, return the corresponding control event code. */
-static INLINE int
+static inline int
log_severity_to_event(int severity)
{
switch (severity) {
@@ -325,7 +325,7 @@ control_event_is_interesting(int event)
/** Append a NUL-terminated string <b>s</b> to the end of
* <b>conn</b>-\>outbuf.
*/
-static INLINE void
+static inline void
connection_write_str_to_buf(const char *s, control_connection_t *conn)
{
size_t len = strlen(s);
@@ -428,7 +428,7 @@ read_escaped_data(const char *data, size_t len, char **out)
/** If the first <b>in_len_max</b> characters in <b>start</b> contain a
* double-quoted string with escaped characters, return the length of that
* string (as encoded, including quotes). Otherwise return -1. */
-static INLINE int
+static inline int
get_escaped_string_length(const char *start, size_t in_len_max,
int *chars_out)
{
diff --git a/src/or/directory.c b/src/or/directory.c
index 4e5644b854..8370095e92 100644
--- a/src/or/directory.c
+++ b/src/or/directory.c
@@ -425,14 +425,17 @@ directory_pick_generic_dirserver(dirinfo_type_t type, int pds_flags,
* Use <b>pds_flags</b> as arguments to router_pick_directory_server()
* or router_pick_trusteddirserver().
*/
-MOCK_IMPL(void, directory_get_from_dirserver, (uint8_t dir_purpose,
- uint8_t router_purpose,
- const char *resource,
- int pds_flags))
+MOCK_IMPL(void, directory_get_from_dirserver, (
+ uint8_t dir_purpose,
+ uint8_t router_purpose,
+ const char *resource,
+ int pds_flags,
+ download_want_authority_t want_authority))
{
const routerstatus_t *rs = NULL;
const or_options_t *options = get_options();
- int prefer_authority = directory_fetches_from_authorities(options);
+ int prefer_authority = (directory_fetches_from_authorities(options)
+ || want_authority == DL_WANT_AUTHORITY);
int require_authority = 0;
int get_via_tor = purpose_needs_anonymity(dir_purpose, router_purpose);
dirinfo_type_t type = dir_fetch_type(dir_purpose, router_purpose, resource);
@@ -958,6 +961,12 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
return;
}
+ /* ensure we don't make excess connections when we're already downloading
+ * a consensus during bootstrap */
+ if (connection_dir_avoid_extra_connection_for_purpose(dir_purpose)) {
+ return;
+ }
+
conn = dir_connection_new(tor_addr_family(&addr));
/* set up conn so it's got all the data we need to remember */
@@ -998,6 +1007,9 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
/* fall through */
case 0:
+ if (connection_dir_close_consensus_conn_if_extra(conn)) {
+ return;
+ }
/* queue the command on the outbuf */
directory_send_command(conn, dir_purpose, 1, resource,
payload, payload_len,
@@ -1041,6 +1053,9 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
connection_mark_for_close(TO_CONN(conn));
return;
}
+ if (connection_dir_close_consensus_conn_if_extra(conn)) {
+ return;
+ }
conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
/* queue the command on the outbuf */
directory_send_command(conn, dir_purpose, 0, resource,
@@ -3423,8 +3438,205 @@ connection_dir_finished_flushing(dir_connection_t *conn)
return 0;
}
+/* A helper function for connection_dir_close_consensus_conn_if_extra()
+ * and connection_dir_close_extra_consensus_conns() that returns 0 if
+ * we can't have, or don't want to close, excess consensus connections. */
+STATIC int
+connection_dir_would_close_consensus_conn_helper(void)
+{
+ const or_options_t *options = get_options();
+
+ /* we're only interested in closing excess connections if we could
+ * have created any in the first place */
+ if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+ return 0;
+ }
+
+ /* We want to close excess connections downloading a consensus.
+ * If there aren't any excess, we don't have anything to close. */
+ if (!networkstatus_consensus_has_excess_connections()) {
+ return 0;
+ }
+
+ /* If we have excess connections, but none of them are downloading a
+ * consensus, and we are still bootstrapping (that is, we have no usable
+ * consensus), we don't want to close any until one starts downloading. */
+ if (!networkstatus_consensus_is_downloading_usable_flavor()
+ && networkstatus_consensus_is_boostrapping(time(NULL))) {
+ return 0;
+ }
+
+ /* If we have just stopped bootstrapping (that is, just parsed a consensus),
+ * we might still have some excess connections hanging around. So we still
+ * have to check if we want to close any, even if we've stopped
+ * bootstrapping. */
+ return 1;
+}
+
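+
The helper above boils down to a decision over a few questions: could we have opened extra connections at all, are there extras, and has one of them started downloading while we are still bootstrapping. A hedged, stand-alone model of that decision as a pure function, with the real predicates (which live in networkstatus.c) reduced to booleans:

#include <stdbool.h>
#include <stdio.h>

static bool
would_close_excess(bool can_use_multiple_dirs,
                   bool has_excess_conns,
                   bool some_conn_is_downloading,
                   bool still_bootstrapping)
{
  if (!can_use_multiple_dirs)
    return false;   /* we never open extras in the first place */
  if (!has_excess_conns)
    return false;   /* nothing to close */
  if (!some_conn_is_downloading && still_bootstrapping)
    return false;   /* wait until one connection starts downloading */
  return true;      /* close the extras (even just after bootstrap ends) */
}

int
main(void)
{
  printf("%d\n", would_close_excess(true, true, true, true));   /* 1 */
  printf("%d\n", would_close_excess(true, true, false, true));  /* 0 */
  return 0;
}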
+/* Check if we would close excess consensus connections. If we would, any
+ * new consensus connection would become excess immediately, so return 1.
+ * Otherwise, return 0. */
+int
+connection_dir_avoid_extra_connection_for_purpose(unsigned int purpose)
+{
+ const or_options_t *options = get_options();
+
+ /* We're not interested in connections that aren't fetching a consensus. */
+ if (purpose != DIR_PURPOSE_FETCH_CONSENSUS) {
+ return 0;
+ }
+
+ /* we're only interested in avoiding excess connections if we could
+ * have created any in the first place */
+ if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+ return 0;
+ }
+
+ /* If there are connections downloading a consensus, and we are still
+ * bootstrapping (that is, we have no usable consensus), we can be sure that
+ * any further connections would be excess. */
+ if (networkstatus_consensus_is_downloading_usable_flavor()
+ && networkstatus_consensus_is_boostrapping(time(NULL))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Check if we have excess consensus download connection attempts, and close
+ * conn:
+ * - if we don't have a consensus, and we're downloading a consensus, and conn
+ * is not downloading a consensus yet, close it;
+ * - if we do have a consensus, conn is excess, close it. */
+int
+connection_dir_close_consensus_conn_if_extra(dir_connection_t *conn)
+{
+ tor_assert(conn);
+ tor_assert(conn->base_.type == CONN_TYPE_DIR);
+
+ /* We're not interested in connections that aren't fetching a consensus. */
+ if (conn->base_.purpose != DIR_PURPOSE_FETCH_CONSENSUS) {
+ return 0;
+ }
+
+ /* The connection has already been closed */
+ if (conn->base_.marked_for_close) {
+ return 0;
+ }
+
+ if (!connection_dir_would_close_consensus_conn_helper()) {
+ return 0;
+ }
+
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ time(NULL));
+
+ /* We don't want to check other connections to see if they are downloading,
+ * as this is prone to race-conditions. So leave it for
+ * connection_dir_consider_close_extra_consensus_conns() to clean up.
+ *
+ * But if conn has just started connecting, or we have a consensus already,
+ * we can be sure it's not needed any more. */
+ if (!we_are_bootstrapping
+ || conn->base_.state == DIR_CONN_STATE_CONNECTING) {
+ connection_close_immediate(&conn->base_);
+ connection_mark_for_close(&conn->base_);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Check if we have excess consensus download connection attempts, and close
+ * them:
+ * - if we don't have a consensus, and we're downloading a consensus, keep an
+ * earlier connection, or a connection to a fallback directory, and close
+ * all other connections;
+ * - if we do have a consensus, close all connections: they are all excess. */
+void
+connection_dir_close_extra_consensus_conns(void)
+{
+ if (!connection_dir_would_close_consensus_conn_helper()) {
+ return;
+ }
+
+ int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ time(NULL));
+
+ const char *usable_resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+ smartlist_t *consens_usable_conns =
+ connection_dir_list_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+
+ /* If we want to keep a connection that's downloading, find a connection to
+ * keep, favouring:
+ * - connections opened earlier (they are likely to have progressed further)
+ * - connections to fallbacks (to reduce the load on authorities) */
+ dir_connection_t *kept_download_conn = NULL;
+ int kept_is_authority = 0;
+ if (we_are_bootstrapping) {
+ SMARTLIST_FOREACH_BEGIN(consens_usable_conns,
+ dir_connection_t *, d) {
+ tor_assert(d);
+ int d_is_authority = router_digest_is_trusted_dir(d->identity_digest);
+ /* keep the first connection that is past the connecting state, but
+ * prefer fallbacks. */
+ if (d->base_.state != DIR_CONN_STATE_CONNECTING) {
+ if (!kept_download_conn || (kept_is_authority && !d_is_authority)) {
+ kept_download_conn = d;
+ kept_is_authority = d_is_authority;
+ /* we've found the earliest fallback, and want to keep it regardless
+ * of any other connections */
+ if (!kept_is_authority)
+ break;
+ }
+ }
+ } SMARTLIST_FOREACH_END(d);
+ }
+
+ SMARTLIST_FOREACH_BEGIN(consens_usable_conns,
+ dir_connection_t *, d) {
+ tor_assert(d);
+ /* don't close this connection if it's the one we want to keep */
+ if (kept_download_conn && d == kept_download_conn)
+ continue;
+ /* mark all other connections for close */
+ if (!d->base_.marked_for_close) {
+ connection_close_immediate(&d->base_);
+ connection_mark_for_close(&d->base_);
+ }
+ } SMARTLIST_FOREACH_END(d);
+
+ smartlist_free(consens_usable_conns);
+ consens_usable_conns = NULL;
+
+ /* make sure we've closed all excess connections */
+ const int final_connecting_conn_count =
+ connection_dir_count_by_purpose_resource_and_state(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource,
+ DIR_CONN_STATE_CONNECTING);
+ if (final_connecting_conn_count > 0) {
+ log_warn(LD_BUG, "Expected 0 consensus connections connecting after "
+ "cleanup, got %d.", final_connecting_conn_count);
+ }
+ const int expected_final_conn_count = (we_are_bootstrapping ? 1 : 0);
+ const int final_conn_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+ if (final_conn_count > expected_final_conn_count) {
+ log_warn(LD_BUG, "Expected %d consensus connections after cleanup, got "
+ "%d.", expected_final_conn_count, final_connecting_conn_count);
+ }
+}
+
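+
The loop above spares at most one connection. A self-contained model of that selection rule, keeping the first connection that is past the connecting state but letting a later fallback displace an earlier authority (identifiers here are illustrative, not Tor's):

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool past_connecting; bool is_authority; } conn_model_t;

static int
pick_conn_to_keep(const conn_model_t *conns, int n)
{
  int kept = -1;
  bool kept_is_authority = false;
  for (int i = 0; i < n; ++i) {
    if (!conns[i].past_connecting)
      continue;
    if (kept < 0 || (kept_is_authority && !conns[i].is_authority)) {
      kept = i;
      kept_is_authority = conns[i].is_authority;
      if (!kept_is_authority)
        break; /* earliest fallback found; keep it unconditionally */
    }
  }
  return kept; /* -1 means nothing is worth keeping yet */
}

int
main(void)
{
  const conn_model_t conns[] = {
    { true,  true  },   /* authority, already connected */
    { false, false },   /* fallback, still connecting */
    { true,  false },   /* fallback, already connected: this one wins */
  };
  printf("keep index %d\n", pick_conn_to_keep(conns, 3)); /* 2 */
  return 0;
}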
/** Connected handler for directory connections: begin sending data to the
- * server */
+ * server, and return 0, or, if the connection is an excess bootstrap
+ * connection, close all excess bootstrap connections.
+ * Only used when connections don't immediately connect. */
int
connection_dir_finished_connecting(dir_connection_t *conn)
{
@@ -3435,31 +3647,64 @@ connection_dir_finished_connecting(dir_connection_t *conn)
log_debug(LD_HTTP,"Dir connection to router %s:%u established.",
conn->base_.address,conn->base_.port);
- conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING; /* start flushing conn */
+ if (connection_dir_close_consensus_conn_if_extra(conn)) {
+ return -1;
+ }
+
+ /* start flushing conn */
+ conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
return 0;
}
/** Decide which download schedule we want to use based on descriptor type
- * in <b>dls</b> and whether we are acting as directory <b>server</b>, and
- * then return a list of int pointers defining download delays in seconds.
- * Helper function for download_status_increment_failure() and
- * download_status_reset(). */
+ * in <b>dls</b> and <b>options</b>.
+ * Then return a list of int pointers defining download delays in seconds.
+ * Helper function for download_status_increment_failure(),
+ * download_status_reset(), and download_status_increment_attempt(). */
static const smartlist_t *
-find_dl_schedule_and_len(download_status_t *dls, int server)
+find_dl_schedule(download_status_t *dls, const or_options_t *options)
{
+ /* XX/teor Replace with dir_server_mode from #12538 */
+ const int dir_server = options->DirPort_set;
+ const int multi_d = networkstatus_consensus_can_use_multiple_directories(
+ options);
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ time(NULL));
+ const int use_fallbacks = networkstatus_consensus_can_use_extra_fallbacks(
+ options);
switch (dls->schedule) {
case DL_SCHED_GENERIC:
- if (server)
- return get_options()->TestingServerDownloadSchedule;
- else
- return get_options()->TestingClientDownloadSchedule;
+ if (dir_server) {
+ return options->TestingServerDownloadSchedule;
+ } else {
+ return options->TestingClientDownloadSchedule;
+ }
case DL_SCHED_CONSENSUS:
- if (server)
- return get_options()->TestingServerConsensusDownloadSchedule;
- else
- return get_options()->TestingClientConsensusDownloadSchedule;
+ if (!multi_d) {
+ return options->TestingServerConsensusDownloadSchedule;
+ } else {
+ if (we_are_bootstrapping) {
+ if (!use_fallbacks) {
+ /* A bootstrapping client without extra fallback directories */
+ return
+ options->TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule;
+ } else if (dls->want_authority) {
+ /* A bootstrapping client with extra fallback directories, but
+ * connecting to an authority */
+ return
+ options->TestingClientBootstrapConsensusAuthorityDownloadSchedule;
+ } else {
+ /* A bootstrapping client connecting to extra fallback directories
+ */
+ return
+ options->TestingClientBootstrapConsensusFallbackDownloadSchedule;
+ }
+ } else {
+ return options->TestingClientConsensusDownloadSchedule;
+ }
+ }
case DL_SCHED_BRIDGE:
- return get_options()->TestingBridgeDownloadSchedule;
+ return options->TestingBridgeDownloadSchedule;
default:
tor_assert(0);
}
@@ -3468,54 +3713,168 @@ find_dl_schedule_and_len(download_status_t *dls, int server)
return NULL;
}
-/** Called when an attempt to download <b>dls</b> has failed with HTTP status
+/* Find the current delay for dls based on schedule.
+ * Set dls->next_attempt_at based on now, and return the delay.
+ * Helper for download_status_increment_failure and
+ * download_status_increment_attempt. */
+STATIC int
+download_status_schedule_get_delay(download_status_t *dls,
+ const smartlist_t *schedule,
+ time_t now)
+{
+ tor_assert(dls);
+ tor_assert(schedule);
+
+ int delay = INT_MAX;
+ uint8_t dls_schedule_position = (dls->increment_on
+ == DL_SCHED_INCREMENT_ATTEMPT
+ ? dls->n_download_attempts
+ : dls->n_download_failures);
+
+ if (dls_schedule_position < smartlist_len(schedule))
+ delay = *(int *)smartlist_get(schedule, dls_schedule_position);
+ else if (dls_schedule_position == IMPOSSIBLE_TO_DOWNLOAD)
+ delay = INT_MAX;
+ else
+ delay = *(int *)smartlist_get(schedule, smartlist_len(schedule) - 1);
+
+ /* A negative delay makes no sense. Knowing that delay is
+ * non-negative allows us to safely do the wrapping check below. */
+ tor_assert(delay >= 0);
+
+ /* Avoid now+delay overflowing INT_MAX, by comparing with a subtraction
+ * that won't overflow (since delay is non-negative). */
+ if (delay < INT_MAX && now <= INT_MAX - delay) {
+ dls->next_attempt_at = now+delay;
+ } else {
+ dls->next_attempt_at = TIME_MAX;
+ }
+
+ return delay;
+}
+
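+
A worked illustration of the lookup above: the Nth increment (attempt or failure, depending on increment_on) indexes the schedule, and once past the end the last entry repeats. The IMPOSSIBLE_TO_DOWNLOAD sentinel and the overflow clamp are left out of this sketch, which uses the default TestingClientBootstrapConsensusFallbackDownloadSchedule quoted earlier in this diff:

#include <stdio.h>

int
main(void)
{
  /* Default fallback bootstrap schedule from config.c above. */
  const int schedule[] = { 0, 1, 4, 11, 3600, 10800, 25200, 54000,
                           111600, 262800 };
  const int len = (int)(sizeof(schedule) / sizeof(schedule[0]));
  for (int n = 0; n < 12; ++n) {
    int idx = (n < len) ? n : len - 1;   /* clamp to the last entry */
    printf("after %2d increments: wait %6d seconds\n", n, schedule[idx]);
  }
  return 0;
}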
+/* Log a debug message about item, which increments on increment_action, has
+ * incremented dls_n_download_increments times. The message varies based on
+ * was_schedule_incremented (if not, not_incremented_response is logged), and
+ * the values of increment, dls_next_attempt_at, and now.
+ * Helper for download_status_increment_failure and
+ * download_status_increment_attempt. */
+static void
+download_status_log_helper(const char *item, int was_schedule_incremented,
+ const char *increment_action,
+ const char *not_incremented_response,
+ uint8_t dls_n_download_increments, int increment,
+ time_t dls_next_attempt_at, time_t now)
+{
+ if (item) {
+ if (!was_schedule_incremented)
+ log_debug(LD_DIR, "%s %s %d time(s); I'll try again %s.",
+ item, increment_action, (int)dls_n_download_increments,
+ not_incremented_response);
+ else if (increment == 0)
+ log_debug(LD_DIR, "%s %s %d time(s); I'll try again immediately.",
+ item, increment_action, (int)dls_n_download_increments);
+ else if (dls_next_attempt_at < TIME_MAX)
+ log_debug(LD_DIR, "%s %s %d time(s); I'll try again in %d seconds.",
+ item, increment_action, (int)dls_n_download_increments,
+ (int)(dls_next_attempt_at-now));
+ else
+ log_debug(LD_DIR, "%s %s %d time(s); Giving up for a while.",
+ item, increment_action, (int)dls_n_download_increments);
+ }
+}
+
+/** Determine when a failed download attempt should be retried.
+ * Called when an attempt to download <b>dls</b> has failed with HTTP status
* <b>status_code</b>. Increment the failure count (if the code indicates a
- * real failure) and set <b>dls</b>-\>next_attempt_at to an appropriate time
- * in the future. */
+ * real failure, or if we're a server) and set <b>dls</b>-\>next_attempt_at to
+ * an appropriate time in the future and return it.
+ * If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_ATTEMPT, increment the
+ * failure count, and return a time in the far future for the next attempt (to
+ * avoid an immediate retry). */
time_t
download_status_increment_failure(download_status_t *dls, int status_code,
const char *item, int server, time_t now)
{
- const smartlist_t *schedule;
- int increment;
+ int increment = -1;
tor_assert(dls);
+
+ /* only count the failure if it's permanent, or we're a server */
if (status_code != 503 || server) {
if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD-1)
++dls->n_download_failures;
}
- schedule = find_dl_schedule_and_len(dls, server);
+ if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
+ /* We don't find out that a failure-based schedule has attempted a
+ * connection until that connection fails.
+ * We'll never find out about successful connections, but this doesn't
+ * matter, because schedules are reset after a successful download.
+ */
+ if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
+ ++dls->n_download_attempts;
- if (dls->n_download_failures < smartlist_len(schedule))
- increment = *(int *)smartlist_get(schedule, dls->n_download_failures);
- else if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD)
- increment = INT_MAX;
- else
- increment = *(int *)smartlist_get(schedule, smartlist_len(schedule) - 1);
+ /* only return a failure retry time if this schedule increments on failures
+ */
+ const smartlist_t *schedule = find_dl_schedule(dls, get_options());
+ increment = download_status_schedule_get_delay(dls, schedule, now);
+ }
- if (increment < INT_MAX)
- dls->next_attempt_at = now+increment;
- else
- dls->next_attempt_at = TIME_MAX;
+ download_status_log_helper(item, !dls->increment_on, "failed",
+ "concurrently", dls->n_download_failures,
+ increment, dls->next_attempt_at, now);
- if (item) {
- if (increment == 0)
- log_debug(LD_DIR, "%s failed %d time(s); I'll try again immediately.",
- item, (int)dls->n_download_failures);
- else if (dls->next_attempt_at < TIME_MAX)
- log_debug(LD_DIR, "%s failed %d time(s); I'll try again in %d seconds.",
- item, (int)dls->n_download_failures,
- (int)(dls->next_attempt_at-now));
- else
- log_debug(LD_DIR, "%s failed %d time(s); Giving up for a while.",
- item, (int)dls->n_download_failures);
+ if (dls->increment_on == DL_SCHED_INCREMENT_ATTEMPT) {
+ /* stop this schedule retrying on failure, it will launch concurrent
+ * connections instead */
+ return TIME_MAX;
+ } else {
+ return dls->next_attempt_at;
+ }
+}
+
+/** Determine when the next download attempt should be made when using an
+ * attempt-based (potentially concurrent) download schedule.
+ * Called when an attempt to download <b>dls</b> is being initiated.
+ * Increment the attempt count and set <b>dls</b>-\>next_attempt_at to an
+ * appropriate time in the future and return it.
+ * If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_FAILURE, don't increment
+ * the attempts, and return a time in the far future (to avoid launching a
+ * concurrent attempt). */
+time_t
+download_status_increment_attempt(download_status_t *dls, const char *item,
+ time_t now)
+{
+ int delay = -1;
+ tor_assert(dls);
+
+ if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
+ /* This schedule should retry on failure, and not launch any concurrent
+ * attempts. */
+ log_info(LD_BUG, "Tried to launch an attempt-based connection on a "
+ "failure-based schedule.");
+ return TIME_MAX;
}
+
+ if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
+ ++dls->n_download_attempts;
+
+ const smartlist_t *schedule = find_dl_schedule(dls, get_options());
+ delay = download_status_schedule_get_delay(dls, schedule, now);
+
+ download_status_log_helper(item, dls->increment_on, "attempted",
+ "on failure", dls->n_download_attempts,
+ delay, dls->next_attempt_at, now);
+
return dls->next_attempt_at;
}
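To make the split between the two counters concrete, here is a minimal caller-side sketch. It is not part of the patch: it assumes Tor's internal headers (or.h, directory.h), and the two example_* functions are hypothetical names that simply exercise the increment routines above.

/* Sketch: drive a download_status_t from a hypothetical fetch path.
 * On an attempt-based schedule the position advances when we launch;
 * on a failure-based schedule it advances only when a fetch fails. */
static void
example_fetch_once(download_status_t *dls, const char *resource, time_t now)
{
  if (dls->increment_on == DL_SCHED_INCREMENT_ATTEMPT) {
    /* Record the attempt; the return value is when the next concurrent
     * attempt may be launched. */
    download_status_increment_attempt(dls, resource, now);
  }
  /* ... launch the directory connection for this resource here ... */
}

static void
example_fetch_failed(download_status_t *dls, const char *resource, time_t now)
{
  /* A 404 counts as a real failure, so it is always recorded. On a
   * failure-based schedule this also sets the retry time; on an
   * attempt-based schedule it returns TIME_MAX, because retries are
   * driven by the attempt counter instead. */
  download_status_increment_failure(dls, 404, resource, 0, now);
}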
/** Reset <b>dls</b> so that it will be considered downloadable
* immediately, and/or to show that we don't need it anymore.
*
+ * Must be called to initialise a download schedule, otherwise the zeroth item
+ * in the schedule will never be used.
+ *
* (We find the zeroth element of the download schedule, and set
* next_attempt_at to be the appropriate offset from 'now'. In most
* cases this means setting it to 'now', so the item will be immediately
@@ -3524,14 +3883,16 @@ download_status_increment_failure(download_status_t *dls, int status_code,
void
download_status_reset(download_status_t *dls)
{
- if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD)
+ if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD
+ || dls->n_download_attempts == IMPOSSIBLE_TO_DOWNLOAD)
return; /* Don't reset this. */
- const smartlist_t *schedule = find_dl_schedule_and_len(
- dls, get_options()->DirPort_set);
+ const smartlist_t *schedule = find_dl_schedule(dls, get_options());
dls->n_download_failures = 0;
+ dls->n_download_attempts = 0;
dls->next_attempt_at = time(NULL) + *(int *)smartlist_get(schedule, 0);
+ /* Don't reset dls->want_authority or dls->increment_on */
}
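Concretely (a reading note, not new behaviour): a statically zeroed download_status_t has next_attempt_at of 0 and therefore already counts as ready, and once the first attempt or failure is recorded the schedule position has moved past entry 0. Only download_status_reset() applies the zeroth delay, which is why a schedule is reset before first use and again after every successful download.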
/** Return the number of failures on <b>dls</b> since the last success (if
@@ -3542,6 +3903,22 @@ download_status_get_n_failures(const download_status_t *dls)
return dls->n_download_failures;
}
+/** Return the number of attempts to download <b>dls</b> since the last success
+ * (if any). This can differ from download_status_get_n_failures() due to
+ * outstanding concurrent attempts. */
+int
+download_status_get_n_attempts(const download_status_t *dls)
+{
+ return dls->n_download_attempts;
+}
+
+/** Return the next time to attempt to download <b>dls</b>. */
+time_t
+download_status_get_next_attempt_at(const download_status_t *dls)
+{
+ return dls->next_attempt_at;
+}
+
/** Called when one or more routerdesc (or extrainfo, if <b>was_extrainfo</b>)
* fetches have failed (with uppercase fingerprints listed in <b>failed</b>,
* either as descriptor digests or as identity digests based on
diff --git a/src/or/directory.h b/src/or/directory.h
index 427183cac9..2644e5703e 100644
--- a/src/or/directory.h
+++ b/src/or/directory.h
@@ -16,10 +16,12 @@ int directories_have_accepted_server_descriptor(void);
void directory_post_to_dirservers(uint8_t dir_purpose, uint8_t router_purpose,
dirinfo_type_t type, const char *payload,
size_t payload_len, size_t extrainfo_len);
-MOCK_DECL(void, directory_get_from_dirserver, (uint8_t dir_purpose,
- uint8_t router_purpose,
- const char *resource,
- int pds_flags));
+MOCK_DECL(void, directory_get_from_dirserver, (
+ uint8_t dir_purpose,
+ uint8_t router_purpose,
+ const char *resource,
+ int pds_flags,
+ download_want_authority_t want_authority));
void directory_get_from_all_authorities(uint8_t dir_purpose,
uint8_t router_purpose,
const char *resource);
@@ -72,6 +74,9 @@ void directory_initiate_command(const tor_addr_t *addr,
const char *resource,
const char *payload, size_t payload_len,
time_t if_modified_since);
+int connection_dir_avoid_extra_connection_for_purpose(unsigned int purpose);
+int connection_dir_close_consensus_conn_if_extra(dir_connection_t *conn);
+void connection_dir_close_extra_consensus_conns(void);
#define DSR_HEX (1<<0)
#define DSR_BASE64 (1<<1)
@@ -90,6 +95,8 @@ int router_supports_extrainfo(const char *identity_digest, int is_authority);
time_t download_status_increment_failure(download_status_t *dls,
int status_code, const char *item,
int server, time_t now);
+time_t download_status_increment_attempt(download_status_t *dls,
+ const char *item, time_t now);
/** Increment the failure count of the download_status_t <b>dls</b>, with
* the optional status code <b>sc</b>. */
#define download_status_failed(dls, sc) \
@@ -101,23 +108,27 @@ static int download_status_is_ready(download_status_t *dls, time_t now,
int max_failures);
/** Return true iff, as of <b>now</b>, the resource tracked by <b>dls</b> is
* ready to get its download reattempted. */
-static INLINE int
+static inline int
download_status_is_ready(download_status_t *dls, time_t now,
int max_failures)
{
- return (dls->n_download_failures <= max_failures
- && dls->next_attempt_at <= now);
+ int under_failure_limit = (dls->n_download_failures <= max_failures
+ && dls->n_download_attempts <= max_failures);
+ return (under_failure_limit && dls->next_attempt_at <= now);
}
static void download_status_mark_impossible(download_status_t *dl);
/** Mark <b>dl</b> as never downloadable. */
-static INLINE void
+static inline void
download_status_mark_impossible(download_status_t *dl)
{
dl->n_download_failures = IMPOSSIBLE_TO_DOWNLOAD;
+ dl->n_download_attempts = IMPOSSIBLE_TO_DOWNLOAD;
}
int download_status_get_n_failures(const download_status_t *dls);
+int download_status_get_n_attempts(const download_status_t *dls);
+time_t download_status_get_next_attempt_at(const download_status_t *dls);
#ifdef TOR_UNIT_TESTS
/* Used only by directory.c and test_dir.c */
@@ -131,6 +142,10 @@ STATIC int directory_handle_command_get(dir_connection_t *conn,
const char *headers,
const char *req_body,
size_t req_body_len);
+STATIC int connection_dir_would_close_consensus_conn_helper(void);
+STATIC int download_status_schedule_get_delay(download_status_t *dls,
+ const smartlist_t *schedule,
+ time_t now);
#endif
#endif
diff --git a/src/or/dirserv.c b/src/or/dirserv.c
index 8d9f166556..39563c3932 100644
--- a/src/or/dirserv.c
+++ b/src/or/dirserv.c
@@ -797,7 +797,7 @@ list_single_server_status(const routerinfo_t *desc, int is_live)
}
/* DOCDOC running_long_enough_to_decide_unreachable */
-static INLINE int
+static inline int
running_long_enough_to_decide_unreachable(void)
{
return time_of_process_start
@@ -1302,7 +1302,7 @@ static uint32_t guard_bandwidth_excluding_exits_kb = 0;
/** Helper: estimate the uptime of a router given its stated uptime and the
* amount of time since it last stated its stated uptime. */
-static INLINE long
+static inline long
real_uptime(const routerinfo_t *router, time_t now)
{
if (now < router->cache_info.published_on)
diff --git a/src/or/dns.c b/src/or/dns.c
index f98181756e..3f5dfd2a8a 100644
--- a/src/or/dns.c
+++ b/src/or/dns.c
@@ -134,7 +134,7 @@ static int dns_is_broken_for_ipv6 = 0;
/** Function to compare hashed resolves on their addresses; used to
* implement hash tables. */
-static INLINE int
+static inline int
cached_resolves_eq(cached_resolve_t *a, cached_resolve_t *b)
{
/* make this smarter one day? */
@@ -143,7 +143,7 @@ cached_resolves_eq(cached_resolve_t *a, cached_resolve_t *b)
}
/** Hash function for cached_resolve objects */
-static INLINE unsigned int
+static inline unsigned int
cached_resolve_hash(cached_resolve_t *a)
{
return (unsigned) siphash24g((const uint8_t*)a->address, strlen(a->address));
@@ -1126,7 +1126,7 @@ dns_cancel_pending_resolve,(const char *address))
/** Return true iff <b>address</b> is one of the addresses we use to verify
* that well-known sites aren't being hijacked by our DNS servers. */
-static INLINE int
+static inline int
is_test_address(const char *address)
{
const or_options_t *options = get_options();
diff --git a/src/or/entrynodes.c b/src/or/entrynodes.c
index ebf675166b..bf71fc30c0 100644
--- a/src/or/entrynodes.c
+++ b/src/or/entrynodes.c
@@ -2205,7 +2205,7 @@ fetch_bridge_descriptors(const or_options_t *options, time_t now)
log_info(LD_DIR, "Fetching bridge info '%s' from bridge authority.",
resource);
directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
- ROUTER_PURPOSE_BRIDGE, resource, 0);
+ ROUTER_PURPOSE_BRIDGE, resource, 0, DL_WANT_AUTHORITY);
}
}
SMARTLIST_FOREACH_END(bridge);
diff --git a/src/or/eventdns_tor.h b/src/or/eventdns_tor.h
index 9d51f0960e..f41c5c0099 100644
--- a/src/or/eventdns_tor.h
+++ b/src/or/eventdns_tor.h
@@ -12,9 +12,6 @@ typedef unsigned int uint;
#ifndef HAVE_U_CHAR
typedef unsigned char u_char;
#endif
-#ifdef _WIN32
-#define inline __inline
-#endif
#include "torint.h"
/* These are for debugging possible memory leaks. */
diff --git a/src/or/fp_pair.c b/src/or/fp_pair.c
index 42bebcd847..c863d4176c 100644
--- a/src/or/fp_pair.c
+++ b/src/or/fp_pair.c
@@ -21,7 +21,7 @@ struct fp_pair_map_s {
*/
/** Compare fp_pair_entry_t objects by key value. */
-static INLINE int
+static inline int
fp_pair_map_entries_eq(const fp_pair_map_entry_t *a,
const fp_pair_map_entry_t *b)
{
@@ -29,7 +29,7 @@ fp_pair_map_entries_eq(const fp_pair_map_entry_t *a,
}
/** Return a hash value for an fp_pair_entry_t. */
-static INLINE unsigned int
+static inline unsigned int
fp_pair_map_entry_hash(const fp_pair_map_entry_t *a)
{
tor_assert(sizeof(a->key) == DIGEST_LEN*2);
diff --git a/src/or/geoip.c b/src/or/geoip.c
index a868daea47..3ef1672f52 100644
--- a/src/or/geoip.c
+++ b/src/or/geoip.c
@@ -482,7 +482,7 @@ static HT_HEAD(clientmap, clientmap_entry_t) client_history =
HT_INITIALIZER();
/** Hashtable helper: compute a hash of a clientmap_entry_t. */
-static INLINE unsigned
+static inline unsigned
clientmap_entry_hash(const clientmap_entry_t *a)
{
unsigned h = (unsigned) tor_addr_hash(&a->addr);
@@ -493,7 +493,7 @@ clientmap_entry_hash(const clientmap_entry_t *a)
return h;
}
/** Hashtable helper: compare two clientmap_entry_t values for equality. */
-static INLINE int
+static inline int
clientmap_entries_eq(const clientmap_entry_t *a, const clientmap_entry_t *b)
{
if (strcmp_opt(a->transport_name, b->transport_name))
diff --git a/src/or/hibernate.c b/src/or/hibernate.c
index 356e11f6ec..5f727e27d4 100644
--- a/src/or/hibernate.c
+++ b/src/or/hibernate.c
@@ -490,7 +490,7 @@ reset_accounting(time_t now)
}
/** Return true iff we should save our bandwidth usage to disk. */
-static INLINE int
+static inline int
time_to_record_bandwidth_usage(time_t now)
{
/* Note every 600 sec */
diff --git a/src/or/include.am b/src/or/include.am
index 264c4ae802..1180239c89 100644
--- a/src/or/include.am
+++ b/src/or/include.am
@@ -124,9 +124,6 @@ src_or_tor_cov_LDADD = src/or/libtor-testing.a src/common/libor-testing.a \
src/common/libor-event-testing.a src/trunnel/libor-trunnel-testing.a \
@TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ @TOR_LIBEVENT_LIBS@ @TOR_OPENSSL_LIBS@ \
@TOR_LIB_WS32@ @TOR_LIB_GDI@ @CURVE25519_LIBS@ @TOR_SYSTEMD_LIBS@
-export TESTING_TOR_BINARY=$(top_builddir)/src/or/tor-cov
-else
-export TESTING_TOR_BINARY=$(top_builddir)/src/or/tor
endif
ORHEADERS = \
diff --git a/src/or/keypin.c b/src/or/keypin.c
index 047d2b069b..574a76d51e 100644
--- a/src/or/keypin.c
+++ b/src/or/keypin.c
@@ -57,14 +57,14 @@ static HT_HEAD(edmap, keypin_ent_st) the_ed_map = HT_INITIALIZER();
/** Hashtable helper: compare two keypin table entries and return true iff
* they have the same RSA key IDs. */
-static INLINE int
+static inline int
keypin_ents_eq_rsa(const keypin_ent_t *a, const keypin_ent_t *b)
{
return tor_memeq(a->rsa_id, b->rsa_id, sizeof(a->rsa_id));
}
/** Hashtable helper: hash a keypin table entries based on its RSA key ID */
-static INLINE unsigned
+static inline unsigned
keypin_ent_hash_rsa(const keypin_ent_t *a)
{
return (unsigned) siphash24g(a->rsa_id, sizeof(a->rsa_id));
@@ -72,14 +72,14 @@ return (unsigned) siphash24g(a->rsa_id, sizeof(a->rsa_id));
/** Hashtable helper: compare two keypin table entries and return true iff
* they have the same ed25519 keys */
-static INLINE int
+static inline int
keypin_ents_eq_ed(const keypin_ent_t *a, const keypin_ent_t *b)
{
return tor_memeq(a->ed25519_key, b->ed25519_key, sizeof(a->ed25519_key));
}
/** Hashtable helper: hash a keypin table entries based on its ed25519 key */
-static INLINE unsigned
+static inline unsigned
keypin_ent_hash_ed(const keypin_ent_t *a)
{
return (unsigned) siphash24g(a->ed25519_key, sizeof(a->ed25519_key));
diff --git a/src/or/main.c b/src/or/main.c
index 527e2b1ffe..455cba4513 100644
--- a/src/or/main.c
+++ b/src/or/main.c
@@ -1460,6 +1460,11 @@ run_scheduled_events(time_t now)
dirvote_act(options, now);
}
+ /* 2d. Clean up excess consensus bootstrap connections every second.
+ * connection_dir_close_consensus_conn_if_extra() will close connections
+ * that are clearly excess, but this check is more thorough. */
+ connection_dir_close_extra_consensus_conns();
+
/* 3a. Every second, we examine pending circuits and prune the
* ones which have been pending for more than a few seconds.
* We do this before step 4, so it can try building more if
@@ -1876,18 +1881,29 @@ check_for_reachability_bw_callback(time_t now, const or_options_t *options)
static int
fetch_networkstatus_callback(time_t now, const or_options_t *options)
{
- /* 2c. Every minute (or every second if TestingTorNetwork), check
- * whether we want to download any networkstatus documents. */
+ /* 2c. Every minute (or every second if TestingTorNetwork, or during
+ * client bootstrap), check whether we want to download any networkstatus
+ * documents. */
/* How often do we check whether we should download network status
* documents? */
-#define networkstatus_dl_check_interval(o) ((o)->TestingTorNetwork ? 1 : 60)
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ now);
+ const int prefer_mirrors = !directory_fetches_from_authorities(
+ get_options());
+ int networkstatus_dl_check_interval = 60;
+ /* check more often when testing, or when bootstrapping from mirrors
+ * (connection limits prevent us from making too many connections) */
+ if (options->TestingTorNetwork
+ || (we_are_bootstrapping && prefer_mirrors)) {
+ networkstatus_dl_check_interval = 1;
+ }
if (should_delay_dir_fetches(options, NULL))
return PERIODIC_EVENT_NO_UPDATE;
update_networkstatus_downloads(now);
- return networkstatus_dl_check_interval(options);
+ return networkstatus_dl_check_interval;
}
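In practice the new interval logic means: a TestingTorNetwork instance, or a public-network client that is still bootstrapping and prefers mirrors, re-checks for wanted networkstatus documents every second, while every other case keeps the previous 60-second cadence; should_delay_dir_fetches() still short-circuits the callback in either case.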
static int
diff --git a/src/or/microdesc.c b/src/or/microdesc.c
index a9bab3ddc6..dc23bcb632 100644
--- a/src/or/microdesc.c
+++ b/src/or/microdesc.c
@@ -47,14 +47,14 @@ struct microdesc_cache_t {
static microdesc_cache_t *get_microdesc_cache_noload(void);
/** Helper: computes a hash of <b>md</b> to place it in a hash table. */
-static INLINE unsigned int
+static inline unsigned int
microdesc_hash_(microdesc_t *md)
{
return (unsigned) siphash24g(md->digest, sizeof(md->digest));
}
/** Helper: compares <b>a</b> and <b>b</b> for equality for hash-table purposes. */
-static INLINE int
+static inline int
microdesc_eq_(microdesc_t *a, microdesc_t *b)
{
return tor_memeq(a->digest, b->digest, DIGEST256_LEN);
diff --git a/src/or/networkstatus.c b/src/or/networkstatus.c
index 71a2c0f121..173c109d60 100644
--- a/src/or/networkstatus.c
+++ b/src/or/networkstatus.c
@@ -85,8 +85,30 @@ static time_t time_to_download_next_consensus[N_CONSENSUS_FLAVORS];
/** Download status for the current consensus networkstatus. */
static download_status_t consensus_dl_status[N_CONSENSUS_FLAVORS] =
{
- { 0, 0, DL_SCHED_CONSENSUS },
- { 0, 0, DL_SCHED_CONSENSUS },
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_FAILURE },
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_FAILURE },
+ };
+
+#define N_CONSENSUS_BOOTSTRAP_SCHEDULES 2
+#define CONSENSUS_BOOTSTRAP_SOURCE_AUTHORITY 0
+#define CONSENSUS_BOOTSTRAP_SOURCE_ANY_DIRSERVER 1
+
+/* Using DL_SCHED_INCREMENT_ATTEMPT on these schedules means that
+ * download_status_increment_failure won't increment these entries.
+ * However, any bootstrap connection failures that occur after we have
+ * a valid consensus will count against the failure counts on the non-bootstrap
+ * schedules. There should only be one such connection left, as all the others
+ * will have been cancelled. (This doesn't seem to be a significant issue.) */
+static download_status_t
+ consensus_bootstrap_dl_status[N_CONSENSUS_BOOTSTRAP_SCHEDULES] =
+ {
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_AUTHORITY,
+ DL_SCHED_INCREMENT_ATTEMPT },
+ /* During bootstrap, DL_WANT_ANY_DIRSERVER means "use fallbacks". */
+ { 0, 0, 0, DL_SCHED_CONSENSUS, DL_WANT_ANY_DIRSERVER,
+ DL_SCHED_INCREMENT_ATTEMPT },
};
/** True iff we have logged a warning about this OR's version being older than
@@ -97,6 +119,10 @@ static int have_warned_about_old_version = 0;
static int have_warned_about_new_version = 0;
static void routerstatus_list_update_named_server_map(void);
+static void update_consensus_bootstrap_multiple_downloads(
+ time_t now,
+ const or_options_t *options,
+ int we_are_bootstrapping);
/** Forget that we've warned about anything networkstatus-related, so we will
* give fresh warnings if the same behavior happens again. */
@@ -122,6 +148,9 @@ networkstatus_reset_download_failures(void)
for (i=0; i < N_CONSENSUS_FLAVORS; ++i)
download_status_reset(&consensus_dl_status[i]);
+
+ for (i=0; i < N_CONSENSUS_BOOTSTRAP_SCHEDULES; ++i)
+ download_status_reset(&consensus_bootstrap_dl_status[i]);
}
/** Read every cached v3 consensus networkstatus from the disk. */
@@ -734,6 +763,55 @@ we_want_to_fetch_flavor(const or_options_t *options, int flavor)
* fetching certs before we check whether there is a better one? */
#define DELAY_WHILE_FETCHING_CERTS (20*60)
+/* Check if a downloaded consensus flavor should still wait for certificates
+ * to finish downloading before it can be used. If so, return 1.
+ * If not, mark dls as failed if the wait has expired, and return 0. */
+static int
+check_consensus_waiting_for_certs(int flavor, time_t now,
+ download_status_t *dls)
+{
+ consensus_waiting_for_certs_t *waiting;
+
+ /* We should always have a known flavor, because we_want_to_fetch_flavor()
+ * filters out unknown flavors. */
+ tor_assert(flavor >= 0 && flavor < N_CONSENSUS_FLAVORS);
+
+ waiting = &consensus_waiting_for_certs[flavor];
+ if (waiting->consensus) {
+ /* XXXX make sure this doesn't delay sane downloads. */
+ if (waiting->set_at + DELAY_WHILE_FETCHING_CERTS > now) {
+ return 1;
+ } else {
+ if (!waiting->dl_failed) {
+ download_status_failed(dls, 0);
+ waiting->dl_failed=1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Return the maximum download tries for a consensus, based on options and
+ * whether we_are_bootstrapping. */
+static int
+consensus_max_download_tries(const or_options_t *options,
+ int we_are_bootstrapping)
+{
+ int use_fallbacks = networkstatus_consensus_can_use_extra_fallbacks(options);
+
+ if (we_are_bootstrapping) {
+ if (use_fallbacks) {
+ return options->TestingClientBootstrapConsensusMaxDownloadTries;
+ } else {
+ return
+ options->TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries;
+ }
+ }
+
+ return options->TestingConsensusMaxDownloadTries;
+}
+
/** If we want to download a fresh consensus, launch a new download as
* appropriate. */
static void
@@ -741,12 +819,19 @@ update_consensus_networkstatus_downloads(time_t now)
{
int i;
const or_options_t *options = get_options();
+ const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+ now);
+ const int use_multi_conn =
+ networkstatus_consensus_can_use_multiple_directories(options);
+
+ if (should_delay_dir_fetches(options, NULL))
+ return;
for (i=0; i < N_CONSENSUS_FLAVORS; ++i) {
/* XXXX need some way to download unknown flavors if we are caching. */
const char *resource;
- consensus_waiting_for_certs_t *waiting;
networkstatus_t *c;
+ int max_in_progress_conns = 1;
if (! we_want_to_fetch_flavor(options, i))
continue;
@@ -762,35 +847,166 @@ update_consensus_networkstatus_downloads(time_t now)
resource = networkstatus_get_flavor_name(i);
- /* Let's make sure we remembered to update consensus_dl_status */
- tor_assert(consensus_dl_status[i].schedule == DL_SCHED_CONSENSUS);
+ /* Check if we already have enough connections in progress */
+ if (we_are_bootstrapping) {
+ max_in_progress_conns =
+ options->TestingClientBootstrapConsensusMaxInProgressTries;
+ }
+ if (connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ resource)
+ >= max_in_progress_conns) {
+ continue;
+ }
- if (!download_status_is_ready(&consensus_dl_status[i], now,
- options->TestingConsensusMaxDownloadTries))
- continue; /* We failed downloading a consensus too recently. */
- if (connection_dir_get_by_purpose_and_resource(
- DIR_PURPOSE_FETCH_CONSENSUS, resource))
- continue; /* There's an in-progress download.*/
+ /* Check if we want to launch another download for a usable consensus.
+ * Only used during bootstrap. */
+ if (we_are_bootstrapping && use_multi_conn
+ && i == usable_consensus_flavor()) {
+
+ /* Check if we're already downloading a usable consensus */
+ int consens_conn_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ resource);
+ int connect_consens_conn_count =
+ connection_dir_count_by_purpose_resource_and_state(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ resource,
+ DIR_CONN_STATE_CONNECTING);
+
+ if (i == usable_consensus_flavor()
+ && connect_consens_conn_count < consens_conn_count) {
+ continue;
+ }
- waiting = &consensus_waiting_for_certs[i];
- if (waiting->consensus) {
- /* XXXX make sure this doesn't delay sane downloads. */
- if (waiting->set_at + DELAY_WHILE_FETCHING_CERTS > now) {
- continue; /* We're still getting certs for this one. */
- } else {
- if (!waiting->dl_failed) {
- download_status_failed(&consensus_dl_status[i], 0);
- waiting->dl_failed=1;
- }
+ /* Make multiple connections for a bootstrap consensus download */
+ update_consensus_bootstrap_multiple_downloads(now, options,
+ we_are_bootstrapping);
+ } else {
+ /* Check if we failed downloading a consensus too recently */
+ int max_dl_tries = consensus_max_download_tries(options,
+ we_are_bootstrapping);
+
+ /* Let's make sure we remembered to update consensus_dl_status */
+ tor_assert(consensus_dl_status[i].schedule == DL_SCHED_CONSENSUS);
+
+ if (!download_status_is_ready(&consensus_dl_status[i],
+ now,
+ max_dl_tries)) {
+ continue;
}
+
+ /* Check if we're waiting for certificates to download */
+ if (check_consensus_waiting_for_certs(i, now, &consensus_dl_status[i]))
+ continue;
+
+ /* Try the requested attempt */
+ log_info(LD_DIR, "Launching %s standard networkstatus consensus "
+ "download.", networkstatus_get_flavor_name(i));
+ directory_get_from_dirserver(DIR_PURPOSE_FETCH_CONSENSUS,
+ ROUTER_PURPOSE_GENERAL, resource,
+ PDS_RETRY_IF_NO_SERVERS,
+ consensus_dl_status[i].want_authority);
}
+ }
+}
- log_info(LD_DIR, "Launching %s networkstatus consensus download.",
- networkstatus_get_flavor_name(i));
+/** When we're bootstrapping, launch one or more consensus download
+ * connections, if the schedule indicates that connection(s) should be made
+ * now. If want_authority is DL_WANT_AUTHORITY, connect to an authority;
+ * otherwise, use a fallback directory mirror.
+ */
+static void
+update_consensus_bootstrap_attempt_downloads(
+ time_t now,
+ const or_options_t *options,
+ int we_are_bootstrapping,
+ download_status_t *dls,
+ download_want_authority_t want_authority)
+{
+ int max_dl_tries = consensus_max_download_tries(options,
+ we_are_bootstrapping);
+ const char *resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+
+ /* Let's make sure we remembered to update schedule */
+ tor_assert(dls->schedule == DL_SCHED_CONSENSUS);
+
+ /* Allow for multiple connections in the same second, if the schedule value
+ * is 0. */
+ while (download_status_is_ready(dls, now, max_dl_tries)) {
+ log_info(LD_DIR, "Launching %s bootstrap %s networkstatus consensus "
+ "download.", resource, (want_authority == DL_WANT_AUTHORITY
+ ? "authority"
+ : "mirror"));
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CONSENSUS,
ROUTER_PURPOSE_GENERAL, resource,
- PDS_RETRY_IF_NO_SERVERS);
+ PDS_RETRY_IF_NO_SERVERS, want_authority);
+ /* schedule the next attempt */
+ download_status_increment_attempt(dls, resource, now);
+ }
+}
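To see why this loop allows concurrency: each pass launches one connection and then calls download_status_increment_attempt(), which sets next_attempt_at from the schedule. If the delay looked up for the current position is 0 seconds, download_status_is_ready() is immediately true again and another connection is launched in the same pass; any non-zero delay, or reaching max_dl_tries, ends the loop until a later run of update_consensus_networkstatus_downloads(). The schedules consulted here are the TestingClientBootstrapConsensus*DownloadSchedule options added to or.h later in this diff.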
+
+/** If we're bootstrapping, check the connection schedules and see if we want
+ * to make additional, potentially concurrent, consensus download
+ * connections.
+ * Only call when bootstrapping, and when we want to make additional
+ * connections. Only nodes that satisfy
+ * networkstatus_consensus_can_use_multiple_directories make additional
+ * connections.
+ */
+static void
+update_consensus_bootstrap_multiple_downloads(time_t now,
+ const or_options_t *options,
+ int we_are_bootstrapping)
+{
+ const int usable_flavor = usable_consensus_flavor();
+
+ /* make sure we can use multiple connections */
+ if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+ return;
+ }
+
+ /* If we've managed to validate a usable consensus, don't make additional
+ * connections. */
+ if (!we_are_bootstrapping) {
+ return;
+ }
+
+ /* Launch concurrent consensus download attempt(s) based on the mirror and
+ * authority schedules. Try the mirror first - this makes it slightly more
+ * likely that we'll connect to the fallback first, and then end the
+ * authority connection attempt. */
+
+ /* If a consensus download fails because it's waiting for certificates,
+ * we'll fail both the authority and fallback schedules. This is better than
+ * failing only one of the schedules, and having the other continue
+ * unchecked.
+ */
+
+ /* If we don't have or can't use extra fallbacks, don't try them. */
+ if (networkstatus_consensus_can_use_extra_fallbacks(options)) {
+ download_status_t *dls_f =
+ &consensus_bootstrap_dl_status[CONSENSUS_BOOTSTRAP_SOURCE_ANY_DIRSERVER];
+
+ if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_f)) {
+ /* During bootstrap, DL_WANT_ANY_DIRSERVER means "use fallbacks". */
+ update_consensus_bootstrap_attempt_downloads(now, options,
+ we_are_bootstrapping, dls_f,
+ DL_WANT_ANY_DIRSERVER);
+ }
+ }
+
+ /* Now try an authority. */
+ download_status_t *dls_a =
+ &consensus_bootstrap_dl_status[CONSENSUS_BOOTSTRAP_SOURCE_AUTHORITY];
+
+ if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_a)) {
+ update_consensus_bootstrap_attempt_downloads(now, options,
+ we_are_bootstrapping, dls_a,
+ DL_WANT_AUTHORITY);
}
}
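Putting the pieces together (a descriptive walk-through of the code above, not new behaviour): on each tick while bootstrapping, the fallback schedule is consulted first and may launch one or more mirror connections, then the authority schedule may launch an authority connection. Once any of these fetches delivers a usable consensus, bootstrapping ends, and the per-second call to connection_dir_close_extra_consensus_conns() added in main.c reaps the leftover connections.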
@@ -1057,6 +1273,100 @@ networkstatus_get_reasonably_live_consensus(time_t now, int flavor)
return NULL;
}
+/** Check if we're bootstrapping a consensus download. This means that we are
+ * only using the authorities and fallback directory mirrors to download the
+ * consensus flavor we'll use. */
+int
+networkstatus_consensus_is_boostrapping(time_t now)
+{
+ /* If we don't have a consensus, we must still be bootstrapping */
+ return !networkstatus_get_reasonably_live_consensus(
+ now,
+ usable_consensus_flavor());
+}
+
+/** Check if we can use multiple directories for a consensus download.
+ * Only clients (including bridges, but excluding bridge clients) benefit
+ * from multiple simultaneous consensus downloads. */
+int
+networkstatus_consensus_can_use_multiple_directories(
+ const or_options_t *options)
+{
+ /* If we are a client, bridge, bridge client, or hidden service */
+ return (!directory_fetches_from_authorities(options));
+}
+
+/** Check if we can use fallback directory mirrors for a consensus download.
+ * Only clients that have a list of additional fallbacks can use fallbacks. */
+int
+networkstatus_consensus_can_use_extra_fallbacks(const or_options_t *options)
+{
+ /* If we are a client, and we have additional mirrors, we can use them.
+ * The list length comparisons are a quick way to check if we have any
+ * non-authority fallback directories. If we ever have any authorities that
+ * aren't fallback directories, we will need to change this code. */
+ return (!directory_fetches_from_authorities(options)
+ && (smartlist_len(router_get_fallback_dir_servers())
+ > smartlist_len(router_get_trusted_dir_servers())));
+}
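A worked example of the list-length check (the counts are illustrative): with 9 directory authorities, all of which also appear in the fallback list, a client that has FallbackDir lines configured or uses the default fallbacks ends up with more than 9 entries in router_get_fallback_dir_servers(), so the comparison is true; a client with no extra mirrors has two lists of equal length, and the function returns 0.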
+
+/* Check if there is more than 1 consensus connection retrieving the usable
+ * consensus flavor. If so, return 1; if not, return 0.
+ *
+ * During normal operation, Tor only makes one consensus download
+ * connection. But clients can make multiple simultaneous consensus
+ * connections to improve bootstrap speed and reliability.
+ *
+ * If there is more than one connection, we must have connections left
+ * over from bootstrapping. However, some of the connections may have
+ * completed and been cleaned up, so it is not sufficient to check the
+ * return value of this function to see if a client could make multiple
+ * bootstrap connections. Use
+ * networkstatus_consensus_can_use_multiple_directories()
+ * and networkstatus_consensus_is_boostrapping(). */
+int
+networkstatus_consensus_has_excess_connections(void)
+{
+ const char *usable_resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+ const int consens_conn_usable_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+ /* The maximum number of connections we want downloading a usable consensus:
+ * always 1, whether bootstrapping or not. */
+ const int max_expected_consens_conn_usable_count = 1;
+
+ if (consens_conn_usable_count > max_expected_consens_conn_usable_count) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Is tor currently downloading a consensus of the usable flavor? */
+int
+networkstatus_consensus_is_downloading_usable_flavor(void)
+{
+ const char *usable_resource = networkstatus_get_flavor_name(
+ usable_consensus_flavor());
+ const int consens_conn_usable_count =
+ connection_dir_count_by_purpose_and_resource(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource);
+
+ const int connect_consens_conn_usable_count =
+ connection_dir_count_by_purpose_resource_and_state(
+ DIR_PURPOSE_FETCH_CONSENSUS,
+ usable_resource,
+ DIR_CONN_STATE_CONNECTING);
+ if (connect_consens_conn_usable_count < consens_conn_usable_count) {
+ return 1;
+ }
+
+ return 0;
+}
+
/** Given two router status entries for the same router identity, return 1 if
* if the contents have changed between them. Otherwise, return 0. */
static int
diff --git a/src/or/networkstatus.h b/src/or/networkstatus.h
index d6e9e37013..4cb33c3fc0 100644
--- a/src/or/networkstatus.h
+++ b/src/or/networkstatus.h
@@ -70,6 +70,14 @@ MOCK_DECL(networkstatus_t *,networkstatus_get_latest_consensus_by_flavor,
networkstatus_t *networkstatus_get_live_consensus(time_t now);
networkstatus_t *networkstatus_get_reasonably_live_consensus(time_t now,
int flavor);
+int networkstatus_consensus_is_boostrapping(time_t now);
+int networkstatus_consensus_can_use_multiple_directories(
+ const or_options_t *options);
+int networkstatus_consensus_can_use_extra_fallbacks(
+ const or_options_t *options);
+int networkstatus_consensus_has_excess_connections(void);
+int networkstatus_consensus_is_downloading_usable_flavor(void);
+
#define NSSET_FROM_CACHE 1
#define NSSET_WAS_WAITING_FOR_CERTS 2
#define NSSET_DONT_DOWNLOAD_CERTS 4
diff --git a/src/or/nodelist.c b/src/or/nodelist.c
index 2f272a1d56..fc27207851 100644
--- a/src/or/nodelist.c
+++ b/src/or/nodelist.c
@@ -57,13 +57,13 @@ typedef struct nodelist_t {
} nodelist_t;
-static INLINE unsigned int
+static inline unsigned int
node_id_hash(const node_t *node)
{
return (unsigned) siphash24g(node->identity, DIGEST_LEN);
}
-static INLINE unsigned int
+static inline unsigned int
node_id_eq(const node_t *node1, const node_t *node2)
{
return tor_memeq(node1->identity, node2->identity, DIGEST_LEN);
@@ -291,7 +291,7 @@ nodelist_set_consensus(networkstatus_t *ns)
}
/** Helper: return true iff a node has a usable amount of information*/
-static INLINE int
+static inline int
node_is_usable(const node_t *node)
{
return (node->rs) || (node->ri);
@@ -1021,7 +1021,7 @@ nodelist_refresh_countries(void)
/** Return true iff router1 and router2 have similar enough network addresses
* that we should treat them as being in the same family */
-static INLINE int
+static inline int
addrs_in_same_network_family(const tor_addr_t *a1,
const tor_addr_t *a2)
{
@@ -1045,7 +1045,7 @@ node_nickname_matches(const node_t *node, const char *nickname)
}
/** Return true iff <b>node</b> is named by some nickname in <b>lst</b>. */
-static INLINE int
+static inline int
node_in_nickname_smartlist(const smartlist_t *lst, const node_t *node)
{
if (!lst) return 0;
diff --git a/src/or/or.h b/src/or/or.h
index 8fd141bec9..e92a4bba39 100644
--- a/src/or/or.h
+++ b/src/or/or.h
@@ -915,18 +915,18 @@ typedef enum {
#define VAR_CELL_MAX_HEADER_SIZE 7
static int get_cell_network_size(int wide_circ_ids);
-static INLINE int get_cell_network_size(int wide_circ_ids)
+static inline int get_cell_network_size(int wide_circ_ids)
{
return wide_circ_ids ? CELL_MAX_NETWORK_SIZE : CELL_MAX_NETWORK_SIZE - 2;
}
static int get_var_cell_header_size(int wide_circ_ids);
-static INLINE int get_var_cell_header_size(int wide_circ_ids)
+static inline int get_var_cell_header_size(int wide_circ_ids)
{
return wide_circ_ids ? VAR_CELL_MAX_HEADER_SIZE :
VAR_CELL_MAX_HEADER_SIZE - 2;
}
static int get_circ_id_size(int wide_circ_ids);
-static INLINE int get_circ_id_size(int wide_circ_ids)
+static inline int get_circ_id_size(int wide_circ_ids)
{
return wide_circ_ids ? 4 : 2;
}
@@ -1799,38 +1799,38 @@ static control_connection_t *TO_CONTROL_CONN(connection_t *);
* invalid. */
static listener_connection_t *TO_LISTENER_CONN(connection_t *);
-static INLINE or_connection_t *TO_OR_CONN(connection_t *c)
+static inline or_connection_t *TO_OR_CONN(connection_t *c)
{
tor_assert(c->magic == OR_CONNECTION_MAGIC);
return DOWNCAST(or_connection_t, c);
}
-static INLINE dir_connection_t *TO_DIR_CONN(connection_t *c)
+static inline dir_connection_t *TO_DIR_CONN(connection_t *c)
{
tor_assert(c->magic == DIR_CONNECTION_MAGIC);
return DOWNCAST(dir_connection_t, c);
}
-static INLINE edge_connection_t *TO_EDGE_CONN(connection_t *c)
+static inline edge_connection_t *TO_EDGE_CONN(connection_t *c)
{
tor_assert(c->magic == EDGE_CONNECTION_MAGIC ||
c->magic == ENTRY_CONNECTION_MAGIC);
return DOWNCAST(edge_connection_t, c);
}
-static INLINE entry_connection_t *TO_ENTRY_CONN(connection_t *c)
+static inline entry_connection_t *TO_ENTRY_CONN(connection_t *c)
{
tor_assert(c->magic == ENTRY_CONNECTION_MAGIC);
return (entry_connection_t*) SUBTYPE_P(c, entry_connection_t, edge_.base_);
}
-static INLINE entry_connection_t *EDGE_TO_ENTRY_CONN(edge_connection_t *c)
+static inline entry_connection_t *EDGE_TO_ENTRY_CONN(edge_connection_t *c)
{
tor_assert(c->base_.magic == ENTRY_CONNECTION_MAGIC);
return (entry_connection_t*) SUBTYPE_P(c, entry_connection_t, edge_);
}
-static INLINE control_connection_t *TO_CONTROL_CONN(connection_t *c)
+static inline control_connection_t *TO_CONTROL_CONN(connection_t *c)
{
tor_assert(c->magic == CONTROL_CONNECTION_MAGIC);
return DOWNCAST(control_connection_t, c);
}
-static INLINE listener_connection_t *TO_LISTENER_CONN(connection_t *c)
+static inline listener_connection_t *TO_LISTENER_CONN(connection_t *c)
{
tor_assert(c->magic == LISTENER_CONNECTION_MAGIC);
return DOWNCAST(listener_connection_t, c);
@@ -1946,8 +1946,8 @@ typedef enum {
} saved_location_t;
#define saved_location_bitfield_t ENUM_BF(saved_location_t)
-/** Enumeration: what kind of download schedule are we using for a given
- * object? */
+/** Enumeration: what directory object is being downloaded?
+ * This determines which schedule is selected to perform the download. */
typedef enum {
DL_SCHED_GENERIC = 0,
DL_SCHED_CONSENSUS = 1,
@@ -1955,15 +1955,74 @@ typedef enum {
} download_schedule_t;
#define download_schedule_bitfield_t ENUM_BF(download_schedule_t)
+/** Enumeration: is the download schedule for downloading from an authority,
+ * or from any available directory mirror?
+ * During bootstrap, "any" means a fallback (or an authority, if there
+ * are no fallbacks).
+ * When we have a valid consensus, "any" means any directory server. */
+typedef enum {
+ DL_WANT_ANY_DIRSERVER = 0,
+ DL_WANT_AUTHORITY = 1,
+} download_want_authority_t;
+#define download_want_authority_bitfield_t \
+ ENUM_BF(download_want_authority_t)
+
+/** Enumeration: do we want to increment the schedule position each time a
+ * connection is attempted (these attempts can be concurrent), or do we want
+ * to increment the schedule position after a connection fails? */
+typedef enum {
+ DL_SCHED_INCREMENT_FAILURE = 0,
+ DL_SCHED_INCREMENT_ATTEMPT = 1,
+} download_schedule_increment_t;
+#define download_schedule_increment_bitfield_t \
+ ENUM_BF(download_schedule_increment_t)
+
/** Information about our plans for retrying downloads for a downloadable
- * object. */
+ * directory object.
+ * Each type of downloadable directory object has a corresponding retry
+ * <b>schedule</b>, which can be different depending on whether the object is
+ * being downloaded from an authority or a mirror (<b>want_authority</b>).
+ * <b>next_attempt_at</b> contains the next time we will attempt to download
+ * the object.
+ * For schedules that <b>increment_on</b> failure, <b>n_download_failures</b>
+ * is used to determine the position in the schedule. (Each schedule is a
+ * smartlist of integer delays, parsed from a CSV option.) Every time a
+ * connection attempt fails, <b>n_download_failures</b> is incremented,
+ * the new delay value is looked up from the schedule, and
+ * <b>next_attempt_at</b> is set delay seconds from the time the previous
+ * connection failed. Therefore, at most one failure-based connection can be
+ * in progress for each download_status_t.
+ * For schedules that <b>increment_on</b> attempt, <b>n_download_attempts</b>
+ * is used to determine the position in the schedule. Every time a
+ * connection attempt is made, <b>n_download_attempts</b> is incremented,
+ * the new delay value is looked up from the schedule, and
+ * <b>next_attempt_at</b> is set delay seconds from the time the previous
+ * connection was attempted. Therefore, multiple concurrent attempted-based
+ * connections can be in progress for each download_status_t.
+ * After an object is successfully downloaded, any other concurrent connections
+ * are terminated. A new schedule which starts at position 0 is used for
+ * subsequent downloads of the same object.
+ */
typedef struct download_status_t {
- time_t next_attempt_at; /**< When should we try downloading this descriptor
+ time_t next_attempt_at; /**< When should we try downloading this object
* again? */
- uint8_t n_download_failures; /**< Number of failures trying to download the
- * most recent descriptor. */
- download_schedule_bitfield_t schedule : 8;
-
+ uint8_t n_download_failures; /**< Number of failed downloads of the most
+ * recent object, since the last success. */
+ uint8_t n_download_attempts; /**< Number of (potentially concurrent) attempts
+ * to download the most recent object, since
+ * the last success. */
+ download_schedule_bitfield_t schedule : 8; /**< What kind of object is being
+ * downloaded? This determines the
+ * schedule used for the download.
+ */
+ download_want_authority_bitfield_t want_authority : 1; /**< Is the download
+ * happening from an authority
+ * or a mirror? This determines
+ * the schedule used for the
+ * download. */
+ download_schedule_increment_bitfield_t increment_on : 1; /**< does this
+ * schedule increment on each attempt,
+ * or after each failure? */
} download_status_t;
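The position-to-delay lookup this comment describes is implemented by download_status_schedule_get_delay() in directory.c (declared under TOR_UNIT_TESTS in directory.h earlier in this diff); its body is not shown here, so the following is only a sketch consistent with the description above, reusing the smartlist-of-int representation that download_status_reset() also relies on.

/* Sketch only, not the actual helper body. */
static int
example_schedule_delay(const download_status_t *dls,
                       const smartlist_t *schedule)
{
  /* Pick the counter that drives this schedule. */
  int position = (dls->increment_on == DL_SCHED_INCREMENT_ATTEMPT)
                   ? dls->n_download_attempts
                   : dls->n_download_failures;
  /* Once we run off the end of the list, keep using the last entry. */
  if (position >= smartlist_len(schedule))
    position = smartlist_len(schedule) - 1;
  /* Each entry is a delay in seconds, parsed from a CSV torrc option. */
  return *(int *)smartlist_get(schedule, position);
}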
/** If n_download_failures is this high, the download can never happen. */
@@ -3289,27 +3348,27 @@ static const origin_circuit_t *CONST_TO_ORIGIN_CIRCUIT(const circuit_t *);
/** Return 1 iff <b>node</b> has Exit flag and no BadExit flag.
* Otherwise, return 0.
*/
-static INLINE int node_is_good_exit(const node_t *node)
+static inline int node_is_good_exit(const node_t *node)
{
return node->is_exit && ! node->is_bad_exit;
}
-static INLINE or_circuit_t *TO_OR_CIRCUIT(circuit_t *x)
+static inline or_circuit_t *TO_OR_CIRCUIT(circuit_t *x)
{
tor_assert(x->magic == OR_CIRCUIT_MAGIC);
return DOWNCAST(or_circuit_t, x);
}
-static INLINE const or_circuit_t *CONST_TO_OR_CIRCUIT(const circuit_t *x)
+static inline const or_circuit_t *CONST_TO_OR_CIRCUIT(const circuit_t *x)
{
tor_assert(x->magic == OR_CIRCUIT_MAGIC);
return DOWNCAST(or_circuit_t, x);
}
-static INLINE origin_circuit_t *TO_ORIGIN_CIRCUIT(circuit_t *x)
+static inline origin_circuit_t *TO_ORIGIN_CIRCUIT(circuit_t *x)
{
tor_assert(x->magic == ORIGIN_CIRCUIT_MAGIC);
return DOWNCAST(origin_circuit_t, x);
}
-static INLINE const origin_circuit_t *CONST_TO_ORIGIN_CIRCUIT(
+static inline const origin_circuit_t *CONST_TO_ORIGIN_CIRCUIT(
const circuit_t *x)
{
tor_assert(x->magic == ORIGIN_CIRCUIT_MAGIC);
@@ -3758,6 +3817,8 @@ typedef struct {
/** List of fallback directory servers */
config_line_t *FallbackDir;
+ /** Whether to use the default hard-coded FallbackDirs */
+ int UseDefaultFallbackDirs;
/** Weight to apply to all directory authority rates if considering them
* along with fallbackdirs */
@@ -4069,6 +4130,36 @@ typedef struct {
* on testing networks. */
smartlist_t *TestingClientConsensusDownloadSchedule;
+ /** Schedule for when clients should download consensuses from authorities
+ * if they are bootstrapping (that is, they don't have a usable, reasonably
+ * live consensus). Only used by clients fetching from a list of fallback
+ * directory mirrors.
+ *
+ * This schedule is incremented by (potentially concurrent) connection
+ * attempts, unlike other schedules, which are incremented by connection
+ * failures. Only altered on testing networks. */
+ smartlist_t *TestingClientBootstrapConsensusAuthorityDownloadSchedule;
+
+ /** Schedule for when clients should download consensuses from fallback
+ * directory mirrors if they are bootstrapping (that is, they don't have a
+ * usable, reasonably live consensus). Only used by clients fetching from a
+ * list of fallback directory mirrors.
+ *
+ * This schedule is incremented by (potentially concurrent) connection
+ * attempts, unlike other schedules, which are incremented by connection
+ * failures. Only altered on testing networks. */
+ smartlist_t *TestingClientBootstrapConsensusFallbackDownloadSchedule;
+
+ /** Schedule for when clients should download consensuses from authorities
+ * if they are bootstrapping (that is, they don't have a usable, reasonably
+ * live consensus). Only used by clients which don't have or won't fetch
+ * from a list of fallback directory mirrors.
+ *
+ * This schedule is incremented by (potentially concurrent) connection
+ * attempts, unlike other schedules, which are incremented by connection
+ * failures. Only altered on testing networks. */
+ smartlist_t *TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule;
+
/** Schedule for when clients should download bridge descriptors. Only
* altered on testing networks. */
smartlist_t *TestingBridgeDownloadSchedule;
@@ -4086,6 +4177,21 @@ typedef struct {
* up? Only altered on testing networks. */
int TestingConsensusMaxDownloadTries;
+ /** How many times will a client try to fetch a consensus while
+ * bootstrapping using a list of fallback directories, before it gives up?
+ * Only altered on testing networks. */
+ int TestingClientBootstrapConsensusMaxDownloadTries;
+
+ /** How many times will a client try to fetch a consensus while
+ * bootstrapping using only a list of authorities, before it gives up?
+ * Only altered on testing networks. */
+ int TestingClientBootstrapConsensusAuthorityOnlyMaxDownloadTries;
+
+ /** How many simultaneous in-progress connections will we make when trying
+ * to fetch a consensus before we wait for one to complete, timeout, or
+ * error out? Only altered on testing networks. */
+ int TestingClientBootstrapConsensusMaxInProgressTries;
+
/** How many times will we try to download a router's descriptor before
* giving up? Only altered on testing networks. */
int TestingDescriptorMaxDownloadTries;
@@ -4326,6 +4432,9 @@ typedef struct {
int keygen_passphrase_fd;
int change_key_passphrase;
char *master_key_fname;
+
+ /** Autobool: Do we try to retain capabilities if we can? */
+ int KeepBindCapabilities;
} or_options_t;
/** Persistent state for an onion router, as saved to disk. */
@@ -4398,7 +4507,7 @@ typedef struct {
/** Change the next_write time of <b>state</b> to <b>when</b>, unless the
* state is already scheduled to be written to disk earlier than <b>when</b>.
*/
-static INLINE void or_state_mark_dirty(or_state_t *state, time_t when)
+static inline void or_state_mark_dirty(or_state_t *state, time_t when)
{
if (state->next_write > when)
state->next_write = when;
diff --git a/src/or/policies.c b/src/or/policies.c
index 7934d162ea..32a7ec2da4 100644
--- a/src/or/policies.c
+++ b/src/or/policies.c
@@ -628,7 +628,7 @@ typedef struct policy_map_ent_t {
static HT_HEAD(policy_map, policy_map_ent_t) policy_root = HT_INITIALIZER();
/** Return true iff a and b are equal. */
-static INLINE int
+static inline int
policy_eq(policy_map_ent_t *a, policy_map_ent_t *b)
{
return cmp_single_addr_policy(a->policy, b->policy) == 0;
diff --git a/src/or/relay.c b/src/or/relay.c
index aed6bf7009..ee2f041dbd 100644
--- a/src/or/relay.c
+++ b/src/or/relay.c
@@ -2256,7 +2256,7 @@ circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint)
static size_t total_cells_allocated = 0;
/** Release storage held by <b>cell</b>. */
-static INLINE void
+static inline void
packed_cell_free_unchecked(packed_cell_t *cell)
{
--total_cells_allocated;
@@ -2300,7 +2300,7 @@ dump_cell_pool_usage(int severity)
}
/** Allocate a new copy of packed <b>cell</b>. */
-static INLINE packed_cell_t *
+static inline packed_cell_t *
packed_cell_copy(const cell_t *cell, int wide_circ_ids)
{
packed_cell_t *c = packed_cell_new();
diff --git a/src/or/rendcommon.h b/src/or/rendcommon.h
index 3b2f86d614..04e34af453 100644
--- a/src/or/rendcommon.h
+++ b/src/or/rendcommon.h
@@ -19,7 +19,7 @@ typedef enum rend_intro_point_failure_t {
} rend_intro_point_failure_t;
/** Free all storage associated with <b>data</b> */
-static INLINE void
+static inline void
rend_data_free(rend_data_t *data)
{
if (!data) {
diff --git a/src/or/rephist.c b/src/or/rephist.c
index 343a06658a..d55317947c 100644
--- a/src/or/rephist.c
+++ b/src/or/rephist.c
@@ -920,7 +920,7 @@ parse_possibly_bad_iso_time(const char *s, time_t *time_out)
* that's about as much before <b>now</b> as <b>t</b> was before
* <b>stored_at</b>.
*/
-static INLINE time_t
+static inline time_t
correct_time(time_t t, time_t now, time_t stored_at, time_t started_measuring)
{
if (t < started_measuring - 24*60*60*365)
@@ -1190,7 +1190,7 @@ commit_max(bw_array_t *b)
}
/** Shift the current observation time of <b>b</b> forward by one second. */
-static INLINE void
+static inline void
advance_obs(bw_array_t *b)
{
int nextidx;
@@ -1216,7 +1216,7 @@ advance_obs(bw_array_t *b)
/** Add <b>n</b> bytes to the number of bytes in <b>b</b> for second
* <b>when</b>. */
-static INLINE void
+static inline void
add_obs(bw_array_t *b, time_t when, uint64_t n)
{
if (when < b->cur_obs_time)
diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index 8bded42468..28b5eb1184 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -278,7 +278,7 @@ trusted_dirs_reload_certs(void)
/** Helper: return true iff we already have loaded the exact cert
* <b>cert</b>. */
-static INLINE int
+static inline int
already_have_cert(authority_cert_t *cert)
{
cert_list_t *cl = get_cert_list(cert->cache_info.identity_digest);
@@ -897,8 +897,10 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now)
if (smartlist_len(fps) > 1) {
resource = smartlist_join_strings(fps, "", 0, NULL);
+ /* XXX - do we want certs from authorities or mirrors? - teor */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
- resource, PDS_RETRY_IF_NO_SERVERS);
+ resource, PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_ANY_DIRSERVER);
tor_free(resource);
}
/* else we didn't add any: they were all pending */
@@ -941,8 +943,10 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now)
if (smartlist_len(fp_pairs) > 1) {
resource = smartlist_join_strings(fp_pairs, "", 0, NULL);
+ /* XXX - do we want certs from authorities or mirrors? - teor */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0,
- resource, PDS_RETRY_IF_NO_SERVERS);
+ resource, PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_ANY_DIRSERVER);
tor_free(resource);
}
/* else they were all pending */
@@ -985,7 +989,7 @@ router_should_rebuild_store(desc_store_t *store)
/** Return the desc_store_t in <b>rl</b> that should be used to store
* <b>sd</b>. */
-static INLINE desc_store_t *
+static inline desc_store_t *
desc_get_store(routerlist_t *rl, const signed_descriptor_t *sd)
{
if (sd->is_extrainfo)
@@ -1358,7 +1362,9 @@ router_get_trusteddirserver_by_digest(const char *digest)
}
/** Return the dir_server_t for the fallback dirserver whose identity
- * key hashes to <b>digest</b>, or NULL if no such authority is known.
+ * key hashes to <b>digest</b>, or NULL if no such fallback is in the list of
+ * fallback_dir_servers. (fallback_dir_servers is affected by the FallbackDir
+ * and UseDefaultFallbackDirs torrc options.)
*/
dir_server_t *
router_get_fallback_dirserver_by_digest(const char *digest)
@@ -1366,6 +1372,9 @@ router_get_fallback_dirserver_by_digest(const char *digest)
if (!fallback_dir_servers)
return NULL;
+ if (!digest)
+ return NULL;
+
SMARTLIST_FOREACH(fallback_dir_servers, dir_server_t *, ds,
{
if (tor_memeq(ds->digest, digest, DIGEST_LEN))
@@ -1375,6 +1384,17 @@ router_get_fallback_dirserver_by_digest(const char *digest)
return NULL;
}
+/** Return 1 if any fallback dirserver's identity key hashes to <b>digest</b>,
+ * or 0 if no such fallback is in the list of fallback_dir_servers.
+ * (fallback_dir_servers is affected by the FallbackDir and
+ * UseDefaultFallbackDirs torrc options.)
+ */
+int
+router_digest_is_fallback_dir(const char *digest)
+{
+ return (router_get_fallback_dirserver_by_digest(digest) != NULL);
+}
+
/** Return the dir_server_t for the directory authority whose
* v3 identity key hashes to <b>digest</b>, or NULL if no such authority
* is known.
@@ -1897,7 +1917,7 @@ scale_array_elements_to_u64(u64_dbl_t *entries, int n_entries,
#if SIZEOF_VOID_P == 8
#define gt_i64_timei(a,b) ((a) > (b))
#else
-static INLINE int
+static inline int
gt_i64_timei(uint64_t a, uint64_t b)
{
int64_t diff = (int64_t) (b - a);
@@ -1975,7 +1995,7 @@ bridge_get_advertised_bandwidth_bounded(routerinfo_t *router)
/** Return bw*1000, unless bw*1000 would overflow, in which case return
* INT32_MAX. */
-static INLINE int32_t
+static inline int32_t
kb_to_bytes(uint32_t bw)
{
return (bw > (INT32_MAX/1000)) ? INT32_MAX : bw*1000;
@@ -2790,7 +2810,7 @@ dump_routerlist_mem_usage(int severity)
* in <b>sl</b> at position <b>idx</b>. Otherwise, search <b>sl</b> for
* <b>ri</b>. Return the index of <b>ri</b> in <b>sl</b>, or -1 if <b>ri</b>
* is not in <b>sl</b>. */
-static INLINE int
+static inline int
routerlist_find_elt_(smartlist_t *sl, void *ri, int idx)
{
if (idx < 0) {
@@ -4397,14 +4417,14 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
tor_free(cp);
if (source) {
- /* We know which authority we want. */
+ /* We know which authority or directory mirror we want. */
directory_initiate_command_routerstatus(source, purpose,
ROUTER_PURPOSE_GENERAL,
DIRIND_ONEHOP,
resource, NULL, 0, 0);
} else {
directory_get_from_dirserver(purpose, ROUTER_PURPOSE_GENERAL, resource,
- pds_flags);
+ pds_flags, DL_WANT_ANY_DIRSERVER);
}
tor_free(resource);
}
@@ -4686,9 +4706,14 @@ launch_dummy_descriptor_download_as_needed(time_t now,
last_descriptor_download_attempted + DUMMY_DOWNLOAD_INTERVAL < now &&
last_dummy_download + DUMMY_DOWNLOAD_INTERVAL < now) {
last_dummy_download = now;
+ /* XX/teor - do we want an authority here, because they are less likely
+ * to give us the wrong address? (See #17782)
+ * I'm leaving the previous behaviour intact, because I don't like
+ * the idea of some relays contacting an authority every 20 minutes. */
directory_get_from_dirserver(DIR_PURPOSE_FETCH_SERVERDESC,
ROUTER_PURPOSE_GENERAL, "authority.z",
- PDS_RETRY_IF_NO_SERVERS);
+ PDS_RETRY_IF_NO_SERVERS,
+ DL_WANT_ANY_DIRSERVER);
}
}
diff --git a/src/or/routerlist.h b/src/or/routerlist.h
index 3c4c9cde2d..339e34ae03 100644
--- a/src/or/routerlist.h
+++ b/src/or/routerlist.h
@@ -50,6 +50,7 @@ const routerstatus_t *router_pick_directory_server(dirinfo_type_t type,
dir_server_t *router_get_trusteddirserver_by_digest(const char *d);
dir_server_t *router_get_fallback_dirserver_by_digest(
const char *digest);
+int router_digest_is_fallback_dir(const char *digest);
dir_server_t *trusteddirserver_get_by_v3_auth_digest(const char *d);
const routerstatus_t *router_pick_trusteddirserver(dirinfo_type_t type,
int flags);
@@ -109,7 +110,7 @@ static int WRA_NEVER_DOWNLOADABLE(was_router_added_t s);
* was added. It might still be necessary to check whether the descriptor
* generator should be notified.
*/
-static INLINE int
+static inline int
WRA_WAS_ADDED(was_router_added_t s) {
return s == ROUTER_ADDED_SUCCESSFULLY || s == ROUTER_ADDED_NOTIFY_GENERATOR;
}
@@ -120,7 +121,7 @@ WRA_WAS_ADDED(was_router_added_t s) {
* - it was outdated.
* - its certificates were expired.
*/
-static INLINE int WRA_WAS_OUTDATED(was_router_added_t s)
+static inline int WRA_WAS_OUTDATED(was_router_added_t s)
{
return (s == ROUTER_WAS_TOO_OLD ||
s == ROUTER_IS_ALREADY_KNOWN ||
@@ -130,13 +131,13 @@ static INLINE int WRA_WAS_OUTDATED(was_router_added_t s)
}
/** Return true iff the outcome code in <b>s</b> indicates that the descriptor
* was flat-out rejected. */
-static INLINE int WRA_WAS_REJECTED(was_router_added_t s)
+static inline int WRA_WAS_REJECTED(was_router_added_t s)
{
return (s == ROUTER_AUTHDIR_REJECTS);
}
/** Return true iff the outcome code in <b>s</b> indicates that the descriptor
* was flat-out rejected. */
-static INLINE int WRA_NEVER_DOWNLOADABLE(was_router_added_t s)
+static inline int WRA_NEVER_DOWNLOADABLE(was_router_added_t s)
{
return (s == ROUTER_AUTHDIR_REJECTS ||
s == ROUTER_BAD_EI ||
diff --git a/src/or/routerparse.c b/src/or/routerparse.c
index f898ef8aef..f6619cb902 100644
--- a/src/or/routerparse.c
+++ b/src/or/routerparse.c
@@ -2061,7 +2061,7 @@ authority_cert_parse_from_string(const char *s, const char **end_of_string)
* object (starting with "r " at the start of a line). If none is found,
* return the start of the directory footer, or the next directory signature.
* If none is found, return the end of the string. */
-static INLINE const char *
+static inline const char *
find_start_of_next_routerstatus(const char *s)
{
const char *eos, *footer, *sig;
@@ -3930,7 +3930,7 @@ token_clear(directory_token_t *tok)
* Return <b>tok</b> on success, or a new ERR_ token if the token didn't
* conform to the syntax we wanted.
**/
-static INLINE directory_token_t *
+static inline directory_token_t *
token_check_object(memarea_t *area, const char *kwd,
directory_token_t *tok, obj_syntax o_syn)
{
@@ -3995,7 +3995,7 @@ token_check_object(memarea_t *area, const char *kwd,
* number of parsed elements into the n_args field of <b>tok</b>. Allocate
* all storage in <b>area</b>. Return the number of arguments parsed, or
* return -1 if there was an insanely high number of arguments. */
-static INLINE int
+static inline int
get_token_arguments(memarea_t *area, directory_token_t *tok,
const char *s, const char *eol)
{
diff --git a/src/or/transports.c b/src/or/transports.c
index ba2c784c2c..81b8db2508 100644
--- a/src/or/transports.c
+++ b/src/or/transports.c
@@ -105,7 +105,7 @@
static process_environment_t *
create_managed_proxy_environment(const managed_proxy_t *mp);
-static INLINE int proxy_configuration_finished(const managed_proxy_t *mp);
+static inline int proxy_configuration_finished(const managed_proxy_t *mp);
static void handle_finished_proxy(managed_proxy_t *mp);
static void parse_method_error(const char *line, int is_server_method);
@@ -713,7 +713,7 @@ register_client_proxy(const managed_proxy_t *mp)
}
/** Register the transports of managed proxy <b>mp</b>. */
-static INLINE void
+static inline void
register_proxy(const managed_proxy_t *mp)
{
if (mp->is_server)
@@ -828,7 +828,7 @@ handle_finished_proxy(managed_proxy_t *mp)
/** Return true if the configuration of the managed proxy <b>mp</b> is
finished. */
-static INLINE int
+static inline int
proxy_configuration_finished(const managed_proxy_t *mp)
{
return (mp->conf_state == PT_PROTO_CONFIGURED ||