author    | Nick Mathewson <nickm@torproject.org> | 2015-12-15 12:57:57 -0500
committer | Nick Mathewson <nickm@torproject.org> | 2015-12-15 12:57:57 -0500
commit    | a7d44731d9ac831cd68f34ac640b50cdde3a60af (patch)
tree      | 88a149ac07f30c0af5be47cc7d8f9ed9d57db244 /src/or/directory.c
parent    | 54433993c7c84dc9af878ebaf8dd1deae8c595e3 (diff)
parent    | d72af1085a1d9ed6f2c9bdb5e9c85eba991db842 (diff)
download  | tor-a7d44731d9ac831cd68f34ac640b50cdde3a60af.tar.gz  tor-a7d44731d9ac831cd68f34ac640b50cdde3a60af.zip
Merge remote-tracking branch 'teor/feature4483-v10-squashed'
Diffstat (limited to 'src/or/directory.c')
-rw-r--r-- | src/or/directory.c | 479 |
1 file changed, 428 insertions, 51 deletions
diff --git a/src/or/directory.c b/src/or/directory.c
index 4e5644b854..63bbdafd3f 100644
--- a/src/or/directory.c
+++ b/src/or/directory.c
@@ -425,14 +425,17 @@ directory_pick_generic_dirserver(dirinfo_type_t type, int pds_flags,
  * Use <b>pds_flags</b> as arguments to router_pick_directory_server()
  * or router_pick_trusteddirserver().
  */
-MOCK_IMPL(void, directory_get_from_dirserver, (uint8_t dir_purpose,
-                                               uint8_t router_purpose,
-                                               const char *resource,
-                                               int pds_flags))
+MOCK_IMPL(void, directory_get_from_dirserver, (
+                            uint8_t dir_purpose,
+                            uint8_t router_purpose,
+                            const char *resource,
+                            int pds_flags,
+                            download_want_authority_t want_authority))
 {
   const routerstatus_t *rs = NULL;
   const or_options_t *options = get_options();
-  int prefer_authority = directory_fetches_from_authorities(options);
+  int prefer_authority = (directory_fetches_from_authorities(options)
+                          || want_authority == DL_WANT_AUTHORITY);
   int require_authority = 0;
   int get_via_tor = purpose_needs_anonymity(dir_purpose, router_purpose);
   dirinfo_type_t type = dir_fetch_type(dir_purpose, router_purpose, resource);
@@ -958,6 +961,12 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
     return;
   }
 
+  /* ensure we don't make excess connections when we're already downloading
+   * a consensus during bootstrap */
+  if (connection_dir_avoid_extra_connection_for_purpose(dir_purpose)) {
+    return;
+  }
+
   conn = dir_connection_new(tor_addr_family(&addr));
 
   /* set up conn so it's got all the data we need to remember */
@@ -998,6 +1007,9 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
         conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
         /* fall through */
       case 0:
+        if (connection_dir_close_consensus_conn_if_extra(conn)) {
+          return;
+        }
         /* queue the command on the outbuf */
         directory_send_command(conn, dir_purpose, 1, resource, payload,
                                payload_len,
@@ -1041,6 +1053,9 @@ directory_initiate_command_rend(const tor_addr_t *_addr,
       connection_mark_for_close(TO_CONN(conn));
       return;
     }
+    if (connection_dir_close_consensus_conn_if_extra(conn)) {
+      return;
+    }
     conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
     /* queue the command on the outbuf */
     directory_send_command(conn, dir_purpose, 0, resource,
@@ -3423,8 +3438,205 @@ connection_dir_finished_flushing(dir_connection_t *conn)
   return 0;
 }
 
+/* A helper function for connection_dir_close_consensus_conn_if_extra()
+ * and connection_dir_close_extra_consensus_conns() that returns 0 if
+ * we can't have, or don't want to close, excess consensus connections. */
+int
+connection_dir_would_close_consensus_conn_helper(void)
+{
+  const or_options_t *options = get_options();
+
+  /* we're only interested in closing excess connections if we could
+   * have created any in the first place */
+  if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+    return 0;
+  }
+
+  /* We want to close excess connections downloading a consensus.
+   * If there aren't any excess, we don't have anything to close. */
+  if (!networkstatus_consensus_has_excess_connections()) {
+    return 0;
+  }
+
+  /* If we have excess connections, but none of them are downloading a
+   * consensus, and we are still bootstrapping (that is, we have no usable
+   * consensus), we don't want to close any until one starts downloading. */
+  if (!networkstatus_consensus_is_downloading_usable_flavor()
+      && networkstatus_consensus_is_boostrapping(time(NULL))) {
+    return 0;
+  }
+
+  /* If we have just stopped bootstrapping (that is, just parsed a consensus),
+   * we might still have some excess connections hanging around. So we still
+   * have to check if we want to close any, even if we've stopped
+   * bootstrapping. */
+  return 1;
+}
+
+/* Check if we would close excess consensus connections. If we would, any
+ * new consensus connection would become excess immediately, so return 1.
+ * Otherwise, return 0. */
+int
+connection_dir_avoid_extra_connection_for_purpose(unsigned int purpose)
+{
+  const or_options_t *options = get_options();
+
+  /* We're not interested in connections that aren't fetching a consensus. */
+  if (purpose != DIR_PURPOSE_FETCH_CONSENSUS) {
+    return 0;
+  }
+
+  /* we're only interested in avoiding excess connections if we could
+   * have created any in the first place */
+  if (!networkstatus_consensus_can_use_multiple_directories(options)) {
+    return 0;
+  }
+
+  /* If there are connections downloading a consensus, and we are still
+   * bootstrapping (that is, we have no usable consensus), we can be sure that
+   * any further connections would be excess. */
+  if (networkstatus_consensus_is_downloading_usable_flavor()
+      && networkstatus_consensus_is_boostrapping(time(NULL))) {
+    return 1;
+  }
+
+  return 0;
+}
+
+/* Check if we have excess consensus download connection attempts, and close
+ * conn:
+ * - if we don't have a consensus, and we're downloading a consensus, and conn
+ *   is not downloading a consensus yet, close it;
+ * - if we do have a consensus, conn is excess, close it. */
+int
+connection_dir_close_consensus_conn_if_extra(dir_connection_t *conn)
+{
+  tor_assert(conn);
+  tor_assert(conn->base_.type == CONN_TYPE_DIR);
+
+  /* We're not interested in connections that aren't fetching a consensus. */
+  if (conn->base_.purpose != DIR_PURPOSE_FETCH_CONSENSUS) {
+    return 0;
+  }
+
+  /* The connection has already been closed */
+  if (conn->base_.marked_for_close) {
+    return 0;
+  }
+
+  if (!connection_dir_would_close_consensus_conn_helper()) {
+    return 0;
+  }
+
+  const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+                                                                  time(NULL));
+
+  /* We don't want to check other connections to see if they are downloading,
+   * as this is prone to race-conditions. So leave it for
+   * connection_dir_consider_close_extra_consensus_conns() to clean up.
+   *
+   * But if conn has just started connecting, or we have a consensus already,
+   * we can be sure it's not needed any more. */
+  if (!we_are_bootstrapping
+      || conn->base_.state == DIR_CONN_STATE_CONNECTING) {
+    connection_close_immediate(&conn->base_);
+    connection_mark_for_close(&conn->base_);
+    return -1;
+  }
+
+  return 0;
+}
+
+/* Check if we have excess consensus download connection attempts, and close
+ * them:
+ * - if we don't have a consensus, and we're downloading a consensus, keep an
+ *   earlier connection, or a connection to a fallback directory, and close
+ *   all other connections;
+ * - if we do have a consensus, close all connections: they are all excess.
+ */
+void
+connection_dir_close_extra_consensus_conns(void)
+{
+  if (!connection_dir_would_close_consensus_conn_helper()) {
+    return;
+  }
+
+  int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+                                                                  time(NULL));
+
+  const char *usable_resource = networkstatus_get_flavor_name(
+                                                  usable_consensus_flavor());
+  smartlist_t *consens_usable_conns =
+                 connection_dir_list_by_purpose_and_resource(
+                                                  DIR_PURPOSE_FETCH_CONSENSUS,
+                                                  usable_resource);
+
+  /* If we want to keep a connection that's downloading, find a connection to
+   * keep, favouring:
+   * - connections opened earlier (they are likely to have progressed further)
+   * - connections to fallbacks (to reduce the load on authorities) */
+  dir_connection_t *kept_download_conn = NULL;
+  int kept_is_authority = 0;
+  if (we_are_bootstrapping) {
+    SMARTLIST_FOREACH_BEGIN(consens_usable_conns,
+                            dir_connection_t *, d) {
+      tor_assert(d);
+      int d_is_authority = router_digest_is_trusted_dir(d->identity_digest);
+      /* keep the first connection that is past the connecting state, but
+       * prefer fallbacks. */
+      if (d->base_.state != DIR_CONN_STATE_CONNECTING) {
+        if (!kept_download_conn || (kept_is_authority && !d_is_authority)) {
+          kept_download_conn = d;
+          kept_is_authority = d_is_authority;
+          /* we've found the earliest fallback, and want to keep it regardless
+           * of any other connections */
+          if (!kept_is_authority)
+            break;
+        }
+      }
+    } SMARTLIST_FOREACH_END(d);
+  }
+
+  SMARTLIST_FOREACH_BEGIN(consens_usable_conns,
+                          dir_connection_t *, d) {
+    tor_assert(d);
+    /* don't close this connection if it's the one we want to keep */
+    if (kept_download_conn && d == kept_download_conn)
+      continue;
+    /* mark all other connections for close */
+    if (!d->base_.marked_for_close) {
+      connection_close_immediate(&d->base_);
+      connection_mark_for_close(&d->base_);
+    }
+  } SMARTLIST_FOREACH_END(d);
+
+  smartlist_free(consens_usable_conns);
+  consens_usable_conns = NULL;
+
+  /* make sure we've closed all excess connections */
+  const int final_connecting_conn_count =
+              connection_dir_count_by_purpose_resource_and_state(
+                                                  DIR_PURPOSE_FETCH_CONSENSUS,
+                                                  usable_resource,
+                                                  DIR_CONN_STATE_CONNECTING);
+  if (final_connecting_conn_count > 0) {
+    log_warn(LD_BUG, "Expected 0 consensus connections connecting after "
+             "cleanup, got %d.", final_connecting_conn_count);
+  }
+  const int expected_final_conn_count = (we_are_bootstrapping ? 1 : 0);
+  const int final_conn_count =
+              connection_dir_count_by_purpose_and_resource(
+                                                  DIR_PURPOSE_FETCH_CONSENSUS,
+                                                  usable_resource);
+  if (final_conn_count > expected_final_conn_count) {
+    log_warn(LD_BUG, "Expected %d consensus connections after cleanup, got "
+             "%d.", expected_final_conn_count, final_conn_count);
+  }
+}
+
 /** Connected handler for directory connections: begin sending data to the
- * server */
+ * server, and return 0, or, if the connection is an excess bootstrap
+ * connection, close all excess bootstrap connections.
+ * Only used when connections don't immediately connect. */
 int
 connection_dir_finished_connecting(dir_connection_t *conn)
 {
@@ -3435,31 +3647,64 @@ connection_dir_finished_connecting(dir_connection_t *conn)
   log_debug(LD_HTTP,"Dir connection to router %s:%u established.",
             conn->base_.address,conn->base_.port);
 
-  conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING; /* start flushing conn */
+  if (connection_dir_close_consensus_conn_if_extra(conn)) {
+    return -1;
+  }
+
+  /* start flushing conn */
+  conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING;
   return 0;
 }
 
 /** Decide which download schedule we want to use based on descriptor type
- * in <b>dls</b> and whether we are acting as directory <b>server</b>, and
- * then return a list of int pointers defining download delays in seconds.
- * Helper function for download_status_increment_failure() and
- * download_status_reset(). */
+ * in <b>dls</b> and <b>options</b>.
+ * Then return a list of int pointers defining download delays in seconds.
+ * Helper function for download_status_increment_failure(),
+ * download_status_reset(), and download_status_increment_attempt(). */
 static const smartlist_t *
-find_dl_schedule_and_len(download_status_t *dls, int server)
+find_dl_schedule(download_status_t *dls, const or_options_t *options)
 {
+  /* XX/teor Replace with dir_server_mode from #12538 */
+  const int dir_server = options->DirPort_set;
+  const int multi_d = networkstatus_consensus_can_use_multiple_directories(
+                                                                      options);
+  const int we_are_bootstrapping = networkstatus_consensus_is_boostrapping(
+                                                                   time(NULL));
+  const int use_fallbacks = networkstatus_consensus_can_use_extra_fallbacks(
+                                                                      options);
   switch (dls->schedule) {
     case DL_SCHED_GENERIC:
-      if (server)
-        return get_options()->TestingServerDownloadSchedule;
-      else
-        return get_options()->TestingClientDownloadSchedule;
+      if (dir_server) {
+        return options->TestingServerDownloadSchedule;
+      } else {
+        return options->TestingClientDownloadSchedule;
+      }
    case DL_SCHED_CONSENSUS:
-      if (server)
-        return get_options()->TestingServerConsensusDownloadSchedule;
-      else
-        return get_options()->TestingClientConsensusDownloadSchedule;
+      if (!multi_d) {
+        return options->TestingServerConsensusDownloadSchedule;
+      } else {
+        if (we_are_bootstrapping) {
+          if (!use_fallbacks) {
+            /* A bootstrapping client without extra fallback directories */
+            return
+             options->TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule;
+          } else if (dls->want_authority) {
+            /* A bootstrapping client with extra fallback directories, but
+             * connecting to an authority */
+            return
+             options->TestingClientBootstrapConsensusAuthorityDownloadSchedule;
+          } else {
+            /* A bootstrapping client connecting to extra fallback directories
+             */
+            return
+             options->TestingClientBootstrapConsensusFallbackDownloadSchedule;
+          }
+        } else {
+          return options->TestingClientConsensusDownloadSchedule;
+        }
+      }
    case DL_SCHED_BRIDGE:
-      return get_options()->TestingBridgeDownloadSchedule;
+      return options->TestingBridgeDownloadSchedule;
    default:
      tor_assert(0);
  }
@@ -3468,54 +3713,168 @@ find_dl_schedule_and_len(download_status_t *dls, int server)
   return NULL;
 }
 
-/** Called when an attempt to download <b>dls</b> has failed with HTTP status
+/* Find the current delay for dls based on schedule.
+ * Set dls->next_attempt_at based on now, and return the delay.
+ * Helper for download_status_increment_failure and
+ * download_status_increment_attempt. */
+STATIC int
+download_status_schedule_get_delay(download_status_t *dls,
+                                   const smartlist_t *schedule,
+                                   time_t now)
+{
+  tor_assert(dls);
+  tor_assert(schedule);
+
+  int delay = INT_MAX;
+  uint8_t dls_schedule_position = (dls->increment_on
+                                   == DL_SCHED_INCREMENT_ATTEMPT
+                                   ? dls->n_download_attempts
+                                   : dls->n_download_failures);
+
+  if (dls_schedule_position < smartlist_len(schedule))
+    delay = *(int *)smartlist_get(schedule, dls_schedule_position);
+  else if (dls_schedule_position == IMPOSSIBLE_TO_DOWNLOAD)
+    delay = INT_MAX;
+  else
+    delay = *(int *)smartlist_get(schedule, smartlist_len(schedule) - 1);
+
+  /* A negative delay makes no sense. Knowing that delay is
+   * non-negative allows us to safely do the wrapping check below. */
+  tor_assert(delay >= 0);
+
+  /* Avoid now+delay overflowing INT_MAX, by comparing with a subtraction
+   * that won't overflow (since delay is non-negative). */
+  if (delay < INT_MAX && now <= INT_MAX - delay) {
+    dls->next_attempt_at = now+delay;
+  } else {
+    dls->next_attempt_at = TIME_MAX;
+  }
+
+  return delay;
+}
+
+/* Log a debug message about item, which increments on increment_action, has
+ * incremented dls_n_download_increments times. The message varies based on
+ * was_schedule_incremented (if not, not_incremented_response is logged), and
+ * the values of increment, dls_next_attempt_at, and now.
+ * Helper for download_status_increment_failure and
+ * download_status_increment_attempt. */
+static void
+download_status_log_helper(const char *item, int was_schedule_incremented,
+                           const char *increment_action,
+                           const char *not_incremented_response,
+                           uint8_t dls_n_download_increments, int increment,
+                           time_t dls_next_attempt_at, time_t now)
+{
+  if (item) {
+    if (!was_schedule_incremented)
+      log_debug(LD_DIR, "%s %s %d time(s); I'll try again %s.",
+                item, increment_action, (int)dls_n_download_increments,
+                not_incremented_response);
+    else if (increment == 0)
+      log_debug(LD_DIR, "%s %s %d time(s); I'll try again immediately.",
+                item, increment_action, (int)dls_n_download_increments);
+    else if (dls_next_attempt_at < TIME_MAX)
+      log_debug(LD_DIR, "%s %s %d time(s); I'll try again in %d seconds.",
+                item, increment_action, (int)dls_n_download_increments,
+                (int)(dls_next_attempt_at-now));
+    else
+      log_debug(LD_DIR, "%s %s %d time(s); Giving up for a while.",
+                item, increment_action, (int)dls_n_download_increments);
+  }
+}
+
+/** Determine when a failed download attempt should be retried.
+ * Called when an attempt to download <b>dls</b> has failed with HTTP status
  * <b>status_code</b>. Increment the failure count (if the code indicates a
- * real failure) and set <b>dls</b>-\>next_attempt_at to an appropriate time
- * in the future. */
+ * real failure, or if we're a server) and set <b>dls</b>-\>next_attempt_at to
+ * an appropriate time in the future and return it.
+ * If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_ATTEMPT, increment the
+ * failure count, and return a time in the far future for the next attempt (to
+ * avoid an immediate retry). */
 time_t
 download_status_increment_failure(download_status_t *dls, int status_code,
                                   const char *item, int server, time_t now)
 {
-  const smartlist_t *schedule;
-  int increment;
+  int increment = -1;
   tor_assert(dls);
+
+  /* only count the failure if it's permanent, or we're a server */
   if (status_code != 503 || server) {
     if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD-1)
       ++dls->n_download_failures;
   }
 
-  schedule = find_dl_schedule_and_len(dls, server);
+  if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
+    /* We don't find out that a failure-based schedule has attempted a
+     * connection until that connection fails.
+     * We'll never find out about successful connections, but this doesn't
+     * matter, because schedules are reset after a successful download.
+     */
+    if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
+      ++dls->n_download_attempts;
 
-  if (dls->n_download_failures < smartlist_len(schedule))
-    increment = *(int *)smartlist_get(schedule, dls->n_download_failures);
-  else if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD)
-    increment = INT_MAX;
-  else
-    increment = *(int *)smartlist_get(schedule, smartlist_len(schedule) - 1);
+    /* only return a failure retry time if this schedule increments on failures
+     */
+    const smartlist_t *schedule = find_dl_schedule(dls, get_options());
+    increment = download_status_schedule_get_delay(dls, schedule, now);
+  }
 
-  if (increment < INT_MAX)
-    dls->next_attempt_at = now+increment;
-  else
-    dls->next_attempt_at = TIME_MAX;
+  download_status_log_helper(item, !dls->increment_on, "failed",
+                             "concurrently", dls->n_download_failures,
+                             increment, dls->next_attempt_at, now);
 
-  if (item) {
-    if (increment == 0)
-      log_debug(LD_DIR, "%s failed %d time(s); I'll try again immediately.",
-                item, (int)dls->n_download_failures);
-    else if (dls->next_attempt_at < TIME_MAX)
-      log_debug(LD_DIR, "%s failed %d time(s); I'll try again in %d seconds.",
-                item, (int)dls->n_download_failures,
-                (int)(dls->next_attempt_at-now));
-    else
-      log_debug(LD_DIR, "%s failed %d time(s); Giving up for a while.",
-                item, (int)dls->n_download_failures);
+  if (dls->increment_on == DL_SCHED_INCREMENT_ATTEMPT) {
+    /* stop this schedule retrying on failure, it will launch concurrent
+     * connections instead */
+    return TIME_MAX;
+  } else {
+    return dls->next_attempt_at;
   }
+}
+
+/** Determine when the next download attempt should be made when using an
+ * attempt-based (potentially concurrent) download schedule.
+ * Called when an attempt to download <b>dls</b> is being initiated.
+ * Increment the attempt count and set <b>dls</b>-\>next_attempt_at to an
+ * appropriate time in the future and return it.
+ * If <b>dls->increment_on</b> is DL_SCHED_INCREMENT_FAILURE, don't increment
+ * the attempts, and return a time in the far future (to avoid launching a
+ * concurrent attempt). */
+time_t
+download_status_increment_attempt(download_status_t *dls, const char *item,
+                                  time_t now)
+{
+  int delay = -1;
+  tor_assert(dls);
+
+  if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {
+    /* this schedule should retry on failure, and not launch any concurrent
+       attempts */
+    log_info(LD_BUG, "Tried to launch an attempt-based connection on a "
+             "failure-based schedule.");
+    return TIME_MAX;
+  }
+
+  if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD-1)
+    ++dls->n_download_attempts;
+
+  const smartlist_t *schedule = find_dl_schedule(dls, get_options());
+  delay = download_status_schedule_get_delay(dls, schedule, now);
+
+  download_status_log_helper(item, dls->increment_on, "attempted",
+                             "on failure", dls->n_download_attempts,
+                             delay, dls->next_attempt_at, now);
+
+  return dls->next_attempt_at;
 }
 
 /** Reset <b>dls</b> so that it will be considered downloadable
  * immediately, and/or to show that we don't need it anymore.
  *
+ * Must be called to initialise a download schedule, otherwise the zeroth item
+ * in the schedule will never be used.
+ *
  * (We find the zeroth element of the download schedule, and set
  * next_attempt_at to be the appropriate offset from 'now'. In most
  * cases this means setting it to 'now', so the item will be immediately
@@ -3524,14 +3883,16 @@ download_status_increment_failure(download_status_t *dls, int status_code,
 void
 download_status_reset(download_status_t *dls)
 {
-  if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD)
+  if (dls->n_download_failures == IMPOSSIBLE_TO_DOWNLOAD
+      || dls->n_download_attempts == IMPOSSIBLE_TO_DOWNLOAD)
     return; /* Don't reset this. */
 
-  const smartlist_t *schedule = find_dl_schedule_and_len(
-    dls, get_options()->DirPort_set);
+  const smartlist_t *schedule = find_dl_schedule(dls, get_options());
 
   dls->n_download_failures = 0;
+  dls->n_download_attempts = 0;
   dls->next_attempt_at = time(NULL) + *(int *)smartlist_get(schedule, 0);
+  /* Don't reset dls->want_authority or dls->increment_on */
 }
 
 /** Return the number of failures on <b>dls</b> since the last success (if
@@ -3542,6 +3903,22 @@ download_status_get_n_failures(const download_status_t *dls)
   return dls->n_download_failures;
 }
 
+/** Return the number of attempts to download <b>dls</b> since the last success
+ * (if any). This can differ from download_status_get_n_failures() due to
+ * outstanding concurrent attempts. */
+int
+download_status_get_n_attempts(const download_status_t *dls)
+{
+  return dls->n_download_attempts;
+}
+
+/** Return the next time to attempt to download <b>dls</b>. */
+time_t
+download_status_get_next_attempt_at(const download_status_t *dls)
+{
+  return dls->next_attempt_at;
+}
+
 /** Called when one or more routerdesc (or extrainfo, if <b>was_extrainfo</b>)
  * fetches have failed (with uppercase fingerprints listed in <b>failed</b>,
  * either as descriptor digests or as identity digests based on
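The bootstrap cleanup in connection_dir_close_extra_consensus_conns() above keeps at most one in-progress consensus fetch, preferring the earliest connection that has made it past the connecting state and preferring fallback directories over authorities. Below is a minimal standalone sketch of just that selection rule; the struct conn type, the pick_conn_to_keep() helper, and the sample data are illustrative stand-ins for tor's dir_connection_t and smartlist_t, not code from the patch.

#include <stdio.h>

struct conn {
  const char *name;
  int connecting;     /* still in the connecting state? */
  int is_authority;   /* identity digest belongs to a directory authority? */
};

/* Return the connection to keep while bootstrapping, or NULL if none has
 * progressed past the connecting state.  Connections are scanned in the
 * order they were opened. */
static const struct conn *
pick_conn_to_keep(const struct conn *conns, int n)
{
  const struct conn *kept = NULL;
  int kept_is_authority = 0;
  for (int i = 0; i < n; ++i) {
    const struct conn *c = &conns[i];
    if (c->connecting)
      continue;                 /* hasn't progressed yet; not worth keeping */
    if (!kept || (kept_is_authority && !c->is_authority)) {
      kept = c;
      kept_is_authority = c->is_authority;
      if (!kept_is_authority)
        break;                  /* earliest non-connecting fallback wins */
    }
  }
  return kept;
}

int
main(void)
{
  const struct conn conns[] = {
    { "authority-1", 0, 1 },    /* opened first, already talking */
    { "fallback-1",  1, 0 },    /* still connecting */
    { "fallback-2",  0, 0 },    /* talking, and a fallback: kept */
  };
  const struct conn *keep = pick_conn_to_keep(conns, 3);
  printf("keeping: %s\n", keep ? keep->name : "(none)");
  return 0;
}

Breaking out of the scan as soon as a non-connecting fallback is found mirrors the early break in the patch: an earlier fallback always wins, so nothing later can change the answer.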
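find_dl_schedule() above picks one of five consensus download schedules depending on whether multiple directories may be used, whether the client is still bootstrapping, whether extra fallback directories are configured, and whether this particular download is aimed at an authority. The sketch below reduces that decision tree to booleans and the option names; pick_consensus_schedule() is a hypothetical helper for illustration, not part of the patch.

#include <stdio.h>

/* Hypothetical helper mirroring the DL_SCHED_CONSENSUS branch above. */
static const char *
pick_consensus_schedule(int can_use_multiple_dirs, int bootstrapping,
                        int have_extra_fallbacks, int want_authority)
{
  if (!can_use_multiple_dirs)
    return "TestingServerConsensusDownloadSchedule";
  if (!bootstrapping)
    return "TestingClientConsensusDownloadSchedule";
  if (!have_extra_fallbacks)
    return "TestingClientBootstrapConsensusAuthorityOnlyDownloadSchedule";
  if (want_authority)
    return "TestingClientBootstrapConsensusAuthorityDownloadSchedule";
  return "TestingClientBootstrapConsensusFallbackDownloadSchedule";
}

int
main(void)
{
  /* A bootstrapping client with fallbacks configured, fetching from a
   * fallback rather than an authority: */
  printf("%s\n", pick_consensus_schedule(1, 1, 1, 0));
  return 0;
}

Each later test runs under the negation of the earlier ones, exactly as in the nested if/else in the patch.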
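download_status_schedule_get_delay() above indexes the schedule by the number of attempts (or failures), clamps past-the-end positions to the last entry, treats IMPOSSIBLE_TO_DOWNLOAD as "never", and guards the now + delay addition against overflowing INT_MAX. The following self-contained sketch shows the same arithmetic with a plain int array in place of tor's smartlist_t; the function name and the schedule values are illustrative, not tor's defaults.

#include <limits.h>
#include <stdio.h>
#include <time.h>

#define IMPOSSIBLE_TO_DOWNLOAD 255

/* Pick the delay for the given attempt/failure count, then compute the time
 * of the next attempt without letting now + delay overflow.  Also reports
 * the chosen delay through delay_out if it is non-NULL. */
static time_t
next_attempt_time(const int *schedule, int schedule_len,
                  unsigned position, time_t now, int *delay_out)
{
  int delay;

  if (position == IMPOSSIBLE_TO_DOWNLOAD)
    delay = INT_MAX;                      /* give up entirely */
  else if ((int)position < schedule_len)
    delay = schedule[position];           /* normal case: index by count */
  else
    delay = schedule[schedule_len - 1];   /* past the end: repeat last delay */

  if (delay_out)
    *delay_out = delay;

  /* Avoid overflowing now + delay by comparing with a subtraction instead. */
  if (delay < INT_MAX && now <= (time_t)INT_MAX - delay)
    return now + delay;
  return (time_t)INT_MAX;                 /* effectively "never" */
}

int
main(void)
{
  /* Illustrative delays in seconds; not tor's defaults. */
  const int schedule[] = { 0, 1, 4, 11, 3600, 10800, 25200, 54000 };
  const int len = (int)(sizeof(schedule) / sizeof(schedule[0]));
  const time_t now = time(NULL);

  for (unsigned tries = 0; tries < 10; ++tries) {
    int delay = 0;
    time_t at = next_attempt_time(schedule, len, tries, now, &delay);
    printf("after %u tries: wait %d s (next attempt at %ld)\n",
           tries, delay, (long)at);
  }
  return 0;
}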
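The new download_status_increment_attempt()/download_status_increment_failure() split means attempt-based schedules advance whenever a fetch is launched (so several fetches can be in flight at once), while failure-based schedules only learn about an attempt when it fails. Here is a simplified sketch of that bookkeeping, ignoring the 503 special case and the retry-time computation; the types and helpers are stand-ins, not tor's download_status_t API.

#include <stdio.h>

#define IMPOSSIBLE_TO_DOWNLOAD 255

enum increment_on { INCREMENT_ON_FAILURE, INCREMENT_ON_ATTEMPT };

struct dl_status {
  enum increment_on increment_on;
  unsigned n_download_attempts;
  unsigned n_download_failures;
};

static void
note_attempt(struct dl_status *dls)
{
  /* failure-based schedules never launch concurrent attempts, so there is
   * nothing to count here */
  if (dls->increment_on != INCREMENT_ON_ATTEMPT)
    return;
  if (dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD - 1)
    ++dls->n_download_attempts;
}

static void
note_failure(struct dl_status *dls)
{
  if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD - 1)
    ++dls->n_download_failures;
  /* a failure-based schedule only learns about an attempt when it fails */
  if (dls->increment_on == INCREMENT_ON_FAILURE &&
      dls->n_download_attempts < IMPOSSIBLE_TO_DOWNLOAD - 1)
    ++dls->n_download_attempts;
}

int
main(void)
{
  struct dl_status bootstrap = { INCREMENT_ON_ATTEMPT, 0, 0 };
  note_attempt(&bootstrap);   /* first concurrent fetch launched */
  note_attempt(&bootstrap);   /* second concurrent fetch launched */
  note_failure(&bootstrap);   /* one of them failed */
  printf("attempts=%u failures=%u\n",
         bootstrap.n_download_attempts, bootstrap.n_download_failures);
  return 0;
}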