From 29f15a97edb05d175b97154e0b1c96fd04485ee2 Mon Sep 17 00:00:00 2001
From: Arlo Breault
Date: Tue, 23 Sep 2014 12:19:15 -0400
Subject: Make router_pick_directory_server respect PDS_NO_EXISTING_*

---
 src/or/routerlist.c | 53 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 10 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index 8d29b89ea9..2fe007d9e7 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -65,7 +65,7 @@ static int compute_weighted_bandwidths(const smartlist_t *sl,
                                        bandwidth_weight_rule_t rule,
                                        u64_dbl_t **bandwidths_out);
 static const routerstatus_t *router_pick_directory_server_impl(
-                                          dirinfo_type_t auth, int flags);
+                              dirinfo_type_t auth, int flags, int *n_busy_out);
 static const routerstatus_t *router_pick_trusteddirserver_impl(
                 const smartlist_t *sourcelist, dirinfo_type_t auth,
                 int flags, int *n_busy_out);
@@ -1239,6 +1239,7 @@ router_get_fallback_dir_servers(void)
 const routerstatus_t *
 router_pick_directory_server(dirinfo_type_t type, int flags)
 {
+  int busy = 0;
   const routerstatus_t *choice;
   if (get_options()->PreferTunneledDirConns)
     flags |= PDS_PREFER_TUNNELED_DIR_CONNS_;
@@ -1246,17 +1247,26 @@ router_pick_directory_server(dirinfo_type_t type, int flags)
   if (!routerlist)
     return NULL;
 
-  choice = router_pick_directory_server_impl(type, flags);
+  choice = router_pick_directory_server_impl(type, flags, &busy);
   if (choice || !(flags & PDS_RETRY_IF_NO_SERVERS))
     return choice;
 
+  if (busy) {
+    /* If the reason that we got no server is that servers are "busy",
+     * we must be excluding good servers because we already have serverdesc
+     * fetches with them.  Do not mark down servers up because of this. */
+    tor_assert((flags & (PDS_NO_EXISTING_SERVERDESC_FETCH|
+                         PDS_NO_EXISTING_MICRODESC_FETCH)));
+    return NULL;
+  }
+
   log_info(LD_DIR,
            "No reachable router entries for dirservers. "
            "Trying them all again.");
   /* mark all authdirservers as up again */
   mark_all_dirservers_up(fallback_dir_servers);
   /* try again */
-  choice = router_pick_directory_server_impl(type, flags);
+  choice = router_pick_directory_server_impl(type, flags, NULL);
   return choice;
 }
@@ -1375,7 +1385,8 @@ router_pick_dirserver_generic(smartlist_t *sourcelist,
  * that we can use with BEGINDIR. */
 static const routerstatus_t *
-router_pick_directory_server_impl(dirinfo_type_t type, int flags)
+router_pick_directory_server_impl(dirinfo_type_t type, int flags,
+                                  int *n_busy_out)
 {
   const or_options_t *options = get_options();
   const node_t *result;
@@ -1384,11 +1395,13 @@ router_pick_directory_server_impl(dirinfo_type_t type, int flags)
   smartlist_t *overloaded_direct, *overloaded_tunnel;
   time_t now = time(NULL);
   const networkstatus_t *consensus = networkstatus_get_latest_consensus();
-  int requireother = ! (flags & PDS_ALLOW_SELF);
-  int fascistfirewall = ! (flags & PDS_IGNORE_FASCISTFIREWALL);
-  int prefer_tunnel = (flags & PDS_PREFER_TUNNELED_DIR_CONNS_);
-  int for_guard = (flags & PDS_FOR_GUARD);
-  int try_excluding = 1, n_excluded = 0;
+  const int requireother = ! (flags & PDS_ALLOW_SELF);
+  const int fascistfirewall = ! (flags & PDS_IGNORE_FASCISTFIREWALL);
+  const int prefer_tunnel = (flags & PDS_PREFER_TUNNELED_DIR_CONNS_);
+  const int no_serverdesc_fetching = (flags & PDS_NO_EXISTING_SERVERDESC_FETCH);
+  const int no_microdesc_fetching = (flags & PDS_NO_EXISTING_MICRODESC_FETCH);
+  const int for_guard = (flags & PDS_FOR_GUARD);
+  int try_excluding = 1, n_excluded = 0, n_busy = 0;
 
   if (!consensus)
     return NULL;
@@ -1435,7 +1448,24 @@ router_pick_directory_server_impl(dirinfo_type_t type, int flags)
     }
 
     /* XXXX IP6 proposal 118 */
-    tor_addr_from_ipv4h(&addr, node->rs->addr);
+    tor_addr_from_ipv4h(&addr, status->addr);
+
+    if (no_serverdesc_fetching && (
+       connection_get_by_type_addr_port_purpose(
+         CONN_TYPE_DIR, &addr, status->dir_port, DIR_PURPOSE_FETCH_SERVERDESC)
+    || connection_get_by_type_addr_port_purpose(
+         CONN_TYPE_DIR, &addr, status->dir_port, DIR_PURPOSE_FETCH_EXTRAINFO)
+    )) {
+      ++n_busy;
+      continue;
+    }
+
+    if (no_microdesc_fetching && connection_get_by_type_addr_port_purpose(
+      CONN_TYPE_DIR, &addr, status->dir_port, DIR_PURPOSE_FETCH_MICRODESC)
+    ) {
+      ++n_busy;
+      continue;
+    }
 
     is_overloaded = status->last_dir_503_at + DIR_503_TIMEOUT > now;
@@ -1476,6 +1506,9 @@ router_pick_directory_server_impl(dirinfo_type_t type, int flags)
   smartlist_free(overloaded_direct);
   smartlist_free(overloaded_tunnel);
 
+  if (n_busy_out)
+    *n_busy_out = n_busy;
+
   if (result == NULL && try_excluding && !options->StrictNodes && n_excluded) {
     /* If we got no result, and we are excluding nodes, and StrictNodes is
      * not set, try again without excluding nodes. */
-- 
cgit v1.2.3-54-g00ecf

From 21d5dbd474d5dad10a2bfa800df078f7fdc8c40b Mon Sep 17 00:00:00 2001
From: Arlo Breault
Date: Tue, 23 Sep 2014 12:21:08 -0400
Subject: Refactor initiate_descriptor_downloads() to be safer

(It's smarter to use asprintf and join than character pointers and a long
buffer.)
---
 src/or/routerlist.c | 46 +++++++++++++++++++++++-----------------------
 src/or/routerlist.h |  4 ++++
 2 files changed, 27 insertions(+), 23 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index 2fe007d9e7..96814ca778 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4289,18 +4289,13 @@ list_pending_fpsk_downloads(fp_pair_map_t *result)
  * range.)  If source is given, download from source;
  * otherwise, download from an appropriate random directory server.
  */
-static void
-initiate_descriptor_downloads(const routerstatus_t *source,
-                              int purpose,
-                              smartlist_t *digests,
-                              int lo, int hi, int pds_flags)
+MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
+          (const routerstatus_t *source, int purpose, smartlist_t *digests,
+           int lo, int hi, int pds_flags))
 {
-  int i, n = hi-lo;
   char *resource, *cp;
-  size_t r_len;
   int digest_len = DIGEST_LEN, enc_digest_len = HEX_DIGEST_LEN;
-  char sep = '+';
+  char *sep = "+";
   int b64_256 = 0;
 
   if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
@@ -4308,32 +4303,37 @@ initiate_descriptor_downloads(const routerstatus_t *source,
     /* Microdescriptors are downloaded by "-"-separated base64-encoded
      * 256-bit digests. */
     digest_len = DIGEST256_LEN;
     enc_digest_len = BASE64_DIGEST256_LEN;
-    sep = '-';
+    sep = "-";
     b64_256 = 1;
   }
 
-  if (n <= 0)
-    return;
   if (lo < 0)
     lo = 0;
   if (hi > smartlist_len(digests))
     hi = smartlist_len(digests);
 
-  r_len = 8 + (enc_digest_len+1)*n;
-  cp = resource = tor_malloc(r_len);
-  memcpy(cp, "d/", 2);
-  cp += 2;
-  for (i = lo; i < hi; ++i) {
+  if (hi-lo <= 0)
+    return;
+
+  digest_len += 1; // for the NULL
+  smartlist_t *tmp = smartlist_new();
+
+  for (; lo < hi; ++lo) {
+    cp = tor_malloc(enc_digest_len);
     if (b64_256) {
-      digest256_to_base64(cp, smartlist_get(digests, i));
+      digest256_to_base64(cp, smartlist_get(digests, lo));
     } else {
-      base16_encode(cp, r_len-(cp-resource),
-                    smartlist_get(digests,i), digest_len);
+      base16_encode(cp, enc_digest_len, smartlist_get(digests, lo), digest_len);
     }
-    cp += enc_digest_len;
-    *cp++ = sep;
+    smartlist_add(tmp, cp);
   }
-  memcpy(cp-1, ".z", 3);
+
+  cp = smartlist_join_strings(tmp, sep, 0, NULL);
+  tor_asprintf(&resource, "d/%s.z", cp);
+
+  SMARTLIST_FOREACH(tmp, char *, cp1, tor_free(cp1));
+  smartlist_free(tmp);
+  tor_free(cp);
 
   if (source) {
     /* We know which authority we want. */
diff --git a/src/or/routerlist.h b/src/or/routerlist.h
index cfa8683861..1e8b7c9721 100644
--- a/src/or/routerlist.h
+++ b/src/or/routerlist.h
@@ -211,6 +211,10 @@ STATIC int choose_array_element_by_weight(const u64_dbl_t *entries,
                                           int n_entries);
 STATIC void scale_array_elements_to_u64(u64_dbl_t *entries, int n_entries,
                                         uint64_t *total_out);
+MOCK_DECL(STATIC void, initiate_descriptor_downloads,
+          (const routerstatus_t *source, int purpose, smartlist_t *digests,
+           int lo, int hi, int pds_flags));
+
 #endif
 
 #endif
-- 
cgit v1.2.3-54-g00ecf

From 5ed5ac185bf6f30438af1638f30e04418ed27aff Mon Sep 17 00:00:00 2001
From: Arlo Breault
Date: Tue, 23 Sep 2014 12:22:28 -0400
Subject: Send more descriptor requests per attempt when using tunneled
 connections

---
 src/or/directory.c  |   3 +-
 src/or/routerlist.c | 152 +++++++++++++++++++++++++++-------------------------
 2 files changed, 81 insertions(+), 74 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/directory.c b/src/or/directory.c
index 6fc5c206a1..7f272edaae 100644
--- a/src/or/directory.c
+++ b/src/or/directory.c
@@ -1261,7 +1261,8 @@ directory_send_command(dir_connection_t *conn,
     return;
   }
 
-  if (strlen(proxystring) + strlen(url) >= 4096) {
+  /* warn in the non-tunneled case */
+  if (direct && (strlen(proxystring) + strlen(url) >= 4096)) {
     log_warn(LD_BUG,
              "Squid does not like URLs longer than 4095 bytes, and this "
              "one is %d bytes long: %s%s",
diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index 96814ca778..5bcfdb45bf 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4354,8 +4354,16 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
  *   4058/41 (40 for the hash and 1 for the + that separates them) => 98
  * So use 96 because it's a nice number.
  */
-#define MAX_DL_PER_REQUEST 96
-#define MAX_MICRODESC_DL_PER_REQUEST 92
+int
+max_dl_per_request(const or_options_t *options, int purpose)
+{
+  int max = 96;
+  if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
+    max = options->TunnelDirConns ? 1000 : 92;
+  }
+  return max;
+}
+
 /** Don't split our requests so finely that we are requesting fewer than
  * this number per server. */
 #define MIN_DL_PER_REQUEST 4
@@ -4377,92 +4385,89 @@ launch_descriptor_downloads(int purpose,
                             smartlist_t *downloadable,
                             const routerstatus_t *source, time_t now)
 {
-  int should_delay = 0, n_downloadable;
   const or_options_t *options = get_options();
   const char *descname;
+  const int fetch_microdesc = (purpose == DIR_PURPOSE_FETCH_MICRODESC);
 
-  tor_assert(purpose == DIR_PURPOSE_FETCH_SERVERDESC ||
-             purpose == DIR_PURPOSE_FETCH_MICRODESC);
+  tor_assert(fetch_microdesc || purpose == DIR_PURPOSE_FETCH_SERVERDESC);
+  descname = fetch_microdesc ? "microdesc" : "routerdesc";
 
-  descname = (purpose == DIR_PURPOSE_FETCH_SERVERDESC) ?
-    "routerdesc" : "microdesc";
+  int n_downloadable = smartlist_len(downloadable);
+  if (!n_downloadable)
+    return;
 
-  n_downloadable = smartlist_len(downloadable);
   if (!directory_fetches_dir_info_early(options)) {
     if (n_downloadable >= MAX_DL_TO_DELAY) {
       log_debug(LD_DIR,
                 "There are enough downloadable %ss to launch requests.",
                 descname);
-      should_delay = 0;
     } else {
-      should_delay = (last_descriptor_download_attempted +
-                      options->TestingClientMaxIntervalWithoutRequest) > now;
-      if (!should_delay && n_downloadable) {
-        if (last_descriptor_download_attempted) {
-          log_info(LD_DIR,
-                   "There are not many downloadable %ss, but we've "
-                   "been waiting long enough (%d seconds). Downloading.",
-                   descname,
-                   (int)(now-last_descriptor_download_attempted));
-        } else {
-          log_info(LD_DIR,
-                   "There are not many downloadable %ss, but we haven't "
-                   "tried downloading descriptors recently. Downloading.",
-                   descname);
-        }
+
+      /* should delay */
+      if ((last_descriptor_download_attempted +
+          options->TestingClientMaxIntervalWithoutRequest) > now)
+        return;
+
+      if (last_descriptor_download_attempted) {
+        log_info(LD_DIR,
+                 "There are not many downloadable %ss, but we've "
+                 "been waiting long enough (%d seconds). Downloading.",
+                 descname,
+                 (int)(now-last_descriptor_download_attempted));
+      } else {
+        log_info(LD_DIR,
+                 "There are not many downloadable %ss, but we haven't "
+                 "tried downloading descriptors recently. Downloading.",
+                 descname);
       }
+
     }
   }
 
-  if (! should_delay && n_downloadable) {
-    int i, n_per_request;
-    const char *req_plural = "", *rtr_plural = "";
-    int pds_flags = PDS_RETRY_IF_NO_SERVERS;
-    if (! authdir_mode_any_nonhidserv(options)) {
-      /* If we wind up going to the authorities, we want to only open one
-       * connection to each authority at a time, so that we don't overload
-       * them.  We do this by setting PDS_NO_EXISTING_SERVERDESC_FETCH
-       * regardless of whether we're a cache or not; it gets ignored if we're
-       * not calling router_pick_trusteddirserver.
-       *
-       * Setting this flag can make initiate_descriptor_downloads() ignore
-       * requests.  We need to make sure that we do in fact call
-       * update_router_descriptor_downloads() later on, once the connections
-       * have succeeded or failed.
-       */
-      pds_flags |= (purpose == DIR_PURPOSE_FETCH_MICRODESC) ?
-        PDS_NO_EXISTING_MICRODESC_FETCH :
-        PDS_NO_EXISTING_SERVERDESC_FETCH;
-    }
+  int i, n_per_request, max_dl_per_req;
+  const char *req_plural = "", *rtr_plural = "";
+  int pds_flags = PDS_RETRY_IF_NO_SERVERS;
+  if (!authdir_mode_any_nonhidserv(options) || fetch_microdesc) {
+    /* If we wind up going to the authorities, we want to only open one
+     * connection to each authority at a time, so that we don't overload
+     * them.  We do this by setting PDS_NO_EXISTING_SERVERDESC_FETCH
+     * regardless of whether we're a cache or not.
+     *
+     * Setting this flag can make initiate_descriptor_downloads() ignore
+     * requests.  We need to make sure that we do in fact call
+     * update_router_descriptor_downloads() later on, once the connections
+     * have succeeded or failed.
+     */
+    pds_flags |= fetch_microdesc ?
+      PDS_NO_EXISTING_MICRODESC_FETCH :
+      PDS_NO_EXISTING_SERVERDESC_FETCH;
+  }
 
-    n_per_request = CEIL_DIV(n_downloadable, MIN_REQUESTS);
-    if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
-      if (n_per_request > MAX_MICRODESC_DL_PER_REQUEST)
-        n_per_request = MAX_MICRODESC_DL_PER_REQUEST;
-    } else {
-      if (n_per_request > MAX_DL_PER_REQUEST)
-        n_per_request = MAX_DL_PER_REQUEST;
-    }
-    if (n_per_request < MIN_DL_PER_REQUEST)
-      n_per_request = MIN_DL_PER_REQUEST;
-
-    if (n_downloadable > n_per_request)
-      req_plural = rtr_plural = "s";
-    else if (n_downloadable > 1)
-      rtr_plural = "s";
-
-    log_info(LD_DIR,
-             "Launching %d request%s for %d %s%s, %d at a time",
-             CEIL_DIV(n_downloadable, n_per_request), req_plural,
-             n_downloadable, descname, rtr_plural, n_per_request);
-    smartlist_sort_digests(downloadable);
-    for (i=0; i < n_downloadable; i += n_per_request) {
-      initiate_descriptor_downloads(source, purpose,
-                                    downloadable, i, i+n_per_request,
-                                    pds_flags);
-    }
-    last_descriptor_download_attempted = now;
+  n_per_request = CEIL_DIV(n_downloadable, MIN_REQUESTS);
+  max_dl_per_req = max_dl_per_request(options, purpose);
+
+  if (n_per_request > max_dl_per_req)
+    n_per_request = max_dl_per_req;
+
+  if (n_per_request < MIN_DL_PER_REQUEST)
+    n_per_request = MIN_DL_PER_REQUEST;
+
+  if (n_downloadable > n_per_request)
+    req_plural = rtr_plural = "s";
+  else if (n_downloadable > 1)
+    rtr_plural = "s";
+
+  log_info(LD_DIR,
+           "Launching %d request%s for %d %s%s, %d at a time",
+           CEIL_DIV(n_downloadable, n_per_request), req_plural,
+           n_downloadable, descname, rtr_plural, n_per_request);
+  smartlist_sort_digests(downloadable);
+  for (i=0; i < n_downloadable; i += n_per_request) {
+    initiate_descriptor_downloads(source, purpose,
+                                  downloadable, i, i+n_per_request,
+                                  pds_flags);
   }
+  last_descriptor_download_attempted = now;
 }
 
 /** For any descriptor that we want that's currently listed in
@@ -4698,9 +4703,10 @@ update_extrainfo_downloads(time_t now)
            n_no_ei, n_have, n_delay, n_pending, smartlist_len(wanted));
 
   smartlist_shuffle(wanted);
-  for (i = 0; i < smartlist_len(wanted); i += MAX_DL_PER_REQUEST) {
+  int max_dl_per_req = max_dl_per_request(options, DIR_PURPOSE_FETCH_EXTRAINFO);
+  for (i = 0; i < smartlist_len(wanted); i += max_dl_per_req) {
     initiate_descriptor_downloads(NULL, DIR_PURPOSE_FETCH_EXTRAINFO,
-                                  wanted, i, i + MAX_DL_PER_REQUEST,
+                                  wanted, i, i+max_dl_per_req,
           PDS_RETRY_IF_NO_SERVERS|PDS_NO_EXISTING_SERVERDESC_FETCH);
   }
 
-- 
cgit v1.2.3-54-g00ecf

From 06bda506003826bf9a28aec3afe0b7b1ae6cc9c0 Mon Sep 17 00:00:00 2001
From: Nick Mathewson
Date: Tue, 23 Sep 2014 12:32:02 -0400
Subject: fixup! Download microdescriptors if you're a cache

---
 src/or/routerlist.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index 5bcfdb45bf..ff2f54de87 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4295,8 +4295,9 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
 {
   char *resource, *cp;
   int digest_len = DIGEST_LEN, enc_digest_len = HEX_DIGEST_LEN;
-  char *sep = "+";
+  const char *sep = "+";
   int b64_256 = 0;
+  smartlist_t *tmp;
 
   if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
     /* Microdescriptors are downloaded by "-"-separated base64-encoded
@@ -4316,7 +4317,7 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
     return;
 
   digest_len += 1; // for the NULL
-  smartlist_t *tmp = smartlist_new();
+  tmp = smartlist_new();
 
   for (; lo < hi; ++lo) {
     cp = tor_malloc(enc_digest_len);
-- 
cgit v1.2.3-54-g00ecf

From 02464694b2fa901e04b8a8f1c467a0773e6d4c27 Mon Sep 17 00:00:00 2001
From: Nick Mathewson
Date: Tue, 23 Sep 2014 12:34:17 -0400
Subject: fixup! Send more descriptor requests per attempt when using tunneled
 connections

Compilation fixes
---
 src/or/routerlist.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index ff2f54de87..e0cb5174c3 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4355,7 +4355,7 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
  *   4058/41 (40 for the hash and 1 for the + that separates them) => 98
  * So use 96 because it's a nice number.
  */
-int
+static int
 max_dl_per_request(const or_options_t *options, int purpose)
 {
   int max = 96;
@@ -4389,11 +4389,15 @@ launch_descriptor_downloads(int purpose,
   const or_options_t *options = get_options();
   const char *descname;
   const int fetch_microdesc = (purpose == DIR_PURPOSE_FETCH_MICRODESC);
+  int n_downloadable = smartlist_len(downloadable);
+
+  int i, n_per_request, max_dl_per_req;
+  const char *req_plural = "", *rtr_plural = "";
+  int pds_flags = PDS_RETRY_IF_NO_SERVERS;
 
   tor_assert(fetch_microdesc || purpose == DIR_PURPOSE_FETCH_SERVERDESC);
   descname = fetch_microdesc ? "microdesc" : "routerdesc";
 
-  int n_downloadable = smartlist_len(downloadable);
   if (!n_downloadable)
     return;
 
@@ -4425,9 +4429,6 @@ launch_descriptor_downloads(int purpose,
     }
   }
 
-  int i, n_per_request, max_dl_per_req;
-  const char *req_plural = "", *rtr_plural = "";
-  int pds_flags = PDS_RETRY_IF_NO_SERVERS;
   if (!authdir_mode_any_nonhidserv(options) || fetch_microdesc) {
     /* If we wind up going to the authorities, we want to only open one
      * connection to each authority at a time, so that we don't overload
@@ -4648,7 +4649,7 @@ update_extrainfo_downloads(time_t now)
   routerlist_t *rl;
   smartlist_t *wanted;
   digestmap_t *pending;
-  int old_routers, i;
+  int old_routers, i, max_dl_per_req;
   int n_no_ei = 0, n_pending = 0, n_have = 0, n_delay = 0;
   if (! options->DownloadExtraInfo)
     return;
@@ -4704,7 +4705,8 @@ update_extrainfo_downloads(time_t now)
            n_no_ei, n_have, n_delay, n_pending, smartlist_len(wanted));
 
   smartlist_shuffle(wanted);
-  int max_dl_per_req = max_dl_per_request(options, DIR_PURPOSE_FETCH_EXTRAINFO);
+
+  max_dl_per_req = max_dl_per_request(options, DIR_PURPOSE_FETCH_EXTRAINFO);
   for (i = 0; i < smartlist_len(wanted); i += max_dl_per_req) {
     initiate_descriptor_downloads(NULL, DIR_PURPOSE_FETCH_EXTRAINFO,
                                   wanted, i, i+max_dl_per_req,
-- 
cgit v1.2.3-54-g00ecf

From 55b21b366c4a8c237dda0a967c0c499e18fb0b4c Mon Sep 17 00:00:00 2001
From: Nick Mathewson
Date: Tue, 23 Sep 2014 12:47:39 -0400
Subject: fixup! Make router_pick_directory_server respect PDS_NO_EXISTING_*

Document n_busy_out, and set it correctly when we goto
retry_without_exclude.
---
 src/or/routerlist.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index e0cb5174c3..e65898636a 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -1383,6 +1383,10 @@ router_pick_dirserver_generic(smartlist_t *sourcelist,
  *
  * If the PDS_PREFER_TUNNELED_DIR_CONNS_ flag is set, prefer directory servers
  * that we can use with BEGINDIR.
+ *
+ * If n_busy_out is provided, set *n_busy_out to the number of
+ * directories that we excluded for no other reason than
+ * PDS_NO_EXISTING_SERVERDESC_FETCH or PDS_NO_EXISTING_MICRODESC_FETCH.
  */
 static const routerstatus_t *
 router_pick_directory_server_impl(dirinfo_type_t type, int flags,
@@ -1506,17 +1510,18 @@ router_pick_directory_server_impl(dirinfo_type_t type, int flags,
   smartlist_free(overloaded_direct);
   smartlist_free(overloaded_tunnel);
 
-  if (n_busy_out)
-    *n_busy_out = n_busy;
-
   if (result == NULL && try_excluding && !options->StrictNodes && n_excluded) {
     /* If we got no result, and we are excluding nodes, and StrictNodes is
      * not set, try again without excluding nodes. */
     try_excluding = 0;
     n_excluded = 0;
+    n_busy = 0;
     goto retry_without_exclude;
   }
 
+  if (n_busy_out)
+    *n_busy_out = n_busy;
+
   return result ? result->rs : NULL;
 }
-- 
cgit v1.2.3-54-g00ecf

From 0fdfdae7e3a88a9172a51f36ac6b536b5687d401 Mon Sep 17 00:00:00 2001
From: Nick Mathewson
Date: Tue, 23 Sep 2014 12:56:16 -0400
Subject: fixup! Refactor initiate_descriptor_downloads() to be safer

Calculate digest_len correctly.

Also, refactor setting of initial variables to look a little nicer.
---
 src/or/routerlist.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index e65898636a..efa4abbac1 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4299,18 +4299,23 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
   int lo, int hi, int pds_flags))
 {
   char *resource, *cp;
-  int digest_len = DIGEST_LEN, enc_digest_len = HEX_DIGEST_LEN;
-  const char *sep = "+";
-  int b64_256 = 0;
+  int digest_len, enc_digest_len;
+  const char *sep;
+  int b64_256;
   smartlist_t *tmp;
 
   if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
     /* Microdescriptors are downloaded by "-"-separated base64-encoded
      * 256-bit digests. */
     digest_len = DIGEST256_LEN;
-    enc_digest_len = BASE64_DIGEST256_LEN;
+    enc_digest_len = BASE64_DIGEST256_LEN + 1;
     sep = "-";
     b64_256 = 1;
+  } else {
+    digest_len = DIGEST_LEN;
+    enc_digest_len = HEX_DIGEST_LEN + 1;
+    sep = "+";
+    b64_256 = 0;
   }
 
   if (lo < 0)
@@ -4321,7 +4326,6 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
   if (hi-lo <= 0)
     return;
 
-  digest_len += 1; // for the NULL
   tmp = smartlist_new();
 
   for (; lo < hi; ++lo) {
-- 
cgit v1.2.3-54-g00ecf

From 055ad9c5fb0de4295ccb05357d6a2d8ad29d03e4 Mon Sep 17 00:00:00 2001
From: Nick Mathewson
Date: Tue, 23 Sep 2014 12:57:10 -0400
Subject: fixup! Send more descriptor requests per attempt when using tunneled
 connections

Limit the number of simultaneous connections to a single router for
server descriptors too.
---
 src/or/routerlist.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index efa4abbac1..ec9e801ef2 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4438,7 +4438,7 @@ launch_descriptor_downloads(int purpose,
     }
   }
 
-  if (!authdir_mode_any_nonhidserv(options) || fetch_microdesc) {
+  if (!authdir_mode_any_nonhidserv(options)) {
     /* If we wind up going to the authorities, we want to only open one
      * connection to each authority at a time, so that we don't overload
      * them.  We do this by setting PDS_NO_EXISTING_SERVERDESC_FETCH
-- 
cgit v1.2.3-54-g00ecf

From 6523eff9b3b5e06521da010e238b4cd23ed24e82 Mon Sep 17 00:00:00 2001
From: Nick Mathewson
Date: Tue, 23 Sep 2014 13:04:22 -0400
Subject: Send long URLs when requesting ordinary server descriptors too.

---
 src/or/routerlist.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index ec9e801ef2..e0a55d898f 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4369,7 +4369,10 @@ max_dl_per_request(const or_options_t *options, int purpose)
 {
   int max = 96;
   if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
-    max = options->TunnelDirConns ? 1000 : 92;
+    max = 92;
+  }
+  if (options->TunnelDirConns) {
+    max = 1000;
   }
   return max;
 }
-- 
cgit v1.2.3-54-g00ecf

From ac9b0a3110ea4eea63133c6d2e3572b2cfd22bd6 Mon Sep 17 00:00:00 2001
From: Nick Mathewson
Date: Mon, 29 Sep 2014 10:56:38 -0400
Subject: Try to make max_dl_per_request a bit smarter

---
 src/or/routerlist.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

(limited to 'src/or/routerlist.c')

diff --git a/src/or/routerlist.c b/src/or/routerlist.c
index e0a55d898f..6b1bacae74 100644
--- a/src/or/routerlist.c
+++ b/src/or/routerlist.c
@@ -4358,21 +4358,25 @@ MOCK_IMPL(STATIC void, initiate_descriptor_downloads,
   tor_free(resource);
 }
 
-/** Max amount of hashes to download per request.
- * Since squid does not like URLs >= 4096 bytes we limit it to 96.
- *   4096 - strlen(http://255.255.255.255/tor/server/d/.z) == 4058
- *   4058/41 (40 for the hash and 1 for the + that separates them) => 98
- * So use 96 because it's a nice number.
+/** Return the max number of hashes to put in a URL for a given request.
  */
 static int
 max_dl_per_request(const or_options_t *options, int purpose)
 {
+  /* Since squid does not like URLs >= 4096 bytes we limit it to 96.
+   *   4096 - strlen(http://255.255.255.255/tor/server/d/.z) == 4058
+   *   4058/41 (40 for the hash and 1 for the + that separates them) => 98
+   * So use 96 because it's a nice number.
+   */
   int max = 96;
   if (purpose == DIR_PURPOSE_FETCH_MICRODESC) {
    max = 92;
  }
-  if (options->TunnelDirConns) {
-    max = 1000;
+  /* If we're going to tunnel our connections, we can ask for a lot more
+   * in a request. */
+  if (options->TunnelDirConns &&
+      !directory_fetches_from_authorities(options)) {
+    max = 500;
   }
   return max;
 }
-- 
cgit v1.2.3-54-g00ecf
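A standalone sanity check of the URL-budget arithmetic quoted in the max_dl_per_request() comment above. This sketch is illustrative only and is not part of the patch series; the skeleton URL, the 4096-byte Squid limit, and the 40-hex-characters-plus-one-separator figure are taken straight from that comment.

/* Illustrative sketch (plain C, not Tor code): recompute the per-URL
 * digest budget described in the max_dl_per_request() comment. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
  /* Worst-case URL skeleton from the comment, with no digests yet. */
  const char *skeleton = "http://255.255.255.255/tor/server/d/.z";
  const int url_limit = 4096;                 /* Squid rejects longer URLs. */
  const int budget = url_limit - (int)strlen(skeleton);  /* 4096 - 38 = 4058 */
  const int per_digest = 40 + 1;              /* 40 hex chars + '+' separator */

  printf("budget = %d bytes, fits %d digests per request\n",
         budget, budget / per_digest);        /* prints 4058 and 98 */
  return 0;
}

Integer division gives 98 digests per 4096-byte URL, which the series rounds down to 96 for plain server descriptors (92 for the longer base64 microdescriptor digests) and raises to 500 once directory requests are tunneled and not fetched directly from the authorities, since no proxy such as Squid then sits on the path.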