author     Nick Mathewson <nickm@torproject.org>    2016-02-28 15:51:22 +0100
committer  Nick Mathewson <nickm@torproject.org>    2016-02-28 15:51:22 +0100
commit     69fc025e956f5a87db0860a1be54e6d3612c8074 (patch)
tree       efb5de801e05e8f02de37a907065f4ce7c083f2f /scripts
parent     88ad2f5fb215d30d07e2433ee1d9acc7bb19d0ef (diff)
parent     e2202146d16af22502fd4166ac926fefa6528dff (diff)
download   tor-69fc025e956f5a87db0860a1be54e6d3612c8074.tar.gz
           tor-69fc025e956f5a87db0860a1be54e6d3612c8074.zip
Merge remote-tracking branch 'teor/fallbacks-201602-v2'
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/maint/fallback.whitelist     |  4
-rwxr-xr-x  scripts/maint/updateFallbackDirs.py  | 92
2 files changed, 88 insertions, 8 deletions
diff --git a/scripts/maint/fallback.whitelist b/scripts/maint/fallback.whitelist
index d5ddcd2e82..6edb368345 100644
--- a/scripts/maint/fallback.whitelist
+++ b/scripts/maint/fallback.whitelist
@@ -37,7 +37,6 @@
 # https://lists.torproject.org/pipermail/tor-relays/2016-January/008555.html
 62.210.124.124:9030 orport=9001 id=86E78DD3720C78DA8673182EF96C54B162CD660C ipv6=[2001:bc8:3f23:100::1]:9001
 62.210.124.124:9130 orport=9101 id=2EBD117806EE43C3CC885A8F1E4DC60F207E7D3E ipv6=[2001:bc8:3f23:100::1]:9101
-# DirPort TBC in next consensus
 212.47.237.95:9030 orport=9001 id=3F5D8A879C58961BB45A3D26AC41B543B40236D6
 212.47.237.95:9130 orport=9101 id=6FB38EB22E57EF7ED5EF00238F6A48E553735D88
 
@@ -76,7 +75,8 @@
 81.7.14.246:80 orport=443 id=CE75BF0972ADD52AF8807602374E495C815DB304 ipv6=[2a02:180:a:51::dead]:443
 
 # https://lists.torproject.org/pipermail/tor-relays/2015-December/008384.html
-149.202.98.161:80 orport=443 id=54660C671B47E6986B465B80444414BD19E5A34B ipv6=[2001:41d0:8:4528::161]:443
+# Sent additional email to teor with fingerprint change
+149.202.98.161:80 orport=443 id=FC64CD763F8C1A319BFBBF62551684F4E1E42332 ipv6=[2001:41d0:8:4528::161]:443
 193.111.136.162:80 orport=443 id=C79552275DFCD486B942510EF663ED36ACA1A84B ipv6=[2001:4ba0:cafe:10d0::1]:443
 
 # https://lists.torproject.org/pipermail/tor-relays/2015-December/008416.html
diff --git a/scripts/maint/updateFallbackDirs.py b/scripts/maint/updateFallbackDirs.py
index fd7d350c2e..d110335de4 100755
--- a/scripts/maint/updateFallbackDirs.py
+++ b/scripts/maint/updateFallbackDirs.py
@@ -26,11 +26,17 @@ import dateutil.parser
 # bson_lazy provides bson
 #from bson import json_util
 
+from stem.descriptor.remote import DescriptorDownloader
+
 import logging
 logging.basicConfig(level=logging.DEBUG)
 
 ## Top-Level Configuration
 
+# Perform DirPort checks over IPv6?
+# If you know IPv6 works for you, set this to True
+PERFORM_IPV6_DIRPORT_CHECKS = False
+
 # Output all candidate fallbacks, or only output selected fallbacks?
 OUTPUT_CANDIDATES = False
 
@@ -73,7 +79,11 @@ MAX_LIST_FILE_SIZE = 1024 * 1024
 
 ## Eligibility Settings
 
-ADDRESS_AND_PORT_STABLE_DAYS = 120
+# Reduced due to a bug in tor where a relay submits a 0 DirPort when restarted
+# This causes OnionOO to (correctly) reset its stability timer
+# This issue is fixed in 0.2.7.7 and master.
+# Until then, the CUTOFFs below ensure a decent level of stability.
+ADDRESS_AND_PORT_STABLE_DAYS = 7
 # What time-weighted-fraction of these flags must FallbackDirs
 # Equal or Exceed?
 CUTOFF_RUNNING = .95
@@ -84,6 +94,16 @@ CUTOFF_GUARD = .95
 # .00 means no bad exits
 PERMITTED_BADEXIT = .00
 
+# Clients will time out after 30 seconds trying to download a consensus
+# So allow fallback directories half that to deliver a consensus
+# The exact download times might change based on the network connection
+# running this script, but only by a few seconds
+# There is also about a second of python overhead
+CONSENSUS_DOWNLOAD_SPEED_MAX = 15.0
+# If the relay fails a consensus check, retry the download
+# This avoids delisting a relay due to transient network conditions
+CONSENSUS_DOWNLOAD_RETRY = True
+
 ## List Length Limits
 
 # The target for these parameters is 20% of the guards in the network
@@ -93,7 +113,7 @@ FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else 0.2
 # Limit the number of fallbacks (eliminating lowest by weight)
 MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 500
 # Emit a C #error if the number of fallbacks is below
-MIN_FALLBACK_COUNT = 100
+MIN_FALLBACK_COUNT = 50
 
 ## Fallback Weight Settings
 
@@ -344,6 +364,7 @@ def onionoo_fetch(what, **kwargs):
   # Check for freshness
   if last_mod < required_freshness:
     if last_mod_date is not None:
+      # This check sometimes fails transiently, retry the script if it does
      date_message = "Outdated data: last updated " + last_mod_date
     else:
       date_message = "No data: never downloaded "
@@ -390,7 +411,7 @@ def fetch(what, **kwargs):
 ## Fallback Candidate Class
 
 class Candidate(object):
-  CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.now()
+  CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.utcnow()
                             - datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS))
 
   def __init__(self, details):
@@ -583,7 +604,7 @@ class Candidate(object):
     periods = history.keys()
     periods.sort(key = lambda x: history[x]['interval'])
-    now = datetime.datetime.now()
+    now = datetime.datetime.utcnow()
     newest = now
     for p in periods:
       h = history[p]
@@ -802,7 +823,57 @@ class Candidate(object):
   def original_fallback_weight_fraction(self, total_weight):
     return float(self.original_consensus_weight()) / total_weight
 
-  def fallbackdir_line(self, total_weight, original_total_weight):
+  @staticmethod
+  def fallback_consensus_dl_speed(dirip, dirport, nickname, max_time):
+    downloader = DescriptorDownloader()
+    start = datetime.datetime.utcnow()
+    # there appears to be about 1 second of overhead when comparing stem's
+    # internal trace time and the elapsed time calculated here
+    downloader.get_consensus(endpoints = [(dirip, dirport)]).run()
+    elapsed = (datetime.datetime.utcnow() - start).total_seconds()
+    if elapsed > max_time:
+      status = 'too slow'
+    else:
+      status = 'ok'
+    logging.debug(('Consensus download: %0.2fs %s from %s (%s:%d), '
+                   + 'max download time %0.2fs.') % (elapsed, status,
+                                                     nickname, dirip, dirport,
+                                                     max_time))
+    return elapsed
+
+  def fallback_consensus_dl_check(self):
+    ipv4_speed = Candidate.fallback_consensus_dl_speed(self.dirip,
+                                                self.dirport,
+                                                self._data['nickname'],
+                                                CONSENSUS_DOWNLOAD_SPEED_MAX)
+    if self.ipv6addr is not None and PERFORM_IPV6_DIRPORT_CHECKS:
+      # Clients assume the IPv6 DirPort is the same as the IPv4 DirPort
+      ipv6_speed = Candidate.fallback_consensus_dl_speed(self.ipv6addr,
+                                                  self.dirport,
+                                                  self._data['nickname'],
+                                                  CONSENSUS_DOWNLOAD_SPEED_MAX)
+    else:
+      ipv6_speed = None
+    # Now retry the relay if it took too long the first time
+    if (ipv4_speed > CONSENSUS_DOWNLOAD_SPEED_MAX
+        and CONSENSUS_DOWNLOAD_RETRY):
+      ipv4_speed = Candidate.fallback_consensus_dl_speed(self.dirip,
+                                                  self.dirport,
+                                                  self._data['nickname'],
+                                                  CONSENSUS_DOWNLOAD_SPEED_MAX)
+    if (self.ipv6addr is not None and PERFORM_IPV6_DIRPORT_CHECKS
+        and ipv6_speed > CONSENSUS_DOWNLOAD_SPEED_MAX
+        and CONSENSUS_DOWNLOAD_RETRY):
+      ipv6_speed = Candidate.fallback_consensus_dl_speed(self.ipv6addr,
+                                                  self.dirport,
+                                                  self._data['nickname'],
+                                                  CONSENSUS_DOWNLOAD_SPEED_MAX)
+
+    return (ipv4_speed <= CONSENSUS_DOWNLOAD_SPEED_MAX
+            and (not PERFORM_IPV6_DIRPORT_CHECKS
+                 or ipv6_speed <= CONSENSUS_DOWNLOAD_SPEED_MAX))
+
+  def fallbackdir_line(self, total_weight, original_total_weight, dl_speed_ok):
     # /*
     # nickname
     # flags
@@ -813,6 +884,7 @@ class Candidate(object):
     # "address:dirport orport=port id=fingerprint"
     # "[ipv6=addr:orport]"
     # "weight=num",
+    #
     # Multiline C comment
     s = '/*'
     s += '\n'
@@ -839,6 +911,10 @@ class Candidate(object):
     s += '\n'
     s += '*/'
     s += '\n'
+    # Comment out the fallback directory entry if it's too slow
+    # See the debug output for which address and port is failing
+    if not dl_speed_ok:
+      s += '/* Consensus download failed or was too slow:\n'
     # Multi-Line C string with trailing comma (part of a string list)
     # This makes it easier to diff the file, and remove IPv6 lines using grep
     # Integers don't need escaping
@@ -852,6 +928,9 @@ class Candidate(object):
                       cleanse_c_string(self.ipv6addr), cleanse_c_string(self.ipv6orport))
       s += '\n'
     s += '" weight=%d",'%(weight)
+    if not dl_speed_ok:
+      s += '\n'
+      s += '*/'
     return s
 
 ## Fallback Candidate List Class
@@ -1249,7 +1328,8 @@ def list_fallbacks():
     print describe_fetch_source(s)
 
   for x in candidates.fallbacks[:max_count]:
-    print x.fallbackdir_line(total_weight, pre_clamp_total_weight)
+    dl_speed_ok = x.fallback_consensus_dl_check()
+    print x.fallbackdir_line(total_weight, pre_clamp_total_weight, dl_speed_ok)
     #print json.dumps(candidates[x]._data, sort_keys=True, indent=4,
     #                 separators=(',', ': '), default=json_util.default)
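
Note: the core of this change is the consensus download timing check performed with stem's DescriptorDownloader. The sketch below is a minimal standalone illustration of that check, separate from updateFallbackDirs.py; the helper name, the 192.0.2.1:80 endpoint, and the 15-second cutoff are placeholder values for illustration, not part of the patch.

# time_one_dirport.py -- illustrative sketch only
import datetime

from stem.descriptor.remote import DescriptorDownloader

def time_consensus_download(address, dirport, max_time=15.0):
  """Fetch a consensus from a single DirPort and report whether it was fast enough."""
  downloader = DescriptorDownloader()
  start = datetime.datetime.utcnow()
  # get_consensus() returns a query object; run() blocks until the download
  # completes, and raises if it fails
  downloader.get_consensus(endpoints = [(address, dirport)]).run()
  elapsed = (datetime.datetime.utcnow() - start).total_seconds()
  return elapsed, elapsed <= max_time

if __name__ == '__main__':
  # 192.0.2.1 is a documentation address (TEST-NET-1), used here as a stand-in
  # for a fallback candidate's IPv4 address and DirPort
  elapsed, ok = time_consensus_download('192.0.2.1', 80)
  print('%0.2fs %s' % (elapsed, 'ok' if ok else 'too slow'))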